Skip to content

Commit 972d597

Browse files
Merge pull request #91 from adam-cattermole/reconcile-pdb
2 parents 31388de + ad29718 commit 972d597

14 files changed

+405
-45
lines changed

Makefile

+12-1
Original file line numberDiff line numberDiff line change
@@ -303,14 +303,25 @@ local-setup: ## Deploy operator in local kind cluster
303303
$(MAKE) docker-build
304304
@echo "Deploying Limitador control plane"
305305
$(KIND) load docker-image ${IMG} --name ${KIND_CLUSTER_NAME}
306-
make deploy-develmode
306+
$(MAKE) deploy-develmode
307307
@echo "Wait for all deployments to be up"
308308
kubectl -n limitador-operator-system wait --timeout=300s --for=condition=Available deployments --all
309309

310310
.PHONY: local-cleanup
311311
local-cleanup: ## Clean up local kind cluster
312312
$(MAKE) kind-delete-cluster
313313

314+
.PHONY: local-redeploy
315+
local-redeploy: export IMG := limitador-operator:dev
316+
local-redeploy: ## re-deploy operator in local kind cluster
317+
$(MAKE) docker-build
318+
@echo "Deploying Limitador control plane"
319+
$(KIND) load docker-image ${IMG} --name ${KIND_CLUSTER_NAME}
320+
$(MAKE) deploy-develmode
321+
kubectl rollout restart deployment -n limitador-operator-system limitador-operator-controller-manager
322+
@echo "Wait for all deployments to be up"
323+
kubectl -n limitador-operator-system wait --timeout=300s --for=condition=Available deployments --all
324+
314325
##@ Code Style
315326

316327
.PHONY: run-lint

api/v1alpha1/limitador_types.go

+19
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@ import (
2424
"github.com/google/go-cmp/cmp"
2525
corev1 "k8s.io/api/core/v1"
2626
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
27+
"k8s.io/apimachinery/pkg/util/intstr"
2728

2829
"github.com/kuadrant/limitador-operator/pkg/helpers"
2930
)
@@ -61,6 +62,9 @@ type LimitadorSpec struct {
6162

6263
// +optional
6364
Limits []RateLimit `json:"limits,omitempty"`
65+
66+
// +optional
67+
PodDisruptionBudget *PodDisruptionBudgetType `json:"pdb,omitempty"`
6468
}
6569

6670
//+kubebuilder:object:root=true
@@ -288,3 +292,18 @@ func (s *LimitadorStatus) Equals(other *LimitadorStatus, logger logr.Logger) boo
288292
func init() {
289293
SchemeBuilder.Register(&Limitador{}, &LimitadorList{})
290294
}
295+
296+
type PodDisruptionBudgetType struct {
297+
// An eviction is allowed if at most "maxUnavailable" limitador pods
298+
// are unavailable after the eviction, i.e. even in absence of
299+
// the evicted pod. For example, one can prevent all voluntary evictions
300+
// by specifying 0. This is a mutually exclusive setting with "minAvailable".
301+
// +optional
302+
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
303+
// An eviction is allowed if at least "minAvailable" limitador pods will
304+
// still be available after the eviction, i.e. even in the absence of
305+
// the evicted pod. So for example you can prevent all voluntary
306+
// evictions by specifying "100%".
307+
// +optional
308+
MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"`
309+
}

api/v1alpha1/zz_generated.deepcopy.go

+31
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

bundle/manifests/limitador-operator.clusterserviceversion.yaml

+11
Original file line numberDiff line numberDiff line change
@@ -118,6 +118,17 @@ spec:
118118
- get
119119
- patch
120120
- update
121+
- apiGroups:
122+
- policy
123+
resources:
124+
- poddisruptionbudgets
125+
verbs:
126+
- create
127+
- delete
128+
- get
129+
- list
130+
- update
131+
- watch
121132
serviceAccountName: limitador-operator-controller-manager
122133
deployments:
123134
- label:

bundle/manifests/limitador.kuadrant.io_limitadors.yaml

+22
Original file line numberDiff line numberDiff line change
@@ -75,6 +75,28 @@ spec:
7575
type: integer
7676
type: object
7777
type: object
78+
pdb:
79+
properties:
80+
maxUnavailable:
81+
anyOf:
82+
- type: integer
83+
- type: string
84+
description: An eviction is allowed if at most "maxUnavailable"
85+
limitador pods are unavailable after the eviction, i.e. even
86+
in absence of the evicted pod. For example, one can prevent
87+
all voluntary evictions by specifying 0. This is a mutually
88+
exclusive setting with "minAvailable".
89+
x-kubernetes-int-or-string: true
90+
minAvailable:
91+
anyOf:
92+
- type: integer
93+
- type: string
94+
description: An eviction is allowed if at least "minAvailable"
95+
limitador pods will still be available after the eviction, i.e.
96+
even in the absence of the evicted pod. So for example you
97+
can prevent all voluntary evictions by specifying "100%".
98+
x-kubernetes-int-or-string: true
99+
type: object
78100
rateLimitHeaders:
79101
description: RateLimitHeadersType defines the valid options for the
80102
--rate-limit-headers arg

config/crd/bases/limitador.kuadrant.io_limitadors.yaml

+22
Original file line numberDiff line numberDiff line change
@@ -76,6 +76,28 @@ spec:
7676
type: integer
7777
type: object
7878
type: object
79+
pdb:
80+
properties:
81+
maxUnavailable:
82+
anyOf:
83+
- type: integer
84+
- type: string
85+
description: An eviction is allowed if at most "maxUnavailable"
86+
limitador pods are unavailable after the eviction, i.e. even
87+
in absence of the evicted pod. For example, one can prevent
88+
all voluntary evictions by specifying 0. This is a mutually
89+
exclusive setting with "minAvailable".
90+
x-kubernetes-int-or-string: true
91+
minAvailable:
92+
anyOf:
93+
- type: integer
94+
- type: string
95+
description: An eviction is allowed if at least "minAvailable"
96+
limitador pods will still be available after the eviction, i.e.
97+
even in the absence of the evicted pod. So for example you
98+
can prevent all voluntary evictions by specifying "100%".
99+
x-kubernetes-int-or-string: true
100+
type: object
79101
rateLimitHeaders:
80102
description: RateLimitHeadersType defines the valid options for the
81103
--rate-limit-headers arg

config/manager/manager.yaml

+30-30
Original file line numberDiff line numberDiff line change
@@ -25,35 +25,35 @@ spec:
2525
securityContext:
2626
runAsNonRoot: true
2727
containers:
28-
- command:
29-
- /manager
30-
args:
31-
- --leader-elect
32-
env:
33-
- name: RELATED_IMAGE_LIMITADOR
34-
value: "quay.io/kuadrant/limitador:latest"
35-
image: controller:latest
36-
name: manager
37-
securityContext:
38-
allowPrivilegeEscalation: false
39-
livenessProbe:
40-
httpGet:
41-
path: /healthz
42-
port: 8081
43-
initialDelaySeconds: 15
44-
periodSeconds: 20
45-
readinessProbe:
46-
httpGet:
47-
path: /readyz
48-
port: 8081
49-
initialDelaySeconds: 5
50-
periodSeconds: 10
51-
resources:
52-
limits:
53-
cpu: 200m
54-
memory: 300Mi
55-
requests:
56-
cpu: 200m
57-
memory: 200Mi
28+
- command:
29+
- /manager
30+
args:
31+
- --leader-elect
32+
env:
33+
- name: RELATED_IMAGE_LIMITADOR
34+
value: "quay.io/kuadrant/limitador:latest"
35+
image: controller:latest
36+
name: manager
37+
securityContext:
38+
allowPrivilegeEscalation: false
39+
livenessProbe:
40+
httpGet:
41+
path: /healthz
42+
port: 8081
43+
initialDelaySeconds: 15
44+
periodSeconds: 20
45+
readinessProbe:
46+
httpGet:
47+
path: /readyz
48+
port: 8081
49+
initialDelaySeconds: 5
50+
periodSeconds: 10
51+
resources:
52+
limits:
53+
cpu: 200m
54+
memory: 300Mi
55+
requests:
56+
cpu: 200m
57+
memory: 200Mi
5858
serviceAccountName: controller-manager
5959
terminationGracePeriodSeconds: 10

config/rbac/role.yaml

+11
Original file line numberDiff line numberDiff line change
@@ -65,3 +65,14 @@ rules:
6565
- get
6666
- patch
6767
- update
68+
- apiGroups:
69+
- policy
70+
resources:
71+
- poddisruptionbudgets
72+
verbs:
73+
- create
74+
- delete
75+
- get
76+
- list
77+
- update
78+
- watch

controllers/limitador_controller.go

+49
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@ import (
2525
"github.com/go-logr/logr"
2626
appsv1 "k8s.io/api/apps/v1"
2727
v1 "k8s.io/api/core/v1"
28+
policyv1 "k8s.io/api/policy/v1"
2829
"k8s.io/apimachinery/pkg/api/errors"
2930
"k8s.io/apimachinery/pkg/types"
3031
ctrl "sigs.k8s.io/controller-runtime"
@@ -45,6 +46,7 @@ type LimitadorReconciler struct {
4546
//+kubebuilder:rbac:groups=limitador.kuadrant.io,resources=limitadors/status,verbs=get;update;patch
4647
//+kubebuilder:rbac:groups=limitador.kuadrant.io,resources=limitadors/finalizers,verbs=update
4748
//+kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;delete
49+
//+kubebuilder:rbac:groups=policy,resources=poddisruptionbudgets,verbs=get;list;watch;create;update;delete
4850
//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;delete
4951
//+kubebuilder:rbac:groups="",resources=configmaps;secrets,verbs=get;list;watch;create;update;delete
5052

@@ -116,9 +118,55 @@ func (r *LimitadorReconciler) reconcileSpec(ctx context.Context, limitadorObj *l
116118
return ctrl.Result{}, err
117119
}
118120

121+
if err := r.reconcilePdb(ctx, limitadorObj); err != nil {
122+
return ctrl.Result{}, err
123+
}
124+
119125
return ctrl.Result{}, nil
120126
}
121127

128+
func (r *LimitadorReconciler) reconcilePdb(ctx context.Context, limitadorObj *limitadorv1alpha1.Limitador) error {
129+
logger, err := logr.FromContext(ctx)
130+
if err != nil {
131+
return err
132+
}
133+
if limitadorObj.Spec.PodDisruptionBudget == nil {
134+
pdb := &policyv1.PodDisruptionBudget{}
135+
if err := r.GetResource(ctx,
136+
types.NamespacedName{
137+
Namespace: limitadorObj.Namespace,
138+
Name: limitador.PodDisruptionBudgetName(limitadorObj),
139+
}, pdb); err != nil {
140+
if errors.IsNotFound(err) {
141+
return nil
142+
}
143+
return err
144+
}
145+
if pdb.ObjectMeta.DeletionTimestamp == nil {
146+
if err = r.DeleteResource(ctx, pdb); err != nil {
147+
return err
148+
}
149+
}
150+
return nil
151+
}
152+
153+
pdb := limitador.PodDisruptionBudget(limitadorObj)
154+
if err := limitador.ValidatePDB(pdb); err != nil {
155+
return err
156+
}
157+
158+
// controller reference
159+
if err := r.SetOwnerReference(limitadorObj, pdb); err != nil {
160+
return err
161+
}
162+
err = r.ReconcilePodDisruptionBudget(ctx, pdb, reconcilers.PodDisruptionBudgetMutator)
163+
logger.V(1).Info("reconcile pdb", "error", err)
164+
if err != nil {
165+
return err
166+
}
167+
return nil
168+
}
169+
122170
func (r *LimitadorReconciler) reconcileDeployment(ctx context.Context, limitadorObj *limitadorv1alpha1.Limitador) error {
123171
logger, err := logr.FromContext(ctx)
124172
if err != nil {
@@ -213,6 +261,7 @@ func (r *LimitadorReconciler) SetupWithManager(mgr ctrl.Manager) error {
213261
For(&limitadorv1alpha1.Limitador{}).
214262
Owns(&appsv1.Deployment{}).
215263
Owns(&v1.ConfigMap{}).
264+
Owns(&policyv1.PodDisruptionBudget{}).
216265
Complete(r)
217266
}
218267

0 commit comments

Comments (0)