Commit fdeac27

Pass through Pod Affinity Rules
Allow Pod Affinity Rules to be defined in the Limitador CR; these are passed through to the Limitador Deployment and placed in `spec.template.spec.affinity`.
1 parent 0fa98ed commit fdeac27
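
For illustration, a Limitador resource using the new field might look like the sketch below. The metadata and the anti-affinity values are hypothetical (the values mirror the ones used in the controller test); only the optional `spec.affinity` field itself comes from this change, and it takes a standard Kubernetes affinity structure:

    apiVersion: limitador.kuadrant.io/v1alpha1
    kind: Limitador
    metadata:
      name: limitador-sample   # hypothetical name
    spec:
      replicas: 2
      # New optional field: copied as-is into the Deployment's
      # spec.template.spec.affinity by the operator.
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/name: limitador
                topologyKey: kubernetes.io/hostname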

File tree: 9 files changed, +1772 -31 lines

api/v1alpha1/limitador_types.go  (+3)

@@ -44,6 +44,9 @@ type LimitadorSpec struct {
 	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
 	// Important: Run "make" to regenerate code after modifying this file

+	// +optional
+	Affinity *corev1.Affinity `json:"affinity,omitempty"`
+
 	// +optional
 	Replicas *int `json:"replicas,omitempty"`

api/v1alpha1/zz_generated.deepcopy.go  (+5)

Generated file; diff not rendered.

bundle/manifests/limitador.kuadrant.io_limitadors.yaml  (+828)

Large diff; not rendered.

config/crd/bases/limitador.kuadrant.io_limitadors.yaml  (+828)

Large diff; not rendered.

config/manager/manager.yaml  (+30 -30)

@@ -25,35 +25,35 @@ spec:

Lines 28-57 are removed and re-added with identical content; the difference appears to be indentation only, which this rendering does not preserve. The block, shown once:

      securityContext:
        runAsNonRoot: true
      containers:
        - command:
            - /manager
          args:
            - --leader-elect
          env:
            - name: RELATED_IMAGE_LIMITADOR
              value: "quay.io/kuadrant/limitador:latest"
          image: controller:latest
          name: manager
          securityContext:
            allowPrivilegeEscalation: false
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8081
            initialDelaySeconds: 15
            periodSeconds: 20
          readinessProbe:
            httpGet:
              path: /readyz
              port: 8081
            initialDelaySeconds: 5
            periodSeconds: 10
          resources:
            limits:
              cpu: 200m
              memory: 300Mi
            requests:
              cpu: 200m
              memory: 200Mi
      serviceAccountName: controller-manager
      terminationGracePeriodSeconds: 10

controllers/limitador_controller.go  (+1)

@@ -145,6 +145,7 @@ func (r *LimitadorReconciler) reconcileDeployment(ctx context.Context, limitador
 	deploymentMutators = append(deploymentMutators,
 		reconcilers.DeploymentImageMutator,
 		reconcilers.DeploymentCommandMutator,
+		reconcilers.DeploymentAffinityMutator,
 	)

 	deployment := limitador.Deployment(limitadorObj, storageConfigSecret)

controllers/limitador_controller_test.go  (+62 -1)

@@ -40,6 +40,23 @@ var _ = Describe("Limitador controller", func() {
 	version := LimitadorVersion
 	httpPort := &limitadorv1alpha1.TransportProtocol{Port: &httpPortNumber}
 	grpcPort := &limitadorv1alpha1.TransportProtocol{Port: &grpcPortNumber}
+	affinity := &v1.Affinity{
+		PodAntiAffinity: &v1.PodAntiAffinity{
+			PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{
+				{
+					Weight: 100,
+					PodAffinityTerm: v1.PodAffinityTerm{
+						LabelSelector: &metav1.LabelSelector{
+							MatchLabels: map[string]string{
+								"app.kubernetes.io/name": "limitador",
+							},
+						},
+						TopologyKey: "kubernetes.io/hostname",
+					},
+				},
+			},
+		},
+	}

 	limits := []limitadorv1alpha1.RateLimit{
 		{
@@ -74,6 +91,7 @@ var _ = Describe("Limitador controller", func() {
 		Spec: limitadorv1alpha1.LimitadorSpec{
 			Replicas: &replicas,
 			Version:  &version,
+			Affinity: affinity,
 			Listener: &limitadorv1alpha1.Listener{
 				HTTP: httpPort,
 				GRPC: grpcPort,
@@ -169,6 +187,9 @@ var _ = Describe("Limitador controller", func() {
 				},
 			),
 		)
+		Expect(createdLimitadorDeployment.Spec.Template.Spec.Affinity).Should(
+			Equal(affinity),
+		)
 	})

 	It("Should create a Limitador service", func() {
@@ -259,6 +280,8 @@ var _ = Describe("Limitador controller", func() {
 		updatedLimitador.Spec.Replicas = &replicas
 		version = "latest"
 		updatedLimitador.Spec.Version = &version
+		affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Weight = 99
+		updatedLimitador.Spec.Affinity = affinity

 		Expect(k8sClient.Update(context.TODO(), &updatedLimitador)).Should(Succeed())
 		updatedLimitadorDeployment := appsv1.Deployment{}
@@ -277,8 +300,46 @@ var _ = Describe("Limitador controller", func() {

 			correctReplicas := *updatedLimitadorDeployment.Spec.Replicas == LimitadorReplicas+1
 			correctImage := updatedLimitadorDeployment.Spec.Template.Spec.Containers[0].Image == LimitadorImage+":latest"
+			correctAffinity := updatedLimitadorDeployment.Spec.Template.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Weight == 99
+
+			return correctReplicas && correctImage && correctAffinity
+		}, timeout, interval).Should(BeTrue())
+	})
+
+	It("Should modify limitador deployments if nil object set", func() {
+		updatedLimitador := limitadorv1alpha1.Limitador{}
+		Eventually(func() bool {
+			err := k8sClient.Get(
+				context.TODO(),
+				types.NamespacedName{
+					Namespace: LimitadorNamespace,
+					Name:      limitadorObj.Name,
+				},
+				&updatedLimitador)
+
+			return err == nil
+		}, timeout, interval).Should(BeTrue())
+
+		updatedLimitador.Spec.Affinity = nil
+
+		Expect(k8sClient.Update(context.TODO(), &updatedLimitador)).Should(Succeed())
+		updatedLimitadorDeployment := appsv1.Deployment{}
+		Eventually(func() bool {
+			err := k8sClient.Get(
+				context.TODO(),
+				types.NamespacedName{
+					Namespace: LimitadorNamespace,
+					Name:      limitadorObj.Name,
+				},
+				&updatedLimitadorDeployment)
+
+			if err != nil {
+				return false
+			}
+
+			correctAffinity := updatedLimitadorDeployment.Spec.Template.Spec.Affinity == nil

-			return correctReplicas && correctImage
+			return correctAffinity
 		}, timeout, interval).Should(BeTrue())
 	})

pkg/limitador/k8s_objects.go  (+6)

@@ -60,6 +60,11 @@ func Deployment(limitador *limitadorv1alpha1.Limitador, storageConfigSecret *v1.
 		replicas = int32(*limitador.Spec.Replicas)
 	}

+	var affinity *v1.Affinity
+	if limitador.Spec.Affinity != nil {
+		affinity = limitador.Spec.Affinity
+	}
+
 	image := GetLimitadorImageVersion()
 	if limitador.Spec.Version != nil {
 		image = fmt.Sprintf("%s:%s", LimitadorRepository, *limitador.Spec.Version)
@@ -85,6 +90,7 @@ func Deployment(limitador *limitadorv1alpha1.Limitador, storageConfigSecret *v1.
 				Labels: labels(),
 			},
 			Spec: v1.PodSpec{
+				Affinity: affinity,
 				Containers: []v1.Container{
 					{
 						Name: "limitador",

pkg/reconcilers/deployment.go  (+9)

@@ -34,6 +34,15 @@ func DeploymentMutator(opts ...DeploymentMutateFn) MutateFn {
 	}
 }

+func DeploymentAffinityMutator(desired, existing *appsv1.Deployment) bool {
+	update := false
+	if !reflect.DeepEqual(existing.Spec.Template.Spec.Affinity, desired.Spec.Template.Spec.Affinity) {
+		existing.Spec.Template.Spec.Affinity = desired.Spec.Template.Spec.Affinity
+		update = true
+	}
+	return update
+}
+
 func DeploymentReplicasMutator(desired, existing *appsv1.Deployment) bool {
 	update := false
