11 changes: 10 additions & 1 deletion exp/controllers/rosamachinepool_controller.go
@@ -366,14 +366,23 @@ func (r *ROSAMachinePoolReconciler) reconcileMachinePoolVersion(machinePoolScope
return nil
}

func (r *ROSAMachinePoolReconciler) shouldUpdateRosaReplicas(machinePoolScope *scope.RosaMachinePoolScope, nodePool *cmv1.NodePool) bool {
if machinePoolScope.MachinePool.Spec.Replicas == nil || machinePoolScope.RosaMachinePool.Spec.Autoscaling != nil || annotations.ReplicasManagedByExternalAutoscaler(machinePoolScope.MachinePool) {
return false
}

return nodePool.Replicas() != int(*machinePoolScope.MachinePool.Spec.Replicas)
}

func (r *ROSAMachinePoolReconciler) updateNodePool(machinePoolScope *scope.RosaMachinePoolScope, ocmClient rosa.OCMClient, nodePool *cmv1.NodePool) (*cmv1.NodePool, error) {
machinePool := machinePoolScope.RosaMachinePool.DeepCopy()
// default all fields before comparing, so that nil/unset fields don't cause an unnecessary update call.
machinePool.Default()
desiredSpec := machinePool.Spec

specDiff := computeSpecDiff(desiredSpec, nodePool)
if specDiff == "" {
// Replicas are not part of RosaMachinePoolSpec
if specDiff == "" && !r.shouldUpdateRosaReplicas(machinePoolScope, nodePool) {
// no changes detected.
return nodePool, nil
}
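For readers skimming the hunk above: the new guard syncs the OCM node pool's replica count to MachinePool.Spec.Replicas only when replicas are declaratively owned by the MachinePool, i.e. not when ROSA autoscaling is enabled or an external autoscaler manages replicas. A minimal, self-contained sketch of that decision is below; the struct and function names are simplified stand-ins, not the CAPA/OCM types used in the real controller.

```go
package main

import "fmt"

// Simplified stand-ins for the fields the controller consults (assumptions, not CAPA types).
type poolState struct {
	machinePoolReplicas *int32 // MachinePool.Spec.Replicas
	rosaAutoscaling     bool   // RosaMachinePool.Spec.Autoscaling != nil
	externallyManaged   bool   // replicas managed by an external autoscaler (annotation)
	nodePoolReplicas    int    // current replica count reported by OCM
}

// shouldUpdateReplicas mirrors the intent of shouldUpdateRosaReplicas above:
// only sync replicas that the MachinePool owns and that have drifted in OCM.
func shouldUpdateReplicas(s poolState) bool {
	if s.machinePoolReplicas == nil || s.rosaAutoscaling || s.externallyManaged {
		return false
	}
	return s.nodePoolReplicas != int(*s.machinePoolReplicas)
}

func main() {
	two := int32(2)
	fmt.Println(shouldUpdateReplicas(poolState{machinePoolReplicas: &two, nodePoolReplicas: 1})) // true: drift, update node pool
	fmt.Println(shouldUpdateReplicas(poolState{machinePoolReplicas: &two, nodePoolReplicas: 2})) // false: already in sync
	fmt.Println(shouldUpdateReplicas(poolState{rosaAutoscaling: true, nodePoolReplicas: 1}))     // false: autoscaler owns replicas
}
```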
218 changes: 194 additions & 24 deletions exp/controllers/rosamachinepool_controller_test.go
@@ -218,16 +218,18 @@ func TestRosaMachinePoolReconcile(t *testing.T) {
}

tests := []struct {
name string
new *expinfrav1.ROSAMachinePool
old *expinfrav1.ROSAMachinePool
expect func(m *mocks.MockOCMClientMockRecorder)
result reconcile.Result
name string
newROSAMachinePool *expinfrav1.ROSAMachinePool
oldROSAMachinePool *expinfrav1.ROSAMachinePool
machinePool *expclusterv1.MachinePool
expect func(m *mocks.MockOCMClientMockRecorder)
result reconcile.Result
}{
{
name: "create node pool, nodepool doesn't exist",
old: rosaMachinePool(0),
new: &expinfrav1.ROSAMachinePool{
name: "create node pool, nodepool doesn't exist",
machinePool: ownerMachinePool(0),
oldROSAMachinePool: rosaMachinePool(0),
newROSAMachinePool: &expinfrav1.ROSAMachinePool{
ObjectMeta: metav1.ObjectMeta{
Name: "rosa-machinepool",
Namespace: ns.Name,
@@ -259,9 +261,10 @@ func TestRosaMachinePoolReconcile(t *testing.T) {
},
},
{
name: "Nodepool exist, but is not ready",
old: rosaMachinePool(1),
new: &expinfrav1.ROSAMachinePool{
name: "Nodepool exist, but is not ready",
machinePool: ownerMachinePool(1),
oldROSAMachinePool: rosaMachinePool(1),
newROSAMachinePool: &expinfrav1.ROSAMachinePool{
ObjectMeta: metav1.ObjectMeta{
Name: "rosa-machinepool",
Namespace: ns.Name,
@@ -297,9 +300,10 @@ func TestRosaMachinePoolReconcile(t *testing.T) {
},
},
{
name: "Nodepool is ready",
old: rosaMachinePool(2),
new: &expinfrav1.ROSAMachinePool{
name: "Nodepool is ready",
machinePool: ownerMachinePool(2),
oldROSAMachinePool: rosaMachinePool(2),
newROSAMachinePool: &expinfrav1.ROSAMachinePool{
ObjectMeta: metav1.ObjectMeta{
Name: "rosa-machinepool",
Namespace: ns.Name,
@@ -343,6 +347,151 @@ func TestRosaMachinePoolReconcile(t *testing.T) {
m.CreateNodePool(gomock.Any(), gomock.Any()).Times(0)
},
},
{
name: "Create nodepool, replicas are set in MachinePool",
machinePool: &expclusterv1.MachinePool{
ObjectMeta: metav1.ObjectMeta{
Name: ownerMachinePool(3).Name,
Namespace: ns.Name,
Labels: map[string]string{clusterv1.ClusterNameLabel: ownerCluster(3).Name},
UID: types.UID("owner-mp-uid--3"),
},
TypeMeta: metav1.TypeMeta{
Kind: "MachinePool",
APIVersion: clusterv1.GroupVersion.String(),
},
Spec: expclusterv1.MachinePoolSpec{
ClusterName: ownerCluster(3).Name,
Replicas: ptr.To[int32](2),
Template: clusterv1.MachineTemplateSpec{
Spec: clusterv1.MachineSpec{
ClusterName: ownerCluster(3).Name,
InfrastructureRef: corev1.ObjectReference{
UID: rosaMachinePool(3).UID,
Name: rosaMachinePool(3).Name,
Namespace: ns.Namespace,
Kind: "ROSAMachinePool",
APIVersion: expclusterv1.GroupVersion.String(),
},
},
},
},
},
oldROSAMachinePool: rosaMachinePool(3),
newROSAMachinePool: &expinfrav1.ROSAMachinePool{
ObjectMeta: metav1.ObjectMeta{
Name: "rosa-machinepool",
Namespace: ns.Name,
UID: "rosa-machinepool",
},
TypeMeta: metav1.TypeMeta{
Kind: "ROSAMachinePool",
APIVersion: expinfrav1.GroupVersion.String(),
},
Spec: expinfrav1.RosaMachinePoolSpec{
NodePoolName: "test-nodepool",
Version: "4.14.5",
Subnet: "subnet-id",
InstanceType: "m5.large",
},
Status: expinfrav1.RosaMachinePoolStatus{
Ready: false,
ID: rosaMachinePool(3).Spec.NodePoolName,
},
},
result: ctrl.Result{},
expect: func(m *mocks.MockOCMClientMockRecorder) {
m.GetNodePool(gomock.Any(), gomock.Any()).DoAndReturn(func(clusterId string, nodePoolID string) (*cmv1.NodePool, bool, error) {
return nil, false, nil
}).Times(1)
m.CreateNodePool(gomock.Any(), matchesReplicas(2)).DoAndReturn(func(clusterId string, nodePool *cmv1.NodePool) (*cmv1.NodePool, error) {
return nodePool, nil
}).Times(1)
},
},
{
name: "Update nodepool, replicas are updated from MachinePool",
machinePool: &expclusterv1.MachinePool{
ObjectMeta: metav1.ObjectMeta{
Name: ownerMachinePool(4).Name,
Namespace: ns.Name,
Labels: map[string]string{clusterv1.ClusterNameLabel: ownerCluster(4).Name},
UID: types.UID("owner-mp-uid--4"),
},
TypeMeta: metav1.TypeMeta{
Kind: "MachinePool",
APIVersion: clusterv1.GroupVersion.String(),
},
Spec: expclusterv1.MachinePoolSpec{
ClusterName: ownerCluster(4).Name,
Replicas: ptr.To[int32](2),
Template: clusterv1.MachineTemplateSpec{
Spec: clusterv1.MachineSpec{
ClusterName: ownerCluster(4).Name,
InfrastructureRef: corev1.ObjectReference{
UID: rosaMachinePool(4).UID,
Name: rosaMachinePool(4).Name,
Namespace: ns.Namespace,
Kind: "ROSAMachinePool",
APIVersion: expclusterv1.GroupVersion.String(),
},
},
},
},
},
oldROSAMachinePool: rosaMachinePool(4),
newROSAMachinePool: &expinfrav1.ROSAMachinePool{
ObjectMeta: metav1.ObjectMeta{
Name: "rosa-machinepool",
Namespace: ns.Name,
UID: "rosa-machinepool",
},
TypeMeta: metav1.TypeMeta{
Kind: "ROSAMachinePool",
APIVersion: expinfrav1.GroupVersion.String(),
},
Spec: expinfrav1.RosaMachinePoolSpec{
NodePoolName: "test-nodepool",
Version: "4.14.5",
Subnet: "subnet-id",
InstanceType: "m5.large",
},
Status: expinfrav1.RosaMachinePoolStatus{
Ready: true,
Replicas: 2,
},
},
result: ctrl.Result{},
expect: func(m *mocks.MockOCMClientMockRecorder) {
m.GetNodePool(gomock.Any(), gomock.Any()).DoAndReturn(func(clusterId string, nodePoolID string) (*cmv1.NodePool, bool, error) {
rosaSpec := rosaMachinePool(4).Spec
rosaSpec.UpdateConfig = &expinfrav1.RosaUpdateConfig{
RollingUpdate: &expinfrav1.RollingUpdate{
MaxSurge: ptr.To(intstr.FromInt32(1)),
MaxUnavailable: ptr.To(intstr.FromInt32(0)),
},
}
nodePoolBuilder := nodePoolBuilder(rosaSpec, ownerMachinePool(4).Spec, rosacontrolplanev1.Stable)
statusBuilder := (&cmv1.NodePoolStatusBuilder{}).CurrentReplicas(1)
nodeGrace := (&cmv1.ValueBuilder{}).Unit("s").Value(0)
nodePool, err := nodePoolBuilder.ID("test-nodepool").Replicas(1).Status(statusBuilder).AutoRepair(true).NodeDrainGracePeriod(nodeGrace).Build()
g.Expect(err).NotTo(HaveOccurred())

return nodePool, true, nil
}).Times(1)
m.UpdateNodePool(gomock.Any(), matchesReplicas(2)).DoAndReturn(func(clusterID string, nodePool *cmv1.NodePool) (*cmv1.NodePool, error) {
statusBuilder := (&cmv1.NodePoolStatusBuilder{}).CurrentReplicas(2)
version := (&cmv1.VersionBuilder{}).RawID("4.14.5")
npBuilder := cmv1.NodePoolBuilder{}
updatedNodePool, err := npBuilder.Copy(nodePool).Status(statusBuilder).Version(version).Build()
g.Expect(err).NotTo(HaveOccurred())

return updatedNodePool, nil
}).Times(1)
m.CreateNodePool(gomock.Any(), gomock.Any()).Times(0)
},
},
}

createObject(g, secret, ns.Name)
@@ -353,7 +502,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) {
for i, test := range tests {
t.Run(test.name, func(t *testing.T) {
// This is set by CAPI MachinePool reconcile
test.old.OwnerReferences = []metav1.OwnerReference{
test.oldROSAMachinePool.OwnerReferences = []metav1.OwnerReference{
{
Name: ownerMachinePool(i).Name,
UID: ownerMachinePool(i).UID,
@@ -362,7 +511,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) {
},
}
cp := rosaControlPlane(i)
objects := []client.Object{ownerCluster(i), ownerMachinePool(i), cp, test.old}
objects := []client.Object{ownerCluster(i), test.machinePool, cp, test.oldROSAMachinePool}

for _, obj := range objects {
createObject(g, obj, ns.Name)
@@ -374,16 +523,16 @@ func TestRosaMachinePoolReconcile(t *testing.T) {
g.Expect(err).ShouldNot(HaveOccurred())

// patch status conditions
rmpPh, err := patch.NewHelper(test.old, testEnv)
test.old.Status.Conditions = clusterv1.Conditions{
rmpPh, err := patch.NewHelper(test.oldROSAMachinePool, testEnv)
test.oldROSAMachinePool.Status.Conditions = clusterv1.Conditions{
{
Type: "Paused",
Status: corev1.ConditionFalse,
Reason: "NotPaused",
},
}

g.Expect(rmpPh.Patch(ctx, test.old)).To(Succeed())
g.Expect(rmpPh.Patch(ctx, test.oldROSAMachinePool)).To(Succeed())
g.Expect(err).ShouldNot(HaveOccurred())

// patching is not reliably synchronous
@@ -410,20 +559,20 @@ func TestRosaMachinePoolReconcile(t *testing.T) {
}

req := ctrl.Request{}
req.NamespacedName = types.NamespacedName{Name: test.old.Name, Namespace: ns.Name}
req.NamespacedName = types.NamespacedName{Name: test.oldROSAMachinePool.Name, Namespace: ns.Name}

result, errReconcile := r.Reconcile(ctx, req)
g.Expect(errReconcile).ToNot(HaveOccurred())
g.Expect(result).To(Equal(test.result))
time.Sleep(50 * time.Millisecond)

m := &expinfrav1.ROSAMachinePool{}
key := client.ObjectKey{Name: test.old.Name, Namespace: ns.Name}
key := client.ObjectKey{Name: test.oldROSAMachinePool.Name, Namespace: ns.Name}
errGet := testEnv.Get(ctx, key, m)
g.Expect(errGet).NotTo(HaveOccurred())
g.Expect(m.Status.Ready).To(Equal(test.new.Status.Ready))
g.Expect(m.Status.Replicas).To(Equal(test.new.Status.Replicas))
g.Expect(m.Status.ID).To(Equal(test.new.Status.ID))
g.Expect(m.Status.Ready).To(Equal(test.newROSAMachinePool.Status.Ready))
g.Expect(m.Status.Replicas).To(Equal(test.newROSAMachinePool.Status.Replicas))
g.Expect(m.Status.ID).To(Equal(test.newROSAMachinePool.Status.ID))

// cleanup
for _, obj := range objects {
@@ -556,3 +705,24 @@ func cleanupObject(g *WithT, obj client.Object) {
g.Expect(testEnv.Cleanup(ctx, obj)).To(Succeed())
}
}

type replicasMatcher struct {
replicas int
}

func (m replicasMatcher) Matches(arg interface{}) bool {
	nodePool, ok := arg.(*cmv1.NodePool)
	return ok && nodePool.Replicas() == m.replicas
}

// String is required by the gomock.Matcher interface and appears in mismatch failure messages.
func (m replicasMatcher) String() string {
return fmt.Sprintf("Replicas %v", m.replicas)
}

func matchesReplicas(replicas int) gomock.Matcher {
return replicasMatcher{replicas: replicas}
}
2 changes: 1 addition & 1 deletion templates/cluster-template-rosa-machinepool.yaml
@@ -57,7 +57,7 @@ metadata:
name: "${CLUSTER_NAME}-pool-0"
spec:
clusterName: "${CLUSTER_NAME}"
replicas: 1
replicas: 3
Contributor:

We should add logic in shouldUpdateRosaReplicas for the case where the user sets replicas to 1 in the MachinePool, saying that a ROSAMachinePool must have at least 3 replicas per zone and that MachinePool replica changes will be ignored.

Contributor (author):

Setting 1 replica is OK if it isn't the only machine pool. If it is the only machine pool, the number of replicas will not go lower than 2; if you try to set it to 1, reconcile will end with an error. I think that is OK behavior.

Contributor:

Hmm, okay then. Since we don't add a default MP, we most likely will not have this issue. We may revisit this when we work on adding a default MP.

template:
spec:
clusterName: "${CLUSTER_NAME}"
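Not part of this PR: a rough sketch of the guard discussed in the review thread above, in case it is picked up later. The minimum of 2 replicas for a lone machine pool comes from the author's comment; the function name, constant, and error message are assumptions for illustration only.

```go
package main

import "fmt"

// Hypothetical helper for the deferred idea from the review thread:
// surface ROSA's lower bound up front instead of letting reconcile fail later.
const minReplicasForOnlyMachinePool = 2 // per the thread, the only machine pool cannot scale below 2

func validateRequestedReplicas(requested int32, isOnlyMachinePool bool) error {
	if isOnlyMachinePool && requested < minReplicasForOnlyMachinePool {
		return fmt.Errorf("the only machine pool must keep at least %d replicas; ignoring MachinePool replica change to %d",
			minReplicasForOnlyMachinePool, requested)
	}
	return nil
}

func main() {
	fmt.Println(validateRequestedReplicas(1, true))  // error: below the lower bound for a lone pool
	fmt.Println(validateRequestedReplicas(1, false)) // <nil>: additional pools may scale to 1
}
```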