diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES
index e4bda950ca..a0c073e435 100644
--- a/OWNERS_ALIASES
+++ b/OWNERS_ALIASES
@@ -24,4 +24,3 @@ aliases:
     - mdbooth
   cluster-api-openstack-reviewers:
     - emilienm
-    - dulek
diff --git a/controllers/openstackmachine_controller.go b/controllers/openstackmachine_controller.go
index ee923ec812..230ecd5342 100644
--- a/controllers/openstackmachine_controller.go
+++ b/controllers/openstackmachine_controller.go
@@ -776,17 +776,22 @@ func (r *OpenStackMachineReconciler) getOrCreateInstance(logger logr.Logger, ope
 	}
 	if instanceStatus == nil {
 		// Check if there is an existing instance with machine name, in case where instance ID would not have been stored in machine status
-		if instanceStatus, err = computeService.GetInstanceStatusByName(openStackMachine, openStackMachine.Name); err == nil {
-			if instanceStatus != nil {
-				return instanceStatus, nil
-			}
-			if openStackMachine.Status.InstanceID != nil {
-				logger.Info("Not reconciling machine in failed state. The previously existing OpenStack instance is no longer available")
-				conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotFoundReason, clusterv1.ConditionSeverityError, "virtual machine no longer exists")
-				openStackMachine.SetFailure(capierrors.UpdateMachineError, errors.New("virtual machine no longer exists"))
-				return nil, nil
-			}
+		instanceStatus, err = computeService.GetInstanceStatusByName(openStackMachine, openStackMachine.Name)
+		if err != nil {
+			logger.Info("Unable to get OpenStack instance by name", "name", openStackMachine.Name)
+			conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceCreateFailedReason, clusterv1.ConditionSeverityError, err.Error())
+			return nil, err
+		}
+		if instanceStatus != nil {
+			return instanceStatus, nil
 		}
+		if openStackMachine.Status.InstanceID != nil {
+			logger.Info("Not reconciling machine in failed state. The previously existing OpenStack instance is no longer available")
+			conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotFoundReason, clusterv1.ConditionSeverityError, "virtual machine no longer exists")
+			openStackMachine.SetFailure(capierrors.UpdateMachineError, errors.New("virtual machine no longer exists"))
+			return nil, nil
+		}
+
 		instanceSpec, err := machineToInstanceSpec(openStackCluster, machine, openStackMachine, userData)
 		if err != nil {
 			return nil, err
diff --git a/docs/book/src/clusteropenstack/configuration.md b/docs/book/src/clusteropenstack/configuration.md
index cb9e20360a..193720927e 100644
--- a/docs/book/src/clusteropenstack/configuration.md
+++ b/docs/book/src/clusteropenstack/configuration.md
@@ -565,6 +565,7 @@ permitted from anywhere, as with the default rules).
 We can add security group rules that authorize traffic from all nodes via `allNodesSecurityGroupRules`.
 It takes a list of security groups rules that should be applied to selected nodes.
 The following rule fields are mutually exclusive: `remoteManagedGroups`, `remoteGroupID` and `remoteIPPrefix`.
+If none of these fields are set, the rule will have a remote IP prefix of `0.0.0.0/0`, which is the Neutron default.
 
 Valid values for `remoteManagedGroups` are `controlplane`, `worker` and `bastion`.
 
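For illustration only: the rule shape this change enables can be sketched with the v1beta1 types that appear in the tests further down in this diff. The `main` wrapper and import paths below are assumptions for the sake of a self-contained example; the field names (`Protocol`, `PortRangeMin`, `PortRangeMax`) are taken from the test changes themselves.

```go
package main

import (
	"fmt"

	"k8s.io/utils/ptr"

	infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1"
)

func main() {
	// An allNodesSecurityGroupRules entry with none of the mutually exclusive
	// remote fields set (RemoteManagedGroups, RemoteGroupID, RemoteIPPrefix).
	// With the validation relaxed in this change such a rule is accepted, and
	// Neutron then applies its default remote IP prefix of 0.0.0.0/0, as the
	// documentation update above describes.
	rule := infrav1.SecurityGroupRuleSpec{
		Protocol:     ptr.To("tcp"),
		PortRangeMin: ptr.To(22),
		PortRangeMax: ptr.To(22),
	}
	fmt.Printf("%+v\n", rule)
}
```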
diff --git a/docs/book/src/topics/crd-changes/v1alpha7-to-v1beta1.md b/docs/book/src/topics/crd-changes/v1alpha7-to-v1beta1.md
index 975d226275..9d1aaaad02 100644
--- a/docs/book/src/topics/crd-changes/v1alpha7-to-v1beta1.md
+++ b/docs/book/src/topics/crd-changes/v1alpha7-to-v1beta1.md
@@ -396,6 +396,8 @@ The field `managedSecurityGroups` is now a pointer to a `ManagedSecurityGroups`
 Also, we can now add security group rules that authorize traffic from all nodes via `allNodesSecurityGroupRules`.
 It takes a list of security groups rules that should be applied to selected nodes.
 The following rule fields are mutually exclusive: `remoteManagedGroups`, `remoteGroupID` and `remoteIPPrefix`.
+If none of these fields are set, the rule will have a remote IP prefix of `0.0.0.0/0`, which is the Neutron default.
+
 Valid values for `remoteManagedGroups` are `controlplane`, `worker` and `bastion`.
 
 Also, `OpenStackCluster.Spec.AllowAllInClusterTraffic` moved under `ManagedSecurityGroups`.
diff --git a/pkg/cloud/services/loadbalancer/loadbalancer.go b/pkg/cloud/services/loadbalancer/loadbalancer.go
index 11fcf3c286..ccdca0c0d9 100644
--- a/pkg/cloud/services/loadbalancer/loadbalancer.go
+++ b/pkg/cloud/services/loadbalancer/loadbalancer.go
@@ -92,9 +92,10 @@ func (s *Service) ReconcileLoadBalancer(openStackCluster *infrav1.OpenStackClust
 
 	if lb.ProvisioningStatus != loadBalancerProvisioningStatusActive {
 		var err error
-		lb, err = s.waitForLoadBalancerActive(lb.ID)
+		lbID := lb.ID
+		lb, err = s.waitForLoadBalancerActive(lbID)
 		if err != nil {
-			return false, fmt.Errorf("load balancer %q with id %s is not active after timeout: %v", loadBalancerName, lb.ID, err)
+			return false, fmt.Errorf("load balancer %q with id %s is not active after timeout: %v", loadBalancerName, lbID, err)
 		}
 	}
 
diff --git a/pkg/cloud/services/loadbalancer/loadbalancer_test.go b/pkg/cloud/services/loadbalancer/loadbalancer_test.go
index bc8317e61e..4b43a6d488 100644
--- a/pkg/cloud/services/loadbalancer/loadbalancer_test.go
+++ b/pkg/cloud/services/loadbalancer/loadbalancer_test.go
@@ -45,6 +45,13 @@ func Test_ReconcileLoadBalancer(t *testing.T) {
 	mockCtrl := gomock.NewController(t)
 	defer mockCtrl.Finish()
 
+	// Shortcut wait timeout
+	backoffDurationPrev := backoff.Duration
+	backoff.Duration = 0
+	defer func() {
+		backoff.Duration = backoffDurationPrev
+	}()
+
 	// Stub the call to net.LookupHost
 	lookupHost = func(host string) (addrs *string, err error) {
 		if net.ParseIP(host) != nil {
@@ -139,6 +146,27 @@ func Test_ReconcileLoadBalancer(t *testing.T) {
 			},
 			wantError: nil,
 		},
+		{
+			name: "reconcile loadbalancer in non active state should timeout",
+			expectNetwork: func(*mock.MockNetworkClientMockRecorder) {
+				// add network api call results here
+			},
+			expectLoadBalancer: func(m *mock.MockLbClientMockRecorder) {
+				pendingLB := loadbalancers.LoadBalancer{
+					ID:                 "aaaaaaaa-bbbb-cccc-dddd-333333333333",
+					Name:               "k8s-clusterapi-cluster-AAAAA-kubeapi",
+					ProvisioningStatus: "PENDING_CREATE",
+				}
+
+				// return existing loadbalancer in non-active state
+				lbList := []loadbalancers.LoadBalancer{pendingLB}
+				m.ListLoadBalancers(loadbalancers.ListOpts{Name: pendingLB.Name}).Return(lbList, nil)
+
+				// wait for loadbalancer until it times out
+				m.GetLoadBalancer("aaaaaaaa-bbbb-cccc-dddd-333333333333").Return(&pendingLB, nil).AnyTimes()
+			},
+			wantError: fmt.Errorf("load balancer \"k8s-clusterapi-cluster-AAAAA-kubeapi\" with id aaaaaaaa-bbbb-cccc-dddd-333333333333 is not active after timeout: timed out waiting for the condition"),
+		},
 	}
 	for _, tt := range lbtests {
 		t.Run(tt.name, func(t *testing.T) {
diff --git a/pkg/cloud/services/networking/securitygroups.go b/pkg/cloud/services/networking/securitygroups.go
index cc41b9423b..5863adc262 100644
--- a/pkg/cloud/services/networking/securitygroups.go
+++ b/pkg/cloud/services/networking/securitygroups.go
@@ -297,10 +297,6 @@ func getAllNodesRules(remoteManagedGroups map[string]string, allNodesSecurityGro
 
 // validateRemoteManagedGroups validates that the remoteManagedGroups target existing managed security groups.
 func validateRemoteManagedGroups(remoteManagedGroups map[string]string, ruleRemoteManagedGroups []infrav1.ManagedSecurityGroupName) error {
-	if len(ruleRemoteManagedGroups) == 0 {
-		return fmt.Errorf("remoteManagedGroups is required")
-	}
-
 	for _, group := range ruleRemoteManagedGroups {
 		if _, ok := remoteManagedGroups[group.String()]; !ok {
 			return fmt.Errorf("remoteManagedGroups: %s is not a valid remote managed security group", group)
diff --git a/pkg/cloud/services/networking/securitygroups_test.go b/pkg/cloud/services/networking/securitygroups_test.go
index a9f0ca3163..84d6bdecd4 100644
--- a/pkg/cloud/services/networking/securitygroups_test.go
+++ b/pkg/cloud/services/networking/securitygroups_test.go
@@ -49,19 +49,14 @@ func TestValidateRemoteManagedGroups(t *testing.T) {
 			wantErr: true,
 		},
 		{
-			name: "Valid rule with missing remoteManagedGroups",
+			name: "Valid rule with no remoteManagedGroups",
 			rule: infrav1.SecurityGroupRuleSpec{
-				PortRangeMin: ptr.To(22),
-				PortRangeMax: ptr.To(22),
-				Protocol:     ptr.To("tcp"),
+				PortRangeMin:   ptr.To(22),
+				PortRangeMax:   ptr.To(22),
+				Protocol:       ptr.To("tcp"),
+				RemoteIPPrefix: ptr.To("0.0.0.0/0"),
 			},
-			remoteManagedGroups: map[string]string{
-				"self":         "self",
-				"controlplane": "1",
-				"worker":       "2",
-				"bastion":      "3",
-			},
-			wantErr: true,
+			wantErr: false,
 		},
 		{
 			name: "Valid rule with remoteManagedGroups",
@@ -171,6 +166,70 @@ func TestGetAllNodesRules(t *testing.T) {
 				},
 			},
 		},
+		{
+			name: "Valid remoteIPPrefix in a rule",
+			remoteManagedGroups: map[string]string{
+				"controlplane": "1",
+				"worker":       "2",
+			},
+			allNodesSecurityGroupRules: []infrav1.SecurityGroupRuleSpec{
+				{
+					Protocol:       ptr.To("tcp"),
+					PortRangeMin:   ptr.To(22),
+					PortRangeMax:   ptr.To(22),
+					RemoteIPPrefix: ptr.To("0.0.0.0/0"),
+				},
+			},
+			wantRules: []resolvedSecurityGroupRuleSpec{
+				{
+					Protocol:       "tcp",
+					PortRangeMin:   22,
+					PortRangeMax:   22,
+					RemoteIPPrefix: "0.0.0.0/0",
+				},
+			},
+		},
+		{
+			name: "Valid allNodesSecurityGroupRules with no remote parameter",
+			remoteManagedGroups: map[string]string{
+				"controlplane": "1",
+				"worker":       "2",
+			},
+			allNodesSecurityGroupRules: []infrav1.SecurityGroupRuleSpec{
+				{
+					Protocol:     ptr.To("tcp"),
+					PortRangeMin: ptr.To(22),
+					PortRangeMax: ptr.To(22),
+				},
+			},
+			wantRules: []resolvedSecurityGroupRuleSpec{
+				{
+					Protocol:     "tcp",
+					PortRangeMin: 22,
+					PortRangeMax: 22,
+				},
+			},
+			wantErr: false,
+		},
+		{
+			name: "Invalid allNodesSecurityGroupRules with bastion while remoteManagedGroups does not have bastion",
+			remoteManagedGroups: map[string]string{
+				"controlplane": "1",
+				"worker":       "2",
+			},
+			allNodesSecurityGroupRules: []infrav1.SecurityGroupRuleSpec{
+				{
+					Protocol:     ptr.To("tcp"),
+					PortRangeMin: ptr.To(22),
+					PortRangeMax: ptr.To(22),
+					RemoteManagedGroups: []infrav1.ManagedSecurityGroupName{
+						"bastion",
+					},
+				},
+			},
+			wantRules: nil,
+			wantErr:   true,
+		},
 		{
 			name: "Invalid allNodesSecurityGroupRules with wrong remoteManagedGroups",
 			remoteManagedGroups: map[string]string{
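To make the behavioural change concrete, here is a minimal, standalone sketch of the relaxed validation exercised by the new test cases. It is not the package's actual code: plain strings stand in for `infrav1.ManagedSecurityGroupName`, and the `main` function exists only to demonstrate the two outcomes.

```go
package main

import "fmt"

// validateRemoteManagedGroups mirrors the relaxed check: with the
// "remoteManagedGroups is required" guard removed, an empty list is
// accepted and only unknown group names are rejected.
func validateRemoteManagedGroups(remoteManagedGroups map[string]string, ruleRemoteManagedGroups []string) error {
	for _, group := range ruleRemoteManagedGroups {
		if _, ok := remoteManagedGroups[group]; !ok {
			return fmt.Errorf("remoteManagedGroups: %s is not a valid remote managed security group", group)
		}
	}
	return nil
}

func main() {
	managed := map[string]string{"controlplane": "1", "worker": "2"}

	// No remote managed groups: previously rejected, now allowed.
	fmt.Println(validateRemoteManagedGroups(managed, nil)) // <nil>

	// "bastion" is not a managed group here, so this still fails, matching the
	// new "Invalid allNodesSecurityGroupRules with bastion" test case.
	fmt.Println(validateRemoteManagedGroups(managed, []string{"bastion"}))
}
```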