From 7d9f21ad6cd3fc555874732408e30a3ed5344e34 Mon Sep 17 00:00:00 2001 From: Matthew Booth Date: Mon, 14 Feb 2022 17:27:12 +0000 Subject: [PATCH 1/5] Don't validate security group passed by uuid --- pkg/cloud/services/compute/instance_test.go | 5 ----- pkg/cloud/services/networking/securitygroups.go | 9 +++++++++ 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/pkg/cloud/services/compute/instance_test.go b/pkg/cloud/services/compute/instance_test.go index cccb1418c6..5ad81e23da 100644 --- a/pkg/cloud/services/compute/instance_test.go +++ b/pkg/cloud/services/compute/instance_test.go @@ -33,7 +33,6 @@ import ( "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/trunks" "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" @@ -1030,10 +1029,6 @@ func TestService_CreateInstance(t *testing.T) { expectUseExistingDefaultPort(networkRecorder) expectDefaultImageAndFlavor(computeRecorder) - // TODO: Shortcut this API call if security groups are passed by UUID - networkRecorder.ListSecGroup(groups.ListOpts{ID: extraSecurityGroupUUID}). 
- Return([]groups.SecGroup{{ID: extraSecurityGroupUUID}}, nil) - createMap := getDefaultServerMap() serverMap := createMap["server"].(map[string]interface{}) serverMap["security_groups"] = []map[string]interface{}{ diff --git a/pkg/cloud/services/networking/securitygroups.go b/pkg/cloud/services/networking/securitygroups.go index c339a629a4..7ed2b6968f 100644 --- a/pkg/cloud/services/networking/securitygroups.go +++ b/pkg/cloud/services/networking/securitygroups.go @@ -393,6 +393,15 @@ func (s *Service) generateDesiredSecGroups(openStackCluster *infrav1.OpenStackCl func (s *Service) GetSecurityGroups(securityGroupParams []infrav1.SecurityGroupParam) ([]string, error) { var sgIDs []string for _, sg := range securityGroupParams { + // Don't validate an explicit UUID if we were given one + if sg.UUID != "" { + if isDuplicate(sgIDs, sg.UUID) { + continue + } + sgIDs = append(sgIDs, sg.UUID) + continue + } + listOpts := groups.ListOpts(sg.Filter) if listOpts.ProjectID == "" { listOpts.ProjectID = s.projectID From 4c07893b7cbf09f3d99979967bdc9de220320c04 Mon Sep 17 00:00:00 2001 From: Matthew Booth Date: Mon, 14 Feb 2022 17:12:12 +0000 Subject: [PATCH 2/5] More DRY in CreateBastion and CreateInstance Common network and security group handling between CreateBastion() and CreateInstance(). A principal advantage of this refactor is that it makes the marshalling of OpenStackMachineSpec and Instance respectively into an InstanceSpec a cheap operation which makes no API calls. 
--- pkg/cloud/services/compute/bastion.go | 30 ++--- pkg/cloud/services/compute/instance.go | 120 ++++++++----------- pkg/cloud/services/compute/instance_types.go | 5 +- 3 files changed, 62 insertions(+), 93 deletions(-) diff --git a/pkg/cloud/services/compute/bastion.go b/pkg/cloud/services/compute/bastion.go index abce789f5e..689187e93e 100644 --- a/pkg/cloud/services/compute/bastion.go +++ b/pkg/cloud/services/compute/bastion.go @@ -34,31 +34,15 @@ func (s *Service) CreateBastion(openStackCluster *infrav1.OpenStackCluster, clus RootVolume: openStackCluster.Spec.Bastion.Instance.RootVolume, } - securityGroups, err := s.networkingService.GetSecurityGroups(openStackCluster.Spec.Bastion.Instance.SecurityGroups) - if err != nil { - return nil, err - } + instanceSpec.SecurityGroups = openStackCluster.Spec.Bastion.Instance.SecurityGroups if openStackCluster.Spec.ManagedSecurityGroups { - securityGroups = append(securityGroups, openStackCluster.Status.BastionSecurityGroup.ID) + instanceSpec.SecurityGroups = append(instanceSpec.SecurityGroups, infrav1.SecurityGroupParam{ + UUID: openStackCluster.Status.BastionSecurityGroup.ID, + }) } - instanceSpec.SecurityGroups = securityGroups - var nets []infrav1.Network - if len(openStackCluster.Spec.Bastion.Instance.Networks) > 0 { - var err error - nets, err = s.getServerNetworks(openStackCluster.Spec.Bastion.Instance.Networks) - if err != nil { - return nil, err - } - } else { - nets = []infrav1.Network{{ - ID: openStackCluster.Status.Network.ID, - Subnet: &infrav1.Subnet{ - ID: openStackCluster.Status.Network.Subnet.ID, - }, - }} - } - instanceSpec.Networks = nets + instanceSpec.Networks = openStackCluster.Spec.Bastion.Instance.Networks + instanceSpec.Ports = openStackCluster.Spec.Bastion.Instance.Ports - return s.createInstance(openStackCluster, clusterName, instanceSpec, retryIntervalInstanceStatus) + return s.createInstance(openStackCluster, openStackCluster, clusterName, instanceSpec, retryIntervalInstanceStatus) } diff 
--git a/pkg/cloud/services/compute/instance.go b/pkg/cloud/services/compute/instance.go index d170287888..403388de37 100644 --- a/pkg/cloud/services/compute/instance.go +++ b/pkg/cloud/services/compute/instance.go @@ -70,19 +70,9 @@ func (s *Service) createInstanceImpl(openStackCluster *infrav1.OpenStackCluster, RootVolume: openStackMachine.Spec.RootVolume, Subnet: openStackMachine.Spec.Subnet, ServerGroupID: openStackMachine.Spec.ServerGroupID, + Trunk: openStackMachine.Spec.Trunk, } - // verify that trunk is supported if set at instance level. - if openStackMachine.Spec.Trunk { - trunkSupported, err := s.isTrunkExtSupported() - if err != nil { - return nil, err - } - if !trunkSupported { - return nil, fmt.Errorf("there is no trunk support. please ensure that the trunk extension is enabled in your OpenStack deployment") - } - instanceSpec.Trunk = true - } machineTags := []string{} // Append machine specific tags @@ -96,56 +86,44 @@ func (s *Service) createInstanceImpl(openStackCluster *infrav1.OpenStackCluster, instanceSpec.Tags = machineTags - // Get security groups - securityGroups, err := s.networkingService.GetSecurityGroups(openStackMachine.Spec.SecurityGroups) - if err != nil { - return nil, err - } + instanceSpec.SecurityGroups = openStackMachine.Spec.SecurityGroups if openStackCluster.Spec.ManagedSecurityGroups { + var managedSecurityGroup string if util.IsControlPlaneMachine(machine) { - securityGroups = append(securityGroups, openStackCluster.Status.ControlPlaneSecurityGroup.ID) + managedSecurityGroup = openStackCluster.Status.ControlPlaneSecurityGroup.ID } else { - securityGroups = append(securityGroups, openStackCluster.Status.WorkerSecurityGroup.ID) + managedSecurityGroup = openStackCluster.Status.WorkerSecurityGroup.ID } - } - instanceSpec.SecurityGroups = securityGroups - nets, err := s.constructNetworks(openStackCluster, openStackMachine) - if err != nil { - return nil, err + instanceSpec.SecurityGroups = append(instanceSpec.SecurityGroups, 
infrav1.SecurityGroupParam{ + UUID: managedSecurityGroup, + }) } - trunkConfigured := s.isTrunkConfigured(nets, instanceSpec.Trunk) - if trunkConfigured { - trunkSupported, err := s.isTrunkExtSupported() - if err != nil { - return nil, err - } - if !trunkSupported { - return nil, fmt.Errorf("there is no trunk support. please ensure that the trunk extension is enabled in your OpenStack deployment") - } - } - instanceSpec.Networks = nets + instanceSpec.Networks = openStackMachine.Spec.Networks + instanceSpec.Ports = openStackMachine.Spec.Ports - return s.createInstance(openStackMachine, clusterName, &instanceSpec, retryInterval) + return s.createInstance(openStackMachine, openStackCluster, clusterName, &instanceSpec, retryInterval) } -// constructNetworks builds an array of networks from the network, subnet and ports items in the machine spec. +// constructNetworks builds an array of networks from the network, subnet and ports items in the instance spec. // If no networks or ports are in the spec, returns a single network item for a network connection to the default cluster network. -func (s *Service) constructNetworks(openStackCluster *infrav1.OpenStackCluster, openStackMachine *infrav1.OpenStackMachine) ([]infrav1.Network, error) { - var nets []infrav1.Network - if len(openStackMachine.Spec.Networks) > 0 { - var err error - nets, err = s.getServerNetworks(openStackMachine.Spec.Networks) - if err != nil { - return nil, err - } +func (s *Service) constructNetworks(openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec) ([]infrav1.Network, error) { + trunkRequired := false + + nets, err := s.getServerNetworks(instanceSpec.Networks) + if err != nil { + return nil, err } - for i, port := range openStackMachine.Spec.Ports { - pOpts := &openStackMachine.Spec.Ports[i] + + for i, port := range instanceSpec.Ports { + pOpts := &instanceSpec.Ports[i] // No Trunk field specified for the port, inherit openStackMachine.Spec.Trunk. 
if pOpts.Trunk == nil { - pOpts.Trunk = &openStackMachine.Spec.Trunk + pOpts.Trunk = &instanceSpec.Trunk + } + if *pOpts.Trunk { + trunkRequired = true } if port.Network != nil { netID := port.Network.ID @@ -184,14 +162,26 @@ func (s *Service) constructNetworks(openStackCluster *infrav1.OpenStackCluster, ID: openStackCluster.Status.Network.Subnet.ID, }, PortOpts: &infrav1.PortOpts{ - Trunk: &openStackMachine.Spec.Trunk, + Trunk: &instanceSpec.Trunk, }, }} + trunkRequired = instanceSpec.Trunk + } + + if trunkRequired { + trunkSupported, err := s.isTrunkExtSupported() + if err != nil { + return nil, err + } + if !trunkSupported { + return nil, fmt.Errorf("there is no trunk support. please ensure that the trunk extension is enabled in your OpenStack deployment") + } } + return nets, nil } -func (s *Service) createInstance(eventObject runtime.Object, clusterName string, instanceSpec *InstanceSpec, retryInterval time.Duration) (*InstanceStatus, error) { +func (s *Service) createInstance(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, clusterName string, instanceSpec *InstanceSpec, retryInterval time.Duration) (*InstanceStatus, error) { var server *ServerExt accessIPv4 := "" portList := []servers.Network{} @@ -221,7 +211,17 @@ func (s *Service) createInstance(eventObject runtime.Object, clusterName string, } }() - for i, network := range instanceSpec.Networks { + nets, err := s.constructNetworks(openStackCluster, instanceSpec) + if err != nil { + return nil, err + } + + securityGroups, err := s.networkingService.GetSecurityGroups(instanceSpec.SecurityGroups) + if err != nil { + return nil, fmt.Errorf("error getting security groups: %v", err) + } + + for i, network := range nets { if network.ID == "" { return nil, fmt.Errorf("no network was found or provided. 
Please check your machine configuration and try again") } @@ -230,7 +230,7 @@ func (s *Service) createInstance(eventObject runtime.Object, clusterName string, iTags = instanceSpec.Tags } portName := getPortName(instanceSpec.Name, network.PortOpts, i) - port, err := s.networkingService.GetOrCreatePort(eventObject, clusterName, portName, network, &instanceSpec.SecurityGroups, iTags) + port, err := s.networkingService.GetOrCreatePort(eventObject, clusterName, portName, network, &securityGroups, iTags) if err != nil { return nil, err } @@ -253,7 +253,7 @@ func (s *Service) createInstance(eventObject runtime.Object, clusterName string, AvailabilityZone: instanceSpec.FailureDomain, Networks: portList, UserData: []byte(instanceSpec.UserData), - SecurityGroups: instanceSpec.SecurityGroups, + SecurityGroups: securityGroups, Tags: instanceSpec.Tags, Metadata: instanceSpec.Metadata, ConfigDrive: &instanceSpec.ConfigDrive, @@ -791,19 +791,3 @@ func (s *Service) isTrunkExtSupported() (trunknSupported bool, err error) { } return true, nil } - -// isTrunkConfigured verifies trunk configuration at instance and port levels, useful for avoiding multple api calls to verify trunk support. 
-func (s *Service) isTrunkConfigured(nets []infrav1.Network, instanceLevelTrunk bool) bool { - if instanceLevelTrunk { - return true - } - for _, net := range nets { - port := net.PortOpts - if port != nil { - if port.Trunk != nil && *port.Trunk { - return true - } - } - } - return false -} diff --git a/pkg/cloud/services/compute/instance_types.go b/pkg/cloud/services/compute/instance_types.go index 495fca1f1c..7eefeb8384 100644 --- a/pkg/cloud/services/compute/instance_types.go +++ b/pkg/cloud/services/compute/instance_types.go @@ -46,8 +46,9 @@ type InstanceSpec struct { ServerGroupID string Trunk bool Tags []string - SecurityGroups []string - Networks []infrav1.Network + SecurityGroups []infrav1.SecurityGroupParam + Networks []infrav1.NetworkParam + Ports []infrav1.PortOpts } // InstanceIdentifier describes an instance which has not necessarily been fetched. From 6f6112198450992afe36ca8786817d7632a01a1a Mon Sep 17 00:00:00 2001 From: Matthew Booth Date: Tue, 15 Feb 2022 10:11:55 +0000 Subject: [PATCH 3/5] Refactor CreateBastion and CreateInstance for idempotence Allow CreateBastion and CreateInstance to be called as reconcilers. Specifically they become idempotent and can additionally return a 'not yet complete' status by returning a nil InstanceStatus. This new status is handled in the controller by rescheduling reconciliation. To reflect this change we rename the methods to ReconcileBastion and ReconcileInstance. In making this change we also make some opportunistic changes to reconciliation of the Bastion: * Bastion reconciliation errors no longer put the cluster in a failed state * We mark the cluster Ready before creating the Bastion * We handle the case where the Bastion floating IP is already associated Apart from the Bastion changes, this is almost entirely code motion as can be seen in the unit tests. While we permit the Reconcile methods to return an incomplete state, nothing yet returns it. 
The only change in the unit tests is due to moving the GetInstanceStatusByName check which is common to the bastion and machines into reconcileInstance. --- controllers/openstackcluster_controller.go | 112 ++++++++++++-------- controllers/openstackmachine_controller.go | 32 ++---- pkg/cloud/services/compute/bastion.go | 4 +- pkg/cloud/services/compute/instance.go | 19 +++- pkg/cloud/services/compute/instance_test.go | 7 +- 5 files changed, 93 insertions(+), 81 deletions(-) diff --git a/controllers/openstackcluster_controller.go b/controllers/openstackcluster_controller.go index ac23a9aa35..723ce28439 100644 --- a/controllers/openstackcluster_controller.go +++ b/controllers/openstackcluster_controller.go @@ -130,9 +130,13 @@ func (r *OpenStackClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req func reconcileDelete(ctx context.Context, scope *scope.Scope, patchHelper *patch.Helper, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) { scope.Logger.Info("Reconciling Cluster delete") - if err := deleteBastion(scope, cluster, openStackCluster); err != nil { + deleted, err := deleteBastion(scope, cluster, openStackCluster) + if err != nil { return reconcile.Result{}, err } + if !deleted { + return reconcile.Result{RequeueAfter: defaultOpenStackBackOff}, nil + } networkingService, err := networking.NewService(scope) if err != nil { @@ -189,34 +193,33 @@ func contains(arr []string, target string) bool { return false } -func deleteBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error { +func deleteBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (bool, error) { computeService, err := compute.NewService(scope) if err != nil { - return err + return false, err } networkingService, err := networking.NewService(scope) if err != nil { - return err + return false, err } instanceName := fmt.Sprintf("%s-bastion", cluster.Name) 
instanceStatus, err := computeService.GetInstanceStatusByName(openStackCluster, instanceName) if err != nil { - return err + return false, err } if instanceStatus != nil { instanceNS, err := instanceStatus.NetworkStatus() if err != nil { - return err + return false, err } addresses := instanceNS.Addresses() for _, address := range addresses { if address.Type == corev1.NodeExternalIP { if err = networkingService.DeleteFloatingIP(openStackCluster, address.Address); err != nil { - handleUpdateOSCError(openStackCluster, errors.Errorf("failed to delete floating IP: %v", err)) - return errors.Errorf("failed to delete floating IP: %v", err) + return false, errors.Errorf("failed to delete floating IP: %v", err) } } } @@ -224,19 +227,17 @@ func deleteBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackClus machineSpec := &openStackCluster.Spec.Bastion.Instance if err = computeService.DeleteInstance(openStackCluster, machineSpec, instanceName, instanceStatus); err != nil { - handleUpdateOSCError(openStackCluster, errors.Errorf("failed to delete bastion: %v", err)) - return errors.Errorf("failed to delete bastion: %v", err) + return false, errors.Errorf("failed to delete bastion: %v", err) } openStackCluster.Status.Bastion = nil if err = networkingService.DeleteBastionSecurityGroup(openStackCluster, fmt.Sprintf("%s-%s", cluster.Namespace, cluster.Name)); err != nil { - handleUpdateOSCError(openStackCluster, errors.Errorf("failed to delete bastion security group: %v", err)) - return errors.Errorf("failed to delete bastion security group: %v", err) + return false, errors.Errorf("failed to delete bastion security group: %v", err) } openStackCluster.Status.BastionSecurityGroup = nil - return nil + return true, nil } func reconcileNormal(ctx context.Context, scope *scope.Scope, patchHelper *patch.Helper, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) { @@ -259,10 +260,6 @@ func reconcileNormal(ctx context.Context, scope 
*scope.Scope, patchHelper *patch return reconcile.Result{}, err } - if err = reconcileBastion(scope, cluster, openStackCluster); err != nil { - return reconcile.Result{}, err - } - availabilityZones, err := computeService.GetAvailabilityZones() if err != nil { return ctrl.Result{}, err @@ -288,14 +285,30 @@ func reconcileNormal(ctx context.Context, scope *scope.Scope, patchHelper *patch } } - openStackCluster.Status.Ready = true openStackCluster.Status.FailureMessage = nil openStackCluster.Status.FailureReason = nil + if !openStackCluster.Status.Ready { + openStackCluster.Status.Ready = true + + // If we're setting Ready, return early to update status and + // allow dependent operations to proceed. Ensure we call + // reconcile again to create the bastion. + return reconcile.Result{Requeue: true}, nil + } + + reconciled, err := reconcileBastion(scope, cluster, openStackCluster) + if err != nil { + return reconcile.Result{}, err + } + if !reconciled { + return reconcile.Result{RequeueAfter: defaultOpenStackBackOff}, nil + } + scope.Logger.Info("Reconciled Cluster create successfully") return reconcile.Result{}, nil } -func reconcileBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error { +func reconcileBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (bool, error) { scope.Logger.Info("Reconciling Bastion") if openStackCluster.Spec.Bastion == nil || !openStackCluster.Spec.Bastion.Enabled { @@ -304,56 +317,61 @@ func reconcileBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackC computeService, err := compute.NewService(scope) if err != nil { - return err + return false, err } - instanceStatus, err := computeService.GetInstanceStatusByName(openStackCluster, fmt.Sprintf("%s-bastion", cluster.Name)) + instanceStatus, err := computeService.ReconcileBastion(openStackCluster, cluster.Name) if err != nil { - return err + return false, errors.Errorf("failed to 
reconcile bastion: %v", err) } - if instanceStatus != nil { - bastion, err := instanceStatus.APIInstance(openStackCluster) - if err != nil { - return err - } - openStackCluster.Status.Bastion = bastion - return nil + if instanceStatus == nil { + // Bastion is not ready yet + return false, nil + } + + // We overwrite the bastion status with the status of the instance from + // OpenStack. However, we don't want to lose any previously created + // floating IP which hasn't been associated yet, so we keep a reference + // to it here. + var floatingIP string + if openStackCluster.Status.Bastion != nil { + floatingIP = openStackCluster.Status.Bastion.FloatingIP } - instanceStatus, err = computeService.CreateBastion(openStackCluster, cluster.Name) + bastion, err := instanceStatus.APIInstance(openStackCluster) if err != nil { - return errors.Errorf("failed to reconcile bastion: %v", err) + return false, err + } + openStackCluster.Status.Bastion = bastion + + // Bastion already has a floating IP + if bastion.FloatingIP != "" { + return true, nil } networkingService, err := networking.NewService(scope) if err != nil { - return err + return false, err } + clusterName := fmt.Sprintf("%s-%s", cluster.Namespace, cluster.Name) - fp, err := networkingService.GetOrCreateFloatingIP(openStackCluster, openStackCluster, clusterName, openStackCluster.Spec.Bastion.Instance.FloatingIP) + fp, err := networkingService.GetOrCreateFloatingIP(openStackCluster, openStackCluster, clusterName, floatingIP) if err != nil { - handleUpdateOSCError(openStackCluster, errors.Errorf("failed to get or create floating IP for bastion: %v", err)) - return errors.Errorf("failed to get or create floating IP for bastion: %v", err) + return false, errors.Errorf("failed to get or create floating IP for bastion: %v", err) } + bastion.FloatingIP = fp.FloatingIP + port, err := computeService.GetManagementPort(openStackCluster, instanceStatus) if err != nil { - err = errors.Errorf("getting management port for bastion: 
%v", err) - handleUpdateOSCError(openStackCluster, err) - return err + return false, errors.Errorf("getting management port for bastion: %v", err) } + err = networkingService.AssociateFloatingIP(openStackCluster, fp, port.ID) if err != nil { - handleUpdateOSCError(openStackCluster, errors.Errorf("failed to associate floating IP with bastion: %v", err)) - return errors.Errorf("failed to associate floating IP with bastion: %v", err) + return false, errors.Errorf("failed to associate floating IP with bastion: %v", err) } - bastion, err := instanceStatus.APIInstance(openStackCluster) - if err != nil { - return err - } - bastion.FloatingIP = fp.FloatingIP - openStackCluster.Status.Bastion = bastion - return nil + return true, nil } func reconcileNetworkComponents(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error { diff --git a/controllers/openstackmachine_controller.go b/controllers/openstackmachine_controller.go index 37e3d609ad..5c44deacc5 100644 --- a/controllers/openstackmachine_controller.go +++ b/controllers/openstackmachine_controller.go @@ -63,8 +63,8 @@ type OpenStackMachineReconciler struct { } const ( - waitForClusterInfrastructureReadyDuration = 15 * time.Second - waitForInstanceBecomeActiveToReconcile = 60 * time.Second + defaultOpenStackBackOff = 15 * time.Second + waitForInstanceBecomeActiveToReconcile = 60 * time.Second ) // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=openstackmachines,verbs=get;list;watch;create;update;patch;delete @@ -277,7 +277,7 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope if !cluster.Status.InfrastructureReady { scope.Logger.Info("Cluster infrastructure is not ready yet, requeuing machine") - return ctrl.Result{RequeueAfter: waitForClusterInfrastructureReadyDuration}, nil + return ctrl.Result{RequeueAfter: defaultOpenStackBackOff}, nil } // Make sure bootstrap data is available and populated. 
@@ -303,16 +303,13 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope return ctrl.Result{}, err } - instanceStatus, err := r.getOrCreate(scope.Logger, cluster, openStackCluster, machine, openStackMachine, computeService, userData) + instanceStatus, err := computeService.ReconcileMachine(openStackCluster, machine, openStackMachine, cluster.Name, userData) if err != nil { handleUpdateMachineError(scope.Logger, openStackMachine, errors.Errorf("OpenStack instance cannot be created: %v", err)) - return ctrl.Result{}, err + return ctrl.Result{}, errors.Errorf("error creating Openstack instance: %v", err) } - - // Set an error message if we couldn't find the instance. if instanceStatus == nil { - handleUpdateMachineError(scope.Logger, openStackMachine, errors.New("OpenStack instance cannot be found")) - return ctrl.Result{}, nil + return ctrl.Result{RequeueAfter: defaultOpenStackBackOff}, nil } // TODO(sbueringer) From CAPA: TODO(ncdc): move this validation logic into a validating webhook (for us: create validation logic in webhook) @@ -384,23 +381,6 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope return ctrl.Result{}, nil } -func (r *OpenStackMachineReconciler) getOrCreate(logger logr.Logger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, computeService *compute.Service, userData string) (*compute.InstanceStatus, error) { - instanceStatus, err := computeService.GetInstanceStatusByName(openStackMachine, openStackMachine.Name) - if err != nil { - return nil, err - } - - if instanceStatus == nil { - logger.Info("Machine not exist, Creating Machine", "Machine", openStackMachine.Name) - instanceStatus, err = computeService.CreateInstance(openStackCluster, machine, openStackMachine, cluster.Name, userData) - if err != nil { - return nil, errors.Errorf("error creating Openstack instance: %v", err) - } - } - - return 
instanceStatus, nil -} - func handleUpdateMachineError(logger logr.Logger, openstackMachine *infrav1.OpenStackMachine, message error) { err := capierrors.UpdateMachineError openstackMachine.Status.FailureReason = &err diff --git a/pkg/cloud/services/compute/bastion.go b/pkg/cloud/services/compute/bastion.go index 689187e93e..2fcd5e51b4 100644 --- a/pkg/cloud/services/compute/bastion.go +++ b/pkg/cloud/services/compute/bastion.go @@ -22,7 +22,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" ) -func (s *Service) CreateBastion(openStackCluster *infrav1.OpenStackCluster, clusterName string) (*InstanceStatus, error) { +func (s *Service) ReconcileBastion(openStackCluster *infrav1.OpenStackCluster, clusterName string) (*InstanceStatus, error) { name := fmt.Sprintf("%s-bastion", clusterName) instanceSpec := &InstanceSpec{ Name: name, @@ -44,5 +44,5 @@ func (s *Service) CreateBastion(openStackCluster *infrav1.OpenStackCluster, clus instanceSpec.Networks = openStackCluster.Spec.Bastion.Instance.Networks instanceSpec.Ports = openStackCluster.Spec.Bastion.Instance.Ports - return s.createInstance(openStackCluster, openStackCluster, clusterName, instanceSpec, retryIntervalInstanceStatus) + return s.reconcileInstance(openStackCluster, openStackCluster, clusterName, instanceSpec, retryIntervalInstanceStatus) } diff --git a/pkg/cloud/services/compute/instance.go b/pkg/cloud/services/compute/instance.go index 403388de37..365b35b084 100644 --- a/pkg/cloud/services/compute/instance.go +++ b/pkg/cloud/services/compute/instance.go @@ -44,11 +44,11 @@ const ( timeoutInstanceDelete = 5 * time.Minute ) -func (s *Service) CreateInstance(openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, clusterName string, userData string) (instance *InstanceStatus, err error) { - return s.createInstanceImpl(openStackCluster, machine, openStackMachine, clusterName, userData, retryIntervalInstanceStatus) +func (s 
*Service) ReconcileMachine(openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, clusterName string, userData string) (instance *InstanceStatus, err error) { + return s.reconcileMachineImpl(openStackCluster, machine, openStackMachine, clusterName, userData, retryIntervalInstanceStatus) } -func (s *Service) createInstanceImpl(openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, clusterName string, userData string, retryInterval time.Duration) (instance *InstanceStatus, err error) { +func (s *Service) reconcileMachineImpl(openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, clusterName string, userData string, retryInterval time.Duration) (instance *InstanceStatus, err error) { if openStackMachine == nil { return nil, fmt.Errorf("create Options need be specified to create instace") } @@ -103,7 +103,7 @@ func (s *Service) createInstanceImpl(openStackCluster *infrav1.OpenStackCluster, instanceSpec.Networks = openStackMachine.Spec.Networks instanceSpec.Ports = openStackMachine.Spec.Ports - return s.createInstance(openStackMachine, openStackCluster, clusterName, &instanceSpec, retryInterval) + return s.reconcileInstance(openStackMachine, openStackCluster, clusterName, &instanceSpec, retryInterval) } // constructNetworks builds an array of networks from the network, subnet and ports items in the instance spec. @@ -181,7 +181,16 @@ func (s *Service) constructNetworks(openStackCluster *infrav1.OpenStackCluster, return nets, nil } -func (s *Service) createInstance(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, clusterName string, instanceSpec *InstanceSpec, retryInterval time.Duration) (*InstanceStatus, error) { +// reconcileInstance is a common function for reconciling an OpenStack instance and its dependencies used by both Machines and the Bastion. 
+func (s *Service) reconcileInstance(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, clusterName string, instanceSpec *InstanceSpec, retryInterval time.Duration) (*InstanceStatus, error) { + instanceStatus, err := s.GetInstanceStatusByName(openStackCluster, instanceSpec.Name) + if err != nil { + return nil, fmt.Errorf("failed to get instance status for %s: %w", instanceSpec.Name, err) + } + if instanceStatus != nil { + return instanceStatus, nil + } + var server *ServerExt accessIPv4 := "" portList := []servers.Network{} diff --git a/pkg/cloud/services/compute/instance_test.go b/pkg/cloud/services/compute/instance_test.go index 5ad81e23da..32a6dbde32 100644 --- a/pkg/cloud/services/compute/instance_test.go +++ b/pkg/cloud/services/compute/instance_test.go @@ -685,6 +685,10 @@ func TestService_CreateInstance(t *testing.T) { computeRecorder.GetFlavorIDFromName(flavorName).Return(flavorUUID, nil) } + expectGetDefaultServerByName := func(computeRecorder *MockClientMockRecorder) { + computeRecorder.ListServers(servers.ListOpts{Name: fmt.Sprintf("^%s$", openStackMachineName)}).Return([]ServerExt{}, nil) + } + // Expected calls and custom match function for creating a server expectCreateServer := func(computeRecorder *MockClientMockRecorder, expectedCreateOpts map[string]interface{}, wantError bool) { // This nonsense is because ConfigDrive is a bool pointer, so we @@ -1111,6 +1115,7 @@ func TestService_CreateInstance(t *testing.T) { computeRecorder := mockComputeClient.EXPECT() networkRecorder := mockNetworkClient.EXPECT() + expectGetDefaultServerByName(computeRecorder) tt.expect(computeRecorder, networkRecorder) s := Service{ @@ -1124,7 +1129,7 @@ func TestService_CreateInstance(t *testing.T) { ), } // Call CreateInstance with a reduced retry interval to speed up the test - _, err := s.createInstanceImpl(tt.getOpenStackCluster(), tt.getMachine(), tt.getOpenStackMachine(), "cluster-name", "user-data", time.Second) + _, err := 
s.reconcileMachineImpl(tt.getOpenStackCluster(), tt.getMachine(), tt.getOpenStackMachine(), "cluster-name", "user-data", time.Second) if (err != nil) != tt.wantErr { t.Errorf("Service.CreateInstance() error = %v, wantErr %v", err, tt.wantErr) return From 5a82b9292fab580bbdcb901adb12d22cedff922a Mon Sep 17 00:00:00 2001 From: Matthew Booth Date: Thu, 24 Feb 2022 16:15:34 +0000 Subject: [PATCH 4/5] Make InstanceSpec the canonical struct for instance creation Refactor instance creation in machine controller and cluster controller (for the bastion) to call compute.ReconcileInstance() with an InstanceSpec. --- controllers/openstackcluster_controller.go | 28 ++- controllers/openstackmachine_controller.go | 83 ++++++- .../openstackmachine_controller_test.go | 226 ++++++++++++++++++ pkg/cloud/services/compute/bastion.go | 48 ---- pkg/cloud/services/compute/instance.go | 92 +------ pkg/cloud/services/compute/instance_test.go | 212 +++++----------- 6 files changed, 402 insertions(+), 287 deletions(-) create mode 100644 controllers/openstackmachine_controller_test.go delete mode 100644 pkg/cloud/services/compute/bastion.go diff --git a/controllers/openstackcluster_controller.go b/controllers/openstackcluster_controller.go index 723ce28439..0496ba5603 100644 --- a/controllers/openstackcluster_controller.go +++ b/controllers/openstackcluster_controller.go @@ -320,7 +320,8 @@ func reconcileBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackC return false, err } - instanceStatus, err := computeService.ReconcileBastion(openStackCluster, cluster.Name) + instanceSpec := bastionToInstanceSpec(openStackCluster, cluster.Name) + instanceStatus, err := computeService.ReconcileInstance(openStackCluster, openStackCluster, instanceSpec, cluster.Name) if err != nil { return false, errors.Errorf("failed to reconcile bastion: %v", err) } @@ -374,6 +375,31 @@ func reconcileBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackC return true, nil } +func 
bastionToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, clusterName string) *compute.InstanceSpec { + name := fmt.Sprintf("%s-bastion", clusterName) + instanceSpec := &compute.InstanceSpec{ + Name: name, + Flavor: openStackCluster.Spec.Bastion.Instance.Flavor, + SSHKeyName: openStackCluster.Spec.Bastion.Instance.SSHKeyName, + Image: openStackCluster.Spec.Bastion.Instance.Image, + ImageUUID: openStackCluster.Spec.Bastion.Instance.ImageUUID, + FailureDomain: openStackCluster.Spec.Bastion.AvailabilityZone, + RootVolume: openStackCluster.Spec.Bastion.Instance.RootVolume, + } + + instanceSpec.SecurityGroups = openStackCluster.Spec.Bastion.Instance.SecurityGroups + if openStackCluster.Spec.ManagedSecurityGroups { + instanceSpec.SecurityGroups = append(instanceSpec.SecurityGroups, infrav1.SecurityGroupParam{ + UUID: openStackCluster.Status.BastionSecurityGroup.ID, + }) + } + + instanceSpec.Networks = openStackCluster.Spec.Bastion.Instance.Networks + instanceSpec.Ports = openStackCluster.Spec.Bastion.Instance.Ports + + return instanceSpec +} + func reconcileNetworkComponents(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error { clusterName := fmt.Sprintf("%s-%s", cluster.Namespace, cluster.Name) diff --git a/controllers/openstackmachine_controller.go b/controllers/openstackmachine_controller.go index 5c44deacc5..8e83c9e0c8 100644 --- a/controllers/openstackmachine_controller.go +++ b/controllers/openstackmachine_controller.go @@ -303,10 +303,18 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope return ctrl.Result{}, err } - instanceStatus, err := computeService.ReconcileMachine(openStackCluster, machine, openStackMachine, cluster.Name, userData) + instanceSpec, err := machineToInstanceSpec(openStackCluster, machine, openStackMachine, userData) if err != nil { - handleUpdateMachineError(scope.Logger, openStackMachine, errors.Errorf("OpenStack instance cannot be created: %v", err)) - 
 return ctrl.Result{}, errors.Errorf("error creating Openstack instance: %v", err) + err = errors.Errorf("machine spec is invalid: %v", err) + handleUpdateMachineError(scope.Logger, openStackMachine, err) + return ctrl.Result{}, err + } + + instanceStatus, err := computeService.ReconcileInstance(openStackMachine, openStackCluster, instanceSpec, cluster.Name) + if err != nil { + err = errors.Errorf("openstack instance cannot be created: %v", err) + handleUpdateMachineError(scope.Logger, openStackMachine, err) + return ctrl.Result{}, err } if instanceStatus == nil { return ctrl.Result{RequeueAfter: defaultOpenStackBackOff}, nil @@ -381,6 +389,75 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope return ctrl.Result{}, nil } +func machineToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, userData string) (*compute.InstanceSpec, error) { + if openStackMachine == nil { + return nil, fmt.Errorf("create options need to be specified to create instance") + } + + if machine.Spec.FailureDomain == nil { + return nil, fmt.Errorf("failure domain not set") + } + + instanceSpec := compute.InstanceSpec{ + Name: openStackMachine.Name, + Image: openStackMachine.Spec.Image, + ImageUUID: openStackMachine.Spec.ImageUUID, + Flavor: openStackMachine.Spec.Flavor, + SSHKeyName: openStackMachine.Spec.SSHKeyName, + UserData: userData, + Metadata: openStackMachine.Spec.ServerMetadata, + ConfigDrive: openStackMachine.Spec.ConfigDrive != nil && *openStackMachine.Spec.ConfigDrive, + FailureDomain: *machine.Spec.FailureDomain, + RootVolume: openStackMachine.Spec.RootVolume, + Subnet: openStackMachine.Spec.Subnet, + ServerGroupID: openStackMachine.Spec.ServerGroupID, + Trunk: openStackMachine.Spec.Trunk, + } + + machineTags := []string{} + + // Append machine specific tags + machineTags = append(machineTags, openStackMachine.Spec.Tags...)
+ + // Append cluster scope tags + machineTags = append(machineTags, openStackCluster.Spec.Tags...) + + // tags need to be unique or the "apply tags" call will fail. + deduplicate := func(tags []string) []string { + seen := make(map[string]struct{}, len(machineTags)) + unique := make([]string, 0, len(machineTags)) + for _, tag := range tags { + if _, ok := seen[tag]; !ok { + seen[tag] = struct{}{} + unique = append(unique, tag) + } + } + return unique + } + machineTags = deduplicate(machineTags) + + instanceSpec.Tags = machineTags + + instanceSpec.SecurityGroups = openStackMachine.Spec.SecurityGroups + if openStackCluster.Spec.ManagedSecurityGroups { + var managedSecurityGroup string + if util.IsControlPlaneMachine(machine) { + managedSecurityGroup = openStackCluster.Status.ControlPlaneSecurityGroup.ID + } else { + managedSecurityGroup = openStackCluster.Status.WorkerSecurityGroup.ID + } + + instanceSpec.SecurityGroups = append(instanceSpec.SecurityGroups, infrav1.SecurityGroupParam{ + UUID: managedSecurityGroup, + }) + } + + instanceSpec.Networks = openStackMachine.Spec.Networks + instanceSpec.Ports = openStackMachine.Spec.Ports + + return &instanceSpec, nil +} + func handleUpdateMachineError(logger logr.Logger, openstackMachine *infrav1.OpenStackMachine, message error) { err := capierrors.UpdateMachineError openstackMachine.Status.FailureReason = &err diff --git a/controllers/openstackmachine_controller_test.go b/controllers/openstackmachine_controller_test.go new file mode 100644 index 0000000000..d795378e8a --- /dev/null +++ b/controllers/openstackmachine_controller_test.go @@ -0,0 +1,226 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "testing" + + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/compute" +) + +const ( + networkUUID = "d412171b-9fd7-41c1-95a6-c24e5953974d" + subnetUUID = "d2d8d98d-b234-477e-a547-868b7cb5d6a5" + extraSecurityGroupUUID = "514bb2d8-3390-4a3b-86a7-7864ba57b329" + controlPlaneSecurityGroupUUID = "c9817a91-4821-42db-8367-2301002ab659" + workerSecurityGroupUUID = "9c6c0d28-03c9-436c-815d-58440ac2c1c8" + serverGroupUUID = "7b940d62-68ef-4e42-a76a-1a62e290509c" + + openStackMachineName = "test-openstack-machine" + namespace = "test-namespace" + imageName = "test-image" + flavorName = "test-flavor" + sshKeyName = "test-ssh-key" + failureDomain = "test-failure-domain" +) + +func getDefaultOpenStackCluster() *infrav1.OpenStackCluster { + return &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{}, + Status: infrav1.OpenStackClusterStatus{ + Network: &infrav1.Network{ + ID: networkUUID, + Subnet: &infrav1.Subnet{ + ID: subnetUUID, + }, + }, + ControlPlaneSecurityGroup: &infrav1.SecurityGroup{ID: controlPlaneSecurityGroupUUID}, + WorkerSecurityGroup: &infrav1.SecurityGroup{ID: workerSecurityGroupUUID}, + }, + } +} + +func getDefaultMachine() *clusterv1.Machine { + return &clusterv1.Machine{ + Spec: clusterv1.MachineSpec{ + FailureDomain: pointer.StringPtr(failureDomain), + }, + } +} + +func 
getDefaultOpenStackMachine() *infrav1.OpenStackMachine { + return &infrav1.OpenStackMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: openStackMachineName, + Namespace: namespace, + }, + Spec: infrav1.OpenStackMachineSpec{ + // ProviderID is set by the controller + // InstanceID is set by the controller + // FloatingIP is only used by the cluster controller for the Bastion + // TODO: Test Networks, Ports, Subnet, and Trunk separately + CloudName: "test-cloud", + Flavor: flavorName, + Image: imageName, + SSHKeyName: sshKeyName, + Tags: []string{"test-tag"}, + ServerMetadata: map[string]string{ + "test-metadata": "test-value", + }, + ConfigDrive: pointer.BoolPtr(true), + ServerGroupID: serverGroupUUID, + }, + } +} + +func getDefaultInstanceSpec() *compute.InstanceSpec { + return &compute.InstanceSpec{ + Name: openStackMachineName, + Image: imageName, + Flavor: flavorName, + SSHKeyName: sshKeyName, + UserData: "user-data", + Metadata: map[string]string{ + "test-metadata": "test-value", + }, + ConfigDrive: *pointer.BoolPtr(true), + FailureDomain: *pointer.StringPtr(failureDomain), + ServerGroupID: serverGroupUUID, + Tags: []string{"test-tag"}, + } +} + +func Test_machineToInstanceSpec(t *testing.T) { + RegisterTestingT(t) + + tests := []struct { + name string + openStackCluster func() *infrav1.OpenStackCluster + machine func() *clusterv1.Machine + openStackMachine func() *infrav1.OpenStackMachine + wantInstanceSpec func() *compute.InstanceSpec + wantErr bool + }{ + { + name: "Defaults", + openStackCluster: getDefaultOpenStackCluster, + machine: getDefaultMachine, + openStackMachine: getDefaultOpenStackMachine, + wantInstanceSpec: getDefaultInstanceSpec, + wantErr: false, + }, + { + name: "Control plane security group", + openStackCluster: func() *infrav1.OpenStackCluster { + c := getDefaultOpenStackCluster() + c.Spec.ManagedSecurityGroups = true + return c + }, + machine: func() *clusterv1.Machine { + m := getDefaultMachine() + m.Labels = map[string]string{ + 
clusterv1.MachineControlPlaneLabelName: "true", + } + return m + }, + openStackMachine: getDefaultOpenStackMachine, + wantInstanceSpec: func() *compute.InstanceSpec { + i := getDefaultInstanceSpec() + i.SecurityGroups = []infrav1.SecurityGroupParam{{UUID: controlPlaneSecurityGroupUUID}} + return i + }, + wantErr: false, + }, + { + name: "Worker security group", + openStackCluster: func() *infrav1.OpenStackCluster { + c := getDefaultOpenStackCluster() + c.Spec.ManagedSecurityGroups = true + return c + }, + machine: getDefaultMachine, + openStackMachine: getDefaultOpenStackMachine, + wantInstanceSpec: func() *compute.InstanceSpec { + i := getDefaultInstanceSpec() + i.SecurityGroups = []infrav1.SecurityGroupParam{{UUID: workerSecurityGroupUUID}} + return i + }, + wantErr: false, + }, + { + name: "Extra security group", + openStackCluster: func() *infrav1.OpenStackCluster { + c := getDefaultOpenStackCluster() + c.Spec.ManagedSecurityGroups = true + return c + }, + machine: getDefaultMachine, + openStackMachine: func() *infrav1.OpenStackMachine { + m := getDefaultOpenStackMachine() + m.Spec.SecurityGroups = []infrav1.SecurityGroupParam{{UUID: extraSecurityGroupUUID}} + return m + }, + wantInstanceSpec: func() *compute.InstanceSpec { + i := getDefaultInstanceSpec() + i.SecurityGroups = []infrav1.SecurityGroupParam{ + {UUID: extraSecurityGroupUUID}, + {UUID: workerSecurityGroupUUID}, + } + return i + }, + wantErr: false, + }, + { + name: "Tags", + openStackCluster: func() *infrav1.OpenStackCluster { + c := getDefaultOpenStackCluster() + c.Spec.Tags = []string{"cluster-tag", "duplicate-tag"} + return c + }, + machine: getDefaultMachine, + openStackMachine: func() *infrav1.OpenStackMachine { + m := getDefaultOpenStackMachine() + m.Spec.Tags = []string{"machine-tag", "duplicate-tag"} + return m + }, + wantInstanceSpec: func() *compute.InstanceSpec { + i := getDefaultInstanceSpec() + i.Tags = []string{"machine-tag", "duplicate-tag", "cluster-tag"} + return i + }, + wantErr: 
false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := machineToInstanceSpec(tt.openStackCluster(), tt.machine(), tt.openStackMachine(), "user-data") + if tt.wantErr { + Expect(err).To(HaveOccurred()) + } else { + Expect(err).NotTo(HaveOccurred()) + } + + Expect(got).To(Equal(tt.wantInstanceSpec())) + }) + } +} diff --git a/pkg/cloud/services/compute/bastion.go b/pkg/cloud/services/compute/bastion.go deleted file mode 100644 index 2fcd5e51b4..0000000000 --- a/pkg/cloud/services/compute/bastion.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package compute - -import ( - "fmt" - - infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" -) - -func (s *Service) ReconcileBastion(openStackCluster *infrav1.OpenStackCluster, clusterName string) (*InstanceStatus, error) { - name := fmt.Sprintf("%s-bastion", clusterName) - instanceSpec := &InstanceSpec{ - Name: name, - Flavor: openStackCluster.Spec.Bastion.Instance.Flavor, - SSHKeyName: openStackCluster.Spec.Bastion.Instance.SSHKeyName, - Image: openStackCluster.Spec.Bastion.Instance.Image, - ImageUUID: openStackCluster.Spec.Bastion.Instance.ImageUUID, - FailureDomain: openStackCluster.Spec.Bastion.AvailabilityZone, - RootVolume: openStackCluster.Spec.Bastion.Instance.RootVolume, - } - - instanceSpec.SecurityGroups = openStackCluster.Spec.Bastion.Instance.SecurityGroups - if openStackCluster.Spec.ManagedSecurityGroups { - instanceSpec.SecurityGroups = append(instanceSpec.SecurityGroups, infrav1.SecurityGroupParam{ - UUID: openStackCluster.Status.BastionSecurityGroup.ID, - }) - } - - instanceSpec.Networks = openStackCluster.Spec.Bastion.Instance.Networks - instanceSpec.Ports = openStackCluster.Spec.Bastion.Instance.Ports - - return s.reconcileInstance(openStackCluster, openStackCluster, clusterName, instanceSpec, retryIntervalInstanceStatus) -} diff --git a/pkg/cloud/services/compute/instance.go b/pkg/cloud/services/compute/instance.go index 365b35b084..f324762305 100644 --- a/pkg/cloud/services/compute/instance.go +++ b/pkg/cloud/services/compute/instance.go @@ -30,7 +30,6 @@ import ( "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images" "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" "k8s.io/apimachinery/pkg/runtime" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" @@ -44,68 +43,6 @@ const ( timeoutInstanceDelete = 5 * time.Minute ) -func (s *Service) ReconcileMachine(openStackCluster *infrav1.OpenStackCluster, 
machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, clusterName string, userData string) (instance *InstanceStatus, err error) { - return s.reconcileMachineImpl(openStackCluster, machine, openStackMachine, clusterName, userData, retryIntervalInstanceStatus) -} - -func (s *Service) reconcileMachineImpl(openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, clusterName string, userData string, retryInterval time.Duration) (instance *InstanceStatus, err error) { - if openStackMachine == nil { - return nil, fmt.Errorf("create Options need be specified to create instace") - } - - if machine.Spec.FailureDomain == nil { - return nil, fmt.Errorf("failure domain not set") - } - - instanceSpec := InstanceSpec{ - Name: openStackMachine.Name, - Image: openStackMachine.Spec.Image, - ImageUUID: openStackMachine.Spec.ImageUUID, - Flavor: openStackMachine.Spec.Flavor, - SSHKeyName: openStackMachine.Spec.SSHKeyName, - UserData: userData, - Metadata: openStackMachine.Spec.ServerMetadata, - ConfigDrive: openStackMachine.Spec.ConfigDrive != nil && *openStackMachine.Spec.ConfigDrive, - FailureDomain: *machine.Spec.FailureDomain, - RootVolume: openStackMachine.Spec.RootVolume, - Subnet: openStackMachine.Spec.Subnet, - ServerGroupID: openStackMachine.Spec.ServerGroupID, - Trunk: openStackMachine.Spec.Trunk, - } - - machineTags := []string{} - - // Append machine specific tags - machineTags = append(machineTags, openStackMachine.Spec.Tags...) - - // Append cluster scope tags - machineTags = append(machineTags, openStackCluster.Spec.Tags...) - - // tags need to be unique or the "apply tags" call will fail. 
- machineTags = deduplicate(machineTags) - - instanceSpec.Tags = machineTags - - instanceSpec.SecurityGroups = openStackMachine.Spec.SecurityGroups - if openStackCluster.Spec.ManagedSecurityGroups { - var managedSecurityGroup string - if util.IsControlPlaneMachine(machine) { - managedSecurityGroup = openStackCluster.Status.ControlPlaneSecurityGroup.ID - } else { - managedSecurityGroup = openStackCluster.Status.WorkerSecurityGroup.ID - } - - instanceSpec.SecurityGroups = append(instanceSpec.SecurityGroups, infrav1.SecurityGroupParam{ - UUID: managedSecurityGroup, - }) - } - - instanceSpec.Networks = openStackMachine.Spec.Networks - instanceSpec.Ports = openStackMachine.Spec.Ports - - return s.reconcileInstance(openStackMachine, openStackCluster, clusterName, &instanceSpec, retryInterval) -} - // constructNetworks builds an array of networks from the network, subnet and ports items in the instance spec. // If no networks or ports are in the spec, returns a single network item for a network connection to the default cluster network. func (s *Service) constructNetworks(openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec) ([]infrav1.Network, error) { @@ -181,9 +118,13 @@ func (s *Service) constructNetworks(openStackCluster *infrav1.OpenStackCluster, return nets, nil } -// reconcileInstance is a common function for reconciling an OpenStack instance and its dependencies used by both Machines and the Bastion. -func (s *Service) reconcileInstance(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, clusterName string, instanceSpec *InstanceSpec, retryInterval time.Duration) (*InstanceStatus, error) { - instanceStatus, err := s.GetInstanceStatusByName(openStackCluster, instanceSpec.Name) +// ReconcileInstance is a common function for reconciling an OpenStack instance and its dependencies used by both Machines and the Bastion. 
+func (s *Service) ReconcileInstance(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string) (*InstanceStatus, error) { + return s.reconcileInstanceImpl(eventObject, openStackCluster, instanceSpec, clusterName, retryIntervalInstanceStatus) +} + +func (s *Service) reconcileInstanceImpl(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string, retryInterval time.Duration) (*InstanceStatus, error) { + instanceStatus, err := s.GetInstanceStatusByName(eventObject, instanceSpec.Name) if err != nil { return nil, fmt.Errorf("failed to get instance status for %s: %w", instanceSpec.Name, err) } @@ -216,7 +157,7 @@ func (s *Service) reconcileInstance(eventObject runtime.Object, openStackCluster } if err := s.deletePorts(eventObject, portList); err != nil { - s.scope.Logger.V(4).Error(err, "failed to clean up ports after failure", "cluster", clusterName, "machine", instanceSpec.Name) + s.scope.Logger.V(4).Error(err, "Failed to clean up ports after failure") } }() @@ -762,23 +703,6 @@ func (s *Service) GetInstanceStatusByName(eventObject runtime.Object, name strin return nil, nil } -// deduplicate takes a slice of input strings and filters out any duplicate -// string occurrences, for example making ["a", "b", "a", "c"] become ["a", "b", -// "c"]. 
-func deduplicate(sequence []string) []string { - var unique []string - set := make(map[string]bool) - - for _, s := range sequence { - if _, ok := set[s]; !ok { - unique = append(unique, s) - set[s] = true - } - } - - return unique -} - func getTimeout(name string, timeout int) time.Duration { if v := os.Getenv(name); v != "" { timeout, err := strconv.Atoi(v) diff --git a/pkg/cloud/services/compute/instance_test.go b/pkg/cloud/services/compute/instance_test.go index 32a6dbde32..6c1486857a 100644 --- a/pkg/cloud/services/compute/instance_test.go +++ b/pkg/cloud/services/compute/instance_test.go @@ -43,7 +43,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/pointer" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking" @@ -524,7 +523,6 @@ const ( imageUUID = "652b5a05-27fa-41d4-ac82-3e63cf6f7ab7" flavorUUID = "6dc820db-f912-454e-a1e3-1081f3b8cc72" instanceUUID = "383a8ec1-b6ea-4493-99dd-fc790da04ba9" - extraSecurityGroupUUID = "514bb2d8-3390-4a3b-86a7-7864ba57b329" controlPlaneSecurityGroupUUID = "c9817a91-4821-42db-8367-2301002ab659" workerSecurityGroupUUID = "9c6c0d28-03c9-436c-815d-58440ac2c1c8" serverGroupUUID = "7b940d62-68ef-4e42-a76a-1a62e290509c" @@ -536,10 +534,9 @@ const ( imageName = "test-image" flavorName = "test-flavor" sshKeyName = "test-ssh-key" + failureDomain = "test-failure-domain" ) -var failureDomain = "test-failure-domain" - func getDefaultOpenStackCluster() *infrav1.OpenStackCluster { return &infrav1.OpenStackCluster{ Spec: infrav1.OpenStackClusterSpec{}, @@ -556,14 +553,6 @@ func getDefaultOpenStackCluster() *infrav1.OpenStackCluster { } } -func getDefaultMachine() *clusterv1.Machine { - return &clusterv1.Machine{ - Spec: clusterv1.MachineSpec{ - FailureDomain: &failureDomain, - }, - } -} - func getDefaultOpenStackMachine() *infrav1.OpenStackMachine { 
return &infrav1.OpenStackMachine{ ObjectMeta: metav1.ObjectMeta{ @@ -589,7 +578,25 @@ func getDefaultOpenStackMachine() *infrav1.OpenStackMachine { } } -func TestService_CreateInstance(t *testing.T) { +func getDefaultInstanceSpec() *InstanceSpec { + return &InstanceSpec{ + Name: openStackMachineName, + Image: imageName, + Flavor: flavorName, + SSHKeyName: sshKeyName, + UserData: "user-data", + Metadata: map[string]string{ + "test-metadata": "test-value", + }, + ConfigDrive: *pointer.BoolPtr(true), + FailureDomain: *pointer.StringPtr(failureDomain), + ServerGroupID: serverGroupUUID, + Tags: []string{"test-tag"}, + SecurityGroups: []infrav1.SecurityGroupParam{{UUID: workerSecurityGroupUUID}}, + } +} + +func TestService_ReconcileInstance(t *testing.T) { RegisterTestingT(t) getDefaultServerMap := func() map[string]interface{} { @@ -602,6 +609,9 @@ func TestService_CreateInstance(t *testing.T) { "imageRef": imageUUID, "flavorRef": flavorUUID, "availability_zone": failureDomain, + "security_groups": []map[string]interface{}{ + {"name": workerSecurityGroupUUID}, + }, "networks": []map[string]interface{}{ {"port": portUUID}, }, @@ -744,18 +754,14 @@ func TestService_CreateInstance(t *testing.T) { // ******************* tests := []struct { - name string - getMachine func() *clusterv1.Machine - getOpenStackCluster func() *infrav1.OpenStackCluster - getOpenStackMachine func() *infrav1.OpenStackMachine - expect func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) - wantErr bool + name string + getInstanceSpec func() *InstanceSpec + expect func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) + wantErr bool }{ { - name: "Defaults", - getMachine: getDefaultMachine, - getOpenStackCluster: getDefaultOpenStackCluster, - getOpenStackMachine: getDefaultOpenStackMachine, + name: "Defaults", + getInstanceSpec: getDefaultInstanceSpec, expect: func(computeRecorder 
*MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { expectUseExistingDefaultPort(networkRecorder) expectDefaultImageAndFlavor(computeRecorder) @@ -766,10 +772,8 @@ func TestService_CreateInstance(t *testing.T) { wantErr: false, }, { - name: "Delete ports on server create error", - getMachine: getDefaultMachine, - getOpenStackCluster: getDefaultOpenStackCluster, - getOpenStackMachine: getDefaultOpenStackMachine, + name: "Delete ports on server create error", + getInstanceSpec: getDefaultInstanceSpec, expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { expectUseExistingDefaultPort(networkRecorder) expectDefaultImageAndFlavor(computeRecorder) @@ -782,16 +786,14 @@ func TestService_CreateInstance(t *testing.T) { wantErr: true, }, { - name: "Delete previously created ports on port creation error", - getMachine: getDefaultMachine, - getOpenStackCluster: getDefaultOpenStackCluster, - getOpenStackMachine: func() *infrav1.OpenStackMachine { - m := getDefaultOpenStackMachine() - m.Spec.Ports = []infrav1.PortOpts{ + name: "Delete previously created ports on port creation error", + getInstanceSpec: func() *InstanceSpec { + s := getDefaultInstanceSpec() + s.Ports = []infrav1.PortOpts{ {Description: "Test port 0"}, {Description: "Test port 1"}, } - return m + return s }, expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { computeRecorder.ListImages(images.ListOpts{Name: imageName}).Return([]images.Image{{ID: imageUUID}}, nil) @@ -811,10 +813,8 @@ func TestService_CreateInstance(t *testing.T) { wantErr: true, }, { - name: "Poll until server is created", - getMachine: getDefaultMachine, - getOpenStackCluster: getDefaultOpenStackCluster, - getOpenStackMachine: getDefaultOpenStackMachine, + name: "Poll until server is created", + getInstanceSpec: getDefaultInstanceSpec, expect: func(computeRecorder 
*MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { expectUseExistingDefaultPort(networkRecorder) expectDefaultImageAndFlavor(computeRecorder) @@ -825,10 +825,8 @@ func TestService_CreateInstance(t *testing.T) { wantErr: false, }, { - name: "Server errors during creation", - getMachine: getDefaultMachine, - getOpenStackCluster: getDefaultOpenStackCluster, - getOpenStackMachine: getDefaultOpenStackMachine, + name: "Server errors during creation", + getInstanceSpec: getDefaultInstanceSpec, expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { expectUseExistingDefaultPort(networkRecorder) expectDefaultImageAndFlavor(computeRecorder) @@ -841,15 +839,13 @@ func TestService_CreateInstance(t *testing.T) { wantErr: true, }, { - name: "Boot from volume success", - getMachine: getDefaultMachine, - getOpenStackCluster: getDefaultOpenStackCluster, - getOpenStackMachine: func() *infrav1.OpenStackMachine { - osMachine := getDefaultOpenStackMachine() - osMachine.Spec.RootVolume = &infrav1.RootVolume{ + name: "Boot from volume success", + getInstanceSpec: func() *InstanceSpec { + s := getDefaultInstanceSpec() + s.RootVolume = &infrav1.RootVolume{ Size: 50, } - return osMachine + return s }, expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { expectUseExistingDefaultPort(networkRecorder) @@ -886,17 +882,15 @@ func TestService_CreateInstance(t *testing.T) { wantErr: false, }, { - name: "Boot from volume with explicit AZ and volume type", - getMachine: getDefaultMachine, - getOpenStackCluster: getDefaultOpenStackCluster, - getOpenStackMachine: func() *infrav1.OpenStackMachine { - osMachine := getDefaultOpenStackMachine() - osMachine.Spec.RootVolume = &infrav1.RootVolume{ + name: "Boot from volume with explicit AZ and volume type", + getInstanceSpec: func() *InstanceSpec { + s := getDefaultInstanceSpec() + 
s.RootVolume = &infrav1.RootVolume{ Size: 50, AvailabilityZone: "test-alternate-az", VolumeType: "test-volume-type", } - return osMachine + return s }, expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { expectUseExistingDefaultPort(networkRecorder) @@ -934,15 +928,13 @@ func TestService_CreateInstance(t *testing.T) { wantErr: false, }, { - name: "Boot from volume failure cleans up ports", - getMachine: getDefaultMachine, - getOpenStackCluster: getDefaultOpenStackCluster, - getOpenStackMachine: func() *infrav1.OpenStackMachine { - osMachine := getDefaultOpenStackMachine() - osMachine.Spec.RootVolume = &infrav1.RootVolume{ + name: "Boot from volume failure cleans up ports", + getInstanceSpec: func() *InstanceSpec { + s := getDefaultInstanceSpec() + s.RootVolume = &infrav1.RootVolume{ Size: 50, } - return osMachine + return s }, expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { expectUseExistingDefaultPort(networkRecorder) @@ -965,96 +957,14 @@ func TestService_CreateInstance(t *testing.T) { wantErr: true, }, { - name: "Set control plane security group", - getMachine: func() *clusterv1.Machine { - machine := getDefaultMachine() - machine.Labels = map[string]string{ - clusterv1.MachineControlPlaneLabelName: "true", - } - return machine - }, - getOpenStackCluster: func() *infrav1.OpenStackCluster { - osCluster := getDefaultOpenStackCluster() - osCluster.Spec.ManagedSecurityGroups = true - return osCluster - }, - getOpenStackMachine: getDefaultOpenStackMachine, - expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { - expectUseExistingDefaultPort(networkRecorder) - expectDefaultImageAndFlavor(computeRecorder) - - createMap := getDefaultServerMap() - serverMap := createMap["server"].(map[string]interface{}) - serverMap["security_groups"] = []map[string]interface{}{ - 
{"name": controlPlaneSecurityGroupUUID}, - } - expectCreateServer(computeRecorder, createMap, false) - expectServerPollSuccess(computeRecorder) - }, - wantErr: false, - }, - { - name: "Set worker security group", - getMachine: getDefaultMachine, - getOpenStackCluster: func() *infrav1.OpenStackCluster { - osCluster := getDefaultOpenStackCluster() - osCluster.Spec.ManagedSecurityGroups = true - return osCluster - }, - getOpenStackMachine: getDefaultOpenStackMachine, - expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { - expectUseExistingDefaultPort(networkRecorder) - expectDefaultImageAndFlavor(computeRecorder) - - createMap := getDefaultServerMap() - serverMap := createMap["server"].(map[string]interface{}) - serverMap["security_groups"] = []map[string]interface{}{ - {"name": workerSecurityGroupUUID}, - } - expectCreateServer(computeRecorder, createMap, false) - expectServerPollSuccess(computeRecorder) - }, - wantErr: false, - }, - { - name: "Set extra security group", - getMachine: getDefaultMachine, - getOpenStackCluster: func() *infrav1.OpenStackCluster { - osCluster := getDefaultOpenStackCluster() - osCluster.Spec.ManagedSecurityGroups = true - return osCluster - }, - getOpenStackMachine: func() *infrav1.OpenStackMachine { - osMachine := getDefaultOpenStackMachine() - osMachine.Spec.SecurityGroups = []infrav1.SecurityGroupParam{{UUID: extraSecurityGroupUUID}} - return osMachine - }, - expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { - expectUseExistingDefaultPort(networkRecorder) - expectDefaultImageAndFlavor(computeRecorder) - - createMap := getDefaultServerMap() - serverMap := createMap["server"].(map[string]interface{}) - serverMap["security_groups"] = []map[string]interface{}{ - {"name": extraSecurityGroupUUID}, - {"name": workerSecurityGroupUUID}, - } - expectCreateServer(computeRecorder, createMap, false) - 
expectServerPollSuccess(computeRecorder) - }, - wantErr: false, - }, - { - name: "Delete trunks on port creation error", - getMachine: getDefaultMachine, - getOpenStackCluster: getDefaultOpenStackCluster, - getOpenStackMachine: func() *infrav1.OpenStackMachine { - m := getDefaultOpenStackMachine() - m.Spec.Ports = []infrav1.PortOpts{ + name: "Delete trunks on port creation error", + getInstanceSpec: func() *InstanceSpec { + s := getDefaultInstanceSpec() + s.Ports = []infrav1.PortOpts{ {Description: "Test port 0", Trunk: pointer.BoolPtr(true)}, {Description: "Test port 1"}, } - return m + return s }, expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { computeRecorder.ListImages(images.ListOpts{Name: imageName}).Return([]images.Image{{ID: imageUUID}}, nil) @@ -1129,7 +1039,7 @@ func TestService_CreateInstance(t *testing.T) { ), } // Call CreateInstance with a reduced retry interval to speed up the test - _, err := s.reconcileMachineImpl(tt.getOpenStackCluster(), tt.getMachine(), tt.getOpenStackMachine(), "cluster-name", "user-data", time.Second) + _, err := s.reconcileInstanceImpl(&infrav1.OpenStackMachine{}, getDefaultOpenStackCluster(), tt.getInstanceSpec(), "cluster-name", time.Nanosecond) if (err != nil) != tt.wantErr { t.Errorf("Service.CreateInstance() error = %v, wantErr %v", err, tt.wantErr) return From 34cbf11bc30572bd692ccdb3c5153f8ae01649a8 Mon Sep 17 00:00:00 2001 From: Matthew Booth Date: Thu, 24 Feb 2022 16:55:01 +0000 Subject: [PATCH 5/5] Update DeleteInstance to use InstanceSpec --- controllers/openstackcluster_controller.go | 4 +- controllers/openstackmachine_controller.go | 9 ++- pkg/cloud/services/compute/instance.go | 6 +- pkg/cloud/services/compute/instance_test.go | 72 +++++---------------- 4 files changed, 29 insertions(+), 62 deletions(-) diff --git a/controllers/openstackcluster_controller.go b/controllers/openstackcluster_controller.go index 0496ba5603..2dc01dd558 100644 --- 
a/controllers/openstackcluster_controller.go +++ b/controllers/openstackcluster_controller.go @@ -225,8 +225,8 @@ func deleteBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackClus } } - machineSpec := &openStackCluster.Spec.Bastion.Instance - if err = computeService.DeleteInstance(openStackCluster, machineSpec, instanceName, instanceStatus); err != nil { + instanceSpec := bastionToInstanceSpec(openStackCluster, cluster.Name) + if err = computeService.DeleteInstance(openStackCluster, instanceSpec, instanceStatus); err != nil { return false, errors.Errorf("failed to delete bastion: %v", err) } diff --git a/controllers/openstackmachine_controller.go b/controllers/openstackmachine_controller.go index 8e83c9e0c8..47c7617b3f 100644 --- a/controllers/openstackmachine_controller.go +++ b/controllers/openstackmachine_controller.go @@ -248,7 +248,14 @@ func (r *OpenStackMachineReconciler) reconcileDelete(ctx context.Context, scope } } - if err := computeService.DeleteInstance(openStackMachine, &openStackMachine.Spec, openStackMachine.Name, instanceStatus); err != nil { + instanceSpec, err := machineToInstanceSpec(openStackCluster, machine, openStackMachine, "") + if err != nil { + err = errors.Errorf("machine spec is invalid: %v", err) + handleUpdateMachineError(scope.Logger, openStackMachine, err) + return ctrl.Result{}, err + } + + if err := computeService.DeleteInstance(openStackMachine, instanceSpec, instanceStatus); err != nil { handleUpdateMachineError(scope.Logger, openStackMachine, errors.Errorf("error deleting OpenStack instance %s with ID %s: %v", instanceStatus.Name(), instanceStatus.ID(), err)) return ctrl.Result{}, nil } diff --git a/pkg/cloud/services/compute/instance.go b/pkg/cloud/services/compute/instance.go index f324762305..36c78e8c1d 100644 --- a/pkg/cloud/services/compute/instance.go +++ b/pkg/cloud/services/compute/instance.go @@ -515,7 +515,7 @@ func (s *Service) GetManagementPort(openStackCluster *infrav1.OpenStackCluster, return 
&allPorts[0], nil } -func (s *Service) DeleteInstance(eventObject runtime.Object, openStackMachineSpec *infrav1.OpenStackMachineSpec, instanceName string, instanceStatus *InstanceStatus) error { +func (s *Service) DeleteInstance(eventObject runtime.Object, instanceSpec *InstanceSpec, instanceStatus *InstanceStatus) error { if instanceStatus == nil { /* We create a boot-from-volume instance in 2 steps: @@ -535,9 +535,9 @@ func (s *Service) DeleteInstance(eventObject runtime.Object, openStackMachineSpe Note that we don't need to separately delete the root volume when deleting the instance because DeleteOnTermination will ensure it is deleted in that case. */ - rootVolume := openStackMachineSpec.RootVolume + rootVolume := instanceSpec.RootVolume if hasRootVolume(rootVolume) { - name := rootVolumeName(instanceName) + name := rootVolumeName(instanceSpec.Name) volume, err := s.getVolumeByName(name) if err != nil { return err diff --git a/pkg/cloud/services/compute/instance_test.go b/pkg/cloud/services/compute/instance_test.go index 6c1486857a..2c255d698c 100644 --- a/pkg/cloud/services/compute/instance_test.go +++ b/pkg/cloud/services/compute/instance_test.go @@ -40,7 +40,6 @@ import ( . "github.com/onsi/gomega" . 
"github.com/onsi/gomega/gstruct" gomegatypes "github.com/onsi/gomega/types" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/pointer" @@ -530,7 +529,6 @@ const ( openStackMachineName = "test-openstack-machine" portName = "test-openstack-machine-0" - namespace = "test-namespace" imageName = "test-image" flavorName = "test-flavor" sshKeyName = "test-ssh-key" @@ -553,31 +551,6 @@ func getDefaultOpenStackCluster() *infrav1.OpenStackCluster { } } -func getDefaultOpenStackMachine() *infrav1.OpenStackMachine { - return &infrav1.OpenStackMachine{ - ObjectMeta: metav1.ObjectMeta{ - Name: openStackMachineName, - Namespace: namespace, - }, - Spec: infrav1.OpenStackMachineSpec{ - // ProviderID is set by the controller - // InstanceID is set by the controller - // FloatingIP is only used by the cluster controller for the Bastion - // TODO: Test Networks, Ports, Subnet, and Trunk separately - CloudName: "test-cloud", - Flavor: flavorName, - Image: imageName, - SSHKeyName: sshKeyName, - Tags: []string{"test-tag"}, - ServerMetadata: map[string]string{ - "test-metadata": "test-value", - }, - ConfigDrive: pointer.BoolPtr(true), - ServerGroupID: serverGroupUUID, - }, - } -} - func getDefaultInstanceSpec() *InstanceSpec { return &InstanceSpec{ Name: openStackMachineName, @@ -1051,15 +1024,6 @@ func TestService_ReconcileInstance(t *testing.T) { func TestService_DeleteInstance(t *testing.T) { RegisterTestingT(t) - const instanceUUID = "7b8a2800-c615-4f52-9b75-d2ba60a2af66" - const portUUID = "94f3e9cb-89d5-4313-ad6d-44035722342b" - - const instanceName = "test-instance" - - getEventObject := func() runtime.Object { - return &infrav1.OpenStackMachine{} - } - getDefaultInstanceStatus := func() *InstanceStatus { return &InstanceStatus{ server: &ServerExt{ @@ -1070,27 +1034,23 @@ func TestService_DeleteInstance(t *testing.T) { } } - getDefaultOpenStackMachineSpec := func() *infrav1.OpenStackMachineSpec { - return 
&getDefaultOpenStackMachine().Spec - } - // ******************* // START OF TEST CASES // ******************* tests := []struct { - name string - eventObject runtime.Object - getOpenStackMachineSpec func() *infrav1.OpenStackMachineSpec - getInstanceStatus func() *InstanceStatus - expect func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) - wantErr bool + name string + eventObject runtime.Object + instanceSpec func() *InstanceSpec + instanceStatus func() *InstanceStatus + expect func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) + wantErr bool }{ { - name: "Defaults", - eventObject: getEventObject(), - getOpenStackMachineSpec: getDefaultOpenStackMachineSpec, - getInstanceStatus: getDefaultInstanceStatus, + name: "Defaults", + eventObject: &infrav1.OpenStackMachine{}, + instanceSpec: getDefaultInstanceSpec, + instanceStatus: getDefaultInstanceStatus, expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { computeRecorder.ListAttachedInterfaces(instanceUUID).Return([]attachinterfaces.Interface{ { @@ -1114,18 +1074,18 @@ func TestService_DeleteInstance(t *testing.T) { }, { name: "Dangling volume", - eventObject: getEventObject(), - getOpenStackMachineSpec: func() *infrav1.OpenStackMachineSpec { - spec := getDefaultOpenStackMachineSpec() + eventObject: &infrav1.OpenStackMachine{}, + instanceSpec: func() *InstanceSpec { + spec := getDefaultInstanceSpec() spec.RootVolume = &infrav1.RootVolume{ Size: 50, } return spec }, - getInstanceStatus: func() *InstanceStatus { return nil }, + instanceStatus: func() *InstanceStatus { return nil }, expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { // Fetch volume by name - volumeName := fmt.Sprintf("%s-root", instanceName) + volumeName := fmt.Sprintf("%s-root", openStackMachineName) 
computeRecorder.ListVolumes(volumes.ListOpts{ AllTenants: false, Name: volumeName, @@ -1162,7 +1122,7 @@ func TestService_DeleteInstance(t *testing.T) { "", mockNetworkClient, logr.Discard(), ), } - if err := s.DeleteInstance(tt.eventObject, tt.getOpenStackMachineSpec(), instanceName, tt.getInstanceStatus()); (err != nil) != tt.wantErr { + if err := s.DeleteInstance(tt.eventObject, tt.instanceSpec(), tt.instanceStatus()); (err != nil) != tt.wantErr { t.Errorf("Service.DeleteInstance() error = %v, wantErr %v", err, tt.wantErr) } })