diff --git a/controllers/openstackcluster_controller.go b/controllers/openstackcluster_controller.go index ac23a9aa35..2dc01dd558 100644 --- a/controllers/openstackcluster_controller.go +++ b/controllers/openstackcluster_controller.go @@ -130,9 +130,13 @@ func (r *OpenStackClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req func reconcileDelete(ctx context.Context, scope *scope.Scope, patchHelper *patch.Helper, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) { scope.Logger.Info("Reconciling Cluster delete") - if err := deleteBastion(scope, cluster, openStackCluster); err != nil { + deleted, err := deleteBastion(scope, cluster, openStackCluster) + if err != nil { return reconcile.Result{}, err } + if !deleted { + return reconcile.Result{RequeueAfter: defaultOpenStackBackOff}, nil + } networkingService, err := networking.NewService(scope) if err != nil { @@ -189,54 +193,51 @@ func contains(arr []string, target string) bool { return false } -func deleteBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error { +func deleteBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (bool, error) { computeService, err := compute.NewService(scope) if err != nil { - return err + return false, err } networkingService, err := networking.NewService(scope) if err != nil { - return err + return false, err } instanceName := fmt.Sprintf("%s-bastion", cluster.Name) instanceStatus, err := computeService.GetInstanceStatusByName(openStackCluster, instanceName) if err != nil { - return err + return false, err } if instanceStatus != nil { instanceNS, err := instanceStatus.NetworkStatus() if err != nil { - return err + return false, err } addresses := instanceNS.Addresses() for _, address := range addresses { if address.Type == corev1.NodeExternalIP { if err = networkingService.DeleteFloatingIP(openStackCluster, address.Address); err != nil { 
- handleUpdateOSCError(openStackCluster, errors.Errorf("failed to delete floating IP: %v", err)) - return errors.Errorf("failed to delete floating IP: %v", err) + return false, errors.Errorf("failed to delete floating IP: %v", err) } } } } - machineSpec := &openStackCluster.Spec.Bastion.Instance - if err = computeService.DeleteInstance(openStackCluster, machineSpec, instanceName, instanceStatus); err != nil { - handleUpdateOSCError(openStackCluster, errors.Errorf("failed to delete bastion: %v", err)) - return errors.Errorf("failed to delete bastion: %v", err) + instanceSpec := bastionToInstanceSpec(openStackCluster, cluster.Name) + if err = computeService.DeleteInstance(openStackCluster, instanceSpec, instanceStatus); err != nil { + return false, errors.Errorf("failed to delete bastion: %v", err) } openStackCluster.Status.Bastion = nil if err = networkingService.DeleteBastionSecurityGroup(openStackCluster, fmt.Sprintf("%s-%s", cluster.Namespace, cluster.Name)); err != nil { - handleUpdateOSCError(openStackCluster, errors.Errorf("failed to delete bastion security group: %v", err)) - return errors.Errorf("failed to delete bastion security group: %v", err) + return false, errors.Errorf("failed to delete bastion security group: %v", err) } openStackCluster.Status.BastionSecurityGroup = nil - return nil + return true, nil } func reconcileNormal(ctx context.Context, scope *scope.Scope, patchHelper *patch.Helper, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) { @@ -259,10 +260,6 @@ func reconcileNormal(ctx context.Context, scope *scope.Scope, patchHelper *patch return reconcile.Result{}, err } - if err = reconcileBastion(scope, cluster, openStackCluster); err != nil { - return reconcile.Result{}, err - } - availabilityZones, err := computeService.GetAvailabilityZones() if err != nil { return ctrl.Result{}, err @@ -288,14 +285,30 @@ func reconcileNormal(ctx context.Context, scope *scope.Scope, patchHelper *patch } } - 
openStackCluster.Status.Ready = true openStackCluster.Status.FailureMessage = nil openStackCluster.Status.FailureReason = nil + if !openStackCluster.Status.Ready { + openStackCluster.Status.Ready = true + + // If we're setting Ready, return early to update status and + // allow dependent operations to proceed. Ensure we call + // reconcile again to create the bastion. + return reconcile.Result{Requeue: true}, nil + } + + reconciled, err := reconcileBastion(scope, cluster, openStackCluster) + if err != nil { + return reconcile.Result{}, err + } + if !reconciled { + return reconcile.Result{RequeueAfter: defaultOpenStackBackOff}, nil + } + scope.Logger.Info("Reconciled Cluster create successfully") return reconcile.Result{}, nil } -func reconcileBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error { +func reconcileBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (bool, error) { scope.Logger.Info("Reconciling Bastion") if openStackCluster.Spec.Bastion == nil || !openStackCluster.Spec.Bastion.Enabled { @@ -304,56 +317,87 @@ func reconcileBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackC computeService, err := compute.NewService(scope) if err != nil { - return err + return false, err } - instanceStatus, err := computeService.GetInstanceStatusByName(openStackCluster, fmt.Sprintf("%s-bastion", cluster.Name)) + instanceSpec := bastionToInstanceSpec(openStackCluster, cluster.Name) + instanceStatus, err := computeService.ReconcileInstance(openStackCluster, openStackCluster, instanceSpec, cluster.Name) if err != nil { - return err + return false, errors.Errorf("failed to reconcile bastion: %v", err) } - if instanceStatus != nil { - bastion, err := instanceStatus.APIInstance(openStackCluster) - if err != nil { - return err - } - openStackCluster.Status.Bastion = bastion - return nil + if instanceStatus == nil { + // Bastion is not ready yet + return false, 
nil } - instanceStatus, err = computeService.CreateBastion(openStackCluster, cluster.Name) + // We overwrite the bastion status with the status of the instance from + // OpenStack. However, we don't want to lose any previously created + // floating IP which hasn't been associated yet, so we keep a reference + // to it here. + var floatingIP string + if openStackCluster.Status.Bastion != nil { + floatingIP = openStackCluster.Status.Bastion.FloatingIP + } + + bastion, err := instanceStatus.APIInstance(openStackCluster) if err != nil { - return errors.Errorf("failed to reconcile bastion: %v", err) + return false, err + } + openStackCluster.Status.Bastion = bastion + + // Bastion already has a floating IP + if bastion.FloatingIP != "" { + return true, nil } networkingService, err := networking.NewService(scope) if err != nil { - return err + return false, err } + clusterName := fmt.Sprintf("%s-%s", cluster.Namespace, cluster.Name) - fp, err := networkingService.GetOrCreateFloatingIP(openStackCluster, openStackCluster, clusterName, openStackCluster.Spec.Bastion.Instance.FloatingIP) + fp, err := networkingService.GetOrCreateFloatingIP(openStackCluster, openStackCluster, clusterName, floatingIP) if err != nil { - handleUpdateOSCError(openStackCluster, errors.Errorf("failed to get or create floating IP for bastion: %v", err)) - return errors.Errorf("failed to get or create floating IP for bastion: %v", err) + return false, errors.Errorf("failed to get or create floating IP for bastion: %v", err) } + bastion.FloatingIP = fp.FloatingIP + port, err := computeService.GetManagementPort(openStackCluster, instanceStatus) if err != nil { - err = errors.Errorf("getting management port for bastion: %v", err) - handleUpdateOSCError(openStackCluster, err) - return err + return false, errors.Errorf("getting management port for bastion: %v", err) } + err = networkingService.AssociateFloatingIP(openStackCluster, fp, port.ID) if err != nil { - handleUpdateOSCError(openStackCluster, 
errors.Errorf("failed to associate floating IP with bastion: %v", err)) - return errors.Errorf("failed to associate floating IP with bastion: %v", err) + return false, errors.Errorf("failed to associate floating IP with bastion: %v", err) } - bastion, err := instanceStatus.APIInstance(openStackCluster) - if err != nil { - return err + return true, nil +} + +func bastionToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, clusterName string) *compute.InstanceSpec { + name := fmt.Sprintf("%s-bastion", clusterName) + instanceSpec := &compute.InstanceSpec{ + Name: name, + Flavor: openStackCluster.Spec.Bastion.Instance.Flavor, + SSHKeyName: openStackCluster.Spec.Bastion.Instance.SSHKeyName, + Image: openStackCluster.Spec.Bastion.Instance.Image, + ImageUUID: openStackCluster.Spec.Bastion.Instance.ImageUUID, + FailureDomain: openStackCluster.Spec.Bastion.AvailabilityZone, + RootVolume: openStackCluster.Spec.Bastion.Instance.RootVolume, } - bastion.FloatingIP = fp.FloatingIP - openStackCluster.Status.Bastion = bastion - return nil + + instanceSpec.SecurityGroups = openStackCluster.Spec.Bastion.Instance.SecurityGroups + if openStackCluster.Spec.ManagedSecurityGroups { + instanceSpec.SecurityGroups = append(instanceSpec.SecurityGroups, infrav1.SecurityGroupParam{ + UUID: openStackCluster.Status.BastionSecurityGroup.ID, + }) + } + + instanceSpec.Networks = openStackCluster.Spec.Bastion.Instance.Networks + instanceSpec.Ports = openStackCluster.Spec.Bastion.Instance.Ports + + return instanceSpec } func reconcileNetworkComponents(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error { diff --git a/controllers/openstackmachine_controller.go b/controllers/openstackmachine_controller.go index 37e3d609ad..47c7617b3f 100644 --- a/controllers/openstackmachine_controller.go +++ b/controllers/openstackmachine_controller.go @@ -63,8 +63,8 @@ type OpenStackMachineReconciler struct { } const ( - waitForClusterInfrastructureReadyDuration = 
15 * time.Second - waitForInstanceBecomeActiveToReconcile = 60 * time.Second + defaultOpenStackBackOff = 15 * time.Second + waitForInstanceBecomeActiveToReconcile = 60 * time.Second ) // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=openstackmachines,verbs=get;list;watch;create;update;patch;delete @@ -248,7 +248,14 @@ func (r *OpenStackMachineReconciler) reconcileDelete(ctx context.Context, scope } } - if err := computeService.DeleteInstance(openStackMachine, &openStackMachine.Spec, openStackMachine.Name, instanceStatus); err != nil { + instanceSpec, err := machineToInstanceSpec(openStackCluster, machine, openStackMachine, "") + if err != nil { + err = errors.Errorf("machine spec is invalid: %v", err) + handleUpdateMachineError(scope.Logger, openStackMachine, err) + return ctrl.Result{}, err + } + + if err := computeService.DeleteInstance(openStackMachine, instanceSpec, instanceStatus); err != nil { handleUpdateMachineError(scope.Logger, openStackMachine, errors.Errorf("error deleting OpenStack instance %s with ID %s: %v", instanceStatus.Name(), instanceStatus.ID(), err)) return ctrl.Result{}, nil } @@ -277,7 +284,7 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope if !cluster.Status.InfrastructureReady { scope.Logger.Info("Cluster infrastructure is not ready yet, requeuing machine") - return ctrl.Result{RequeueAfter: waitForClusterInfrastructureReadyDuration}, nil + return ctrl.Result{RequeueAfter: defaultOpenStackBackOff}, nil } // Make sure bootstrap data is available and populated. 
@@ -303,16 +310,21 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope return ctrl.Result{}, err } - instanceStatus, err := r.getOrCreate(scope.Logger, cluster, openStackCluster, machine, openStackMachine, computeService, userData) + instanceSpec, err := machineToInstanceSpec(openStackCluster, machine, openStackMachine, userData) if err != nil { - handleUpdateMachineError(scope.Logger, openStackMachine, errors.Errorf("OpenStack instance cannot be created: %v", err)) + err = errors.Errorf("machine spec is invalid: %v", err) + handleUpdateMachineError(scope.Logger, openStackMachine, err) return ctrl.Result{}, err } - // Set an error message if we couldn't find the instance. + instanceStatus, err := computeService.ReconcileInstance(openStackMachine, openStackCluster, instanceSpec, cluster.Name) + if err != nil { + err = errors.Errorf("openstack instance cannot be created: %v", err) + handleUpdateMachineError(scope.Logger, openStackMachine, err) + return ctrl.Result{}, err + } if instanceStatus == nil { - handleUpdateMachineError(scope.Logger, openStackMachine, errors.New("OpenStack instance cannot be found")) - return ctrl.Result{}, nil + return ctrl.Result{RequeueAfter: defaultOpenStackBackOff}, nil } // TODO(sbueringer) From CAPA: TODO(ncdc): move this validation logic into a validating webhook (for us: create validation logic in webhook) @@ -384,21 +396,73 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope return ctrl.Result{}, nil } -func (r *OpenStackMachineReconciler) getOrCreate(logger logr.Logger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, computeService *compute.Service, userData string) (*compute.InstanceStatus, error) { - instanceStatus, err := computeService.GetInstanceStatusByName(openStackMachine, openStackMachine.Name) - if err != nil { - return nil, err +func 
machineToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, userData string) (*compute.InstanceSpec, error) { + if openStackMachine == nil { + return nil, fmt.Errorf("create options need to be specified to create instance") } - if instanceStatus == nil { - logger.Info("Machine not exist, Creating Machine", "Machine", openStackMachine.Name) - instanceStatus, err = computeService.CreateInstance(openStackCluster, machine, openStackMachine, cluster.Name, userData) - if err != nil { - return nil, errors.Errorf("error creating Openstack instance: %v", err) + if machine.Spec.FailureDomain == nil { + return nil, fmt.Errorf("failure domain not set") + } + + instanceSpec := compute.InstanceSpec{ + Name: openStackMachine.Name, + Image: openStackMachine.Spec.Image, + ImageUUID: openStackMachine.Spec.ImageUUID, + Flavor: openStackMachine.Spec.Flavor, + SSHKeyName: openStackMachine.Spec.SSHKeyName, + UserData: userData, + Metadata: openStackMachine.Spec.ServerMetadata, + ConfigDrive: openStackMachine.Spec.ConfigDrive != nil && *openStackMachine.Spec.ConfigDrive, + FailureDomain: *machine.Spec.FailureDomain, + RootVolume: openStackMachine.Spec.RootVolume, + Subnet: openStackMachine.Spec.Subnet, + ServerGroupID: openStackMachine.Spec.ServerGroupID, + Trunk: openStackMachine.Spec.Trunk, + } + + machineTags := []string{} + + // Append machine specific tags + machineTags = append(machineTags, openStackMachine.Spec.Tags...) + + // Append cluster scope tags + machineTags = append(machineTags, openStackCluster.Spec.Tags...) + + // tags need to be unique or the "apply tags" call will fail. 
+ deduplicate := func(tags []string) []string { + seen := make(map[string]struct{}, len(machineTags)) + unique := make([]string, 0, len(machineTags)) + for _, tag := range tags { + if _, ok := seen[tag]; !ok { + seen[tag] = struct{}{} + unique = append(unique, tag) + } } + return unique } + machineTags = deduplicate(machineTags) + + instanceSpec.Tags = machineTags + + instanceSpec.SecurityGroups = openStackMachine.Spec.SecurityGroups + if openStackCluster.Spec.ManagedSecurityGroups { + var managedSecurityGroup string + if util.IsControlPlaneMachine(machine) { + managedSecurityGroup = openStackCluster.Status.ControlPlaneSecurityGroup.ID + } else { + managedSecurityGroup = openStackCluster.Status.WorkerSecurityGroup.ID + } + + instanceSpec.SecurityGroups = append(instanceSpec.SecurityGroups, infrav1.SecurityGroupParam{ + UUID: managedSecurityGroup, + }) + } + + instanceSpec.Networks = openStackMachine.Spec.Networks + instanceSpec.Ports = openStackMachine.Spec.Ports - return instanceStatus, nil + return &instanceSpec, nil } func handleUpdateMachineError(logger logr.Logger, openstackMachine *infrav1.OpenStackMachine, message error) { diff --git a/controllers/openstackmachine_controller_test.go b/controllers/openstackmachine_controller_test.go new file mode 100644 index 0000000000..d795378e8a --- /dev/null +++ b/controllers/openstackmachine_controller_test.go @@ -0,0 +1,226 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "testing" + + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/compute" +) + +const ( + networkUUID = "d412171b-9fd7-41c1-95a6-c24e5953974d" + subnetUUID = "d2d8d98d-b234-477e-a547-868b7cb5d6a5" + extraSecurityGroupUUID = "514bb2d8-3390-4a3b-86a7-7864ba57b329" + controlPlaneSecurityGroupUUID = "c9817a91-4821-42db-8367-2301002ab659" + workerSecurityGroupUUID = "9c6c0d28-03c9-436c-815d-58440ac2c1c8" + serverGroupUUID = "7b940d62-68ef-4e42-a76a-1a62e290509c" + + openStackMachineName = "test-openstack-machine" + namespace = "test-namespace" + imageName = "test-image" + flavorName = "test-flavor" + sshKeyName = "test-ssh-key" + failureDomain = "test-failure-domain" +) + +func getDefaultOpenStackCluster() *infrav1.OpenStackCluster { + return &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{}, + Status: infrav1.OpenStackClusterStatus{ + Network: &infrav1.Network{ + ID: networkUUID, + Subnet: &infrav1.Subnet{ + ID: subnetUUID, + }, + }, + ControlPlaneSecurityGroup: &infrav1.SecurityGroup{ID: controlPlaneSecurityGroupUUID}, + WorkerSecurityGroup: &infrav1.SecurityGroup{ID: workerSecurityGroupUUID}, + }, + } +} + +func getDefaultMachine() *clusterv1.Machine { + return &clusterv1.Machine{ + Spec: clusterv1.MachineSpec{ + FailureDomain: pointer.StringPtr(failureDomain), + }, + } +} + +func getDefaultOpenStackMachine() *infrav1.OpenStackMachine { + return &infrav1.OpenStackMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: openStackMachineName, + Namespace: namespace, + }, + Spec: infrav1.OpenStackMachineSpec{ + // ProviderID is set by the controller + // InstanceID is set by the controller + // FloatingIP is only used by the cluster controller for the Bastion + // TODO: Test Networks, Ports, 
Subnet, and Trunk separately + CloudName: "test-cloud", + Flavor: flavorName, + Image: imageName, + SSHKeyName: sshKeyName, + Tags: []string{"test-tag"}, + ServerMetadata: map[string]string{ + "test-metadata": "test-value", + }, + ConfigDrive: pointer.BoolPtr(true), + ServerGroupID: serverGroupUUID, + }, + } +} + +func getDefaultInstanceSpec() *compute.InstanceSpec { + return &compute.InstanceSpec{ + Name: openStackMachineName, + Image: imageName, + Flavor: flavorName, + SSHKeyName: sshKeyName, + UserData: "user-data", + Metadata: map[string]string{ + "test-metadata": "test-value", + }, + ConfigDrive: *pointer.BoolPtr(true), + FailureDomain: *pointer.StringPtr(failureDomain), + ServerGroupID: serverGroupUUID, + Tags: []string{"test-tag"}, + } +} + +func Test_machineToInstanceSpec(t *testing.T) { + RegisterTestingT(t) + + tests := []struct { + name string + openStackCluster func() *infrav1.OpenStackCluster + machine func() *clusterv1.Machine + openStackMachine func() *infrav1.OpenStackMachine + wantInstanceSpec func() *compute.InstanceSpec + wantErr bool + }{ + { + name: "Defaults", + openStackCluster: getDefaultOpenStackCluster, + machine: getDefaultMachine, + openStackMachine: getDefaultOpenStackMachine, + wantInstanceSpec: getDefaultInstanceSpec, + wantErr: false, + }, + { + name: "Control plane security group", + openStackCluster: func() *infrav1.OpenStackCluster { + c := getDefaultOpenStackCluster() + c.Spec.ManagedSecurityGroups = true + return c + }, + machine: func() *clusterv1.Machine { + m := getDefaultMachine() + m.Labels = map[string]string{ + clusterv1.MachineControlPlaneLabelName: "true", + } + return m + }, + openStackMachine: getDefaultOpenStackMachine, + wantInstanceSpec: func() *compute.InstanceSpec { + i := getDefaultInstanceSpec() + i.SecurityGroups = []infrav1.SecurityGroupParam{{UUID: controlPlaneSecurityGroupUUID}} + return i + }, + wantErr: false, + }, + { + name: "Worker security group", + openStackCluster: func() *infrav1.OpenStackCluster { 
+ c := getDefaultOpenStackCluster() + c.Spec.ManagedSecurityGroups = true + return c + }, + machine: getDefaultMachine, + openStackMachine: getDefaultOpenStackMachine, + wantInstanceSpec: func() *compute.InstanceSpec { + i := getDefaultInstanceSpec() + i.SecurityGroups = []infrav1.SecurityGroupParam{{UUID: workerSecurityGroupUUID}} + return i + }, + wantErr: false, + }, + { + name: "Extra security group", + openStackCluster: func() *infrav1.OpenStackCluster { + c := getDefaultOpenStackCluster() + c.Spec.ManagedSecurityGroups = true + return c + }, + machine: getDefaultMachine, + openStackMachine: func() *infrav1.OpenStackMachine { + m := getDefaultOpenStackMachine() + m.Spec.SecurityGroups = []infrav1.SecurityGroupParam{{UUID: extraSecurityGroupUUID}} + return m + }, + wantInstanceSpec: func() *compute.InstanceSpec { + i := getDefaultInstanceSpec() + i.SecurityGroups = []infrav1.SecurityGroupParam{ + {UUID: extraSecurityGroupUUID}, + {UUID: workerSecurityGroupUUID}, + } + return i + }, + wantErr: false, + }, + { + name: "Tags", + openStackCluster: func() *infrav1.OpenStackCluster { + c := getDefaultOpenStackCluster() + c.Spec.Tags = []string{"cluster-tag", "duplicate-tag"} + return c + }, + machine: getDefaultMachine, + openStackMachine: func() *infrav1.OpenStackMachine { + m := getDefaultOpenStackMachine() + m.Spec.Tags = []string{"machine-tag", "duplicate-tag"} + return m + }, + wantInstanceSpec: func() *compute.InstanceSpec { + i := getDefaultInstanceSpec() + i.Tags = []string{"machine-tag", "duplicate-tag", "cluster-tag"} + return i + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := machineToInstanceSpec(tt.openStackCluster(), tt.machine(), tt.openStackMachine(), "user-data") + if tt.wantErr { + Expect(err).To(HaveOccurred()) + } else { + Expect(err).NotTo(HaveOccurred()) + } + + Expect(got).To(Equal(tt.wantInstanceSpec())) + }) + } +} diff --git a/pkg/cloud/services/compute/bastion.go 
b/pkg/cloud/services/compute/bastion.go deleted file mode 100644 index abce789f5e..0000000000 --- a/pkg/cloud/services/compute/bastion.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package compute - -import ( - "fmt" - - infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" -) - -func (s *Service) CreateBastion(openStackCluster *infrav1.OpenStackCluster, clusterName string) (*InstanceStatus, error) { - name := fmt.Sprintf("%s-bastion", clusterName) - instanceSpec := &InstanceSpec{ - Name: name, - Flavor: openStackCluster.Spec.Bastion.Instance.Flavor, - SSHKeyName: openStackCluster.Spec.Bastion.Instance.SSHKeyName, - Image: openStackCluster.Spec.Bastion.Instance.Image, - ImageUUID: openStackCluster.Spec.Bastion.Instance.ImageUUID, - FailureDomain: openStackCluster.Spec.Bastion.AvailabilityZone, - RootVolume: openStackCluster.Spec.Bastion.Instance.RootVolume, - } - - securityGroups, err := s.networkingService.GetSecurityGroups(openStackCluster.Spec.Bastion.Instance.SecurityGroups) - if err != nil { - return nil, err - } - if openStackCluster.Spec.ManagedSecurityGroups { - securityGroups = append(securityGroups, openStackCluster.Status.BastionSecurityGroup.ID) - } - instanceSpec.SecurityGroups = securityGroups - - var nets []infrav1.Network - if len(openStackCluster.Spec.Bastion.Instance.Networks) > 0 { - var err error - nets, err = 
s.getServerNetworks(openStackCluster.Spec.Bastion.Instance.Networks) - if err != nil { - return nil, err - } - } else { - nets = []infrav1.Network{{ - ID: openStackCluster.Status.Network.ID, - Subnet: &infrav1.Subnet{ - ID: openStackCluster.Status.Network.Subnet.ID, - }, - }} - } - instanceSpec.Networks = nets - - return s.createInstance(openStackCluster, clusterName, instanceSpec, retryIntervalInstanceStatus) -} diff --git a/pkg/cloud/services/compute/instance.go b/pkg/cloud/services/compute/instance.go index d170287888..36c78e8c1d 100644 --- a/pkg/cloud/services/compute/instance.go +++ b/pkg/cloud/services/compute/instance.go @@ -30,7 +30,6 @@ import ( "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images" "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" "k8s.io/apimachinery/pkg/runtime" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" @@ -44,108 +43,24 @@ const ( timeoutInstanceDelete = 5 * time.Minute ) -func (s *Service) CreateInstance(openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, clusterName string, userData string) (instance *InstanceStatus, err error) { - return s.createInstanceImpl(openStackCluster, machine, openStackMachine, clusterName, userData, retryIntervalInstanceStatus) -} - -func (s *Service) createInstanceImpl(openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, clusterName string, userData string, retryInterval time.Duration) (instance *InstanceStatus, err error) { - if openStackMachine == nil { - return nil, fmt.Errorf("create Options need be specified to create instace") - } - - if machine.Spec.FailureDomain == nil { - return nil, fmt.Errorf("failure domain not set") - } - - instanceSpec := InstanceSpec{ - Name: openStackMachine.Name, - Image: openStackMachine.Spec.Image, - ImageUUID: 
openStackMachine.Spec.ImageUUID, - Flavor: openStackMachine.Spec.Flavor, - SSHKeyName: openStackMachine.Spec.SSHKeyName, - UserData: userData, - Metadata: openStackMachine.Spec.ServerMetadata, - ConfigDrive: openStackMachine.Spec.ConfigDrive != nil && *openStackMachine.Spec.ConfigDrive, - FailureDomain: *machine.Spec.FailureDomain, - RootVolume: openStackMachine.Spec.RootVolume, - Subnet: openStackMachine.Spec.Subnet, - ServerGroupID: openStackMachine.Spec.ServerGroupID, - } - - // verify that trunk is supported if set at instance level. - if openStackMachine.Spec.Trunk { - trunkSupported, err := s.isTrunkExtSupported() - if err != nil { - return nil, err - } - if !trunkSupported { - return nil, fmt.Errorf("there is no trunk support. please ensure that the trunk extension is enabled in your OpenStack deployment") - } - instanceSpec.Trunk = true - } - machineTags := []string{} - - // Append machine specific tags - machineTags = append(machineTags, openStackMachine.Spec.Tags...) - - // Append cluster scope tags - machineTags = append(machineTags, openStackCluster.Spec.Tags...) - - // tags need to be unique or the "apply tags" call will fail. - machineTags = deduplicate(machineTags) - - instanceSpec.Tags = machineTags - - // Get security groups - securityGroups, err := s.networkingService.GetSecurityGroups(openStackMachine.Spec.SecurityGroups) - if err != nil { - return nil, err - } - if openStackCluster.Spec.ManagedSecurityGroups { - if util.IsControlPlaneMachine(machine) { - securityGroups = append(securityGroups, openStackCluster.Status.ControlPlaneSecurityGroup.ID) - } else { - securityGroups = append(securityGroups, openStackCluster.Status.WorkerSecurityGroup.ID) - } - } - instanceSpec.SecurityGroups = securityGroups +// constructNetworks builds an array of networks from the network, subnet and ports items in the instance spec. +// If no networks or ports are in the spec, returns a single network item for a network connection to the default cluster network. 
+func (s *Service) constructNetworks(openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec) ([]infrav1.Network, error) { + trunkRequired := false - nets, err := s.constructNetworks(openStackCluster, openStackMachine) + nets, err := s.getServerNetworks(instanceSpec.Networks) if err != nil { return nil, err } - trunkConfigured := s.isTrunkConfigured(nets, instanceSpec.Trunk) - if trunkConfigured { - trunkSupported, err := s.isTrunkExtSupported() - if err != nil { - return nil, err - } - if !trunkSupported { - return nil, fmt.Errorf("there is no trunk support. please ensure that the trunk extension is enabled in your OpenStack deployment") - } - } - instanceSpec.Networks = nets - - return s.createInstance(openStackMachine, clusterName, &instanceSpec, retryInterval) -} - -// constructNetworks builds an array of networks from the network, subnet and ports items in the machine spec. -// If no networks or ports are in the spec, returns a single network item for a network connection to the default cluster network. -func (s *Service) constructNetworks(openStackCluster *infrav1.OpenStackCluster, openStackMachine *infrav1.OpenStackMachine) ([]infrav1.Network, error) { - var nets []infrav1.Network - if len(openStackMachine.Spec.Networks) > 0 { - var err error - nets, err = s.getServerNetworks(openStackMachine.Spec.Networks) - if err != nil { - return nil, err - } - } - for i, port := range openStackMachine.Spec.Ports { - pOpts := &openStackMachine.Spec.Ports[i] + for i, port := range instanceSpec.Ports { + pOpts := &instanceSpec.Ports[i] // No Trunk field specified for the port, inherit openStackMachine.Spec.Trunk. 
if pOpts.Trunk == nil { - pOpts.Trunk = &openStackMachine.Spec.Trunk + pOpts.Trunk = &instanceSpec.Trunk + } + if *pOpts.Trunk { + trunkRequired = true } if port.Network != nil { netID := port.Network.ID @@ -184,14 +99,39 @@ func (s *Service) constructNetworks(openStackCluster *infrav1.OpenStackCluster, ID: openStackCluster.Status.Network.Subnet.ID, }, PortOpts: &infrav1.PortOpts{ - Trunk: &openStackMachine.Spec.Trunk, + Trunk: &instanceSpec.Trunk, }, }} + trunkRequired = instanceSpec.Trunk + } + + if trunkRequired { + trunkSupported, err := s.isTrunkExtSupported() + if err != nil { + return nil, err + } + if !trunkSupported { + return nil, fmt.Errorf("there is no trunk support. please ensure that the trunk extension is enabled in your OpenStack deployment") + } } + return nets, nil } -func (s *Service) createInstance(eventObject runtime.Object, clusterName string, instanceSpec *InstanceSpec, retryInterval time.Duration) (*InstanceStatus, error) { +// ReconcileInstance is a common function for reconciling an OpenStack instance and its dependencies used by both Machines and the Bastion. 
+func (s *Service) ReconcileInstance(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string) (*InstanceStatus, error) { + return s.reconcileInstanceImpl(eventObject, openStackCluster, instanceSpec, clusterName, retryIntervalInstanceStatus) +} + +func (s *Service) reconcileInstanceImpl(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string, retryInterval time.Duration) (*InstanceStatus, error) { + instanceStatus, err := s.GetInstanceStatusByName(eventObject, instanceSpec.Name) + if err != nil { + return nil, fmt.Errorf("failed to get instance status for %s: %w", instanceSpec.Name, err) + } + if instanceStatus != nil { + return instanceStatus, nil + } + var server *ServerExt accessIPv4 := "" portList := []servers.Network{} @@ -217,11 +157,21 @@ func (s *Service) createInstance(eventObject runtime.Object, clusterName string, } if err := s.deletePorts(eventObject, portList); err != nil { - s.scope.Logger.V(4).Error(err, "failed to clean up ports after failure", "cluster", clusterName, "machine", instanceSpec.Name) + s.scope.Logger.V(4).Error(err, "Failed to clean up ports after failure") } }() - for i, network := range instanceSpec.Networks { + nets, err := s.constructNetworks(openStackCluster, instanceSpec) + if err != nil { + return nil, err + } + + securityGroups, err := s.networkingService.GetSecurityGroups(instanceSpec.SecurityGroups) + if err != nil { + return nil, fmt.Errorf("error getting security groups: %v", err) + } + + for i, network := range nets { if network.ID == "" { return nil, fmt.Errorf("no network was found or provided. 
Please check your machine configuration and try again") } @@ -230,7 +180,7 @@ func (s *Service) createInstance(eventObject runtime.Object, clusterName string, iTags = instanceSpec.Tags } portName := getPortName(instanceSpec.Name, network.PortOpts, i) - port, err := s.networkingService.GetOrCreatePort(eventObject, clusterName, portName, network, &instanceSpec.SecurityGroups, iTags) + port, err := s.networkingService.GetOrCreatePort(eventObject, clusterName, portName, network, &securityGroups, iTags) if err != nil { return nil, err } @@ -253,7 +203,7 @@ func (s *Service) createInstance(eventObject runtime.Object, clusterName string, AvailabilityZone: instanceSpec.FailureDomain, Networks: portList, UserData: []byte(instanceSpec.UserData), - SecurityGroups: instanceSpec.SecurityGroups, + SecurityGroups: securityGroups, Tags: instanceSpec.Tags, Metadata: instanceSpec.Metadata, ConfigDrive: &instanceSpec.ConfigDrive, @@ -565,7 +515,7 @@ func (s *Service) GetManagementPort(openStackCluster *infrav1.OpenStackCluster, return &allPorts[0], nil } -func (s *Service) DeleteInstance(eventObject runtime.Object, openStackMachineSpec *infrav1.OpenStackMachineSpec, instanceName string, instanceStatus *InstanceStatus) error { +func (s *Service) DeleteInstance(eventObject runtime.Object, instanceSpec *InstanceSpec, instanceStatus *InstanceStatus) error { if instanceStatus == nil { /* We create a boot-from-volume instance in 2 steps: @@ -585,9 +535,9 @@ func (s *Service) DeleteInstance(eventObject runtime.Object, openStackMachineSpe Note that we don't need to separately delete the root volume when deleting the instance because DeleteOnTermination will ensure it is deleted in that case. 
*/ - rootVolume := openStackMachineSpec.RootVolume + rootVolume := instanceSpec.RootVolume if hasRootVolume(rootVolume) { - name := rootVolumeName(instanceName) + name := rootVolumeName(instanceSpec.Name) volume, err := s.getVolumeByName(name) if err != nil { return err @@ -753,23 +703,6 @@ func (s *Service) GetInstanceStatusByName(eventObject runtime.Object, name strin return nil, nil } -// deduplicate takes a slice of input strings and filters out any duplicate -// string occurrences, for example making ["a", "b", "a", "c"] become ["a", "b", -// "c"]. -func deduplicate(sequence []string) []string { - var unique []string - set := make(map[string]bool) - - for _, s := range sequence { - if _, ok := set[s]; !ok { - unique = append(unique, s) - set[s] = true - } - } - - return unique -} - func getTimeout(name string, timeout int) time.Duration { if v := os.Getenv(name); v != "" { timeout, err := strconv.Atoi(v) @@ -791,19 +724,3 @@ func (s *Service) isTrunkExtSupported() (trunknSupported bool, err error) { } return true, nil } - -// isTrunkConfigured verifies trunk configuration at instance and port levels, useful for avoiding multple api calls to verify trunk support. 
-func (s *Service) isTrunkConfigured(nets []infrav1.Network, instanceLevelTrunk bool) bool { - if instanceLevelTrunk { - return true - } - for _, net := range nets { - port := net.PortOpts - if port != nil { - if port.Trunk != nil && *port.Trunk { - return true - } - } - } - return false -} diff --git a/pkg/cloud/services/compute/instance_test.go b/pkg/cloud/services/compute/instance_test.go index cccb1418c6..2c255d698c 100644 --- a/pkg/cloud/services/compute/instance_test.go +++ b/pkg/cloud/services/compute/instance_test.go @@ -33,7 +33,6 @@ import ( "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/trunks" "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" @@ -41,10 +40,8 @@ import ( . "github.com/onsi/gomega" . 
"github.com/onsi/gomega/gstruct" gomegatypes "github.com/onsi/gomega/types" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/pointer" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking" @@ -525,7 +522,6 @@ const ( imageUUID = "652b5a05-27fa-41d4-ac82-3e63cf6f7ab7" flavorUUID = "6dc820db-f912-454e-a1e3-1081f3b8cc72" instanceUUID = "383a8ec1-b6ea-4493-99dd-fc790da04ba9" - extraSecurityGroupUUID = "514bb2d8-3390-4a3b-86a7-7864ba57b329" controlPlaneSecurityGroupUUID = "c9817a91-4821-42db-8367-2301002ab659" workerSecurityGroupUUID = "9c6c0d28-03c9-436c-815d-58440ac2c1c8" serverGroupUUID = "7b940d62-68ef-4e42-a76a-1a62e290509c" @@ -533,14 +529,12 @@ const ( openStackMachineName = "test-openstack-machine" portName = "test-openstack-machine-0" - namespace = "test-namespace" imageName = "test-image" flavorName = "test-flavor" sshKeyName = "test-ssh-key" + failureDomain = "test-failure-domain" ) -var failureDomain = "test-failure-domain" - func getDefaultOpenStackCluster() *infrav1.OpenStackCluster { return &infrav1.OpenStackCluster{ Spec: infrav1.OpenStackClusterSpec{}, @@ -557,40 +551,25 @@ func getDefaultOpenStackCluster() *infrav1.OpenStackCluster { } } -func getDefaultMachine() *clusterv1.Machine { - return &clusterv1.Machine{ - Spec: clusterv1.MachineSpec{ - FailureDomain: &failureDomain, +func getDefaultInstanceSpec() *InstanceSpec { + return &InstanceSpec{ + Name: openStackMachineName, + Image: imageName, + Flavor: flavorName, + SSHKeyName: sshKeyName, + UserData: "user-data", + Metadata: map[string]string{ + "test-metadata": "test-value", }, + ConfigDrive: *pointer.BoolPtr(true), + FailureDomain: *pointer.StringPtr(failureDomain), + ServerGroupID: serverGroupUUID, + Tags: []string{"test-tag"}, + SecurityGroups: []infrav1.SecurityGroupParam{{UUID: workerSecurityGroupUUID}}, } } -func 
getDefaultOpenStackMachine() *infrav1.OpenStackMachine { - return &infrav1.OpenStackMachine{ - ObjectMeta: metav1.ObjectMeta{ - Name: openStackMachineName, - Namespace: namespace, - }, - Spec: infrav1.OpenStackMachineSpec{ - // ProviderID is set by the controller - // InstanceID is set by the controller - // FloatingIP is only used by the cluster controller for the Bastion - // TODO: Test Networks, Ports, Subnet, and Trunk separately - CloudName: "test-cloud", - Flavor: flavorName, - Image: imageName, - SSHKeyName: sshKeyName, - Tags: []string{"test-tag"}, - ServerMetadata: map[string]string{ - "test-metadata": "test-value", - }, - ConfigDrive: pointer.BoolPtr(true), - ServerGroupID: serverGroupUUID, - }, - } -} - -func TestService_CreateInstance(t *testing.T) { +func TestService_ReconcileInstance(t *testing.T) { RegisterTestingT(t) getDefaultServerMap := func() map[string]interface{} { @@ -603,6 +582,9 @@ func TestService_CreateInstance(t *testing.T) { "imageRef": imageUUID, "flavorRef": flavorUUID, "availability_zone": failureDomain, + "security_groups": []map[string]interface{}{ + {"name": workerSecurityGroupUUID}, + }, "networks": []map[string]interface{}{ {"port": portUUID}, }, @@ -686,6 +668,10 @@ func TestService_CreateInstance(t *testing.T) { computeRecorder.GetFlavorIDFromName(flavorName).Return(flavorUUID, nil) } + expectGetDefaultServerByName := func(computeRecorder *MockClientMockRecorder) { + computeRecorder.ListServers(servers.ListOpts{Name: fmt.Sprintf("^%s$", openStackMachineName)}).Return([]ServerExt{}, nil) + } + // Expected calls and custom match function for creating a server expectCreateServer := func(computeRecorder *MockClientMockRecorder, expectedCreateOpts map[string]interface{}, wantError bool) { // This nonsense is because ConfigDrive is a bool pointer, so we @@ -741,18 +727,14 @@ func TestService_CreateInstance(t *testing.T) { // ******************* tests := []struct { - name string - getMachine func() *clusterv1.Machine - 
getOpenStackCluster func() *infrav1.OpenStackCluster - getOpenStackMachine func() *infrav1.OpenStackMachine - expect func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) - wantErr bool + name string + getInstanceSpec func() *InstanceSpec + expect func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) + wantErr bool }{ { - name: "Defaults", - getMachine: getDefaultMachine, - getOpenStackCluster: getDefaultOpenStackCluster, - getOpenStackMachine: getDefaultOpenStackMachine, + name: "Defaults", + getInstanceSpec: getDefaultInstanceSpec, expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { expectUseExistingDefaultPort(networkRecorder) expectDefaultImageAndFlavor(computeRecorder) @@ -763,10 +745,8 @@ func TestService_CreateInstance(t *testing.T) { wantErr: false, }, { - name: "Delete ports on server create error", - getMachine: getDefaultMachine, - getOpenStackCluster: getDefaultOpenStackCluster, - getOpenStackMachine: getDefaultOpenStackMachine, + name: "Delete ports on server create error", + getInstanceSpec: getDefaultInstanceSpec, expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { expectUseExistingDefaultPort(networkRecorder) expectDefaultImageAndFlavor(computeRecorder) @@ -779,16 +759,14 @@ func TestService_CreateInstance(t *testing.T) { wantErr: true, }, { - name: "Delete previously created ports on port creation error", - getMachine: getDefaultMachine, - getOpenStackCluster: getDefaultOpenStackCluster, - getOpenStackMachine: func() *infrav1.OpenStackMachine { - m := getDefaultOpenStackMachine() - m.Spec.Ports = []infrav1.PortOpts{ + name: "Delete previously created ports on port creation error", + getInstanceSpec: func() *InstanceSpec { + s := getDefaultInstanceSpec() + s.Ports = []infrav1.PortOpts{ {Description: "Test 
port 0"}, {Description: "Test port 1"}, } - return m + return s }, expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { computeRecorder.ListImages(images.ListOpts{Name: imageName}).Return([]images.Image{{ID: imageUUID}}, nil) @@ -808,10 +786,8 @@ func TestService_CreateInstance(t *testing.T) { wantErr: true, }, { - name: "Poll until server is created", - getMachine: getDefaultMachine, - getOpenStackCluster: getDefaultOpenStackCluster, - getOpenStackMachine: getDefaultOpenStackMachine, + name: "Poll until server is created", + getInstanceSpec: getDefaultInstanceSpec, expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { expectUseExistingDefaultPort(networkRecorder) expectDefaultImageAndFlavor(computeRecorder) @@ -822,10 +798,8 @@ func TestService_CreateInstance(t *testing.T) { wantErr: false, }, { - name: "Server errors during creation", - getMachine: getDefaultMachine, - getOpenStackCluster: getDefaultOpenStackCluster, - getOpenStackMachine: getDefaultOpenStackMachine, + name: "Server errors during creation", + getInstanceSpec: getDefaultInstanceSpec, expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { expectUseExistingDefaultPort(networkRecorder) expectDefaultImageAndFlavor(computeRecorder) @@ -838,15 +812,13 @@ func TestService_CreateInstance(t *testing.T) { wantErr: true, }, { - name: "Boot from volume success", - getMachine: getDefaultMachine, - getOpenStackCluster: getDefaultOpenStackCluster, - getOpenStackMachine: func() *infrav1.OpenStackMachine { - osMachine := getDefaultOpenStackMachine() - osMachine.Spec.RootVolume = &infrav1.RootVolume{ + name: "Boot from volume success", + getInstanceSpec: func() *InstanceSpec { + s := getDefaultInstanceSpec() + s.RootVolume = &infrav1.RootVolume{ Size: 50, } - return osMachine + return s }, expect: func(computeRecorder 
*MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { expectUseExistingDefaultPort(networkRecorder) @@ -883,17 +855,15 @@ func TestService_CreateInstance(t *testing.T) { wantErr: false, }, { - name: "Boot from volume with explicit AZ and volume type", - getMachine: getDefaultMachine, - getOpenStackCluster: getDefaultOpenStackCluster, - getOpenStackMachine: func() *infrav1.OpenStackMachine { - osMachine := getDefaultOpenStackMachine() - osMachine.Spec.RootVolume = &infrav1.RootVolume{ + name: "Boot from volume with explicit AZ and volume type", + getInstanceSpec: func() *InstanceSpec { + s := getDefaultInstanceSpec() + s.RootVolume = &infrav1.RootVolume{ Size: 50, AvailabilityZone: "test-alternate-az", VolumeType: "test-volume-type", } - return osMachine + return s }, expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { expectUseExistingDefaultPort(networkRecorder) @@ -931,15 +901,13 @@ func TestService_CreateInstance(t *testing.T) { wantErr: false, }, { - name: "Boot from volume failure cleans up ports", - getMachine: getDefaultMachine, - getOpenStackCluster: getDefaultOpenStackCluster, - getOpenStackMachine: func() *infrav1.OpenStackMachine { - osMachine := getDefaultOpenStackMachine() - osMachine.Spec.RootVolume = &infrav1.RootVolume{ + name: "Boot from volume failure cleans up ports", + getInstanceSpec: func() *InstanceSpec { + s := getDefaultInstanceSpec() + s.RootVolume = &infrav1.RootVolume{ Size: 50, } - return osMachine + return s }, expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { expectUseExistingDefaultPort(networkRecorder) @@ -962,100 +930,14 @@ func TestService_CreateInstance(t *testing.T) { wantErr: true, }, { - name: "Set control plane security group", - getMachine: func() *clusterv1.Machine { - machine := getDefaultMachine() - machine.Labels = map[string]string{ - 
clusterv1.MachineControlPlaneLabelName: "true", - } - return machine - }, - getOpenStackCluster: func() *infrav1.OpenStackCluster { - osCluster := getDefaultOpenStackCluster() - osCluster.Spec.ManagedSecurityGroups = true - return osCluster - }, - getOpenStackMachine: getDefaultOpenStackMachine, - expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { - expectUseExistingDefaultPort(networkRecorder) - expectDefaultImageAndFlavor(computeRecorder) - - createMap := getDefaultServerMap() - serverMap := createMap["server"].(map[string]interface{}) - serverMap["security_groups"] = []map[string]interface{}{ - {"name": controlPlaneSecurityGroupUUID}, - } - expectCreateServer(computeRecorder, createMap, false) - expectServerPollSuccess(computeRecorder) - }, - wantErr: false, - }, - { - name: "Set worker security group", - getMachine: getDefaultMachine, - getOpenStackCluster: func() *infrav1.OpenStackCluster { - osCluster := getDefaultOpenStackCluster() - osCluster.Spec.ManagedSecurityGroups = true - return osCluster - }, - getOpenStackMachine: getDefaultOpenStackMachine, - expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { - expectUseExistingDefaultPort(networkRecorder) - expectDefaultImageAndFlavor(computeRecorder) - - createMap := getDefaultServerMap() - serverMap := createMap["server"].(map[string]interface{}) - serverMap["security_groups"] = []map[string]interface{}{ - {"name": workerSecurityGroupUUID}, - } - expectCreateServer(computeRecorder, createMap, false) - expectServerPollSuccess(computeRecorder) - }, - wantErr: false, - }, - { - name: "Set extra security group", - getMachine: getDefaultMachine, - getOpenStackCluster: func() *infrav1.OpenStackCluster { - osCluster := getDefaultOpenStackCluster() - osCluster.Spec.ManagedSecurityGroups = true - return osCluster - }, - getOpenStackMachine: func() *infrav1.OpenStackMachine { - 
osMachine := getDefaultOpenStackMachine() - osMachine.Spec.SecurityGroups = []infrav1.SecurityGroupParam{{UUID: extraSecurityGroupUUID}} - return osMachine - }, - expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { - expectUseExistingDefaultPort(networkRecorder) - expectDefaultImageAndFlavor(computeRecorder) - - // TODO: Shortcut this API call if security groups are passed by UUID - networkRecorder.ListSecGroup(groups.ListOpts{ID: extraSecurityGroupUUID}). - Return([]groups.SecGroup{{ID: extraSecurityGroupUUID}}, nil) - - createMap := getDefaultServerMap() - serverMap := createMap["server"].(map[string]interface{}) - serverMap["security_groups"] = []map[string]interface{}{ - {"name": extraSecurityGroupUUID}, - {"name": workerSecurityGroupUUID}, - } - expectCreateServer(computeRecorder, createMap, false) - expectServerPollSuccess(computeRecorder) - }, - wantErr: false, - }, - { - name: "Delete trunks on port creation error", - getMachine: getDefaultMachine, - getOpenStackCluster: getDefaultOpenStackCluster, - getOpenStackMachine: func() *infrav1.OpenStackMachine { - m := getDefaultOpenStackMachine() - m.Spec.Ports = []infrav1.PortOpts{ + name: "Delete trunks on port creation error", + getInstanceSpec: func() *InstanceSpec { + s := getDefaultInstanceSpec() + s.Ports = []infrav1.PortOpts{ {Description: "Test port 0", Trunk: pointer.BoolPtr(true)}, {Description: "Test port 1"}, } - return m + return s }, expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { computeRecorder.ListImages(images.ListOpts{Name: imageName}).Return([]images.Image{{ID: imageUUID}}, nil) @@ -1116,6 +998,7 @@ func TestService_CreateInstance(t *testing.T) { computeRecorder := mockComputeClient.EXPECT() networkRecorder := mockNetworkClient.EXPECT() + expectGetDefaultServerByName(computeRecorder) tt.expect(computeRecorder, networkRecorder) s := Service{ @@ -1129,7 
+1012,7 @@ func TestService_CreateInstance(t *testing.T) { ), } - // Call CreateInstance with a reduced retry interval to speed up the test + // Call reconcileInstanceImpl with a reduced retry interval to speed up the test - _, err := s.createInstanceImpl(tt.getOpenStackCluster(), tt.getMachine(), tt.getOpenStackMachine(), "cluster-name", "user-data", time.Second) + _, err := s.reconcileInstanceImpl(&infrav1.OpenStackMachine{}, getDefaultOpenStackCluster(), tt.getInstanceSpec(), "cluster-name", time.Nanosecond) if (err != nil) != tt.wantErr { - t.Errorf("Service.CreateInstance() error = %v, wantErr %v", err, tt.wantErr) + t.Errorf("Service.ReconcileInstance() error = %v, wantErr %v", err, tt.wantErr) return @@ -1141,15 +1024,6 @@ func TestService_DeleteInstance(t *testing.T) { RegisterTestingT(t) - const instanceUUID = "7b8a2800-c615-4f52-9b75-d2ba60a2af66" - const portUUID = "94f3e9cb-89d5-4313-ad6d-44035722342b" - - const instanceName = "test-instance" - - getEventObject := func() runtime.Object { - return &infrav1.OpenStackMachine{} - } - getDefaultInstanceStatus := func() *InstanceStatus { return &InstanceStatus{ server: &ServerExt{ @@ -1160,27 +1034,23 @@ func TestService_DeleteInstance(t *testing.T) { } } - getDefaultOpenStackMachineSpec := func() *infrav1.OpenStackMachineSpec { - return &getDefaultOpenStackMachine().Spec - } - // ******************* // START OF TEST CASES // ******************* tests := []struct { - name string - eventObject runtime.Object - getOpenStackMachineSpec func() *infrav1.OpenStackMachineSpec - getInstanceStatus func() *InstanceStatus - expect func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) - wantErr bool + name string + eventObject runtime.Object + instanceSpec func() *InstanceSpec + instanceStatus func() *InstanceStatus + expect func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) + wantErr bool }{ { - name: "Defaults", - eventObject: getEventObject(), - getOpenStackMachineSpec: getDefaultOpenStackMachineSpec, - getInstanceStatus:
getDefaultInstanceStatus, + name: "Defaults", + eventObject: &infrav1.OpenStackMachine{}, + instanceSpec: getDefaultInstanceSpec, + instanceStatus: getDefaultInstanceStatus, expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { computeRecorder.ListAttachedInterfaces(instanceUUID).Return([]attachinterfaces.Interface{ { @@ -1204,18 +1074,18 @@ func TestService_DeleteInstance(t *testing.T) { }, { name: "Dangling volume", - eventObject: getEventObject(), - getOpenStackMachineSpec: func() *infrav1.OpenStackMachineSpec { - spec := getDefaultOpenStackMachineSpec() + eventObject: &infrav1.OpenStackMachine{}, + instanceSpec: func() *InstanceSpec { + spec := getDefaultInstanceSpec() spec.RootVolume = &infrav1.RootVolume{ Size: 50, } return spec }, - getInstanceStatus: func() *InstanceStatus { return nil }, + instanceStatus: func() *InstanceStatus { return nil }, expect: func(computeRecorder *MockClientMockRecorder, networkRecorder *mock_networking.MockNetworkClientMockRecorder) { // Fetch volume by name - volumeName := fmt.Sprintf("%s-root", instanceName) + volumeName := fmt.Sprintf("%s-root", openStackMachineName) computeRecorder.ListVolumes(volumes.ListOpts{ AllTenants: false, Name: volumeName, @@ -1252,7 +1122,7 @@ func TestService_DeleteInstance(t *testing.T) { "", mockNetworkClient, logr.Discard(), ), } - if err := s.DeleteInstance(tt.eventObject, tt.getOpenStackMachineSpec(), instanceName, tt.getInstanceStatus()); (err != nil) != tt.wantErr { + if err := s.DeleteInstance(tt.eventObject, tt.instanceSpec(), tt.instanceStatus()); (err != nil) != tt.wantErr { t.Errorf("Service.DeleteInstance() error = %v, wantErr %v", err, tt.wantErr) } }) diff --git a/pkg/cloud/services/compute/instance_types.go b/pkg/cloud/services/compute/instance_types.go index 495fca1f1c..7eefeb8384 100644 --- a/pkg/cloud/services/compute/instance_types.go +++ b/pkg/cloud/services/compute/instance_types.go @@ -46,8 +46,9 @@ type 
InstanceSpec struct { ServerGroupID string Trunk bool Tags []string - SecurityGroups []string - Networks []infrav1.Network + SecurityGroups []infrav1.SecurityGroupParam + Networks []infrav1.NetworkParam + Ports []infrav1.PortOpts } // InstanceIdentifier describes an instance which has not necessarily been fetched. diff --git a/pkg/cloud/services/networking/securitygroups.go b/pkg/cloud/services/networking/securitygroups.go index c339a629a4..7ed2b6968f 100644 --- a/pkg/cloud/services/networking/securitygroups.go +++ b/pkg/cloud/services/networking/securitygroups.go @@ -393,6 +393,15 @@ func (s *Service) generateDesiredSecGroups(openStackCluster *infrav1.OpenStackCl func (s *Service) GetSecurityGroups(securityGroupParams []infrav1.SecurityGroupParam) ([]string, error) { var sgIDs []string for _, sg := range securityGroupParams { + // Don't validate an explicit UUID if we were given one + if sg.UUID != "" { + if isDuplicate(sgIDs, sg.UUID) { + continue + } + sgIDs = append(sgIDs, sg.UUID) + continue + } + listOpts := groups.ListOpts(sg.Filter) if listOpts.ProjectID == "" { listOpts.ProjectID = s.projectID