diff --git a/api/v1alpha5/conversion.go b/api/v1alpha5/conversion.go index 8fec9715e8..3f41962b92 100644 --- a/api/v1alpha5/conversion.go +++ b/api/v1alpha5/conversion.go @@ -433,3 +433,7 @@ func Convert_v1alpha5_OpenStackClusterStatus_To_v1alpha7_OpenStackClusterStatus( return nil } + +func Convert_v1alpha7_OpenStackMachineSpec_To_v1alpha5_OpenStackMachineSpec(in *infrav1.OpenStackMachineSpec, out *OpenStackMachineSpec, s conversion.Scope) error { + return autoConvert_v1alpha7_OpenStackMachineSpec_To_v1alpha5_OpenStackMachineSpec(in, out, s) +} diff --git a/api/v1alpha5/zz_generated.conversion.go b/api/v1alpha5/zz_generated.conversion.go index ef43a3c824..0ad34a27d8 100644 --- a/api/v1alpha5/zz_generated.conversion.go +++ b/api/v1alpha5/zz_generated.conversion.go @@ -194,11 +194,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha7.OpenStackMachineSpec)(nil), (*OpenStackMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha7_OpenStackMachineSpec_To_v1alpha5_OpenStackMachineSpec(a.(*v1alpha7.OpenStackMachineSpec), b.(*OpenStackMachineSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*OpenStackMachineStatus)(nil), (*v1alpha7.OpenStackMachineStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha5_OpenStackMachineStatus_To_v1alpha7_OpenStackMachineStatus(a.(*OpenStackMachineStatus), b.(*v1alpha7.OpenStackMachineStatus), scope) }); err != nil { @@ -394,6 +389,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1alpha7.OpenStackMachineSpec)(nil), (*OpenStackMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha7_OpenStackMachineSpec_To_v1alpha5_OpenStackMachineSpec(a.(*v1alpha7.OpenStackMachineSpec), b.(*OpenStackMachineSpec), scope) + }); err != nil { + 
return err + } if err := s.AddConversionFunc((*v1alpha7.PortOpts)(nil), (*PortOpts)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha7_PortOpts_To_v1alpha5_PortOpts(a.(*v1alpha7.PortOpts), b.(*PortOpts), scope) }); err != nil { @@ -1134,16 +1134,12 @@ func autoConvert_v1alpha7_OpenStackMachineSpec_To_v1alpha5_OpenStackMachineSpec( out.ServerMetadata = *(*map[string]string)(unsafe.Pointer(&in.ServerMetadata)) out.ConfigDrive = (*bool)(unsafe.Pointer(in.ConfigDrive)) out.RootVolume = (*RootVolume)(unsafe.Pointer(in.RootVolume)) + // WARNING: in.AdditionalBlockDevices requires manual conversion: does not exist in peer-type out.ServerGroupID = in.ServerGroupID out.IdentityRef = (*OpenStackIdentityReference)(unsafe.Pointer(in.IdentityRef)) return nil } -// Convert_v1alpha7_OpenStackMachineSpec_To_v1alpha5_OpenStackMachineSpec is an autogenerated conversion function. -func Convert_v1alpha7_OpenStackMachineSpec_To_v1alpha5_OpenStackMachineSpec(in *v1alpha7.OpenStackMachineSpec, out *OpenStackMachineSpec, s conversion.Scope) error { - return autoConvert_v1alpha7_OpenStackMachineSpec_To_v1alpha5_OpenStackMachineSpec(in, out, s) -} - func autoConvert_v1alpha5_OpenStackMachineStatus_To_v1alpha7_OpenStackMachineStatus(in *OpenStackMachineStatus, out *v1alpha7.OpenStackMachineStatus, s conversion.Scope) error { out.Ready = in.Ready out.Addresses = *(*[]v1.NodeAddress)(unsafe.Pointer(&in.Addresses)) diff --git a/api/v1alpha6/conversion.go b/api/v1alpha6/conversion.go index 379eb17eac..0882aac11c 100644 --- a/api/v1alpha6/conversion.go +++ b/api/v1alpha6/conversion.go @@ -60,13 +60,12 @@ func restorev1alpha7MachineSpec(previous *infrav1.OpenStackMachineSpec, dst *inf // PropagateUplinkStatus has been added in v1alpha7. // We restore the whole Ports since they are anyway immutable. 
dst.Ports = previous.Ports + dst.AdditionalBlockDevices = previous.AdditionalBlockDevices } func restorev1alpha7Bastion(previous **infrav1.Bastion, dst **infrav1.Bastion) { - // PropagateUplinkStatus has been added in v1alpha7. - // We restore the whole Ports since they are anyway immutable. - if *previous != nil && (*previous).Instance.Ports != nil && *dst != nil && (*dst).Instance.Ports != nil { - (*dst).Instance.Ports = (*previous).Instance.Ports + if *previous != nil && *dst != nil { + restorev1alpha7MachineSpec(&(*previous).Instance, &(*dst).Instance) } } @@ -646,3 +645,7 @@ func Convert_v1alpha6_OpenStackClusterStatus_To_v1alpha7_OpenStackClusterStatus( return nil } + +func Convert_v1alpha7_OpenStackMachineSpec_To_v1alpha6_OpenStackMachineSpec(in *infrav1.OpenStackMachineSpec, out *OpenStackMachineSpec, s apiconversion.Scope) error { + return autoConvert_v1alpha7_OpenStackMachineSpec_To_v1alpha6_OpenStackMachineSpec(in, out, s) +} diff --git a/api/v1alpha6/zz_generated.conversion.go b/api/v1alpha6/zz_generated.conversion.go index 79eedbedce..dc87a6d961 100644 --- a/api/v1alpha6/zz_generated.conversion.go +++ b/api/v1alpha6/zz_generated.conversion.go @@ -204,11 +204,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha7.OpenStackMachineSpec)(nil), (*OpenStackMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha7_OpenStackMachineSpec_To_v1alpha6_OpenStackMachineSpec(a.(*v1alpha7.OpenStackMachineSpec), b.(*OpenStackMachineSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*OpenStackMachineStatus)(nil), (*v1alpha7.OpenStackMachineStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha6_OpenStackMachineStatus_To_v1alpha7_OpenStackMachineStatus(a.(*OpenStackMachineStatus), b.(*v1alpha7.OpenStackMachineStatus), scope) }); err != nil { @@ -404,6 +399,11 @@ func 
RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1alpha7.OpenStackMachineSpec)(nil), (*OpenStackMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha7_OpenStackMachineSpec_To_v1alpha6_OpenStackMachineSpec(a.(*v1alpha7.OpenStackMachineSpec), b.(*OpenStackMachineSpec), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1alpha7.PortOpts)(nil), (*PortOpts)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha7_PortOpts_To_v1alpha6_PortOpts(a.(*v1alpha7.PortOpts), b.(*PortOpts), scope) }); err != nil { @@ -1157,16 +1157,12 @@ func autoConvert_v1alpha7_OpenStackMachineSpec_To_v1alpha6_OpenStackMachineSpec( out.ServerMetadata = *(*map[string]string)(unsafe.Pointer(&in.ServerMetadata)) out.ConfigDrive = (*bool)(unsafe.Pointer(in.ConfigDrive)) out.RootVolume = (*RootVolume)(unsafe.Pointer(in.RootVolume)) + // WARNING: in.AdditionalBlockDevices requires manual conversion: does not exist in peer-type out.ServerGroupID = in.ServerGroupID out.IdentityRef = (*OpenStackIdentityReference)(unsafe.Pointer(in.IdentityRef)) return nil } -// Convert_v1alpha7_OpenStackMachineSpec_To_v1alpha6_OpenStackMachineSpec is an autogenerated conversion function. 
-func Convert_v1alpha7_OpenStackMachineSpec_To_v1alpha6_OpenStackMachineSpec(in *v1alpha7.OpenStackMachineSpec, out *OpenStackMachineSpec, s conversion.Scope) error { - return autoConvert_v1alpha7_OpenStackMachineSpec_To_v1alpha6_OpenStackMachineSpec(in, out, s) -} - func autoConvert_v1alpha6_OpenStackMachineStatus_To_v1alpha7_OpenStackMachineStatus(in *OpenStackMachineStatus, out *v1alpha7.OpenStackMachineStatus, s conversion.Scope) error { out.Ready = in.Ready out.Addresses = *(*[]v1.NodeAddress)(unsafe.Pointer(&in.Addresses)) diff --git a/api/v1alpha7/openstackmachine_types.go b/api/v1alpha7/openstackmachine_types.go index 5ef4a39b67..0ed7ca2bc0 100644 --- a/api/v1alpha7/openstackmachine_types.go +++ b/api/v1alpha7/openstackmachine_types.go @@ -84,6 +84,11 @@ type OpenStackMachineSpec struct { // The volume metadata to boot from RootVolume *RootVolume `json:"rootVolume,omitempty"` + // AdditionalBlockDevices is a list of specifications for additional block devices to attach to the server instance + // +listType=map + // +listMapKey=name + AdditionalBlockDevices []AdditionalBlockDevice `json:"additionalBlockDevices,omitempty"` + // The server group to assign the machine to ServerGroupID string `json:"serverGroupID,omitempty"` diff --git a/api/v1alpha7/types.go b/api/v1alpha7/types.go index 0a9d31f570..049cae2965 100644 --- a/api/v1alpha7/types.go +++ b/api/v1alpha7/types.go @@ -163,6 +163,20 @@ type RootVolume struct { AvailabilityZone string `json:"availabilityZone,omitempty"` } +type AdditionalBlockDevice struct { + // Name of the Cinder volume in the context of a machine. + // It will be combined with the machine name to make the actual volume name. + Name string `json:"name"` + // Size is the size in GB of the volume. + Size int `json:"diskSize"` + // VolumeType is the volume type of the volume. + // If omitted, the default type will be used. 
+ VolumeType string `json:"volumeType,omitempty"` + // AvailabilityZone is the volume availability zone to create the volume in. + // If omitted, the availability zone of the server will be used. + AvailabilityZone string `json:"availabilityZone,omitempty"` +} + // NetworkStatus contains basic information about an existing neutron network. type NetworkStatus struct { Name string `json:"name"` diff --git a/api/v1alpha7/zz_generated.deepcopy.go b/api/v1alpha7/zz_generated.deepcopy.go index bbccd1e7d6..a6ba842aa7 100644 --- a/api/v1alpha7/zz_generated.deepcopy.go +++ b/api/v1alpha7/zz_generated.deepcopy.go @@ -52,6 +52,21 @@ func (in *APIServerLoadBalancer) DeepCopy() *APIServerLoadBalancer { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdditionalBlockDevice) DeepCopyInto(out *AdditionalBlockDevice) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalBlockDevice. +func (in *AdditionalBlockDevice) DeepCopy() *AdditionalBlockDevice { + if in == nil { + return nil + } + out := new(AdditionalBlockDevice) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AddressPair) DeepCopyInto(out *AddressPair) { *out = *in @@ -628,6 +643,11 @@ func (in *OpenStackMachineSpec) DeepCopyInto(out *OpenStackMachineSpec) { *out = new(RootVolume) **out = **in } + if in.AdditionalBlockDevices != nil { + in, out := &in.AdditionalBlockDevices, &out.AdditionalBlockDevices + *out = make([]AdditionalBlockDevice, len(*in)) + copy(*out, *in) + } if in.IdentityRef != nil { in, out := &in.IdentityRef, &out.IdentityRef *out = new(OpenStackIdentityReference) diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclusters.yaml index 5dcbcf227a..b5df40bd86 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclusters.yaml @@ -3770,6 +3770,36 @@ spec: instance: description: Instance for the bastion itself properties: + additionalBlockDevices: + description: AdditionalBlockDevices is a list of specifications + for additional block devices to attach to the server instance + items: + properties: + availabilityZone: + description: AvailabilityZone is the volume availability + zone to create the volume in. If omitted, the availability + zone of the server will be used. + type: string + diskSize: + description: Size is the size in GB of the volume. + type: integer + name: + description: Name of the Cinder volume in the context + of a machine. It will be combined with the machine + name to make the actual volume name. + type: string + volumeType: + description: VolumeType is the volume type of the volume. + If omitted, the default type will be used. 
+ type: string + required: + - diskSize + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map cloudName: description: The name of the cloud to use from the clouds secret diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclustertemplates.yaml index 3cfafc4600..f115f6f7fc 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclustertemplates.yaml @@ -1610,6 +1610,40 @@ spec: instance: description: Instance for the bastion itself properties: + additionalBlockDevices: + description: AdditionalBlockDevices is a list of specifications + for additional block devices to attach to the server + instance + items: + properties: + availabilityZone: + description: AvailabilityZone is the volume + availability zone to create the volume in. + If omitted, the availability zone of the server + will be used. + type: string + diskSize: + description: Size is the size in GB of the volume. + type: integer + name: + description: Name of the Cinder volume in the + context of a machine. It will be combined + with the machine name to make the actual volume + name. + type: string + volumeType: + description: VolumeType is the volume type of + the volume. If omitted, the default type will + be used. 
+ type: string + required: + - diskSize + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map cloudName: description: The name of the cloud to use from the clouds secret diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachines.yaml index 14801337bc..8bc2df59c6 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachines.yaml @@ -1159,6 +1159,36 @@ spec: spec: description: OpenStackMachineSpec defines the desired state of OpenStackMachine. properties: + additionalBlockDevices: + description: AdditionalBlockDevices is a list of specifications for + additional block devices to attach to the server instance + items: + properties: + availabilityZone: + description: AvailabilityZone is the volume availability zone + to create the volume in. If omitted, the availability zone + of the server will be used. + type: string + diskSize: + description: Size is the size in GB of the volume. + type: integer + name: + description: Name of the Cinder volume in the context of a machine. + It will be combined with the machine name to make the actual + volume name. + type: string + volumeType: + description: VolumeType is the volume type of the volume. If + omitted, the default type will be used. 
+ type: string + required: + - diskSize + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map cloudName: description: The name of the cloud to use from the clouds secret type: string diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachinetemplates.yaml index a7c6c89146..31b90927ff 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachinetemplates.yaml @@ -958,6 +958,36 @@ spec: description: Spec is the specification of the desired behavior of the machine. properties: + additionalBlockDevices: + description: AdditionalBlockDevices is a list of specifications + for additional block devices to attach to the server instance + items: + properties: + availabilityZone: + description: AvailabilityZone is the volume availability + zone to create the volume in. If omitted, the availability + zone of the server will be used. + type: string + diskSize: + description: Size is the size in GB of the volume. + type: integer + name: + description: Name of the Cinder volume in the context + of a machine. It will be combined with the machine + name to make the actual volume name. + type: string + volumeType: + description: VolumeType is the volume type of the volume. + If omitted, the default type will be used. 
+ type: string + required: + - diskSize + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map cloudName: description: The name of the cloud to use from the clouds secret diff --git a/controllers/openstackmachine_controller.go b/controllers/openstackmachine_controller.go index 4861032a14..037862386e 100644 --- a/controllers/openstackmachine_controller.go +++ b/controllers/openstackmachine_controller.go @@ -445,17 +445,18 @@ func (r *OpenStackMachineReconciler) getOrCreate(logger logr.Logger, cluster *cl func machineToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, userData string) *compute.InstanceSpec { instanceSpec := compute.InstanceSpec{ - Name: openStackMachine.Name, - Image: openStackMachine.Spec.Image, - ImageUUID: openStackMachine.Spec.ImageUUID, - Flavor: openStackMachine.Spec.Flavor, - SSHKeyName: openStackMachine.Spec.SSHKeyName, - UserData: userData, - Metadata: openStackMachine.Spec.ServerMetadata, - ConfigDrive: openStackMachine.Spec.ConfigDrive != nil && *openStackMachine.Spec.ConfigDrive, - RootVolume: openStackMachine.Spec.RootVolume, - ServerGroupID: openStackMachine.Spec.ServerGroupID, - Trunk: openStackMachine.Spec.Trunk, + Name: openStackMachine.Name, + Image: openStackMachine.Spec.Image, + ImageUUID: openStackMachine.Spec.ImageUUID, + Flavor: openStackMachine.Spec.Flavor, + SSHKeyName: openStackMachine.Spec.SSHKeyName, + UserData: userData, + Metadata: openStackMachine.Spec.ServerMetadata, + ConfigDrive: openStackMachine.Spec.ConfigDrive != nil && *openStackMachine.Spec.ConfigDrive, + RootVolume: openStackMachine.Spec.RootVolume, + AdditionalBlockDevices: openStackMachine.Spec.AdditionalBlockDevices, + ServerGroupID: openStackMachine.Spec.ServerGroupID, + Trunk: openStackMachine.Spec.Trunk, } // Add the failure domain only if specified diff --git a/docs/book/src/topics/crd-changes/v1alpha6-to-v1alpha7.md 
b/docs/book/src/topics/crd-changes/v1alpha6-to-v1alpha7.md index 53d5c15396..726642cfdd 100644 --- a/docs/book/src/topics/crd-changes/v1alpha6-to-v1alpha7.md +++ b/docs/book/src/topics/crd-changes/v1alpha6-to-v1alpha7.md @@ -14,6 +14,7 @@ - [Removal of securityGroups](#removal-of-securitygroups) - [Removal of tenantId and projectId](#removal-of-tenantid-and-projectid) - [Change to profile](#change-to-profile) + - [Creation of additionalBlockDevices](#creation-of-additionalblockdevices) - [`OpenStackCluster`](#openstackcluster) - [Change to externalRouterIPs.subnet](#change-to-externalrouteripssubnet) @@ -213,6 +214,21 @@ profile: TrustedVF: true ``` +#### Creation of additionalBlockDevices + +We now have the ability for a machine to have additional block devices attached. + +Here is an example of how to use `additionalBlockDevices` for adding additional Cinder volumes attached +to the server instance: + +```yaml +additionalBlockDevices: +- name: database + diskSize: 50 + volumeType: my-volume-type + availabilityZone: az0 +``` + ### `OpenStackCluster` #### Change to externalRouterIPs.subnet @@ -293,4 +309,4 @@ becomes cidr: 192.168.100.0/24 ``` -Nothing will currently create more than a single subnet, but there may be multiple subnets in the future. Similarly, code should no longer assume that the CIDR is an IPv4 CIDR, although nothing will currently create anything other than an IPv4 CIDR. \ No newline at end of file +Nothing will currently create more than a single subnet, but there may be multiple subnets in the future. Similarly, code should no longer assume that the CIDR is an IPv4 CIDR, although nothing will currently create anything other than an IPv4 CIDR. 
diff --git a/pkg/cloud/services/compute/instance.go b/pkg/cloud/services/compute/instance.go index 980e0c7d18..72122d2562 100644 --- a/pkg/cloud/services/compute/instance.go +++ b/pkg/cloud/services/compute/instance.go @@ -294,42 +294,12 @@ func (s *Service) createInstanceImpl(eventObject runtime.Object, openStackCluste }) } - volume, err := s.getOrCreateRootVolume(eventObject, instanceSpec, imageID) - if err != nil { - return nil, fmt.Errorf("error in get or create root volume: %w", err) - } - instanceCreateTimeout := getTimeout("CLUSTER_API_OPENSTACK_INSTANCE_CREATE_TIMEOUT", timeoutInstanceCreate) instanceCreateTimeout *= time.Minute - // Wait for volume to become available - if volume != nil { - err = wait.PollUntilContextTimeout(context.TODO(), retryIntervalInstanceStatus, instanceCreateTimeout, true, func(_ context.Context) (bool, error) { - createdVolume, err := s.getVolumeClient().GetVolume(volume.ID) - if err != nil { - if capoerrors.IsRetryable(err) { - return false, nil - } - return false, err - } - - switch createdVolume.Status { - case "available": - return true, nil - case "error": - return false, fmt.Errorf("volume %s is in error state", volume.ID) - default: - return false, nil - } - }) - if err != nil { - return nil, fmt.Errorf("volume %s did not become available: %w", volume.ID, err) - } - } - // Don't set ImageRef on the server if we're booting from volume var serverImageRef string - if volume == nil { + if !hasRootVolume(instanceSpec) { serverImageRef = imageID } @@ -345,7 +315,16 @@ func (s *Service) createInstanceImpl(eventObject runtime.Object, openStackCluste ConfigDrive: &instanceSpec.ConfigDrive, } - serverCreateOpts = applyRootVolume(serverCreateOpts, volume) + blockDevices, err := s.getBlockDevices(eventObject, instanceSpec, imageID, instanceCreateTimeout, retryInterval) + if err != nil { + return nil, err + } + if len(blockDevices) > 0 { + serverCreateOpts = bootfromvolume.CreateOptsExt{ + CreateOptsBuilder: serverCreateOpts, + 
BlockDevice: blockDevices, + } + } serverCreateOpts = applyServerGroupID(serverCreateOpts, instanceSpec.ServerGroupID) @@ -380,12 +359,12 @@ func (s *Service) createInstanceImpl(eventObject runtime.Object, openStackCluste return createdInstance, nil } -func rootVolumeName(instanceName string) string { - return fmt.Sprintf("%s-root", instanceName) +func volumeName(instanceName string, nameSuffix string) string { + return fmt.Sprintf("%s-%s", instanceName, nameSuffix) } -func hasRootVolume(rootVolume *infrav1.RootVolume) bool { - return rootVolume != nil && rootVolume.Size > 0 +func hasRootVolume(instanceSpec *InstanceSpec) bool { + return instanceSpec.RootVolume != nil && instanceSpec.RootVolume.Size > 0 } func (s *Service) getVolumeByName(name string) (*volumes.Volume, error) { @@ -407,68 +386,136 @@ func (s *Service) getVolumeByName(name string) (*volumes.Volume, error) { return &volumeList[0], nil } -func (s *Service) getOrCreateRootVolume(eventObject runtime.Object, instanceSpec *InstanceSpec, imageID string) (*volumes.Volume, error) { - rootVolume := instanceSpec.RootVolume - if !hasRootVolume(rootVolume) { - return nil, nil +// getOrCreateVolume gets or creates a volume with the given options. It returns the volume that already exists or the +// newly created one. It returns an error if the volume creation failed or if the expected volume size is different from +// the one that already exists. +func (s *Service) getOrCreateVolume(eventObject runtime.Object, opts volumes.CreateOpts) (*volumes.Volume, error) { + existingVolume, err := s.getVolumeByName(opts.Name) + if err != nil { + return nil, err } + if existingVolume != nil { + // TODO(emilien): Improve the checks here, there is an ongoing discussion in the community about how to do this + // which would involve adding metadata to the volume. 
+ if existingVolume.Size != opts.Size { + return nil, fmt.Errorf("expected to find volume %s with size %d; found size %d", opts.Name, opts.Size, existingVolume.Size) + } - name := rootVolumeName(instanceSpec.Name) - size := rootVolume.Size + s.scope.Logger().V(3).Info("Using existing volume", "name", opts.Name, "id", existingVolume.ID) + return existingVolume, nil + } - volume, err := s.getVolumeByName(name) + createdVolume, err := s.getVolumeClient().CreateVolume(opts) if err != nil { + record.Eventf(eventObject, "FailedCreateVolume", "Failed to create volume; name=%s size=%d err=%v", opts.Name, opts.Size, err) return nil, err } - if volume != nil { - if volume.Size != size { - return nil, fmt.Errorf("exected to find volume %s with size %d; found size %d", name, size, volume.Size) + record.Eventf(eventObject, "SuccessfulCreateVolume", "Created volume; id=%s", createdVolume.ID) + return createdVolume, err +} + +func (s *Service) waitForVolume(volumeID string, timeout time.Duration, retryInterval time.Duration) error { + return wait.PollUntilContextTimeout(context.TODO(), retryInterval, timeout, true, func(_ context.Context) (bool, error) { + volume, err := s.getVolumeClient().GetVolume(volumeID) + if err != nil { + if capoerrors.IsRetryable(err) { + return false, nil + } + return false, err } - s.scope.Logger().V(3).Info("Using existing root volume", "name", name) - return volume, nil - } + switch volume.Status { + case "available": + return true, nil + case "error": + return false, fmt.Errorf("volume %s is in error state", volumeID) + default: + return false, nil + } + }) +} +// getOrCreateVolumeBuilder gets or creates a volume with the given options. It returns the volume that already exists or the newly created one. +// It returns an error if the volume creation failed or if the expected volume is different from the one that already exists. 
+func (s *Service) getOrCreateVolumeBuilder(eventObject runtime.Object, instanceSpec *InstanceSpec, blockDevice infrav1.AdditionalBlockDevice, imageID string, description string) (*volumes.Volume, error) { availabilityZone := instanceSpec.FailureDomain - if rootVolume.AvailabilityZone != "" { - availabilityZone = rootVolume.AvailabilityZone + if blockDevice.AvailabilityZone != "" { + availabilityZone = blockDevice.AvailabilityZone } createOpts := volumes.CreateOpts{ - Size: rootVolume.Size, - Description: fmt.Sprintf("Root volume for %s", instanceSpec.Name), - Name: rootVolumeName(instanceSpec.Name), + Name: volumeName(instanceSpec.Name, blockDevice.Name), + Description: description, + Size: blockDevice.Size, ImageID: imageID, Multiattach: false, AvailabilityZone: availabilityZone, - VolumeType: rootVolume.VolumeType, + VolumeType: blockDevice.VolumeType, } - volume, err = s.getVolumeClient().CreateVolume(createOpts) - if err != nil { - record.Eventf(eventObject, "FailedCreateVolume", "Failed to create root volume; size=%d imageID=%s err=%v", size, imageID, err) - return nil, err - } - record.Eventf(eventObject, "SuccessfulCreateVolume", "Created root volume; id=%s", volume.ID) - return volume, err + + return s.getOrCreateVolume(eventObject, createOpts) } -// applyRootVolume sets a root volume if the root volume Size is not 0. -func applyRootVolume(opts servers.CreateOptsBuilder, volume *volumes.Volume) servers.CreateOptsBuilder { - if volume == nil { - return opts +// getBlockDevices returns a list of block devices that were created and attached to the instance. It returns an error +// if the root volume or any of the additional block devices could not be created. 
+func (s *Service) getBlockDevices(eventObject runtime.Object, instanceSpec *InstanceSpec, imageID string, timeout time.Duration, retryInterval time.Duration) ([]bootfromvolume.BlockDevice, error) { + blockDevices := []bootfromvolume.BlockDevice{} + + if hasRootVolume(instanceSpec) { + rootVolumeToBlockDevice := infrav1.AdditionalBlockDevice{ + Name: "root", + AvailabilityZone: instanceSpec.RootVolume.AvailabilityZone, + Size: instanceSpec.RootVolume.Size, + VolumeType: instanceSpec.RootVolume.VolumeType, + } + rootVolume, err := s.getOrCreateVolumeBuilder(eventObject, instanceSpec, rootVolumeToBlockDevice, imageID, fmt.Sprintf("Root volume for %s", instanceSpec.Name)) + if err != nil { + return []bootfromvolume.BlockDevice{}, err + } + blockDevices = append(blockDevices, bootfromvolume.BlockDevice{ + SourceType: bootfromvolume.SourceVolume, + DestinationType: bootfromvolume.DestinationVolume, + UUID: rootVolume.ID, + BootIndex: 0, + DeleteOnTermination: true, + }) + } else { + blockDevices = append(blockDevices, bootfromvolume.BlockDevice{ + SourceType: bootfromvolume.SourceImage, + DestinationType: bootfromvolume.DestinationLocal, + UUID: imageID, + BootIndex: 0, + DeleteOnTermination: true, + }) } - block := bootfromvolume.BlockDevice{ - SourceType: bootfromvolume.SourceVolume, - BootIndex: 0, - UUID: volume.ID, - DeleteOnTermination: true, - DestinationType: bootfromvolume.DestinationVolume, + for _, blockDeviceSpec := range instanceSpec.AdditionalBlockDevices { + blockDevice, err := s.getOrCreateVolumeBuilder(eventObject, instanceSpec, blockDeviceSpec, "", fmt.Sprintf("Additional block device for %s", instanceSpec.Name)) + if err != nil { + return []bootfromvolume.BlockDevice{}, err + } + blockDevices = append(blockDevices, bootfromvolume.BlockDevice{ + SourceType: bootfromvolume.SourceVolume, + DestinationType: bootfromvolume.DestinationVolume, + UUID: blockDevice.ID, + BootIndex: -1, + DeleteOnTermination: true, + Tag: blockDeviceSpec.Name, + }) } - return 
bootfromvolume.CreateOptsExt{ - CreateOptsBuilder: opts, - BlockDevice: []bootfromvolume.BlockDevice{block}, + + // Wait for any volumes in the block devices to become available + if len(blockDevices) > 0 { + for _, bd := range blockDevices { + if bd.SourceType == bootfromvolume.SourceVolume { + if err := s.waitForVolume(bd.UUID, timeout, retryInterval); err != nil { + return []bootfromvolume.BlockDevice{}, fmt.Errorf("volume %s did not become available: %w", bd.UUID, err) + } + } + } } + + return blockDevices, nil } // applyServerGroupID adds a scheduler hint to the CreateOptsBuilder, if the @@ -545,38 +592,24 @@ func (s *Service) GetManagementPort(openStackCluster *infrav1.OpenStackCluster, func (s *Service) DeleteInstance(openStackCluster *infrav1.OpenStackCluster, eventObject runtime.Object, instanceStatus *InstanceStatus, instanceSpec *InstanceSpec) error { if instanceStatus == nil { /* - We create a boot-from-volume instance in 2 steps: - 1. Create the volume - 2. Create the instance with the created root volume and set DeleteOnTermination + Attaching volumes to an instance is a two-step process: + + 1. Create the volume + 2. Create the instance with the created volumes in RootVolume and AdditionalBlockDevices fields with DeleteOnTermination=true - This introduces a new failure mode which has implications for safely deleting instances: we - might create the volume, but the instance create fails. This would leave us with a dangling - volume with no instance. + This has a possible failure mode where creating the volume succeeds but creating the instance + fails. In this case, we want to make sure that the dangling volumes are cleaned up. To handle this safely, we ensure that we never remove a machine finalizer until all resources - associated with the instance, including a root volume, have been deleted. To achieve this: - * We always call DeleteInstance when reconciling a delete, regardless of - whether the instance exists or not. 
- * If the instance was already deleted we check that the volume is also gone. + associated with the instance, including volumes, have been deleted. To achieve this: - Note that we don't need to separately delete the root volume when deleting the instance because + * We always call DeleteInstance when reconciling a delete, even if the instance does not exist + * If the instance was already deleted we check that the volumes are also gone + + Note that we don't need to separately delete the volumes when deleting the instance because DeleteOnTermination will ensure it is deleted in that case. */ - if hasRootVolume(instanceSpec.RootVolume) { - name := rootVolumeName(instanceSpec.Name) - volume, err := s.getVolumeByName(name) - if err != nil { - return err - } - if volume == nil { - return nil - } - - s.scope.Logger().V(2).Info("Deleting dangling root volume", "name", volume.Name, "ID", volume.ID) - return s.getVolumeClient().DeleteVolume(volume.ID, volumes.DeleteOpts{}) - } - - return nil + return s.deleteVolumes(instanceSpec) } instanceInterfaces, err := s.getComputeClient().ListAttachedInterfaces(instanceStatus.ID()) @@ -625,6 +658,34 @@ func (s *Service) DeleteInstance(openStackCluster *infrav1.OpenStackCluster, eve return s.deleteInstance(eventObject, instanceStatus.InstanceIdentifier()) } +func (s *Service) deleteVolumes(instanceSpec *InstanceSpec) error { + if hasRootVolume(instanceSpec) { + if err := s.deleteVolume(instanceSpec.Name, "root"); err != nil { + return err + } + } + for _, volumeSpec := range instanceSpec.AdditionalBlockDevices { + if err := s.deleteVolume(instanceSpec.Name, volumeSpec.Name); err != nil { + return err + } + } + return nil +} + +func (s *Service) deleteVolume(instanceName string, nameSuffix string) error { + volumeName := volumeName(instanceName, nameSuffix) + volume, err := s.getVolumeByName(volumeName) + if err != nil { + return err + } + if volume == nil { + return nil + } + + s.scope.Logger().V(2).Info("Deleting dangling volume", 
"name", volume.Name, "ID", volume.ID) + return s.getVolumeClient().DeleteVolume(volume.ID, volumes.DeleteOpts{}) +} + func (s *Service) deletePorts(eventObject runtime.Object, nets []servers.Network) error { trunkSupported, err := s.isTrunkExtSupported() if err != nil { diff --git a/pkg/cloud/services/compute/instance_test.go b/pkg/cloud/services/compute/instance_test.go index 698392ad85..f0d93b5737 100644 --- a/pkg/cloud/services/compute/instance_test.go +++ b/pkg/cloud/services/compute/instance_test.go @@ -219,17 +219,18 @@ func TestService_getImageID(t *testing.T) { } const ( - networkUUID = "d412171b-9fd7-41c1-95a6-c24e5953974d" - subnetUUID = "d2d8d98d-b234-477e-a547-868b7cb5d6a5" - portUUID = "e7b7f3d1-0a81-40b1-bfa6-a22a31b17816" - trunkUUID = "2cf74a9f-3e89-4546-9779-20f2503c4ae8" - imageUUID = "652b5a05-27fa-41d4-ac82-3e63cf6f7ab7" - flavorUUID = "6dc820db-f912-454e-a1e3-1081f3b8cc72" - instanceUUID = "383a8ec1-b6ea-4493-99dd-fc790da04ba9" - controlPlaneSecurityGroupUUID = "c9817a91-4821-42db-8367-2301002ab659" - workerSecurityGroupUUID = "9c6c0d28-03c9-436c-815d-58440ac2c1c8" - serverGroupUUID = "7b940d62-68ef-4e42-a76a-1a62e290509c" - volumeUUID = "d84fe775-e25d-4f80-9888-f701e996c689" + networkUUID = "d412171b-9fd7-41c1-95a6-c24e5953974d" + subnetUUID = "d2d8d98d-b234-477e-a547-868b7cb5d6a5" + portUUID = "e7b7f3d1-0a81-40b1-bfa6-a22a31b17816" + trunkUUID = "2cf74a9f-3e89-4546-9779-20f2503c4ae8" + imageUUID = "652b5a05-27fa-41d4-ac82-3e63cf6f7ab7" + flavorUUID = "6dc820db-f912-454e-a1e3-1081f3b8cc72" + instanceUUID = "383a8ec1-b6ea-4493-99dd-fc790da04ba9" + controlPlaneSecurityGroupUUID = "c9817a91-4821-42db-8367-2301002ab659" + workerSecurityGroupUUID = "9c6c0d28-03c9-436c-815d-58440ac2c1c8" + serverGroupUUID = "7b940d62-68ef-4e42-a76a-1a62e290509c" + rootVolumeUUID = "d84fe775-e25d-4f80-9888-f701e996c689" + additionalBlockDeviceVolumeUUID = "1d1d5a56-c433-41dd-8446-cba2077e96e9" openStackMachineName = "test-openstack-machine" portName = 
"test-openstack-machine-0" @@ -298,6 +299,15 @@ func TestService_ReconcileInstance(t *testing.T) { "test-metadata": "test-value", }, "user_data": &userData, + "block_device_mapping_v2": []map[string]interface{}{ + { + "delete_on_termination": true, + "destination_type": "local", + "source_type": "image", + "uuid": imageUUID, + "boot_index": float64(0), + }, + }, }, "os:scheduler_hints": map[string]interface{}{ "group": serverGroupUUID, @@ -407,22 +417,22 @@ func TestService_ReconcileInstance(t *testing.T) { expectServerPoll(computeRecorder, []string{"ACTIVE"}) } - returnedVolume := func(status string) *volumes.Volume { + returnedVolume := func(uuid string, status string) *volumes.Volume { return &volumes.Volume{ - ID: volumeUUID, + ID: uuid, Status: status, } } // Expected calls when polling for server creation - expectVolumePoll := func(volumeRecorder *mock.MockVolumeClientMockRecorder, states []string) { + expectVolumePoll := func(volumeRecorder *mock.MockVolumeClientMockRecorder, uuid string, states []string) { for _, state := range states { - volumeRecorder.GetVolume(volumeUUID).Return(returnedVolume(state), nil) + volumeRecorder.GetVolume(uuid).Return(returnedVolume(uuid, state), nil) } } - expectVolumePollSuccess := func(volumeRecorder *mock.MockVolumeClientMockRecorder) { - expectVolumePoll(volumeRecorder, []string{"available"}) + expectVolumePollSuccess := func(volumeRecorder *mock.MockVolumeClientMockRecorder, uuid string) { + expectVolumePoll(volumeRecorder, uuid, []string{"available"}) } // ******************* @@ -541,8 +551,8 @@ func TestService_ReconcileInstance(t *testing.T) { Name: fmt.Sprintf("%s-root", openStackMachineName), ImageID: imageUUID, Multiattach: false, - }).Return(&volumes.Volume{ID: volumeUUID}, nil) - expectVolumePollSuccess(r.volume) + }).Return(&volumes.Volume{ID: rootVolumeUUID}, nil) + expectVolumePollSuccess(r.volume, rootVolumeUUID) createMap := getDefaultServerMap() serverMap := createMap["server"].(map[string]interface{}) @@ 
-552,7 +562,7 @@ func TestService_ReconcileInstance(t *testing.T) { "delete_on_termination": true, "destination_type": "volume", "source_type": "volume", - "uuid": volumeUUID, + "uuid": rootVolumeUUID, "boot_index": float64(0), }, } @@ -588,8 +598,8 @@ func TestService_ReconcileInstance(t *testing.T) { Name: fmt.Sprintf("%s-root", openStackMachineName), ImageID: imageUUID, Multiattach: false, - }).Return(&volumes.Volume{ID: volumeUUID}, nil) - expectVolumePollSuccess(r.volume) + }).Return(&volumes.Volume{ID: rootVolumeUUID}, nil) + expectVolumePollSuccess(r.volume, rootVolumeUUID) createMap := getDefaultServerMap() serverMap := createMap["server"].(map[string]interface{}) @@ -599,7 +609,7 @@ func TestService_ReconcileInstance(t *testing.T) { "delete_on_termination": true, "destination_type": "volume", "source_type": "volume", - "uuid": volumeUUID, + "uuid": rootVolumeUUID, "boot_index": float64(0), }, } @@ -632,13 +642,195 @@ func TestService_ReconcileInstance(t *testing.T) { Name: fmt.Sprintf("%s-root", openStackMachineName), ImageID: imageUUID, Multiattach: false, - }).Return(&volumes.Volume{ID: volumeUUID}, nil) - expectVolumePoll(r.volume, []string{"creating", "error"}) + }).Return(&volumes.Volume{ID: rootVolumeUUID}, nil) + expectVolumePoll(r.volume, rootVolumeUUID, []string{"creating", "error"}) expectCleanupDefaultPort(r.network) }, wantErr: true, }, + { + name: "Root volume with additional block device success", + getInstanceSpec: func() *InstanceSpec { + s := getDefaultInstanceSpec() + s.RootVolume = &infrav1.RootVolume{ + Size: 50, + } + s.AdditionalBlockDevices = []infrav1.AdditionalBlockDevice{ + { + Name: "etcd", + Size: 50, + VolumeType: "test-volume-type", + }, + } + return s + }, + expect: func(r *recorders) { + expectUseExistingDefaultPort(r.network) + expectDefaultImageAndFlavor(r.compute, r.image) + + r.volume.ListVolumes(volumes.ListOpts{Name: fmt.Sprintf("%s-root", openStackMachineName)}). 
+ Return([]volumes.Volume{}, nil) + r.volume.CreateVolume(volumes.CreateOpts{ + Size: 50, + AvailabilityZone: failureDomain, + Description: fmt.Sprintf("Root volume for %s", openStackMachineName), + Name: fmt.Sprintf("%s-root", openStackMachineName), + ImageID: imageUUID, + Multiattach: false, + }).Return(&volumes.Volume{ID: rootVolumeUUID}, nil) + expectVolumePollSuccess(r.volume, rootVolumeUUID) + + r.volume.ListVolumes(volumes.ListOpts{Name: fmt.Sprintf("%s-etcd", openStackMachineName)}). + Return([]volumes.Volume{}, nil) + r.volume.CreateVolume(volumes.CreateOpts{ + Size: 50, + AvailabilityZone: failureDomain, + Description: fmt.Sprintf("Additional block device for %s", openStackMachineName), + Name: fmt.Sprintf("%s-etcd", openStackMachineName), + Multiattach: false, + VolumeType: "test-volume-type", + }).Return(&volumes.Volume{ID: additionalBlockDeviceVolumeUUID}, nil) + expectVolumePollSuccess(r.volume, additionalBlockDeviceVolumeUUID) + + createMap := getDefaultServerMap() + serverMap := createMap["server"].(map[string]interface{}) + serverMap["imageRef"] = "" + serverMap["block_device_mapping_v2"] = []map[string]interface{}{ + { + "source_type": "volume", + "uuid": rootVolumeUUID, + "boot_index": float64(0), + "delete_on_termination": true, + "destination_type": "volume", + }, + { + "source_type": "volume", + "uuid": additionalBlockDeviceVolumeUUID, + "boot_index": float64(-1), + "delete_on_termination": true, + "destination_type": "volume", + "tag": "etcd", + }, + } + expectCreateServer(r.compute, createMap, false) + expectServerPollSuccess(r.compute) + + // Don't delete ports because the server is created: DeleteInstance will do it + }, + wantErr: false, + }, + { + name: "Additional block device success", + getInstanceSpec: func() *InstanceSpec { + s := getDefaultInstanceSpec() + s.AdditionalBlockDevices = []infrav1.AdditionalBlockDevice{ + { + Name: "etcd", + Size: 50, + VolumeType: "test-volume-type", + }, + } + return s + }, + expect: func(r 
*recorders) { + expectUseExistingDefaultPort(r.network) + expectDefaultImageAndFlavor(r.compute, r.image) + + r.volume.ListVolumes(volumes.ListOpts{Name: fmt.Sprintf("%s-etcd", openStackMachineName)}). + Return([]volumes.Volume{}, nil) + r.volume.CreateVolume(volumes.CreateOpts{ + Size: 50, + AvailabilityZone: failureDomain, + Description: fmt.Sprintf("Additional block device for %s", openStackMachineName), + Name: fmt.Sprintf("%s-etcd", openStackMachineName), + Multiattach: false, + VolumeType: "test-volume-type", + }).Return(&volumes.Volume{ID: additionalBlockDeviceVolumeUUID}, nil) + expectVolumePollSuccess(r.volume, additionalBlockDeviceVolumeUUID) + + createMap := getDefaultServerMap() + serverMap := createMap["server"].(map[string]interface{}) + serverMap["block_device_mapping_v2"] = []map[string]interface{}{ + { + "source_type": "image", + "uuid": imageUUID, + "boot_index": float64(0), + "delete_on_termination": true, + "destination_type": "local", + }, + { + "source_type": "volume", + "uuid": additionalBlockDeviceVolumeUUID, + "boot_index": float64(-1), + "delete_on_termination": true, + "destination_type": "volume", + "tag": "etcd", + }, + } + expectCreateServer(r.compute, createMap, false) + expectServerPollSuccess(r.compute) + + // Don't delete ports because the server is created: DeleteInstance will do it + }, + wantErr: false, + }, + { + name: "Additional block device success with explicit AZ", + getInstanceSpec: func() *InstanceSpec { + s := getDefaultInstanceSpec() + s.AdditionalBlockDevices = []infrav1.AdditionalBlockDevice{ + { + Name: "etcd", + Size: 50, + VolumeType: "test-volume-type", + AvailabilityZone: "test-alternate-az", + }, + } + return s + }, + expect: func(r *recorders) { + expectUseExistingDefaultPort(r.network) + expectDefaultImageAndFlavor(r.compute, r.image) + + r.volume.ListVolumes(volumes.ListOpts{Name: fmt.Sprintf("%s-etcd", openStackMachineName)}). 
+ Return([]volumes.Volume{}, nil) + r.volume.CreateVolume(volumes.CreateOpts{ + Size: 50, + AvailabilityZone: "test-alternate-az", + Description: fmt.Sprintf("Additional block device for %s", openStackMachineName), + Name: fmt.Sprintf("%s-etcd", openStackMachineName), + Multiattach: false, + VolumeType: "test-volume-type", + }).Return(&volumes.Volume{ID: additionalBlockDeviceVolumeUUID}, nil) + expectVolumePollSuccess(r.volume, additionalBlockDeviceVolumeUUID) + + createMap := getDefaultServerMap() + serverMap := createMap["server"].(map[string]interface{}) + serverMap["block_device_mapping_v2"] = []map[string]interface{}{ + { + "source_type": "image", + "uuid": imageUUID, + "boot_index": float64(0), + "delete_on_termination": true, + "destination_type": "local", + }, + { + "source_type": "volume", + "uuid": additionalBlockDeviceVolumeUUID, + "boot_index": float64(-1), + "delete_on_termination": true, + "destination_type": "volume", + "tag": "etcd", + }, + } + expectCreateServer(r.compute, createMap, false) + expectServerPollSuccess(r.compute) + + // Don't delete ports because the server is created: DeleteInstance will do it + }, + wantErr: false, + }, { name: "Delete trunks on port creation error", getInstanceSpec: func() *InstanceSpec { @@ -795,12 +987,12 @@ func TestService_DeleteInstance(t *testing.T) { Name: volumeName, TenantID: "", }).Return([]volumes.Volume{{ - ID: volumeUUID, + ID: rootVolumeUUID, Name: volumeName, }}, nil) // Delete volume - r.volume.DeleteVolume(volumeUUID, volumes.DeleteOpts{}).Return(nil) + r.volume.DeleteVolume(rootVolumeUUID, volumes.DeleteOpts{}).Return(nil) }, wantErr: false, }, diff --git a/pkg/cloud/services/compute/instance_types.go b/pkg/cloud/services/compute/instance_types.go index c483e71ecb..0524e79774 100644 --- a/pkg/cloud/services/compute/instance_types.go +++ b/pkg/cloud/services/compute/instance_types.go @@ -33,21 +33,22 @@ import ( // InstanceSpec does not contain all of the fields of infrav1.Instance, as not // all 
of them can be set on a new instance. type InstanceSpec struct { - Name string - Image string - ImageUUID string - Flavor string - SSHKeyName string - UserData string - Metadata map[string]string - ConfigDrive bool - FailureDomain string - RootVolume *infrav1.RootVolume - ServerGroupID string - Trunk bool - Tags []string - SecurityGroups []infrav1.SecurityGroupFilter - Ports []infrav1.PortOpts + Name string + Image string + ImageUUID string + Flavor string + SSHKeyName string + UserData string + Metadata map[string]string + ConfigDrive bool + FailureDomain string + RootVolume *infrav1.RootVolume + AdditionalBlockDevices []infrav1.AdditionalBlockDevice + ServerGroupID string + Trunk bool + Tags []string + SecurityGroups []infrav1.SecurityGroupFilter + Ports []infrav1.PortOpts } // InstanceIdentifier describes an instance which has not necessarily been fetched. diff --git a/test/e2e/data/kustomize/multi-az/patch-machine-template-control-plane.yaml b/test/e2e/data/kustomize/multi-az/patch-machine-template-control-plane.yaml index f9af057890..73e908b4cc 100644 --- a/test/e2e/data/kustomize/multi-az/patch-machine-template-control-plane.yaml +++ b/test/e2e/data/kustomize/multi-az/patch-machine-template-control-plane.yaml @@ -3,3 +3,8 @@ path: /spec/template/spec/rootVolume value: diskSize: 15 +- op: add + path: /spec/template/spec/additionalBlockDevices + value: + - diskSize: 1 + name: extravol diff --git a/test/e2e/data/kustomize/multi-az/patch-machine-template-worker.yaml b/test/e2e/data/kustomize/multi-az/patch-machine-template-worker.yaml index f86b3ac976..f3e0be60cd 100644 --- a/test/e2e/data/kustomize/multi-az/patch-machine-template-worker.yaml +++ b/test/e2e/data/kustomize/multi-az/patch-machine-template-worker.yaml @@ -5,3 +5,10 @@ diskSize: 15 volumeType: ${OPENSTACK_VOLUME_TYPE_ALT} availabilityZone: ${OPENSTACK_FAILURE_DOMAIN} +- op: add + path: /spec/template/spec/additionalBlockDevices + value: + - diskSize: 1 + name: extravol + volumeType: 
${OPENSTACK_VOLUME_TYPE_ALT} + availabilityZone: ${OPENSTACK_FAILURE_DOMAIN} diff --git a/test/e2e/suites/e2e/e2e_test.go b/test/e2e/suites/e2e/e2e_test.go index e7e5a0bbde..203d53530e 100644 --- a/test/e2e/suites/e2e/e2e_test.go +++ b/test/e2e/suites/e2e/e2e_test.go @@ -554,7 +554,8 @@ var _ = Describe("e2e tests [PR-Blocking]", func() { allServerNames = append(allServerNames, name) } - bootVolumes := make(map[string]*volumes.Volume) + rootVolumes := make(map[string]*volumes.Volume) + additionalVolumes := make(map[string]*volumes.Volume) for _, machine := range allMachines { // The output of a HaveKey() failure against @@ -569,33 +570,61 @@ var _ = Describe("e2e tests [PR-Blocking]", func() { Equal(*machine.Spec.FailureDomain), fmt.Sprintf("Server %s was not scheduled in the correct AZ", machine.Name)) - // Check that all machines have the expected boot volume + // Check that all machines have the expected volumes: + // - 1 root volume + // - 1 additional volume volumes := server.AttachedVolumes - Expect(volumes).To(HaveLen(1)) + Expect(volumes).To(HaveLen(2)) - bootVolume, err := shared.GetOpenStackVolume(e2eCtx, volumes[0].ID) + // nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid does not guarantee order of the volumes + // so we need to find the boot volume by checking the "bootable" flag for now. + firstVolumeFound, err := shared.GetOpenStackVolume(e2eCtx, volumes[0].ID) Expect(err).NotTo(HaveOccurred(), "failed to get OpenStack volume %s for machine %s", volumes[0].ID, machine.Name) - bootVolumes[machine.Name] = bootVolume + secondVolumeFound, err := shared.GetOpenStackVolume(e2eCtx, volumes[1].ID) + Expect(err).NotTo(HaveOccurred(), "failed to get OpenStack volume %s for machine %s", volumes[1].ID, machine.Name) + + rootVolume := firstVolumeFound + additionalVolume := secondVolumeFound + // The boot volume is the one with the "bootable" flag set. 
+ if firstVolumeFound.Bootable != "true" { // This is genuinely a string, not a bool + rootVolume = secondVolumeFound + additionalVolume = firstVolumeFound + } - Expect(*bootVolume).To(MatchFields(IgnoreExtras, Fields{ + rootVolumes[machine.Name] = rootVolume + Expect(*rootVolume).To(MatchFields(IgnoreExtras, Fields{ "Name": Equal(fmt.Sprintf("%s-root", server.Name)), "Size": Equal(15), "Bootable": Equal("true"), // This is genuinely a string, not a bool - }), "Boot volume %s for machine %s not as expected", bootVolume.ID, machine.Name) + }), "Boot volume %s for machine %s not as expected", rootVolume.ID, machine.Name) + + additionalVolumes[machine.Name] = additionalVolume + Expect(*additionalVolume).To(MatchFields(IgnoreExtras, Fields{ + "Name": Equal(fmt.Sprintf("%s-extravol", server.Name)), + "Size": Equal(1), + }), "Additional block device %s for machine %s not as expected", additionalVolume.ID, machine.Name) } - // Expect all control plane machines to have a root volume in the same AZ as the machine, and the default volume type + // Expect all control plane machines to have volumes in the same AZ as the machine, and the default volume type for _, machine := range controlPlaneMachines { - bootVolume := bootVolumes[machine.Name] - Expect(bootVolume.AvailabilityZone).To(Equal(*machine.Spec.FailureDomain)) - Expect(bootVolume.VolumeType).NotTo(Equal(volumeTypeAlt)) + rootVolume := rootVolumes[machine.Name] + Expect(rootVolume.AvailabilityZone).To(Equal(*machine.Spec.FailureDomain)) + Expect(rootVolume.VolumeType).NotTo(Equal(volumeTypeAlt)) + + additionalVolume := additionalVolumes[machine.Name] + Expect(additionalVolume.AvailabilityZone).To(Equal(*machine.Spec.FailureDomain)) + Expect(additionalVolume.VolumeType).NotTo(Equal(volumeTypeAlt)) } - // Expect all worker machines to have a root volume in the primary AZ, and the test volume type + // Expect all worker machines to have volumes in the primary AZ, and the test volume type for _, machine := range 
workerMachines { - bootVolume := bootVolumes[machine.Name] - Expect(bootVolume.AvailabilityZone).To(Equal(failureDomain)) - Expect(bootVolume.VolumeType).To(Equal(volumeTypeAlt)) + rootVolume := rootVolumes[machine.Name] + Expect(rootVolume.AvailabilityZone).To(Equal(failureDomain)) + Expect(rootVolume.VolumeType).To(Equal(volumeTypeAlt)) + + additionalVolume := additionalVolumes[machine.Name] + Expect(additionalVolume.AvailabilityZone).To(Equal(failureDomain)) + Expect(additionalVolume.VolumeType).To(Equal(volumeTypeAlt)) } }) })