diff --git a/apis/metal3.io/v1alpha1/baremetalhost_types.go b/apis/metal3.io/v1alpha1/baremetalhost_types.go index 498f64b08c..f0dade95a1 100644 --- a/apis/metal3.io/v1alpha1/baremetalhost_types.go +++ b/apis/metal3.io/v1alpha1/baremetalhost_types.go @@ -220,6 +220,62 @@ type BMCDetails struct { DisableCertificateVerification bool `json:"disableCertificateVerification,omitempty"` } +// HardwareRAIDVolume defines the desired configuration of volume in hardware RAID +type HardwareRAIDVolume struct { + // Size (Integer) of the logical disk to be created in GiB. + // If unspecified or set be 0, the maximum capacity of disk will be used for logical disk. + // +kubebuilder:validation:Minimum=0 + SizeGibibytes *int `json:"sizeGibibytes,omitempty"` + + // RAID level for the logical disk. The following levels are supported: 0;1;2;5;6;1+0;5+0;6+0. + // +kubebuilder:validation:Enum="0";"1";"2";"5";"6";"1+0";"5+0";"6+0" + Level string `json:"level" required:"true"` + + // Name of the volume. Should be unique within the Node. If not specified, volume name will be auto-generated. + // +kubebuilder:validation:MaxLength=64 + Name string `json:"name,omitempty"` + + // Select disks with only rotational or solid-state storage + Rotational *bool `json:"rotational,omitempty"` + + // Integer, number of disks to use for the logical disk. Defaults to minimum number of disks required + // for the particular RAID level. + // +kubebuilder:validation:Minimum=1 + NumberOfPhysicalDisks *int `json:"numberOfPhysicalDisks,omitempty"` +} + +// SoftwareRAIDVolume defines the desired configuration of volume in software RAID +type SoftwareRAIDVolume struct { + // Size (Integer) of the logical disk to be created in GiB. + // If unspecified or set be 0, the maximum capacity of disk will be used for logical disk. + // +kubebuilder:validation:Minimum=0 + SizeGibibytes *int `json:"sizeGibibytes,omitempty"` + + // RAID level for the logical disk. The following levels are supported: 0;1;1+0. 
+ // +kubebuilder:validation:Enum="0";"1";"1+0" + Level string `json:"level" required:"true"` + + // A list of device hints, the number of item should be greater than or equal to 2. + // +kubebuilder:validation:MinItems=2 + PhysicalDisks []RootDeviceHints `json:"physicalDisks,omitempty"` +} + +// RAIDConfig contains the configuration that are required to config RAID in Bare Metal server +type RAIDConfig struct { + // The list of logical disks for hardware RAID, if rootDeviceHints isn't used, first volume is root volume. + HardwareRAIDVolumes []HardwareRAIDVolume `json:"hardwareRAIDVolumes,omitempty"` + + // The list of logical disks for software RAID, if rootDeviceHints isn't used, first volume is root volume. + // If HardwareRAIDVolumes is set this item will be invalid. + // The number of created Software RAID devices must be 1 or 2. + // If there is only one Software RAID device, it has to be a RAID-1. + // If there are two, the first one has to be a RAID-1, while the RAID level for the second one can be 0, 1, or 1+0. + // As the first RAID device will be the deployment device, + // enforcing a RAID-1 reduces the risk of ending up with a non-booting node in case of a disk failure. + // +kubebuilder:validation:MaxItems=2 + SoftwareRAIDVolumes []SoftwareRAIDVolume `json:"softwareRAIDVolumes,omitempty"` +} + // BareMetalHostSpec defines the desired state of BareMetalHost type BareMetalHostSpec struct { // Important: Run "make generate manifests" to regenerate code @@ -234,6 +290,9 @@ type BareMetalHostSpec struct { // How do we connect to the BMC? BMC BMCDetails `json:"bmc,omitempty"` + // RAID configuration for bare metal server + RAID *RAIDConfig `json:"raid,omitempty"` + // What is the name of the hardware profile for this host? It // should only be necessary to set this when inspection cannot // automatically determine the profile. 
@@ -603,6 +662,9 @@ type ProvisionStatus struct { // BootMode indicates the boot mode used to provision the node BootMode BootMode `json:"bootMode,omitempty"` + + // The Raid set by the user + RAID *RAIDConfig `json:"raid,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/apis/metal3.io/v1alpha1/zz_generated.deepcopy.go b/apis/metal3.io/v1alpha1/zz_generated.deepcopy.go index 499b596bfc..5cb07ddb38 100644 --- a/apis/metal3.io/v1alpha1/zz_generated.deepcopy.go +++ b/apis/metal3.io/v1alpha1/zz_generated.deepcopy.go @@ -125,6 +125,11 @@ func (in *BareMetalHostSpec) DeepCopyInto(out *BareMetalHostSpec) { } } out.BMC = in.BMC + if in.RAID != nil { + in, out := &in.RAID, &out.RAID + *out = new(RAIDConfig) + (*in).DeepCopyInto(*out) + } if in.RootDeviceHints != nil { in, out := &in.RootDeviceHints, &out.RootDeviceHints *out = new(RootDeviceHints) @@ -281,6 +286,36 @@ func (in *HardwareDetails) DeepCopy() *HardwareDetails { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HardwareRAIDVolume) DeepCopyInto(out *HardwareRAIDVolume) { + *out = *in + if in.SizeGibibytes != nil { + in, out := &in.SizeGibibytes, &out.SizeGibibytes + *out = new(int) + **out = **in + } + if in.Rotational != nil { + in, out := &in.Rotational, &out.Rotational + *out = new(bool) + **out = **in + } + if in.NumberOfPhysicalDisks != nil { + in, out := &in.NumberOfPhysicalDisks, &out.NumberOfPhysicalDisks + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HardwareRAIDVolume. +func (in *HardwareRAIDVolume) DeepCopy() *HardwareRAIDVolume { + if in == nil { + return nil + } + out := new(HardwareRAIDVolume) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *HardwareSystemVendor) DeepCopyInto(out *HardwareSystemVendor) { *out = *in @@ -381,6 +416,11 @@ func (in *ProvisionStatus) DeepCopyInto(out *ProvisionStatus) { *out = new(RootDeviceHints) (*in).DeepCopyInto(*out) } + if in.RAID != nil { + in, out := &in.RAID, &out.RAID + *out = new(RAIDConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisionStatus. @@ -393,6 +433,35 @@ func (in *ProvisionStatus) DeepCopy() *ProvisionStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RAIDConfig) DeepCopyInto(out *RAIDConfig) { + *out = *in + if in.HardwareRAIDVolumes != nil { + in, out := &in.HardwareRAIDVolumes, &out.HardwareRAIDVolumes + *out = make([]HardwareRAIDVolume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SoftwareRAIDVolumes != nil { + in, out := &in.SoftwareRAIDVolumes, &out.SoftwareRAIDVolumes + *out = make([]SoftwareRAIDVolume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RAIDConfig. +func (in *RAIDConfig) DeepCopy() *RAIDConfig { + if in == nil { + return nil + } + out := new(RAIDConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RebootAnnotationArguments) DeepCopyInto(out *RebootAnnotationArguments) { *out = *in @@ -428,6 +497,33 @@ func (in *RootDeviceHints) DeepCopy() *RootDeviceHints { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SoftwareRAIDVolume) DeepCopyInto(out *SoftwareRAIDVolume) { + *out = *in + if in.SizeGibibytes != nil { + in, out := &in.SizeGibibytes, &out.SizeGibibytes + *out = new(int) + **out = **in + } + if in.PhysicalDisks != nil { + in, out := &in.PhysicalDisks, &out.PhysicalDisks + *out = make([]RootDeviceHints, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoftwareRAIDVolume. +func (in *SoftwareRAIDVolume) DeepCopy() *SoftwareRAIDVolume { + if in == nil { + return nil + } + out := new(SoftwareRAIDVolume) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Storage) DeepCopyInto(out *Storage) { *out = *in diff --git a/config/crd/bases/metal3.io_baremetalhosts.yaml b/config/crd/bases/metal3.io_baremetalhosts.yaml index 6cfb8962a5..3e34587963 100644 --- a/config/crd/bases/metal3.io_baremetalhosts.yaml +++ b/config/crd/bases/metal3.io_baremetalhosts.yaml @@ -179,6 +179,106 @@ spec: online: description: Should the server be online? type: boolean + raid: + description: RAID configuration for bare metal server + properties: + hardwareRAIDVolumes: + description: The list of logical disks for hardware RAID, if rootDeviceHints isn't used, first volume is root volume. + items: + description: HardwareRAIDVolume defines the desired configuration of volume in hardware RAID + properties: + level: + description: 'RAID level for the logical disk. The following levels are supported: 0;1;2;5;6;1+0;5+0;6+0.' + enum: + - "0" + - "1" + - "2" + - "5" + - "6" + - 1+0 + - 5+0 + - 6+0 + type: string + name: + description: Name of the volume. Should be unique within the Node. If not specified, volume name will be auto-generated. + maxLength: 64 + type: string + numberOfPhysicalDisks: + description: Integer, number of disks to use for the logical disk. 
Defaults to minimum number of disks required for the particular RAID level. + minimum: 1 + type: integer + rotational: + description: Select disks with only rotational or solid-state storage + type: boolean + sizeGibibytes: + description: Size (Integer) of the logical disk to be created in GiB. If unspecified or set be 0, the maximum capacity of disk will be used for logical disk. + minimum: 0 + type: integer + required: + - level + type: object + type: array + softwareRAIDVolumes: + description: The list of logical disks for software RAID, if rootDeviceHints isn't used, first volume is root volume. If HardwareRAIDVolumes is set this item will be invalid. The number of created Software RAID devices must be 1 or 2. If there is only one Software RAID device, it has to be a RAID-1. If there are two, the first one has to be a RAID-1, while the RAID level for the second one can be 0, 1, or 1+0. As the first RAID device will be the deployment device, enforcing a RAID-1 reduces the risk of ending up with a non-booting node in case of a disk failure. + items: + description: SoftwareRAIDVolume defines the desired configuration of volume in software RAID + properties: + level: + description: 'RAID level for the logical disk. The following levels are supported: 0;1;1+0.' + enum: + - "0" + - "1" + - 1+0 + type: string + physicalDisks: + description: A list of device hints, the number of item should be greater than or equal to 2. + items: + description: RootDeviceHints holds the hints for specifying the storage location for the root filesystem for the image. + properties: + deviceName: + description: A Linux device name like "/dev/vda". The hint must match the actual value exactly. + type: string + hctl: + description: A SCSI bus address like 0:0:0:0. The hint must match the actual value exactly. + type: string + minSizeGigabytes: + description: The minimum size of the device in Gigabytes. + minimum: 0 + type: integer + model: + description: A vendor-specific device identifier. 
The hint can be a substring of the actual value. + type: string + rotational: + description: True if the device should use spinning media, false otherwise. + type: boolean + serialNumber: + description: Device serial number. The hint must match the actual value exactly. + type: string + vendor: + description: The name of the vendor or manufacturer of the device. The hint can be a substring of the actual value. + type: string + wwn: + description: Unique storage identifier. The hint must match the actual value exactly. + type: string + wwnVendorExtension: + description: Unique vendor storage identifier. The hint must match the actual value exactly. + type: string + wwnWithExtension: + description: Unique storage identifier with the vendor extension appended. The hint must match the actual value exactly. + type: string + type: object + minItems: 2 + type: array + sizeGibibytes: + description: Size (Integer) of the logical disk to be created in GiB. If unspecified or set be 0, the maximum capacity of disk will be used for logical disk. + minimum: 0 + type: integer + required: + - level + type: object + maxItems: 2 + type: array + type: object rootDeviceHints: description: Provide guidance about how to choose the device for the image being provisioned. properties: @@ -573,6 +673,106 @@ spec: required: - url type: object + raid: + description: The Raid set by the user + properties: + hardwareRAIDVolumes: + description: The list of logical disks for hardware RAID, if rootDeviceHints isn't used, first volume is root volume. + items: + description: HardwareRAIDVolume defines the desired configuration of volume in hardware RAID + properties: + level: + description: 'RAID level for the logical disk. The following levels are supported: 0;1;2;5;6;1+0;5+0;6+0.' + enum: + - "0" + - "1" + - "2" + - "5" + - "6" + - 1+0 + - 5+0 + - 6+0 + type: string + name: + description: Name of the volume. Should be unique within the Node. If not specified, volume name will be auto-generated. 
+ maxLength: 64 + type: string + numberOfPhysicalDisks: + description: Integer, number of disks to use for the logical disk. Defaults to minimum number of disks required for the particular RAID level. + minimum: 1 + type: integer + rotational: + description: Select disks with only rotational or solid-state storage + type: boolean + sizeGibibytes: + description: Size (Integer) of the logical disk to be created in GiB. If unspecified or set be 0, the maximum capacity of disk will be used for logical disk. + minimum: 0 + type: integer + required: + - level + type: object + type: array + softwareRAIDVolumes: + description: The list of logical disks for software RAID, if rootDeviceHints isn't used, first volume is root volume. If HardwareRAIDVolumes is set this item will be invalid. The number of created Software RAID devices must be 1 or 2. If there is only one Software RAID device, it has to be a RAID-1. If there are two, the first one has to be a RAID-1, while the RAID level for the second one can be 0, 1, or 1+0. As the first RAID device will be the deployment device, enforcing a RAID-1 reduces the risk of ending up with a non-booting node in case of a disk failure. + items: + description: SoftwareRAIDVolume defines the desired configuration of volume in software RAID + properties: + level: + description: 'RAID level for the logical disk. The following levels are supported: 0;1;1+0.' + enum: + - "0" + - "1" + - 1+0 + type: string + physicalDisks: + description: A list of device hints, the number of item should be greater than or equal to 2. + items: + description: RootDeviceHints holds the hints for specifying the storage location for the root filesystem for the image. + properties: + deviceName: + description: A Linux device name like "/dev/vda". The hint must match the actual value exactly. + type: string + hctl: + description: A SCSI bus address like 0:0:0:0. The hint must match the actual value exactly. 
+ type: string + minSizeGigabytes: + description: The minimum size of the device in Gigabytes. + minimum: 0 + type: integer + model: + description: A vendor-specific device identifier. The hint can be a substring of the actual value. + type: string + rotational: + description: True if the device should use spinning media, false otherwise. + type: boolean + serialNumber: + description: Device serial number. The hint must match the actual value exactly. + type: string + vendor: + description: The name of the vendor or manufacturer of the device. The hint can be a substring of the actual value. + type: string + wwn: + description: Unique storage identifier. The hint must match the actual value exactly. + type: string + wwnVendorExtension: + description: Unique vendor storage identifier. The hint must match the actual value exactly. + type: string + wwnWithExtension: + description: Unique storage identifier with the vendor extension appended. The hint must match the actual value exactly. + type: string + type: object + minItems: 2 + type: array + sizeGibibytes: + description: Size (Integer) of the logical disk to be created in GiB. If unspecified or set be 0, the maximum capacity of disk will be used for logical disk. + minimum: 0 + type: integer + required: + - level + type: object + maxItems: 2 + type: array + type: object rootDeviceHints: description: The RootDevicehints set by the user properties: diff --git a/config/render/capm3.yaml b/config/render/capm3.yaml index af925086ab..3baf8f22e5 100644 --- a/config/render/capm3.yaml +++ b/config/render/capm3.yaml @@ -177,6 +177,106 @@ spec: online: description: Should the server be online? type: boolean + raid: + description: RAID configuration for bare metal server + properties: + hardwareRAIDVolumes: + description: The list of logical disks for hardware RAID, if rootDeviceHints isn't used, first volume is root volume. 
+ items: + description: HardwareRAIDVolume defines the desired configuration of volume in hardware RAID + properties: + level: + description: 'RAID level for the logical disk. The following levels are supported: 0;1;2;5;6;1+0;5+0;6+0.' + enum: + - "0" + - "1" + - "2" + - "5" + - "6" + - 1+0 + - 5+0 + - 6+0 + type: string + name: + description: Name of the volume. Should be unique within the Node. If not specified, volume name will be auto-generated. + maxLength: 64 + type: string + numberOfPhysicalDisks: + description: Integer, number of disks to use for the logical disk. Defaults to minimum number of disks required for the particular RAID level. + minimum: 1 + type: integer + rotational: + description: Select disks with only rotational or solid-state storage + type: boolean + sizeGibibytes: + description: Size (Integer) of the logical disk to be created in GiB. If unspecified or set be 0, the maximum capacity of disk will be used for logical disk. + minimum: 0 + type: integer + required: + - level + type: object + type: array + softwareRAIDVolumes: + description: The list of logical disks for software RAID, if rootDeviceHints isn't used, first volume is root volume. If HardwareRAIDVolumes is set this item will be invalid. The number of created Software RAID devices must be 1 or 2. If there is only one Software RAID device, it has to be a RAID-1. If there are two, the first one has to be a RAID-1, while the RAID level for the second one can be 0, 1, or 1+0. As the first RAID device will be the deployment device, enforcing a RAID-1 reduces the risk of ending up with a non-booting node in case of a disk failure. + items: + description: SoftwareRAIDVolume defines the desired configuration of volume in software RAID + properties: + level: + description: 'RAID level for the logical disk. The following levels are supported: 0;1;1+0.' 
+ enum: + - "0" + - "1" + - 1+0 + type: string + physicalDisks: + description: A list of device hints, the number of item should be greater than or equal to 2. + items: + description: RootDeviceHints holds the hints for specifying the storage location for the root filesystem for the image. + properties: + deviceName: + description: A Linux device name like "/dev/vda". The hint must match the actual value exactly. + type: string + hctl: + description: A SCSI bus address like 0:0:0:0. The hint must match the actual value exactly. + type: string + minSizeGigabytes: + description: The minimum size of the device in Gigabytes. + minimum: 0 + type: integer + model: + description: A vendor-specific device identifier. The hint can be a substring of the actual value. + type: string + rotational: + description: True if the device should use spinning media, false otherwise. + type: boolean + serialNumber: + description: Device serial number. The hint must match the actual value exactly. + type: string + vendor: + description: The name of the vendor or manufacturer of the device. The hint can be a substring of the actual value. + type: string + wwn: + description: Unique storage identifier. The hint must match the actual value exactly. + type: string + wwnVendorExtension: + description: Unique vendor storage identifier. The hint must match the actual value exactly. + type: string + wwnWithExtension: + description: Unique storage identifier with the vendor extension appended. The hint must match the actual value exactly. + type: string + type: object + minItems: 2 + type: array + sizeGibibytes: + description: Size (Integer) of the logical disk to be created in GiB. If unspecified or set be 0, the maximum capacity of disk will be used for logical disk. + minimum: 0 + type: integer + required: + - level + type: object + maxItems: 2 + type: array + type: object rootDeviceHints: description: Provide guidance about how to choose the device for the image being provisioned. 
properties: @@ -571,6 +671,106 @@ spec: required: - url type: object + raid: + description: The Raid set by the user + properties: + hardwareRAIDVolumes: + description: The list of logical disks for hardware RAID, if rootDeviceHints isn't used, first volume is root volume. + items: + description: HardwareRAIDVolume defines the desired configuration of volume in hardware RAID + properties: + level: + description: 'RAID level for the logical disk. The following levels are supported: 0;1;2;5;6;1+0;5+0;6+0.' + enum: + - "0" + - "1" + - "2" + - "5" + - "6" + - 1+0 + - 5+0 + - 6+0 + type: string + name: + description: Name of the volume. Should be unique within the Node. If not specified, volume name will be auto-generated. + maxLength: 64 + type: string + numberOfPhysicalDisks: + description: Integer, number of disks to use for the logical disk. Defaults to minimum number of disks required for the particular RAID level. + minimum: 1 + type: integer + rotational: + description: Select disks with only rotational or solid-state storage + type: boolean + sizeGibibytes: + description: Size (Integer) of the logical disk to be created in GiB. If unspecified or set be 0, the maximum capacity of disk will be used for logical disk. + minimum: 0 + type: integer + required: + - level + type: object + type: array + softwareRAIDVolumes: + description: The list of logical disks for software RAID, if rootDeviceHints isn't used, first volume is root volume. If HardwareRAIDVolumes is set this item will be invalid. The number of created Software RAID devices must be 1 or 2. If there is only one Software RAID device, it has to be a RAID-1. If there are two, the first one has to be a RAID-1, while the RAID level for the second one can be 0, 1, or 1+0. As the first RAID device will be the deployment device, enforcing a RAID-1 reduces the risk of ending up with a non-booting node in case of a disk failure. 
+ items: + description: SoftwareRAIDVolume defines the desired configuration of volume in software RAID + properties: + level: + description: 'RAID level for the logical disk. The following levels are supported: 0;1;1+0.' + enum: + - "0" + - "1" + - 1+0 + type: string + physicalDisks: + description: A list of device hints, the number of item should be greater than or equal to 2. + items: + description: RootDeviceHints holds the hints for specifying the storage location for the root filesystem for the image. + properties: + deviceName: + description: A Linux device name like "/dev/vda". The hint must match the actual value exactly. + type: string + hctl: + description: A SCSI bus address like 0:0:0:0. The hint must match the actual value exactly. + type: string + minSizeGigabytes: + description: The minimum size of the device in Gigabytes. + minimum: 0 + type: integer + model: + description: A vendor-specific device identifier. The hint can be a substring of the actual value. + type: string + rotational: + description: True if the device should use spinning media, false otherwise. + type: boolean + serialNumber: + description: Device serial number. The hint must match the actual value exactly. + type: string + vendor: + description: The name of the vendor or manufacturer of the device. The hint can be a substring of the actual value. + type: string + wwn: + description: Unique storage identifier. The hint must match the actual value exactly. + type: string + wwnVendorExtension: + description: Unique vendor storage identifier. The hint must match the actual value exactly. + type: string + wwnWithExtension: + description: Unique storage identifier with the vendor extension appended. The hint must match the actual value exactly. + type: string + type: object + minItems: 2 + type: array + sizeGibibytes: + description: Size (Integer) of the logical disk to be created in GiB. If unspecified or set be 0, the maximum capacity of disk will be used for logical disk. 
+ minimum: 0 + type: integer + required: + - level + type: object + maxItems: 2 + type: array + type: object rootDeviceHints: description: The RootDevicehints set by the user properties: diff --git a/controllers/metal3.io/baremetalhost_controller.go b/controllers/metal3.io/baremetalhost_controller.go index 891f7fe334..e959bdf47c 100644 --- a/controllers/metal3.io/baremetalhost_controller.go +++ b/controllers/metal3.io/baremetalhost_controller.go @@ -703,6 +703,7 @@ func (r *BareMetalHostReconciler) actionProvisioning(prov provisioner.Provisione // fields of a host. func clearHostProvisioningSettings(host *metal3v1alpha1.BareMetalHost) { host.Status.Provisioning.RootDeviceHints = nil + host.Status.Provisioning.RAID = nil } func (r *BareMetalHostReconciler) actionDeprovisioning(prov provisioner.Provisioner, info *reconcileInfo) actionResult { @@ -917,6 +918,44 @@ func saveHostProvisioningSettings(host *metal3v1alpha1.BareMetalHost) (dirty boo dirty = true } + // Copy RAID settings + if host.Spec.RAID != host.Status.Provisioning.RAID { + // If RAID settings is nil, remove saved settings, + // else check hardware RAID and software RAID. + if host.Spec.RAID == nil { + host.Status.Provisioning.RAID = nil + dirty = true + } else { + if host.Status.Provisioning.RAID == nil { + host.Status.Provisioning.RAID = &metal3v1alpha1.RAIDConfig{} + dirty = true + } + // If HardwareRAIDVolumes isn't nil, we will ignore SoftwareRAIDVolumes. + if len(host.Spec.RAID.HardwareRAIDVolumes) != 0 { + // If software RAID has been saved, remove it. + if len(host.Status.Provisioning.RAID.SoftwareRAIDVolumes) != 0 { + host.Status.Provisioning.RAID.SoftwareRAIDVolumes = nil + } + // Compare hardware RAID settings + if !reflect.DeepEqual(host.Spec.RAID.HardwareRAIDVolumes, host.Status.Provisioning.RAID.HardwareRAIDVolumes) { + host.Status.Provisioning.RAID.HardwareRAIDVolumes = host.Spec.RAID.HardwareRAIDVolumes + dirty = true + } + } else { + // If hardware RAID has been saved, remove it. 
+ if len(host.Status.Provisioning.RAID.HardwareRAIDVolumes) != 0 { + host.Status.Provisioning.RAID.HardwareRAIDVolumes = nil + dirty = true + } + // Compare software RAID settings + if !reflect.DeepEqual(host.Spec.RAID.SoftwareRAIDVolumes, host.Status.Provisioning.RAID.SoftwareRAIDVolumes) { + host.Status.Provisioning.RAID.SoftwareRAIDVolumes = host.Spec.RAID.SoftwareRAIDVolumes + dirty = true + } + } + } + } + return } diff --git a/controllers/metal3.io/baremetalhost_controller_test.go b/controllers/metal3.io/baremetalhost_controller_test.go index b41dbb33ab..f8f4d50f1b 100644 --- a/controllers/metal3.io/baremetalhost_controller_test.go +++ b/controllers/metal3.io/baremetalhost_controller_test.go @@ -1384,3 +1384,408 @@ func TestErrorCountIncrementsAlways(t *testing.T) { assert.Equal(t, before+1, b.Status.ErrorCount) } } + +func TestUpdateRAID(t *testing.T) { + host := metal3v1alpha1.BareMetalHost{ + Spec: metal3v1alpha1.BareMetalHostSpec{ + HardwareProfile: "libvirt", + RootDeviceHints: &metal3v1alpha1.RootDeviceHints{ + DeviceName: "userd_devicename", + HCTL: "1:2:3:4", + Model: "userd_model", + Vendor: "userd_vendor", + SerialNumber: "userd_serial", + MinSizeGigabytes: 40, + WWN: "userd_wwn", + WWNWithExtension: "userd_with_extension", + WWNVendorExtension: "userd_vendor_extension", + }, + RAID: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Name: "root", + }, + { + Name: "v1", + }, + }, + }, + }, + Status: metal3v1alpha1.BareMetalHostStatus{ + Provisioning: metal3v1alpha1.ProvisionStatus{ + RootDeviceHints: &metal3v1alpha1.RootDeviceHints{ + DeviceName: "userd_devicename", + HCTL: "1:2:3:4", + Model: "userd_model", + Vendor: "userd_vendor", + SerialNumber: "userd_serial", + MinSizeGigabytes: 40, + WWN: "userd_wwn", + WWNWithExtension: "userd_with_extension", + WWNVendorExtension: "userd_vendor_extension", + }, + }, + }, + } + cases := []struct { + name string + specRAID *metal3v1alpha1.RAIDConfig + statusRAID 
*metal3v1alpha1.RAIDConfig + dirty bool + expected *metal3v1alpha1.RAIDConfig + }{ + { + name: "not configured, not saved", + specRAID: nil, + statusRAID: nil, + dirty: false, + }, + { + name: "not configured, not saved", + specRAID: &metal3v1alpha1.RAIDConfig{}, + statusRAID: &metal3v1alpha1.RAIDConfig{}, + dirty: false, + expected: &metal3v1alpha1.RAIDConfig{}, + }, + { + name: "not configured, not saved", + specRAID: &metal3v1alpha1.RAIDConfig{}, + statusRAID: nil, + dirty: true, + expected: &metal3v1alpha1.RAIDConfig{}, + }, + { + name: "not configured, not saved", + specRAID: nil, + statusRAID: &metal3v1alpha1.RAIDConfig{}, + dirty: true, + expected: nil, + }, + { + name: "HardwareRAIDVolumes configured, not saved", + specRAID: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + statusRAID: nil, + dirty: true, + expected: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + }, + { + name: "SoftwareRAIDVolumes configured, not saved", + specRAID: &metal3v1alpha1.RAIDConfig{ + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + statusRAID: nil, + dirty: true, + expected: &metal3v1alpha1.RAIDConfig{ + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + }, + { + name: "both configured, not saved", + specRAID: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Level: "1", + }, + }, + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + statusRAID: nil, + dirty: true, + expected: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + }, + { + name: "HardwareRAIDVolumes configured, HardwareRAIDVolumes saved", + specRAID: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: 
[]metal3v1alpha1.HardwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + statusRAID: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + dirty: false, + expected: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + }, + { + name: "HardwareRAIDVolumes configured, SoftwareRAIDVolumes saved", + specRAID: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + statusRAID: &metal3v1alpha1.RAIDConfig{ + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + dirty: true, + expected: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + }, + { + name: "HardwareRAIDVolumes configured, both saved", + specRAID: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + statusRAID: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Level: "1", + }, + }, + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + dirty: false, + expected: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + }, + { + name: "SoftwareRAIDVolumes configured, HardwareRAIDVolumes saved", + specRAID: &metal3v1alpha1.RAIDConfig{ + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + statusRAID: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + dirty: true, + expected: &metal3v1alpha1.RAIDConfig{ + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + }, + { + name: "SoftwareRAIDVolumes configured, SoftwareRAIDVolumes saved", + specRAID: 
&metal3v1alpha1.RAIDConfig{ + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + statusRAID: &metal3v1alpha1.RAIDConfig{ + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + dirty: false, + expected: &metal3v1alpha1.RAIDConfig{ + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + }, + { + name: "SoftwareRAIDVolumes configured, both saved", + specRAID: &metal3v1alpha1.RAIDConfig{ + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + statusRAID: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Level: "1", + }, + }, + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + dirty: true, + expected: &metal3v1alpha1.RAIDConfig{ + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + }, + { + name: "both configured, HardwareRAIDVolumes saved", + specRAID: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Level: "1", + }, + }, + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + statusRAID: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + dirty: false, + expected: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + }, + { + name: "both configured, SoftwareRAIDVolumes saved", + specRAID: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Level: "1", + }, + }, + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + statusRAID: &metal3v1alpha1.RAIDConfig{ + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + dirty: true, + expected: 
&metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + }, + { + name: "both configured, both saved", + specRAID: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Level: "1", + }, + }, + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + statusRAID: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Level: "1", + }, + }, + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + dirty: false, + expected: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Level: "1", + }, + }, + }, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + host.Spec.RAID = c.specRAID + host.Status.Provisioning.RAID = c.statusRAID + dirty, _ := saveHostProvisioningSettings(&host) + assert.Equal(t, c.dirty, dirty) + assert.Equal(t, c.expected, host.Status.Provisioning.RAID) + }) + } +} diff --git a/pkg/provisioner/ironic/ironic.go b/pkg/provisioner/ironic/ironic.go index d73b63d002..12e364bc2a 100644 --- a/pkg/provisioner/ironic/ironic.go +++ b/pkg/provisioner/ironic/ironic.go @@ -1220,14 +1220,35 @@ func (p *ironicProvisioner) ironicHasSameImage(ironicNode *nodes.Node) (sameImag return sameImage } -func (p *ironicProvisioner) buildManualCleaningSteps() (cleanSteps []nodes.CleanStep) { +func (p *ironicProvisioner) buildManualCleaningSteps() (cleanSteps []nodes.CleanStep, err error) { + // Build raid clean steps + if p.bmcAccess.RAIDInterface() != "" { + cleanSteps = append(cleanSteps, BuildRAIDCleanSteps(p.host.Status.Provisioning.RAID)...) 
+ } else if p.host.Status.Provisioning.RAID != nil { + return nil, fmt.Errorf("RAID settings are defined, but the node's driver %s does not support RAID", p.bmcAccess.Driver()) + } + // TODO: Add manual cleaning steps for host configuration return } func (p *ironicProvisioner) startManualCleaning(ironicNode *nodes.Node) (success bool, result provisioner.Result, err error) { - cleanSteps := p.buildManualCleaningSteps() + if p.bmcAccess.RAIDInterface() != "" { + // Set raid configuration + err = setTargetRAIDCfg(p, ironicNode) + if err != nil { + result, err = transientError(err) + return + } + } + + // Build manual clean steps + cleanSteps, err := p.buildManualCleaningSteps() + if err != nil { + result, err = operationFailed(err.Error()) + return + } // Start manual clean if len(cleanSteps) != 0 { @@ -1260,7 +1281,13 @@ func (p *ironicProvisioner) Prepare(unprepared bool) (result provisioner.Result, switch nodes.ProvisionState(ironicNode.ProvisionState) { case nodes.Available: - if unprepared && len(p.buildManualCleaningSteps()) != 0 { + var cleanSteps []nodes.CleanStep + cleanSteps, err = p.buildManualCleaningSteps() + if err != nil { + result, err = operationFailed(err.Error()) + return + } + if unprepared && len(cleanSteps) != 0 { result, err = p.changeNodeProvisionState( ironicNode, nodes.ProvisionStateOpts{Target: nodes.TargetManage}, diff --git a/pkg/provisioner/ironic/prepare_test.go b/pkg/provisioner/ironic/prepare_test.go index 79532c5ac0..1ec188ae9e 100644 --- a/pkg/provisioner/ironic/prepare_test.go +++ b/pkg/provisioner/ironic/prepare_test.go @@ -8,6 +8,7 @@ import ( "github.com/gophercloud/gophercloud/openstack/baremetalintrospection/v1/introspection" "github.com/stretchr/testify/assert" + metal3v1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1" "github.com/metal3-io/baremetal-operator/pkg/bmc" "github.com/metal3-io/baremetal-operator/pkg/provisioner/ironic/clients" 
"github.com/metal3-io/baremetal-operator/pkg/provisioner/ironic/testserver" @@ -19,6 +20,7 @@ func TestPrepare(t *testing.T) { name string ironic *testserver.IronicMock unprepared bool + existRaidConfig bool expectedStarted bool expectedDirty bool expectedError bool @@ -35,18 +37,18 @@ func TestPrepare(t *testing.T) { expectedRequestAfter: 0, expectedDirty: false, }, - // TODO: ADD test case when clean steps aren't empty - // { - // name: "manageable state(have clean steps)", - // ironic: testserver.NewIronic(t).WithDefaultResponses().Node(nodes.Node{ - // ProvisionState: string(nodes.Manageable), - // UUID: nodeUUID, - // }), - // unprepared: true, - // expectedStarted: true, - // expectedRequestAfter: 10, - // expectedDirty: true, - // }, + { + name: "manageable state(have clean steps)", + ironic: testserver.NewIronic(t).WithDefaultResponses().Node(nodes.Node{ + ProvisionState: string(nodes.Manageable), + UUID: nodeUUID, + }), + unprepared: true, + existRaidConfig: true, + expectedStarted: true, + expectedRequestAfter: 10, + expectedDirty: true, + }, { name: "cleanFail state(cleaned provision settings)", ironic: testserver.NewIronic(t).WithDefaultResponses().Node(nodes.Node{ @@ -64,6 +66,7 @@ func TestPrepare(t *testing.T) { UUID: nodeUUID, }), unprepared: true, + existRaidConfig: true, expectedStarted: false, expectedRequestAfter: 10, expectedDirty: true, @@ -74,6 +77,7 @@ func TestPrepare(t *testing.T) { ProvisionState: string(nodes.Cleaning), UUID: nodeUUID, }), + existRaidConfig: true, expectedStarted: false, expectedRequestAfter: 10, expectedDirty: true, @@ -84,6 +88,7 @@ func TestPrepare(t *testing.T) { ProvisionState: string(nodes.CleanWait), UUID: nodeUUID, }), + existRaidConfig: true, expectedStarted: false, expectedRequestAfter: 10, expectedDirty: true, @@ -94,6 +99,7 @@ func TestPrepare(t *testing.T) { ProvisionState: string(nodes.Manageable), UUID: nodeUUID, }), + existRaidConfig: true, expectedStarted: false, expectedRequestAfter: 0, expectedDirty: 
false, @@ -114,6 +120,21 @@ func TestPrepare(t *testing.T) { defer inspector.Stop() host := makeHost() + if tc.existRaidConfig { + host.Spec.BMC.Address = "irmc://test.bmc/" + host.Status.Provisioning.RAID = &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Name: "root", + Level: "1", + }, + { + Name: "v1", + Level: "1", + }, + }, + } + } publisher := func(reason, message string) {} auth := clients.AuthConfig{Type: clients.NoAuth} diff --git a/pkg/provisioner/ironic/raid.go b/pkg/provisioner/ironic/raid.go new file mode 100644 index 0000000000..2acefeb733 --- /dev/null +++ b/pkg/provisioner/ironic/raid.go @@ -0,0 +1,173 @@ +package ironic + +import ( + "fmt" + + "github.com/gophercloud/gophercloud/openstack/baremetal/v1/nodes" + + metal3v1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1" + "github.com/metal3-io/baremetal-operator/pkg/provisioner/ironic/devicehints" + + "github.com/pkg/errors" +) + +// setTargetRAIDCfg sets the RAID settings to the ironic Node for RAID configuration steps +func setTargetRAIDCfg(p *ironicProvisioner, ironicNode *nodes.Node) (err error) { + var logicalDisks []nodes.LogicalDisk + + // Build target for RAID configuration steps + logicalDisks, err = BuildTargetRAIDCfg(p.host.Status.Provisioning.RAID) + if len(logicalDisks) == 0 || err != nil { + return + } + + // set root volume + if p.host.Status.Provisioning.RootDeviceHints == nil { + logicalDisks[0].IsRootVolume = new(bool) + *logicalDisks[0].IsRootVolume = true + } else { + p.log.Info("rootDeviceHints is used, the first volume of raid will not be set to root") + } + + // Set target for RAID configuration steps + return nodes.SetRAIDConfig( + p.client, + ironicNode.UUID, + nodes.RAIDConfigOpts{LogicalDisks: logicalDisks}, + ).ExtractErr() +} + +// BuildTargetRAIDCfg builds RAID logical disks; this method doesn't set the root volume +func BuildTargetRAIDCfg(raid *metal3v1alpha1.RAIDConfig) (logicalDisks
[]nodes.LogicalDisk, err error) { + // Deal possible panic + defer func() { + r := recover() + if r != nil { + err = fmt.Errorf("panic in build RAID settings: %v", r) + } + }() + + if raid == nil { + return + } + + // build logicalDisks + if len(raid.HardwareRAIDVolumes) != 0 { + logicalDisks, err = buildTargetHardwareRAIDCfg(raid.HardwareRAIDVolumes) + } else if len(raid.SoftwareRAIDVolumes) != 0 { + logicalDisks, err = buildTargetSoftwareRAIDCfg(raid.SoftwareRAIDVolumes) + } + + return +} + +// A private method to build hardware RAID disks +func buildTargetHardwareRAIDCfg(volumes []metal3v1alpha1.HardwareRAIDVolume) (logicalDisks []nodes.LogicalDisk, err error) { + var ( + logicalDisk nodes.LogicalDisk + nameCheckFlags map[string]int = make(map[string]int) + ) + + if len(volumes) == 0 { + return + } + + for index, volume := range volumes { + // Check volume's name + if volume.Name != "" { + i, exist := nameCheckFlags[volume.Name] + if exist { + return nil, errors.Errorf("the names(%s) of volume[%d] and volume[%d] are repeated", volume.Name, index, i) + } + nameCheckFlags[volume.Name] = index + } + // Build logicalDisk + logicalDisk = nodes.LogicalDisk{ + SizeGB: volume.SizeGibibytes, + RAIDLevel: nodes.RAIDLevel(volume.Level), + VolumeName: volume.Name, + } + if volume.Rotational != nil { + if *volume.Rotational { + logicalDisk.DiskType = nodes.HDD + } else { + logicalDisk.DiskType = nodes.SSD + } + } + if volume.NumberOfPhysicalDisks != nil { + logicalDisk.NumberOfPhysicalDisks = *volume.NumberOfPhysicalDisks + } + // Add to logicalDisks + logicalDisks = append(logicalDisks, logicalDisk) + } + + return +} + +// A private method to build software RAID disks +func buildTargetSoftwareRAIDCfg(volumes []metal3v1alpha1.SoftwareRAIDVolume) (logicalDisks []nodes.LogicalDisk, err error) { + var ( + logicalDisk nodes.LogicalDisk + ) + + if len(volumes) == 0 { + return + } + + if nodes.RAIDLevel(volumes[0].Level) != nodes.RAID1 { + return nil, errors.Errorf("the level in 
first volume of software raid must be RAID1") + } + + for _, volume := range volumes { + // Build logicalDisk + logicalDisk = nodes.LogicalDisk{ + SizeGB: volume.SizeGibibytes, + RAIDLevel: nodes.RAIDLevel(volume.Level), + Controller: "software", + } + // Build physical disks hint + for i := range volume.PhysicalDisks { + logicalDisk.PhysicalDisks = append(logicalDisk.PhysicalDisks, devicehints.MakeHintMap(&volume.PhysicalDisks[i])) + } + // Add to logicalDisks + logicalDisks = append(logicalDisks, logicalDisk) + } + + return +} + +// BuildRAIDCleanSteps builds the clean steps for RAID configuration from BaremetalHost spec +func BuildRAIDCleanSteps(raid *metal3v1alpha1.RAIDConfig) (cleanSteps []nodes.CleanStep) { + // Add 'delete_configuration' before 'create_configuration' to make sure + // that only the desired logical disks exist in the system after manual cleaning. + cleanSteps = append( + cleanSteps, + nodes.CleanStep{ + Interface: "raid", + Step: "delete_configuration", + }, + ) + // If RAID is not configured, we only need to clear the old configuration + if raid == nil || (len(raid.HardwareRAIDVolumes) == 0 && len(raid.SoftwareRAIDVolumes) == 0) { + return + } + if len(raid.HardwareRAIDVolumes) == 0 && len(raid.SoftwareRAIDVolumes) != 0 { + cleanSteps = append( + cleanSteps, + nodes.CleanStep{ + Interface: "deploy", + Step: "erase_devices_metadata", + }, + ) + } + // 'create_configuration' doesn't remove existing disks. It is recommended + // that only the desired logical disks exist in the system after manual cleaning.
+ cleanSteps = append( + cleanSteps, + nodes.CleanStep{ + Interface: "raid", + Step: "create_configuration", + }, + ) + return +} diff --git a/pkg/provisioner/ironic/raid_test.go b/pkg/provisioner/ironic/raid_test.go new file mode 100644 index 0000000000..0b07a4e13f --- /dev/null +++ b/pkg/provisioner/ironic/raid_test.go @@ -0,0 +1,304 @@ +package ironic + +import ( + "reflect" + "testing" + + "github.com/gophercloud/gophercloud/openstack/baremetal/v1/nodes" + + metal3v1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1" +) + +func TestBuildTargetRAIDCfg(t *testing.T) { + var TRUE bool = true + var FALSE bool = false + cases := []struct { + name string + raid *metal3v1alpha1.RAIDConfig + expected []nodes.LogicalDisk + expectedError string + }{ + { + name: "hardware raid", + raid: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Name: "root", + Level: "1", + Rotational: &FALSE, + }, + { + Name: "v1", + Level: "1", + Rotational: &TRUE, + }, + }, + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "1", + }, + { + Level: "1", + }, + }, + }, + expected: []nodes.LogicalDisk{ + { + RAIDLevel: "1", + VolumeName: "root", + DiskType: nodes.SSD, + }, + { + RAIDLevel: "1", + DiskType: nodes.HDD, + VolumeName: "v1", + }, + }, + }, + { + name: "hardware raid, same volume's name", + raid: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Name: "v1", + Level: "1", + }, + { + Name: "v1", + Level: "1", + }, + }, + }, + expectedError: "the names(v1) of volume[1] and volume[0] are repeated", + }, + { + name: "hardware raid, volume's name is empty", + raid: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + Name: "", + Level: "1", + }, + { + Name: "", + Level: "1", + }, + }, + }, + expected: []nodes.LogicalDisk{ + { + RAIDLevel: "1", + VolumeName: "", + }, + { + RAIDLevel: "1", + VolumeName: "", + }, + }, + }, + { 
+ name: "software raid", + raid: &metal3v1alpha1.RAIDConfig{ + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "1", + PhysicalDisks: []metal3v1alpha1.RootDeviceHints{ + { + MinSizeGigabytes: 100, + }, + { + MinSizeGigabytes: 200, + }, + }, + }, + { + Level: "1", + }, + }, + }, + expected: []nodes.LogicalDisk{ + { + RAIDLevel: "1", + Controller: "software", + PhysicalDisks: []interface{}{ + map[string]string{ + "size": ">= 100", + }, + map[string]string{ + "size": ">= 200", + }, + }, + }, + { + RAIDLevel: "1", + Controller: "software", + }, + }, + }, + { + name: "software raid, the level in first volume isn't RAID1", + raid: &metal3v1alpha1.RAIDConfig{ + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "0", + }, + { + Level: "1", + }, + }, + }, + expectedError: "the level in first volume of software raid must be RAID1", + }, + { + name: "raid is nil", + raid: nil, + expected: nil, + }, + { + name: "volumes is nil", + raid: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: nil, + SoftwareRAIDVolumes: nil, + }, + expected: nil, + }, + { + name: "volumes is empty", + raid: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{}, + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{}, + }, + expected: nil, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + cfg, err := BuildTargetRAIDCfg(c.raid) + if c.expectedError != "" { + if err == nil || err.Error() != c.expectedError { + t.Errorf("expectError: %v, got: %v", c.expectedError, err) + } + return + } + if !reflect.DeepEqual(c.expected, cfg) { + t.Errorf("expected: %v, got: %v", c.expected, cfg) + } + }) + } +} + +func TestBuildRAIDCleanSteps(t *testing.T) { + cases := []struct { + name string + raid *metal3v1alpha1.RAIDConfig + expected []nodes.CleanStep + expectedError string + }{ + { + name: "hardware raid", + raid: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{ + { + 
Name: "root", + Level: "1", + }, + { + Name: "v1", + Level: "1", + }, + }, + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "1", + }, + { + Level: "1", + }, + }, + }, + expected: []nodes.CleanStep{ + { + Interface: "raid", + Step: "delete_configuration", + }, + { + Interface: "raid", + Step: "create_configuration", + }, + }, + }, + { + name: "software raid", + raid: &metal3v1alpha1.RAIDConfig{ + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{ + { + Level: "1", + }, + { + Level: "1", + }, + }, + }, + expected: []nodes.CleanStep{ + { + Interface: "raid", + Step: "delete_configuration", + }, + { + Interface: "deploy", + Step: "erase_devices_metadata", + }, + { + Interface: "raid", + Step: "create_configuration", + }, + }, + }, + { + name: "raid is nil", + raid: nil, + expected: []nodes.CleanStep{ + { + Interface: "raid", + Step: "delete_configuration", + }, + }, + }, + { + name: "volumes is nil", + raid: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: nil, + SoftwareRAIDVolumes: nil, + }, + expected: []nodes.CleanStep{ + { + Interface: "raid", + Step: "delete_configuration", + }, + }, + }, + { + name: "volumes is empty", + raid: &metal3v1alpha1.RAIDConfig{ + HardwareRAIDVolumes: []metal3v1alpha1.HardwareRAIDVolume{}, + SoftwareRAIDVolumes: []metal3v1alpha1.SoftwareRAIDVolume{}, + }, + expected: []nodes.CleanStep{ + { + Interface: "raid", + Step: "delete_configuration", + }, + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + step := BuildRAIDCleanSteps(c.raid) + if !reflect.DeepEqual(c.expected, step) { + t.Errorf("expected: %v, got: %v", c.expected, step) + } + }) + } +} diff --git a/pkg/provisioner/ironic/testserver/ironic.go b/pkg/provisioner/ironic/testserver/ironic.go index ce6e48b0c0..56e7f2de76 100644 --- a/pkg/provisioner/ironic/testserver/ironic.go +++ b/pkg/provisioner/ironic/testserver/ironic.go @@ -34,6 +34,7 @@ func (m *IronicMock) WithDefaultResponses() *IronicMock { }) 
m.AddDefaultResponse("/v1/nodes/{id}/states/provision", "", http.StatusAccepted, "{}") m.AddDefaultResponse("/v1/nodes/{id}/states/power", "", http.StatusAccepted, "{}") + m.AddDefaultResponse("/v1/nodes/{id}/states/raid", "", http.StatusNoContent, "{}") m.AddDefaultResponse("/v1/nodes/{id}/validate", "", http.StatusOK, "{}") m.Ready()