diff --git a/data/data/install.openshift.io_installconfigs.yaml b/data/data/install.openshift.io_installconfigs.yaml index 7a7c135821d..d10c17378ce 100644 --- a/data/data/install.openshift.io_installconfigs.yaml +++ b/data/data/install.openshift.io_installconfigs.yaml @@ -43,6 +43,1103 @@ spec: may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string + arbiter: + description: |- + Arbiter is the configuration for the machines that comprise the + arbiter nodes. + properties: + architecture: + default: amd64 + description: |- + Architecture is the instruction set architecture of the machine pool. + Defaults to amd64. + enum: + - "" + - amd64 + type: string + hyperthreading: + default: Enabled + description: |- + Hyperthreading determines the mode of hyperthreading that machines in the + pool will utilize. + Default is for hyperthreading to be enabled. + enum: + - "" + - Enabled + - Disabled + type: string + name: + description: |- + Name is the name of the machine pool. + For the control plane machine pool, the name will always be "master". + For the compute machine pools, the only valid name is "worker". + For the arbiter machine pools, the only valid name is "arbiter". + type: string + platform: + description: Platform is configuration for machine pool specific to + the platform. + properties: + aws: + description: AWS is the configuration used when installing on + AWS. + properties: + additionalSecurityGroupIDs: + description: |- + AdditionalSecurityGroupIDs contains IDs of additional security groups for machines, where each ID + is presented in the format sg-xxxx. + items: + type: string + maxItems: 10 + type: array + amiID: + description: |- + AMIID is the AMI that should be used to boot the ec2 instance. + If set, the AMI should belong to the same region as the cluster. 
+ type: string + iamProfile: + description: |- + IAMProfile is the name of the IAM instance profile to use for the machine. + Leave unset to have the installer create the IAM Profile on your behalf. + Cannot be specified together with iamRole. + type: string + iamRole: + description: |- + IAMRole is the name of the IAM Role to use for the instance profile of the machine. + Leave unset to have the installer create the IAM Role on your behalf. + Cannot be specified together with iamProfile. + type: string + metadataService: + description: EC2MetadataOptions defines metadata service interaction + options for EC2 instances in the machine pool. + properties: + authentication: + description: |- + Authentication determines whether or not the host requires the use of authentication when interacting with the metadata service. + When using authentication, this enforces v2 interaction method (IMDSv2) with the metadata service. + When omitted, this means the user has no opinion and the value is left to the platform to choose a good + default, which is subject to change over time. The current default is optional. + At this point this field represents `HttpTokens` parameter from `InstanceMetadataOptionsRequest` structure in AWS EC2 API + https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html + enum: + - Required + - Optional + type: string + type: object + rootVolume: + description: EC2RootVolume defines the root volume for EC2 + instances in the machine pool. + properties: + iops: + description: |- + IOPS defines the amount of provisioned IOPS. (KiB/s). IOPS may only be set for + io1, io2, & gp3 volume types. + minimum: 0 + type: integer + kmsKeyARN: + description: |- + The KMS key that will be used to encrypt the EBS volume. + If no key is provided the default KMS key for the account will be used. 
+ https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetEbsDefaultKmsKeyId.html + type: string + size: + description: Size defines the size of the volume in gibibytes + (GiB). + minimum: 0 + type: integer + type: + description: Type defines the type of the volume. + type: string + required: + - size + - type + type: object + type: + description: |- + InstanceType defines the ec2 instance type. + eg. m4-large + type: string + zones: + description: Zones is list of availability zones that can + be used. + items: + type: string + type: array + type: object + azure: + description: Azure is the configuration used when installing on + Azure. + properties: + encryptionAtHost: + description: EncryptionAtHost enables encryption at the VM + host. + type: boolean + osDisk: + description: OSDisk defines the storage for instance. + properties: + diskEncryptionSet: + description: DiskEncryptionSet defines a disk encryption + set. + properties: + name: + description: Name is the name of the disk encryption + set. + type: string + resourceGroup: + description: |- + ResourceGroup defines the Azure resource group used by the disk + encryption set. + type: string + subscriptionId: + description: |- + SubscriptionID defines the Azure subscription the disk encryption + set is in. + type: string + required: + - name + - resourceGroup + - subscriptionId + type: object + diskSizeGB: + description: DiskSizeGB defines the size of disk in GB. + format: int32 + minimum: 0 + type: integer + diskType: + description: |- + DiskType defines the type of disk. + For control plane nodes, the valid values are Premium_LRS and StandardSSD_LRS. + Default is Premium_LRS. + enum: + - Standard_LRS + - Premium_LRS + - StandardSSD_LRS + type: string + securityProfile: + description: SecurityProfile specifies the security profile + for the managed disk. 
+ properties: + diskEncryptionSet: + description: |- + DiskEncryptionSet specifies the customer managed disk encryption set resource id for the + managed disk that is used for Customer Managed Key encrypted ConfidentialVM OS Disk and + VMGuestState blob. + properties: + name: + description: Name is the name of the disk encryption + set. + type: string + resourceGroup: + description: |- + ResourceGroup defines the Azure resource group used by the disk + encryption set. + type: string + subscriptionId: + description: |- + SubscriptionID defines the Azure subscription the disk encryption + set is in. + type: string + required: + - name + - resourceGroup + - subscriptionId + type: object + securityEncryptionType: + description: |- + SecurityEncryptionType specifies the encryption type of the managed disk. + It is set to DiskWithVMGuestState to encrypt the managed disk along with the VMGuestState + blob, and to VMGuestStateOnly to encrypt the VMGuestState blob only. + When set to VMGuestStateOnly, the VTpmEnabled should be set to true. + When set to DiskWithVMGuestState, both SecureBootEnabled and VTpmEnabled should be set to true. + It can be set only for Confidential VMs. + enum: + - VMGuestStateOnly + - DiskWithVMGuestState + type: string + type: object + required: + - diskSizeGB + type: object + osImage: + description: OSImage defines the image to use for the OS. + properties: + offer: + description: Offer is the offer of the image. + type: string + plan: + description: |- + Plan is the purchase plan of the image. + If omitted, it defaults to "WithPurchasePlan". + enum: + - WithPurchasePlan + - NoPurchasePlan + type: string + publisher: + description: Publisher is the publisher of the image. + type: string + sku: + description: SKU is the SKU of the image. + type: string + version: + description: Version is the version of the image. 
+ type: string + required: + - offer + - publisher + - sku + - version + type: object + settings: + description: |- + Settings specify the security type and the UEFI settings of the virtual machine. This field can + be set for Confidential VMs and Trusted Launch for VMs. + properties: + confidentialVM: + description: |- + ConfidentialVM specifies the security configuration of the virtual machine. + For more information regarding Confidential VMs, please refer to: + https://learn.microsoft.com/azure/confidential-computing/confidential-vm-overview + properties: + uefiSettings: + description: UEFISettings specifies the security settings + like secure boot and vTPM used while creating the + virtual machine. + properties: + secureBoot: + description: |- + SecureBoot specifies whether secure boot should be enabled on the virtual machine. + Secure Boot verifies the digital signature of all boot components and halts the boot process if + signature verification fails. + If omitted, the platform chooses a default, which is subject to change over time, currently that default is disabled. + enum: + - Enabled + - Disabled + type: string + virtualizedTrustedPlatformModule: + description: |- + VirtualizedTrustedPlatformModule specifies whether vTPM should be enabled on the virtual machine. + When enabled the virtualized trusted platform module measurements are used to create a known good boot integrity policy baseline. + The integrity policy baseline is used for comparison with measurements from subsequent VM boots to determine if anything has changed. + This is required to be set to enabled if the SecurityEncryptionType is defined. + If omitted, the platform chooses a default, which is subject to change over time, currently that default is disabled. + enum: + - Enabled + - Disabled + type: string + type: object + required: + - uefiSettings + type: object + securityType: + description: |- + SecurityType specifies the SecurityType of the virtual machine. 
It has to be set to any specified value to + enable secure boot and vTPM. The default behavior is: secure boot and vTPM will not be enabled unless this property is set. + enum: + - ConfidentialVM + - TrustedLaunch + type: string + trustedLaunch: + description: |- + TrustedLaunch specifies the security configuration of the virtual machine. + For more information regarding TrustedLaunch for VMs, please refer to: + https://learn.microsoft.com/azure/virtual-machines/trusted-launch + properties: + uefiSettings: + description: UEFISettings specifies the security settings + like secure boot and vTPM used while creating the + virtual machine. + properties: + secureBoot: + description: |- + SecureBoot specifies whether secure boot should be enabled on the virtual machine. + Secure Boot verifies the digital signature of all boot components and halts the boot process if + signature verification fails. + If omitted, the platform chooses a default, which is subject to change over time, currently that default is disabled. + enum: + - Enabled + - Disabled + type: string + virtualizedTrustedPlatformModule: + description: |- + VirtualizedTrustedPlatformModule specifies whether vTPM should be enabled on the virtual machine. + When enabled the virtualized trusted platform module measurements are used to create a known good boot integrity policy baseline. + The integrity policy baseline is used for comparison with measurements from subsequent VM boots to determine if anything has changed. + This is required to be set to enabled if the SecurityEncryptionType is defined. + If omitted, the platform chooses a default, which is subject to change over time, currently that default is disabled. + enum: + - Enabled + - Disabled + type: string + type: object + required: + - uefiSettings + type: object + required: + - securityType + type: object + type: + description: |- + InstanceType defines the azure instance type. + eg. 
Standard_DS_V2 + type: string + ultraSSDCapability: + description: ultraSSDCapability defines if the instance should + use Ultra SSD disks. + enum: + - Enabled + - Disabled + type: string + vmNetworkingType: + description: |- + VMNetworkingType specifies whether to enable accelerated networking. + Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, greatly improving its + networking performance. + eg. values: "Accelerated", "Basic" + enum: + - Accelerated + - Basic + type: string + zones: + description: |- + Zones is list of availability zones that can be used. + eg. ["1", "2", "3"] + items: + type: string + type: array + type: object + baremetal: + description: BareMetal is the configuration used when installing + on bare metal. + type: object + gcp: + description: GCP is the configuration used when installing on + GCP + properties: + confidentialCompute: + default: Disabled + description: |- + ConfidentialCompute Defines whether the instance should have confidential compute enabled. + If enabled OnHostMaintenance is required to be set to "Terminate". + If omitted, the platform chooses a default, which is subject to change over time, currently that default is false. + enum: + - Enabled + - Disabled + type: string + onHostMaintenance: + default: Migrate + description: |- + OnHostMaintenance determines the behavior when a maintenance event occurs that might cause the instance to reboot. + Allowed values are "Migrate" and "Terminate". + If omitted, the platform chooses a default, which is subject to change over time, currently that default is "Migrate". + enum: + - Migrate + - Terminate + type: string + osDisk: + description: OSDisk defines the storage for instance. + properties: + DiskSizeGB: + description: DiskSizeGB defines the size of disk in GB. + format: int64 + maximum: 65536 + minimum: 16 + type: integer + diskType: + description: |- + DiskType defines the type of disk. 
+ For control plane nodes, the valid values are pd-balanced, pd-ssd, and hyperdisk-balanced. + enum: + - pd-balanced + - pd-ssd + - pd-standard + - hyperdisk-balanced + type: string + encryptionKey: + description: EncryptionKey defines the KMS key to be used + to encrypt the disk. + properties: + kmsKey: + description: KMSKey is a reference to a KMS Key to + use for the encryption. + properties: + keyRing: + description: KeyRing is the name of the KMS Key + Ring which the KMS Key belongs to. + type: string + location: + description: Location is the GCP location in which + the Key Ring exists. + type: string + name: + description: Name is the name of the customer + managed encryption key to be used for the disk + encryption. + type: string + projectID: + description: |- + ProjectID is the ID of the Project in which the KMS Key Ring exists. + Defaults to the VM ProjectID if not set. + type: string + required: + - keyRing + - location + - name + type: object + kmsKeyServiceAccount: + description: |- + KMSKeyServiceAccount is the service account being used for the + encryption request for the given KMS key. If absent, the Compute + Engine default service account is used. + See https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account + for details on the default service account. + type: string + type: object + required: + - DiskSizeGB + type: object + osImage: + description: OSImage defines a custom image for instance. + properties: + name: + description: Name defines the name of the image. + type: string + project: + description: Project defines the name of the project containing + the image. + type: string + required: + - name + - project + type: object + secureBoot: + description: |- + SecureBoot Defines whether the instance should have secure boot enabled. + secure boot Verify the digital signature of all boot components, and halt the boot process if signature verification fails. 
+ If omitted, the platform chooses a default, which is subject to change over time, currently that default is false. + enum: + - Enabled + - Disabled + type: string + serviceAccount: + description: |- + ServiceAccount is the email of a gcp service account to be used during installations. + The provided service account can be attached to both control-plane nodes + and worker nodes in order to provide the permissions required by the cloud provider. + type: string + tags: + description: Tags defines a set of network tags which will + be added to instances in the machineset + items: + type: string + type: array + type: + description: |- + InstanceType defines the GCP instance type. + eg. n1-standard-4 + type: string + zones: + description: Zones is list of availability zones that can + be used. + items: + type: string + type: array + type: object + ibmcloud: + description: IBMCloud is the configuration used when installing + on IBM Cloud. + properties: + bootVolume: + description: BootVolume is the configuration for the machine's + boot volume. + properties: + encryptionKey: + description: |- + EncryptionKey is the CRN referencing a Key Protect or Hyper Protect + Crypto Services key to use for volume encryption. If not specified, a + provider managed encryption key will be used. + type: string + type: object + dedicatedHosts: + description: DedicatedHosts is the configuration for the machine's + dedicated host and profile. + items: + description: DedicatedHost stores the configuration for + the machine's dedicated host platform. + properties: + name: + description: |- + Name is the name of the dedicated host to provision the machine on. If + specified, machines will be created on pre-existing dedicated host. + type: string + profile: + description: |- + Profile is the profile ID for the dedicated host. If specified, new + dedicated host will be created for machines. 
+ type: string + type: object + type: array + type: + description: InstanceType is the VSI machine profile. + type: string + zones: + description: Zones is the list of availability zones used + for machines in the pool. + items: + type: string + type: array + type: object + nutanix: + description: Nutanix is the configuration used when installing + on Nutanix. + properties: + bootType: + description: |- + BootType indicates the boot type (Legacy, UEFI or SecureBoot) the Machine's VM uses to boot. + If this field is empty or omitted, the VM will use the default boot type "Legacy" to boot. + "SecureBoot" depends on "UEFI" boot, i.e., enabling "SecureBoot" means that "UEFI" boot is also enabled. + enum: + - "" + - Legacy + - UEFI + - SecureBoot + type: string + categories: + description: |- + Categories optionally adds one or more prism categories (each with key and value) for + the Machine's VM to associate with. All the category key and value pairs specified must + already exist in the prism central. + items: + description: NutanixCategory identifies a pair of prism + category key and value + properties: + key: + description: key is the prism category key name + maxLength: 64 + minLength: 1 + type: string + value: + description: value is the prism category value associated + with the key + maxLength: 64 + minLength: 1 + type: string + required: + - key + - value + type: object + type: array + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + coresPerSocket: + description: |- + NumCoresPerSocket is the number of cores per socket in a vm. The number + of vCPUs on the vm will be NumCPUs times NumCoresPerSocket. + For example: 4 CPUs and 4 Cores per socket will result in 16 VPUs. + The AHV scheduler treats socket and core allocation exactly the same + so there is no benefit to configuring cores over CPUs. + format: int64 + type: integer + cpus: + description: NumCPUs is the total number of virtual processor + cores to assign a vm. 
+ format: int64 + type: integer + dataDisks: + description: DataDisks holds information of the data disks + to attach to the Machine's VM + items: + description: DataDisk defines a data disk for a Machine + VM. + properties: + dataSourceImage: + description: dataSource refers to a data source image + for the VM disk. + properties: + name: + description: Name is the name of the storage container + resource in the Prism Element. + type: string + referenceName: + description: ReferenceName is the identifier of + the storage resource configured in the FailureDomain. + type: string + uuid: + description: UUID is the UUID of the storage container + resource in the Prism Element. + type: string + required: + - uuid + type: object + deviceProperties: + description: deviceProperties are the properties of + the disk device. + properties: + adapterType: + description: |- + adapterType is the adapter type of the disk address. + If the deviceType is "Disk", the valid adapterType can be "SCSI", "IDE", "PCI", "SATA" or "SPAPR". + If the deviceType is "CDRom", the valid adapterType can be "IDE" or "SATA". + enum: + - SCSI + - IDE + - PCI + - SATA + - SPAPR + type: string + deviceIndex: + default: 0 + description: |- + deviceIndex is the index of the disk address. The valid values are non-negative integers, with the default value 0. + For a Machine VM, the deviceIndex for the disks with the same deviceType.adapterType combination should + start from 0 and increase consecutively afterwards. Note that for each Machine VM, the Disk.SCSI.0 + and CDRom.IDE.0 are reserved to be used by the VM's system. So for dataDisks of Disk.SCSI and CDRom.IDE, + the deviceIndex should start from 1. + format: int32 + minimum: 0 + type: integer + deviceType: + default: Disk + description: |- + deviceType specifies the disk device type. + The valid values are "Disk" and "CDRom", and the default is "Disk". 
+ enum: + - Disk + - CDRom + type: string + required: + - adapterType + - deviceIndex + - deviceType + type: object + diskSize: + anyOf: + - type: integer + - type: string + description: |- + diskSize is size (in Quantity format) of the disk to attach to the VM. + See https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Format for the Quantity format and example documentation. + The minimum diskSize is 1GB. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + storageConfig: + description: storageConfig are the storage configuration + parameters of the VM disks. + properties: + diskMode: + allOf: + - enum: + - Standard + - Flash + - enum: + - Standard + - Flash + default: Standard + description: |- + diskMode specifies the disk mode. + The valid values are Standard and Flash, and the default is Standard. + type: string + storageContainer: + description: storageContainer refers to the storage_container + used by the VM disk. + properties: + name: + description: Name is the name of the storage + container resource in the Prism Element. + type: string + referenceName: + description: ReferenceName is the identifier + of the storage resource configured in the + FailureDomain. + type: string + uuid: + description: UUID is the UUID of the storage + container resource in the Prism Element. + type: string + required: + - uuid + type: object + required: + - diskMode + type: object + required: + - diskSize + type: object + type: array + x-kubernetes-list-type: set + failureDomains: + description: |- + FailureDomains optionally configures a list of failure domain names + that will be applied to the MachinePool + items: + type: string + type: array + x-kubernetes-list-type: set + gpus: + description: GPUs is a list of GPU devices to attach to the + machine's VM. 
+ items: + description: NutanixGPU holds the identity of a Nutanix + GPU resource in the Prism Central + properties: + deviceID: + description: deviceID is the GPU device ID with the + integer value. + format: int32 + type: integer + name: + description: name is the GPU device name + type: string + type: + description: |- + type is the identifier type of the GPU device. + Valid values are Name and DeviceID. + enum: + - Name + - DeviceID + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: deviceID configuration is required when type + is DeviceID, and forbidden otherwise + rule: 'has(self.type) && self.type == ''DeviceID'' ? has(self.deviceID) + : !has(self.deviceID)' + - message: name configuration is required when type is Name, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) + : !has(self.name)' + type: array + x-kubernetes-list-type: set + memoryMiB: + description: Memory is the size of a VM's memory in MiB. + format: int64 + type: integer + osDisk: + description: OSDisk defines the storage for instance. + properties: + diskSizeGiB: + description: DiskSizeGiB defines the size of disk in GiB. + format: int64 + type: integer + type: object + project: + description: Project optionally identifies a Prism project + for the Machine's VM to associate with. + properties: + name: + description: name is the resource name in the PC + type: string + type: + description: type is the identifier type to use for this + resource. + enum: + - uuid + - name + type: string + uuid: + description: uuid is the UUID of the resource in the PC. + type: string + required: + - type + type: object + type: object + openstack: + description: OpenStack is the configuration used when installing + on OpenStack. + properties: + additionalNetworkIDs: + description: |- + AdditionalNetworkIDs contains IDs of additional networks for machines, + where each ID is presented in UUID v4 format. 
+ Allowed address pairs won't be created for the additional networks. + items: + type: string + type: array + additionalSecurityGroupIDs: + description: |- + AdditionalSecurityGroupIDs contains IDs of additional security groups for machines, + where each ID is presented in UUID v4 format. + items: + type: string + type: array + rootVolume: + description: |- + RootVolume defines the root volume for instances in the machine pool. + The instances use ephemeral disks if not set. + properties: + size: + description: |- + Size defines the size of the volume in gibibytes (GiB). + Required + type: integer + type: + description: |- + Type defines the type of the volume. + Deprecated: Use Types instead. + type: string + types: + description: |- + Types is the list of the volume types of the root volumes. + This is mutually exclusive with Type. + items: + type: string + type: array + zones: + description: |- + Zones is the list of availability zones where the root volumes should be deployed. + If no zones are provided, all instances will be deployed on OpenStack Cinder default availability zone + items: + type: string + type: array + required: + - size + - types + type: object + serverGroupPolicy: + description: |- + ServerGroupPolicy will be used to create the Server Group that will contain all the machines of this MachinePool. + Defaults to "soft-anti-affinity". + enum: + - "" + - affinity + - soft-affinity + - anti-affinity + - soft-anti-affinity + type: string + type: + description: |- + FlavorName defines the OpenStack Nova flavor. + eg. m1.large + type: string + zones: + description: |- + Zones is the list of availability zones where the instances should be deployed. + If no zones are provided, all instances will be deployed on OpenStack Nova default availability zone + items: + type: string + type: array + required: + - type + type: object + ovirt: + description: Ovirt is the configuration used when installing on + oVirt. 
+ properties: + affinityGroupsNames: + description: |- + AffinityGroupsNames contains a list of oVirt affinity group names that the newly created machines will join. + The affinity groups should exist on the oVirt cluster or created by the OpenShift installer. + items: + type: string + type: array + autoPinningPolicy: + description: |- + AutoPinningPolicy defines the policy to automatically set the CPU + and NUMA including pinning to the host for the instance. + When the field is omitted the default will be "none". + enum: + - none + - resize_and_pin + type: string + clone: + description: |- + Clone makes sure that the disks are cloned from the template and are not linked. + Defaults to true for high performance and server VM types, false for desktop types. + + Note: this option is not documented in the OpenShift documentation. This is intentional as it has sane defaults + that shouldn't be changed unless needed for debugging or resolving issues in cooperation with Red Hat support. + type: boolean + cpu: + description: CPU defines the VM CPU. + properties: + cores: + description: |- + Cores is the number of cores per socket. + Total CPUs is (Sockets * Cores) + format: int32 + type: integer + sockets: + description: |- + Sockets is the number of sockets for a VM. + Total CPUs is (Sockets * Cores) + format: int32 + type: integer + threads: + description: Threads is the number of CPU threads. + format: int32 + type: integer + required: + - cores + - sockets + - threads + type: object + format: + description: |- + Format is the disk format that the disks are in. Can be "cow" or "raw". "raw" disables several features that + may be needed, such as incremental backups. + enum: + - "" + - raw + - cow + type: string + hugepages: + description: Hugepages is the size of a VM's hugepages to + use in KiBs. 
+ enum: + - 2048 + - 1048576 + format: int32 + type: integer + instanceTypeID: + description: |- + InstanceTypeID defines the VM instance type and overrides + the hardware parameters of the created VM, including cpu and memory. + If InstanceTypeID is passed, all memory and cpu variables will be ignored. + type: string + memoryMB: + description: MemoryMB is the size of a VM's memory in MiBs. + format: int32 + type: integer + osDisk: + description: OSDisk is the the root disk of the node. + properties: + sizeGB: + description: SizeGB size of the bootable disk in GiB. + format: int64 + type: integer + required: + - sizeGB + type: object + sparse: + description: Sparse indicates that sparse provisioning should + be used and disks should be not preallocated. + type: boolean + vmType: + description: VMType defines the workload type of the VM. + enum: + - "" + - desktop + - server + - high_performance + type: string + type: object + powervs: + description: PowerVS is the configuration used when installing + on IBM Power VS. + properties: + memoryGiB: + description: memoryGiB is the size of a virtual machine's + memory, in GiB. + format: int32 + type: integer + procType: + description: |- + ProcType defines the processor sharing model for the instance. + Must be one of {Capped, Dedicated, Shared}. + enum: + - Dedicated + - Shared + - Capped + - "" + type: string + processors: + anyOf: + - type: integer + - type: string + description: Processors defines the processing units for the + instance. + x-kubernetes-int-or-string: true + smtLevel: + description: SMTLevel specifies the level of SMT to set the + control plane and worker nodes to. + type: string + sysType: + description: SysType defines the system type for instance. + type: string + volumeIDs: + description: VolumeIDs is the list of volumes attached to + the instance. + items: + type: string + type: array + type: object + vsphere: + description: VSphere is the configuration used when installing + on vSphere. 
+ properties: + coresPerSocket: + description: |- + NumCoresPerSocket is the number of cores per socket in a vm. The number + of vCPUs on the vm will be NumCPUs/NumCoresPerSocket. + format: int32 + type: integer + cpus: + description: NumCPUs is the total number of virtual processor + cores to assign a vm. + format: int32 + type: integer + dataDisks: + description: DataDisks are additional disks to add to the + VM that are not part of the VM's OVA template. + items: + description: DataDisk defines a data disk to add to the + VM that is not part of the VM OVA template. + properties: + name: + description: |- + name is used to identify the disk definition. name is required needs to be unique so that it can be used to + clearly identify purpose of the disk. + example: images_1 + maxLength: 80 + pattern: ^[a-zA-Z0-9]([-_a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + sizeGiB: + description: |- + sizeGiB is the size of the disk in GiB. + The maximum supported size is 16384 GiB. + format: int32 + maximum: 16384 + minimum: 1 + type: integer + required: + - name + - sizeGiB + type: object + maxItems: 29 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + memoryMB: + description: Memory is the size of a VM's memory in MB. + format: int64 + type: integer + osDisk: + description: OSDisk defines the storage for instance. + properties: + diskSizeGB: + description: DiskSizeGB defines the size of disk in GB. + format: int32 + type: integer + type: object + zones: + description: |- + Zones defines available zones + Zones is available in TechPreview. + items: + type: string + type: array + type: object + type: object + replicas: + description: Replicas is the machine count for the machine pool. + format: int64 + type: integer + required: + - name + - platform + type: object baseDomain: description: BaseDomain is the base domain to which the cluster should belong. @@ -140,6 +1237,7 @@ spec: Name is the name of the machine pool. 
For the control plane machine pool, the name will always be "master". For the compute machine pools, the only valid name is "worker". + For the arbiter machine pools, the only valid name is "arbiter". type: string platform: description: Platform is configuration for machine pool specific @@ -1242,6 +2340,7 @@ spec: Name is the name of the machine pool. For the control plane machine pool, the name will always be "master". For the compute machine pools, the only valid name is "worker". + For the arbiter machine pools, the only valid name is "arbiter". type: string platform: description: Platform is configuration for machine pool specific to diff --git a/pkg/asset/agent/installconfig.go b/pkg/asset/agent/installconfig.go index 00729539c43..107e4cde3ac 100644 --- a/pkg/asset/agent/installconfig.go +++ b/pkg/asset/agent/installconfig.go @@ -10,6 +10,7 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/api/features" "github.com/openshift/installer/pkg/asset" "github.com/openshift/installer/pkg/asset/installconfig" "github.com/openshift/installer/pkg/asset/releaseimage" @@ -215,9 +216,13 @@ func (a *OptionalInstallConfig) validateControlPlaneConfiguration(installConfig var fieldPath *field.Path if installConfig.ControlPlane != nil { - if *installConfig.ControlPlane.Replicas < 1 || *installConfig.ControlPlane.Replicas > 5 || *installConfig.ControlPlane.Replicas == 2 { + if *installConfig.ControlPlane.Replicas < 1 || *installConfig.ControlPlane.Replicas > 5 || (installConfig.Arbiter == nil && *installConfig.ControlPlane.Replicas == 2) { fieldPath = field.NewPath("ControlPlane", "Replicas") - allErrs = append(allErrs, field.Invalid(fieldPath, installConfig.ControlPlane.Replicas, fmt.Sprintf("ControlPlane.Replicas can only be set to 5, 4, 3, or 1. 
Found %v", *installConfig.ControlPlane.Replicas))) + supportedControlPlaneRange := "to 5, 4, 3, or 1" + if installConfig.EnabledFeatureGates().Enabled(features.FeatureGateHighlyAvailableArbiter) { + supportedControlPlaneRange = "between 1 and 5" + } + allErrs = append(allErrs, field.Invalid(fieldPath, installConfig.ControlPlane.Replicas, fmt.Sprintf("ControlPlane.Replicas can only be set %s. Found %v", supportedControlPlaneRange, *installConfig.ControlPlane.Replicas))) } } return allErrs diff --git a/pkg/asset/cluster/cluster.go b/pkg/asset/cluster/cluster.go index 5fdbd6914b3..02c13b5974f 100644 --- a/pkg/asset/cluster/cluster.go +++ b/pkg/asset/cluster/cluster.go @@ -73,6 +73,7 @@ func (c *Cluster) Dependencies() []asset.Asset { &kubeconfig.AdminClient{}, &bootstrap.Bootstrap{}, &machine.Master{}, + &machine.Arbiter{}, &machine.Worker{}, &machines.Worker{}, &machines.ClusterAPI{}, diff --git a/pkg/asset/cluster/tfvars/tfvars.go b/pkg/asset/cluster/tfvars/tfvars.go index b226c8d4b59..2a9e04a63cc 100644 --- a/pkg/asset/cluster/tfvars/tfvars.go +++ b/pkg/asset/cluster/tfvars/tfvars.go @@ -105,7 +105,9 @@ func (t *TerraformVariables) Dependencies() []asset.Asset { new(rhcos.BootstrapImage), &bootstrap.Bootstrap{}, &machine.Master{}, + &machine.Arbiter{}, &machines.Master{}, + &machines.Arbiter{}, &machines.Worker{}, &baremetalbootstrap.IronicCreds{}, &installconfig.PlatformProvisionCheck{}, @@ -121,14 +123,16 @@ func (t *TerraformVariables) Generate(ctx context.Context, parents asset.Parents installConfig := &installconfig.InstallConfig{} bootstrapIgnAsset := &bootstrap.Bootstrap{} masterIgnAsset := &machine.Master{} + arbiterIgnAsset := &machine.Arbiter{} mastersAsset := &machines.Master{} + arbiterAsset := &machines.Arbiter{} workersAsset := &machines.Worker{} manifestsAsset := &manifests.Manifests{} rhcosImage := new(rhcos.Image) rhcosRelease := new(rhcos.Release) rhcosBootstrapImage := new(rhcos.BootstrapImage) ironicCreds := &baremetalbootstrap.IronicCreds{} - 
parents.Get(clusterID, installConfig, bootstrapIgnAsset, masterIgnAsset, mastersAsset, workersAsset, manifestsAsset, rhcosImage, rhcosRelease, rhcosBootstrapImage, ironicCreds) + parents.Get(clusterID, installConfig, bootstrapIgnAsset, arbiterIgnAsset, arbiterAsset, masterIgnAsset, mastersAsset, workersAsset, manifestsAsset, rhcosImage, rhcosRelease, rhcosBootstrapImage, ironicCreds) platform := installConfig.Config.Platform.Name() switch platform { diff --git a/pkg/asset/ignition/bootstrap/baremetal/template.go b/pkg/asset/ignition/bootstrap/baremetal/template.go index 13eb1397ef3..ba034b492d1 100644 --- a/pkg/asset/ignition/bootstrap/baremetal/template.go +++ b/pkg/asset/ignition/bootstrap/baremetal/template.go @@ -197,7 +197,7 @@ func GetTemplateData(config *baremetal.Platform, networks []types.MachineNetwork var dhcpAllowList []string for _, host := range config.Hosts { - if host.IsMaster() { + if host.IsMaster() || host.IsArbiter() { dhcpAllowList = append(dhcpAllowList, host.BootMACAddress) } } diff --git a/pkg/asset/ignition/bootstrap/baremetal/template_test.go b/pkg/asset/ignition/bootstrap/baremetal/template_test.go index 7a42d37b58a..21495870695 100644 --- a/pkg/asset/ignition/bootstrap/baremetal/template_test.go +++ b/pkg/asset/ignition/bootstrap/baremetal/template_test.go @@ -30,6 +30,10 @@ func TestTemplatingIPv4(t *testing.T) { Role: "master", BootMACAddress: "c0:ff:ee:ca:fe:02", }, + { + Role: "arbiter", + BootMACAddress: "c0:ff:ee:ca:fe:04", + }, { Role: "worker", BootMACAddress: "c0:ff:ee:ca:fe:03", @@ -48,7 +52,7 @@ func TestTemplatingIPv4(t *testing.T) { assert.Equal(t, result.ProvisioningCIDR, 24) assert.Equal(t, result.ProvisioningIPv6, false) assert.Equal(t, result.ProvisioningIP, "172.22.0.2") - assert.Equal(t, result.ProvisioningDHCPAllowList, "c0:ff:ee:ca:fe:00 c0:ff:ee:ca:fe:01 c0:ff:ee:ca:fe:02") + assert.Equal(t, result.ProvisioningDHCPAllowList, "c0:ff:ee:ca:fe:00 c0:ff:ee:ca:fe:01 c0:ff:ee:ca:fe:02 c0:ff:ee:ca:fe:04") assert.Equal(t, 
result.IronicUsername, "bootstrap-ironic-user") assert.Equal(t, result.IronicPassword, "passw0rd") } diff --git a/pkg/asset/ignition/bootstrap/common.go b/pkg/asset/ignition/bootstrap/common.go index a9e69e14fba..ebed99317be 100644 --- a/pkg/asset/ignition/bootstrap/common.go +++ b/pkg/asset/ignition/bootstrap/common.go @@ -127,6 +127,7 @@ func (a *Common) Dependencies() []asset.Asset { &mcign.MasterIgnitionCustomizations{}, &mcign.WorkerIgnitionCustomizations{}, &machines.Master{}, + &machines.Arbiter{}, &machines.Worker{}, &manifests.Manifests{}, &manifests.Openshift{}, @@ -315,6 +316,10 @@ func (a *Common) getTemplateData(dependencies asset.Parents, bootstrapInPlace bo // Generate platform-specific bootstrap data var platformData platformTemplateData + controlPlaneReplicas := *installConfig.Config.ControlPlane.Replicas + if installConfig.Config.Arbiter != nil { + controlPlaneReplicas += *installConfig.Config.Arbiter.Replicas + } switch installConfig.Config.Platform.Name() { case awstypes.Name: platformData.AWS = aws.GetTemplateData(installConfig.Config.Platform.AWS) @@ -322,7 +327,7 @@ func (a *Common) getTemplateData(dependencies asset.Parents, bootstrapInPlace bo platformData.BareMetal = baremetal.GetTemplateData( installConfig.Config.Platform.BareMetal, installConfig.Config.MachineNetwork, - *installConfig.Config.ControlPlane.Replicas, + controlPlaneReplicas, ironicCreds.Username, ironicCreds.Password, dependencies, @@ -600,6 +605,7 @@ func (a *Common) addParentFiles(dependencies asset.Parents) { &manifests.Manifests{}, &manifests.Openshift{}, &machines.Master{}, + &machines.Arbiter{}, &machines.Worker{}, &mcign.MasterIgnitionCustomizations{}, &mcign.WorkerIgnitionCustomizations{}, diff --git a/pkg/asset/ignition/machine/arbiter.go b/pkg/asset/ignition/machine/arbiter.go new file mode 100644 index 00000000000..d62ae39eaed --- /dev/null +++ b/pkg/asset/ignition/machine/arbiter.go @@ -0,0 +1,92 @@ +package machine + +import ( + "context" + "encoding/json" + 
"fmt" + "os" + + igntypes "github.com/coreos/ignition/v2/config/v3_2/types" + + "github.com/openshift/installer/pkg/asset" + "github.com/openshift/installer/pkg/asset/ignition" + "github.com/openshift/installer/pkg/asset/installconfig" + "github.com/openshift/installer/pkg/asset/tls" +) + +const ( + arbiterIgnFilename = "arbiter.ign" +) + +// Arbiter is an asset that generates the ignition config for arbiter nodes. +type Arbiter struct { + Config *igntypes.Config + File *asset.File +} + +var _ asset.WritableAsset = (*Arbiter)(nil) + +// Dependencies returns the assets on which the Arbiter asset depends. +func (a *Arbiter) Dependencies() []asset.Asset { + return []asset.Asset{ + &installconfig.InstallConfig{}, + &tls.RootCA{}, + } +} + +// Generate generates the ignition config for the Arbiter asset. +func (a *Arbiter) Generate(_ context.Context, dependencies asset.Parents) error { + installConfig := &installconfig.InstallConfig{} + rootCA := &tls.RootCA{} + dependencies.Get(installConfig, rootCA) + + // Avoid creating ignition files when not an arbiter deployment. + if !installConfig.Config.IsArbiterEnabled() { + return nil + } + + a.Config = pointerIgnitionConfig(installConfig.Config, rootCA.Cert(), "arbiter") + + data, err := ignition.Marshal(a.Config) + if err != nil { + return fmt.Errorf("failed to marshal Ignition config: %w", err) + } + a.File = &asset.File{ + Filename: arbiterIgnFilename, + Data: data, + } + + return nil +} + +// Name returns the human-friendly name of the asset. +func (a *Arbiter) Name() string { + return "Arbiter Ignition Config" +} + +// Files returns the files generated by the asset. +func (a *Arbiter) Files() []*asset.File { + if a.File != nil { + return []*asset.File{a.File} + } + return []*asset.File{} +} + +// Load returns the arbiter ignitions from disk. 
+func (a *Arbiter) Load(f asset.FileFetcher) (found bool, err error) { + file, err := f.FetchByName(arbiterIgnFilename) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + + config := &igntypes.Config{} + if err := json.Unmarshal(file.Data, config); err != nil { + return false, fmt.Errorf("failed to unmarshal %s: %w", arbiterIgnFilename, err) + } + + a.File, a.Config = file, config + return true, nil +} diff --git a/pkg/asset/ignition/machine/arbiter_ignition_customizations.go b/pkg/asset/ignition/machine/arbiter_ignition_customizations.go new file mode 100644 index 00000000000..f62244bd4a6 --- /dev/null +++ b/pkg/asset/ignition/machine/arbiter_ignition_customizations.go @@ -0,0 +1,97 @@ +package machine + +import ( + "context" + "fmt" + "path/filepath" + + "github.com/sirupsen/logrus" + "sigs.k8s.io/yaml" + + "github.com/openshift/installer/pkg/asset" + "github.com/openshift/installer/pkg/asset/ignition" + "github.com/openshift/installer/pkg/asset/installconfig" + "github.com/openshift/installer/pkg/asset/tls" +) + +var ( + arbiterMachineConfigFileName = filepath.Join(directory, "99_openshift-installer-arbiter.yaml") +) + +// ArbiterIgnitionCustomizations is an asset that checks for any customizations a user might +// have made to the pointer ignition configs before creating the cluster. If customizations +// are made, then the updates are reconciled as a MachineConfig file. +type ArbiterIgnitionCustomizations struct { + File *asset.File +} + +var _ asset.WritableAsset = (*ArbiterIgnitionCustomizations)(nil) + +// Dependencies returns the dependencies for ArbiterIgnitionCustomizations. +func (a *ArbiterIgnitionCustomizations) Dependencies() []asset.Asset { + return []asset.Asset{ + &installconfig.InstallConfig{}, + &tls.RootCA{}, + &Arbiter{}, + } +} + +// Generate queries for input from the user. 
+func (a *ArbiterIgnitionCustomizations) Generate(_ context.Context, dependencies asset.Parents) error { + installConfig := &installconfig.InstallConfig{} + rootCA := &tls.RootCA{} + arbiter := &Arbiter{} + dependencies.Get(installConfig, rootCA, arbiter) + + // Avoid creating ignition customizations when not an arbiter deployment. + if !installConfig.Config.IsArbiterEnabled() { + return nil + } + + defaultPointerIgnition := pointerIgnitionConfig(installConfig.Config, rootCA.Cert(), "arbiter") + savedPointerIgnition := arbiter.Config + + savedPointerIgnitionJSON, err := ignition.Marshal(savedPointerIgnition) + if err != nil { + return fmt.Errorf("failed to Marshal savedPointerIgnition: %w", err) + } + defaultPointerIgnitionJSON, err := ignition.Marshal(defaultPointerIgnition) + if err != nil { + return fmt.Errorf("failed to Marshal defaultPointerIgnition: %w", err) + } + if string(savedPointerIgnitionJSON) != string(defaultPointerIgnitionJSON) { + logrus.Infof("Arbiter pointer ignition was modified. Saving contents to a machineconfig") + mc, err := generatePointerMachineConfig(*savedPointerIgnition, "arbiter") + if err != nil { + return fmt.Errorf("failed to generate arbiter installer machineconfig: %w", err) + } + configData, err := yaml.Marshal(mc) + if err != nil { + return fmt.Errorf("failed to marshal arbiter installer machineconfig: %w", err) + } + a.File = &asset.File{ + Filename: arbiterMachineConfigFileName, + Data: configData, + } + } + + return nil +} + +// Name returns the human-friendly name of the asset. +func (a *ArbiterIgnitionCustomizations) Name() string { + return "Arbiter Ignition Customization Check" +} + +// Files returns the files generated by the asset. +func (a *ArbiterIgnitionCustomizations) Files() []*asset.File { + if a.File != nil { + return []*asset.File{a.File} + } + return []*asset.File{} +} + +// Load does nothing, since we consume the ignition-configs. 
+func (a *ArbiterIgnitionCustomizations) Load(f asset.FileFetcher) (found bool, err error) { + return false, nil +} diff --git a/pkg/asset/ignition/machine/arbiter_ignition_customizations_test.go b/pkg/asset/ignition/machine/arbiter_ignition_customizations_test.go new file mode 100644 index 00000000000..bf6b280849c --- /dev/null +++ b/pkg/asset/ignition/machine/arbiter_ignition_customizations_test.go @@ -0,0 +1,125 @@ +package machine + +import ( + "context" + "testing" + + igntypes "github.com/coreos/ignition/v2/config/v3_2/types" + "github.com/stretchr/testify/assert" + "k8s.io/utils/ptr" + + "github.com/openshift/installer/pkg/asset" + "github.com/openshift/installer/pkg/asset/ignition" + "github.com/openshift/installer/pkg/asset/installconfig" + "github.com/openshift/installer/pkg/asset/tls" + "github.com/openshift/installer/pkg/ipnet" + "github.com/openshift/installer/pkg/types" + "github.com/openshift/installer/pkg/types/aws" +) + +// TestArbiterIgnitionCustomizationsGenerate tests generating the arbiter ignition check asset. 
+func TestArbiterIgnitionCustomizationsGenerate(t *testing.T) { + cases := []struct { + name string + customize bool + assetExpected bool + installConfig *installconfig.InstallConfig + }{ + { + name: "not customized", + customize: false, + assetExpected: false, + installConfig: installconfig.MakeAsset( + &types.InstallConfig{ + Networking: &types.Networking{ + ServiceNetwork: []ipnet.IPNet{*ipnet.MustParseCIDR("10.0.1.0/24")}, + }, + Platform: types.Platform{ + AWS: &aws.Platform{ + Region: "us-east", + }, + }, + Arbiter: &types.MachinePool{ + Name: "arbiter", + Replicas: ptr.To(int64(1)), + }, + }), + }, + { + name: "pointer customized", + customize: true, + assetExpected: true, + installConfig: installconfig.MakeAsset( + &types.InstallConfig{ + Networking: &types.Networking{ + ServiceNetwork: []ipnet.IPNet{*ipnet.MustParseCIDR("10.0.1.0/24")}, + }, + Platform: types.Platform{ + AWS: &aws.Platform{ + Region: "us-east", + }, + }, + Arbiter: &types.MachinePool{ + Name: "arbiter", + Replicas: ptr.To(int64(1)), + }, + }), + }, + { + name: "pointer customized but arbiter not set", + customize: true, + assetExpected: false, + installConfig: installconfig.MakeAsset( + &types.InstallConfig{ + Networking: &types.Networking{ + ServiceNetwork: []ipnet.IPNet{*ipnet.MustParseCIDR("10.0.1.0/24")}, + }, + Platform: types.Platform{ + AWS: &aws.Platform{ + Region: "us-east", + }, + }, + }), + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + rootCA := &tls.RootCA{} + err := rootCA.Generate(context.Background(), nil) + assert.NoError(t, err, "unexpected error generating root CA") + + parents := asset.Parents{} + parents.Add(tc.installConfig, rootCA) + + arbiter := &Arbiter{} + err = arbiter.Generate(context.Background(), parents) + assert.NoError(t, err, "unexpected error generating arbiter asset") + + if tc.customize { + // Create empty config so that we force the pointer check to validate + // arbiter skip is also happening when customizations are 
triggered + // on non arbiter cluster. + if arbiter.Config == nil { + arbiter.Config = &igntypes.Config{} + } + // Modify the arbiter config, emulating a customization to the pointer. + arbiter.Config.Storage.Files = append(arbiter.Config.Storage.Files, + ignition.FileFromString("/etc/foo", "root", 0644, "foo")) + } + + parents.Add(arbiter) + arbiterIgnCheck := &ArbiterIgnitionCustomizations{} + err = arbiterIgnCheck.Generate(context.Background(), parents) + assert.NoError(t, err, "unexpected error generating arbiter ignition check asset") + + actualFiles := arbiterIgnCheck.Files() + if tc.assetExpected { + assert.Equal(t, 1, len(actualFiles), "unexpected number of files in arbiter state") + assert.Equal(t, arbiterMachineConfigFileName, actualFiles[0].Filename, "unexpected name for arbiter ignition config") + } else { + assert.Equal(t, 0, len(actualFiles), "unexpected number of files in arbiter state") + } + }) + } +} diff --git a/pkg/asset/ignition/machine/arbiter_test.go b/pkg/asset/ignition/machine/arbiter_test.go new file mode 100644 index 00000000000..e69206905ec --- /dev/null +++ b/pkg/asset/ignition/machine/arbiter_test.go @@ -0,0 +1,92 @@ +package machine + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + + "github.com/openshift/installer/pkg/asset" + "github.com/openshift/installer/pkg/asset/installconfig" + "github.com/openshift/installer/pkg/asset/tls" + "github.com/openshift/installer/pkg/ipnet" + "github.com/openshift/installer/pkg/types" + "github.com/openshift/installer/pkg/types/aws" +) + +// TestArbiterGenerate tests generating the arbiter asset. 
+func TestArbiterGenerate(t *testing.T) { + testCases := []struct { + expectedIgnitionConfigNames []string + installConfig *installconfig.InstallConfig + description string + }{ + { + description: "should generate with arbiter config", + expectedIgnitionConfigNames: []string{ + "arbiter.ign", + }, + installConfig: installconfig.MakeAsset( + &types.InstallConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + BaseDomain: "test-domain", + Networking: &types.Networking{ + ServiceNetwork: []ipnet.IPNet{*ipnet.MustParseCIDR("10.0.1.0/24")}, + }, + Platform: types.Platform{ + AWS: &aws.Platform{ + Region: "us-east", + }, + }, + Arbiter: &types.MachinePool{ + Name: "arbiter", + Replicas: ptr.To(int64(1)), + }, + }), + }, + { + description: "should not generate arbiter ignition when no arbiter", + expectedIgnitionConfigNames: []string{}, + installConfig: installconfig.MakeAsset( + &types.InstallConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + BaseDomain: "test-domain", + Networking: &types.Networking{ + ServiceNetwork: []ipnet.IPNet{*ipnet.MustParseCIDR("10.0.1.0/24")}, + }, + Platform: types.Platform{ + AWS: &aws.Platform{ + Region: "us-east", + }, + }, + }), + }, + } + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + rootCA := &tls.RootCA{} + err := rootCA.Generate(context.Background(), nil) + assert.NoError(t, err, "unexpected error generating root CA") + + parents := asset.Parents{} + parents.Add(tc.installConfig, rootCA) + + arbiter := &Arbiter{} + err = arbiter.Generate(context.Background(), parents) + assert.NoError(t, err, "unexpected error generating arbiter asset") + + actualFiles := arbiter.Files() + actualIgnitionConfigNames := make([]string, len(actualFiles)) + for i, f := range actualFiles { + actualIgnitionConfigNames[i] = f.Filename + } + assert.Equal(t, tc.expectedIgnitionConfigNames, actualIgnitionConfigNames, "unexpected names for arbiter ignition configs") + }) + } +} diff --git 
a/pkg/asset/machines/arbiter.go b/pkg/asset/machines/arbiter.go new file mode 100644 index 00000000000..2c16d9293f1 --- /dev/null +++ b/pkg/asset/machines/arbiter.go @@ -0,0 +1,394 @@ +package machines + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" + "sigs.k8s.io/yaml" + + configv1 "github.com/openshift/api/config/v1" + machinev1 "github.com/openshift/api/machine/v1" + machinev1alpha1 "github.com/openshift/api/machine/v1alpha1" + machinev1beta1 "github.com/openshift/api/machine/v1beta1" + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + baremetalapi "github.com/openshift/cluster-api-provider-baremetal/pkg/apis" + baremetalprovider "github.com/openshift/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1" + libvirtapi "github.com/openshift/cluster-api-provider-libvirt/pkg/apis" + libvirtprovider "github.com/openshift/cluster-api-provider-libvirt/pkg/apis/libvirtproviderconfig/v1beta1" + ovirtproviderapi "github.com/openshift/cluster-api-provider-ovirt/pkg/apis" + ovirtprovider "github.com/openshift/cluster-api-provider-ovirt/pkg/apis/ovirtprovider/v1beta1" + "github.com/openshift/installer/pkg/asset" + "github.com/openshift/installer/pkg/asset/ignition/machine" + "github.com/openshift/installer/pkg/asset/installconfig" + "github.com/openshift/installer/pkg/asset/machines/baremetal" + "github.com/openshift/installer/pkg/asset/machines/machineconfig" + "github.com/openshift/installer/pkg/asset/rhcos" + "github.com/openshift/installer/pkg/types" + baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" + openstacktypes "github.com/openshift/installer/pkg/types/openstack" + vspheretypes "github.com/openshift/installer/pkg/types/vsphere" + ibmcloudapi "github.com/openshift/machine-api-provider-ibmcloud/pkg/apis" + ibmcloudprovider 
"github.com/openshift/machine-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1" +) + +// Arbiter generates the machines for the `arbiter` machine pool. +type Arbiter struct { + UserDataFile *asset.File + MachineConfigFiles []*asset.File + MachineFiles []*asset.File + IPClaimFiles []*asset.File + IPAddrFiles []*asset.File + + // SecretFiles is used by the baremetal platform to register the + // credential information for communicating with management + // controllers on hosts. + SecretFiles []*asset.File + + // NetworkConfigSecretFiles is used by the baremetal platform to + // store the networking configuration per host + NetworkConfigSecretFiles []*asset.File + + // HostFiles is the list of baremetal hosts provided in the + // installer configuration. + HostFiles []*asset.File +} + +const ( + + // arbiterMachineFileName is the format string for constucting the + // arbiter Machine filenames. + arbiterMachineFileName = "99_openshift-cluster-api_arbiter-machines-%s.yaml" + + // arbiterUserDataFileName is the filename used for the arbiter + // user-data secret. + arbiterUserDataFileName = "99_openshift-cluster-api_arbiter-user-data-secret.yaml" + + arbiterHostFileName = "99_openshift-cluster-api_arbiter_hosts-%s.yaml" + arbiterSecretFileName = "99_openshift-cluster-api_arbiter_host-bmc-secrets-%s.yaml" // #nosec G101 + arbiterNetworkConfigSecretFileName = "99_openshift-cluster-api_arbiter_host-network-config-secrets-%s.yaml" // #nosec G101 +) + +var ( + arbiterMachineFileNamePattern = fmt.Sprintf(arbiterMachineFileName, "*") + arbiterIPClaimFileNamePattern = fmt.Sprintf(ipClaimFileName, "*arbiter*") + arbiterIPAddressFileNamePattern = fmt.Sprintf(ipAddressFileName, "*arbiter*") + + _ asset.WritableAsset = (*Arbiter)(nil) +) + +// Name returns a human friendly name for the Arbiter Asset. +func (m *Arbiter) Name() string { + return "Arbiter Machines" +} + +// Dependencies returns all of the dependencies directly needed by the +// Arbiter asset. 
+func (m *Arbiter) Dependencies() []asset.Asset { + return []asset.Asset{ + &installconfig.ClusterID{}, + // PlatformCredsCheck just checks the creds (and asks, if needed) + // We do not actually use it in this asset directly, hence + // it is put in the dependencies but not fetched in Generate + &installconfig.PlatformCredsCheck{}, + &installconfig.InstallConfig{}, + new(rhcos.Image), + &machine.Arbiter{}, + } +} + +// Generate generates the Arbiter asset. +// +//nolint:gocyclo +func (m *Arbiter) Generate(ctx context.Context, dependencies asset.Parents) error { + clusterID := &installconfig.ClusterID{} + installConfig := &installconfig.InstallConfig{} + rhcosImage := new(rhcos.Image) + mign := &machine.Arbiter{} + dependencies.Get(clusterID, installConfig, rhcosImage, mign) + + ic := installConfig.Config + + if ic.Arbiter == nil { + return nil + } + if ic.Platform.Name() != baremetaltypes.Name { + return fmt.Errorf("only BareMetal platform is supported for Arbiter deployments") + } + + pool := *ic.Arbiter + var err error + machines := []machinev1beta1.Machine{} + var ipClaims []ipamv1.IPAddressClaim + var ipAddrs []ipamv1.IPAddress + + mpool := defaultBareMetalMachinePoolPlatform() + mpool.Set(ic.Platform.BareMetal.DefaultMachinePlatform) + mpool.Set(pool.Platform.BareMetal) + pool.Platform.BareMetal = &mpool + + // Use managed user data secret, since we always have up to date images + // available in the cluster + arbiterUserDataSecretName := "arbiter-user-data-managed" // #nosec G101 + enabledCaps := installConfig.Config.GetEnabledCapabilities() + if enabledCaps.Has(configv1.ClusterVersionCapabilityMachineAPI) { + machines, err = baremetal.Machines(clusterID.InfraID, ic, &pool, "arbiter", arbiterUserDataSecretName) + if err != nil { + return fmt.Errorf("failed to create arbiter machine objects: %w", err) + } + + hostSettings, err := baremetal.ArbiterHosts(ic, machines, arbiterUserDataSecretName) + if err != nil { + return fmt.Errorf("failed to assemble host 
data: %w", err) + } + + hosts, err := createHostAssetFiles(hostSettings.Hosts, arbiterHostFileName) + if err != nil { + return err + } + m.HostFiles = append(m.HostFiles, hosts...) + + secrets, err := createSecretAssetFiles(hostSettings.Secrets, arbiterSecretFileName) + if err != nil { + return err + } + m.SecretFiles = append(m.SecretFiles, secrets...) + + networkSecrets, err := createSecretAssetFiles(hostSettings.NetworkConfigSecrets, arbiterNetworkConfigSecretFileName) + if err != nil { + return err + } + m.NetworkConfigSecretFiles = append(m.NetworkConfigSecretFiles, networkSecrets...) + } + + data, err := userDataSecret(arbiterUserDataSecretName, mign.File.Data) + if err != nil { + return fmt.Errorf("failed to create user-data secret for arbiter machines: %w", err) + } + + m.UserDataFile = &asset.File{ + Filename: filepath.Join(directory, arbiterUserDataFileName), + Data: data, + } + + machineConfigs := []*mcfgv1.MachineConfig{} + if pool.Hyperthreading == types.HyperthreadingDisabled { + ignHT, err := machineconfig.ForHyperthreadingDisabled("arbiter") + if err != nil { + return fmt.Errorf("failed to create ignition for hyperthreading disabled for arbiter machines: %w", err) + } + machineConfigs = append(machineConfigs, ignHT) + } + if ic.SSHKey != "" { + ignSSH, err := machineconfig.ForAuthorizedKeys(ic.SSHKey, "arbiter") + if err != nil { + return fmt.Errorf("failed to create ignition for authorized SSH keys for arbiter machines: %w", err) + } + machineConfigs = append(machineConfigs, ignSSH) + } + if ic.FIPS { + ignFIPS, err := machineconfig.ForFIPSEnabled("arbiter") + if err != nil { + return fmt.Errorf("failed to create ignition for FIPS enabled for arbiter machines: %w", err) + } + machineConfigs = append(machineConfigs, ignFIPS) + } + + // The maximum number of networks supported on ServiceNetwork is two, one IPv4 and one IPv6 network. + // The cluster-network-operator handles the validation of this field. 
+ // Reference: https://github.com/openshift/cluster-network-operator/blob/fc3e0e25b4cfa43e14122bdcdd6d7f2585017d75/pkg/network/cluster_config.go#L45-L52 + if ic.Networking != nil && len(ic.Networking.ServiceNetwork) == 2 && + (ic.Platform.Name() == openstacktypes.Name || ic.Platform.Name() == vspheretypes.Name) { + // Only configure kernel args for dual-stack clusters. + ignIPv6, err := machineconfig.ForDualStackAddresses("arbiter") + if err != nil { + return fmt.Errorf("failed to create ignition to configure IPv6 for arbiter machines: %w", err) + } + machineConfigs = append(machineConfigs, ignIPv6) + } + + m.MachineConfigFiles, err = machineconfig.Manifests(machineConfigs, "arbiter", directory) + if err != nil { + return fmt.Errorf("failed to create MachineConfig manifests for arbiter machines: %w", err) + } + + m.MachineFiles = make([]*asset.File, len(machines)) + + m.IPClaimFiles = make([]*asset.File, len(ipClaims)) + for i, claim := range ipClaims { + data, err := yaml.Marshal(claim) + if err != nil { + return fmt.Errorf("unable to marshal ip claim %v: %w", claim.Name, err) + } + + m.IPClaimFiles[i] = &asset.File{ + Filename: filepath.Join(directory, fmt.Sprintf(ipClaimFileName, claim.Name)), + Data: data, + } + } + + m.IPAddrFiles = make([]*asset.File, len(ipAddrs)) + for i, address := range ipAddrs { + data, err := yaml.Marshal(address) + if err != nil { + return fmt.Errorf("unable to marshal ip claim %v: %w", address.Name, err) + } + + m.IPAddrFiles[i] = &asset.File{ + Filename: filepath.Join(directory, fmt.Sprintf(ipAddressFileName, address.Name)), + Data: data, + } + } + + padFormat := fmt.Sprintf("%%0%dd", len(fmt.Sprintf("%d", len(machines)))) + for i, machine := range machines { + data, err := yaml.Marshal(machine) + if err != nil { + return fmt.Errorf("marshal arbiter %d: %w", i, err) + } + + padded := fmt.Sprintf(padFormat, i) + m.MachineFiles[i] = &asset.File{ + Filename: filepath.Join(directory, fmt.Sprintf(arbiterMachineFileName, padded)), + Data: 
data, + } + } + return nil +} + +// Files returns the files generated by the asset. +func (m *Arbiter) Files() []*asset.File { + files := make([]*asset.File, 0, 1+len(m.MachineConfigFiles)+len(m.MachineFiles)) + if m.UserDataFile != nil { + files = append(files, m.UserDataFile) + } + files = append(files, m.MachineConfigFiles...) + // Hosts refer to secrets, so place the secrets before the hosts + // to avoid unnecessary reconciliation errors. + files = append(files, m.SecretFiles...) + files = append(files, m.NetworkConfigSecretFiles...) + // Machines are linked to hosts via the machineRef, so we create + // the hosts first to ensure if the operator starts trying to + // reconcile a machine it can pick up the related host. + files = append(files, m.HostFiles...) + files = append(files, m.MachineFiles...) + files = append(files, m.IPClaimFiles...) + files = append(files, m.IPAddrFiles...) + return files +} + +// Load reads the asset files from disk. +func (m *Arbiter) Load(f asset.FileFetcher) (found bool, err error) { + file, err := f.FetchByName(filepath.Join(directory, arbiterUserDataFileName)) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + m.UserDataFile = file + + m.MachineConfigFiles, err = machineconfig.Load(f, "arbiter", directory) + if err != nil { + return true, err + } + + var fileList []*asset.File + + fileList, err = f.FetchByPattern(filepath.Join(directory, secretFileNamePattern)) + if err != nil { + return true, err + } + m.SecretFiles = fileList + + fileList, err = f.FetchByPattern(filepath.Join(directory, networkConfigSecretFileNamePattern)) + if err != nil { + return true, err + } + m.NetworkConfigSecretFiles = fileList + + fileList, err = f.FetchByPattern(filepath.Join(directory, hostFileNamePattern)) + if err != nil { + return true, err + } + m.HostFiles = fileList + + fileList, err = f.FetchByPattern(filepath.Join(directory, arbiterMachineFileNamePattern)) + if err != nil { + return true, err + } + 
m.MachineFiles = fileList + + fileList, err = f.FetchByPattern(filepath.Join(directory, arbiterIPClaimFileNamePattern)) + if err != nil { + return true, err + } + m.IPClaimFiles = fileList + + fileList, err = f.FetchByPattern(filepath.Join(directory, arbiterIPAddressFileNamePattern)) + if err != nil { + return true, err + } + m.IPAddrFiles = fileList + + return true, nil +} + +// Machines returns arbiter Machine manifest structures. +func (m *Arbiter) Machines() ([]machinev1beta1.Machine, error) { + scheme := runtime.NewScheme() + utilruntime.Must(baremetalapi.AddToScheme(scheme)) + utilruntime.Must(ibmcloudapi.AddToScheme(scheme)) + utilruntime.Must(libvirtapi.AddToScheme(scheme)) + utilruntime.Must(ovirtproviderapi.AddToScheme(scheme)) + utilruntime.Must(machinev1beta1.AddToScheme(scheme)) + utilruntime.Must(machinev1.Install(scheme)) + + scheme.AddKnownTypes(machinev1alpha1.GroupVersion, + &machinev1alpha1.OpenstackProviderSpec{}, + ) + scheme.AddKnownTypes(machinev1beta1.SchemeGroupVersion, + &machinev1beta1.AWSMachineProviderConfig{}, + &machinev1beta1.VSphereMachineProviderSpec{}, + &machinev1beta1.AzureMachineProviderSpec{}, + &machinev1beta1.GCPMachineProviderSpec{}, + ) + scheme.AddKnownTypes(machinev1.GroupVersion, + &machinev1.NutanixMachineProviderConfig{}, + &machinev1.PowerVSMachineProviderConfig{}, + ) + + decoder := serializer.NewCodecFactory(scheme).UniversalDecoder( + machinev1.GroupVersion, + baremetalprovider.SchemeGroupVersion, + ibmcloudprovider.SchemeGroupVersion, + libvirtprovider.SchemeGroupVersion, + machinev1alpha1.GroupVersion, + machinev1beta1.SchemeGroupVersion, + ovirtprovider.SchemeGroupVersion, + ) + + machines := []machinev1beta1.Machine{} + for i, file := range m.MachineFiles { + machine := &machinev1beta1.Machine{} + err := yaml.Unmarshal(file.Data, &machine) + if err != nil { + return machines, fmt.Errorf("unmarshal arbiter %d: %w", i, err) + } + + obj, _, err := decoder.Decode(machine.Spec.ProviderSpec.Value.Raw, nil, nil) + if 
err != nil { + return machines, fmt.Errorf("unmarshal arbiter %d: %w", i, err) + } + + machine.Spec.ProviderSpec.Value = &runtime.RawExtension{Object: obj} + machines = append(machines, *machine) + } + + return machines, nil +} diff --git a/pkg/asset/machines/arbiter_test.go b/pkg/asset/machines/arbiter_test.go new file mode 100644 index 00000000000..c53c17d3d3b --- /dev/null +++ b/pkg/asset/machines/arbiter_test.go @@ -0,0 +1,334 @@ +package machines + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + + "github.com/openshift/installer/pkg/asset" + "github.com/openshift/installer/pkg/asset/ignition/machine" + "github.com/openshift/installer/pkg/asset/installconfig" + "github.com/openshift/installer/pkg/asset/rhcos" + "github.com/openshift/installer/pkg/types" + awstypes "github.com/openshift/installer/pkg/types/aws" + "github.com/openshift/installer/pkg/types/baremetal" +) + +func TestArbiterGenerateMachineConfigs(t *testing.T) { + cases := []struct { + name string + key string + hyperthreading types.HyperthreadingMode + expectedMachineConfig []string + }{ + { + name: "no key hyperthreading enabled", + hyperthreading: types.HyperthreadingEnabled, + }, + { + name: "key present hyperthreading enabled", + key: "ssh-rsa: dummy-key", + hyperthreading: types.HyperthreadingEnabled, + expectedMachineConfig: []string{`apiVersion: machineconfiguration.openshift.io/v1 +kind: MachineConfig +metadata: + creationTimestamp: null + labels: + machineconfiguration.openshift.io/role: arbiter + name: 99-arbiter-ssh +spec: + baseOSExtensionsContainerImage: "" + config: + ignition: + version: 3.2.0 + passwd: + users: + - name: core + sshAuthorizedKeys: + - 'ssh-rsa: dummy-key' + extensions: null + fips: false + kernelArguments: null + kernelType: "" + osImageURL: "" +`}, + }, + { + name: "no key hyperthreading disabled", + hyperthreading: types.HyperthreadingDisabled, + expectedMachineConfig: 
[]string{`apiVersion: machineconfiguration.openshift.io/v1 +kind: MachineConfig +metadata: + creationTimestamp: null + labels: + machineconfiguration.openshift.io/role: arbiter + name: 99-arbiter-disable-hyperthreading +spec: + baseOSExtensionsContainerImage: "" + config: + ignition: + version: 3.2.0 + extensions: null + fips: false + kernelArguments: + - nosmt + - smt-enabled=off + kernelType: "" + osImageURL: "" +`}, + }, + { + name: "key present hyperthreading disabled", + key: "ssh-rsa: dummy-key", + hyperthreading: types.HyperthreadingDisabled, + expectedMachineConfig: []string{`apiVersion: machineconfiguration.openshift.io/v1 +kind: MachineConfig +metadata: + creationTimestamp: null + labels: + machineconfiguration.openshift.io/role: arbiter + name: 99-arbiter-disable-hyperthreading +spec: + baseOSExtensionsContainerImage: "" + config: + ignition: + version: 3.2.0 + extensions: null + fips: false + kernelArguments: + - nosmt + - smt-enabled=off + kernelType: "" + osImageURL: "" +`, `apiVersion: machineconfiguration.openshift.io/v1 +kind: MachineConfig +metadata: + creationTimestamp: null + labels: + machineconfiguration.openshift.io/role: arbiter + name: 99-arbiter-ssh +spec: + baseOSExtensionsContainerImage: "" + config: + ignition: + version: 3.2.0 + passwd: + users: + - name: core + sshAuthorizedKeys: + - 'ssh-rsa: dummy-key' + extensions: null + fips: false + kernelArguments: null + kernelType: "" + osImageURL: "" +`}, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + parents := asset.Parents{} + parents.Add( + &installconfig.ClusterID{ + UUID: "test-uuid", + InfraID: "test-infra-id", + }, + installconfig.MakeAsset( + &types.InstallConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + SSHKey: tc.key, + BaseDomain: "test-domain", + Platform: types.Platform{ + BareMetal: &baremetal.Platform{}, + }, + Arbiter: &types.MachinePool{ + Hyperthreading: tc.hyperthreading, + Replicas: ptr.To(int64(1)), + Platform: 
types.MachinePoolPlatform{ + BareMetal: &baremetal.MachinePool{}, + }, + }, + }), + rhcos.MakeAsset("test-image"), + (*rhcos.Release)(ptr.To("412.86.202208101040-0")), + &machine.Arbiter{ + File: &asset.File{ + Filename: "arbiter-ignition", + Data: []byte("test-ignition"), + }, + }, + ) + arbiter := &Arbiter{} + if err := arbiter.Generate(context.Background(), parents); err != nil { + t.Fatalf("failed to generate arbiter machines: %v", err) + } + expectedLen := len(tc.expectedMachineConfig) + if assert.Equal(t, expectedLen, len(arbiter.MachineConfigFiles)) { + for i := 0; i < expectedLen; i++ { + assert.Equal(t, tc.expectedMachineConfig[i], string(arbiter.MachineConfigFiles[i].Data), "unexpected machine config contents") + } + } else { + assert.Equal(t, 0, len(arbiter.MachineConfigFiles), "expected no machine config files") + } + }) + } +} + +func TestArbiterInstallOnlyForBaremetal(t *testing.T) { + parents := asset.Parents{} + installConfig := installconfig.MakeAsset( + &types.InstallConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + SSHKey: "ssh-rsa: dummy-key", + BaseDomain: "test-domain", + Platform: types.Platform{ + AWS: &awstypes.Platform{ + Region: "us-east-1", + DefaultMachinePlatform: &awstypes.MachinePool{ + InstanceType: "TEST_INSTANCE_TYPE", + }, + }, + }, + Arbiter: &types.MachinePool{ + Hyperthreading: types.HyperthreadingDisabled, + Replicas: ptr.To(int64(1)), + Platform: types.MachinePoolPlatform{ + BareMetal: &baremetal.MachinePool{}, + }, + }, + }) + + parents.Add( + &installconfig.ClusterID{ + UUID: "test-uuid", + InfraID: "test-infra-id", + }, + installConfig, + rhcos.MakeAsset("test-image"), + (*rhcos.Release)(ptr.To("412.86.202208101040-0")), + &machine.Arbiter{ + File: &asset.File{ + Filename: "arbiter-ignition", + Data: []byte("test-ignition"), + }, + }, + ) + arbiter := &Arbiter{} + err := arbiter.Generate(context.Background(), parents) + assert.NotNil(t, err, "expected arbiter generate to fail for non baremetal 
platforms") + assert.Contains(t, err.Error(), "only BareMetal platform is supported for Arbiter deployments") +} + +func TestArbiterIsNotModified(t *testing.T) { + parents := asset.Parents{} + installConfig := installconfig.MakeAsset( + &types.InstallConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + SSHKey: "ssh-rsa: dummy-key", + BaseDomain: "test-domain", + Platform: types.Platform{ + BareMetal: &baremetal.Platform{ + ClusterProvisioningIP: "127.0.0.1", + DefaultMachinePlatform: &baremetal.MachinePool{}, + }, + }, + Arbiter: &types.MachinePool{ + Hyperthreading: types.HyperthreadingDisabled, + Replicas: ptr.To(int64(1)), + Platform: types.MachinePoolPlatform{ + BareMetal: &baremetal.MachinePool{}, + }, + }, + }) + + parents.Add( + &installconfig.ClusterID{ + UUID: "test-uuid", + InfraID: "test-infra-id", + }, + installConfig, + rhcos.MakeAsset("test-image"), + (*rhcos.Release)(ptr.To("412.86.202208101040-0")), + &machine.Arbiter{ + File: &asset.File{ + Filename: "arbiter-ignition", + Data: []byte("test-ignition"), + }, + }, + ) + arbiter := &Arbiter{} + if err := arbiter.Generate(context.Background(), parents); err != nil { + t.Fatalf("failed to generate arbiter machines: %v", err) + } + + if installConfig.Config.Arbiter.Platform.BareMetal == nil { + t.Fatalf("arbiter in the install config has been modified") + } +} + +func TestArbiterBaremetalGeneratedAssetFiles(t *testing.T) { + parents := asset.Parents{} + installConfig := installconfig.MakeAsset( + &types.InstallConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + Platform: types.Platform{ + BareMetal: &baremetal.Platform{ + Hosts: []*baremetal.Host{ + { + Name: "arbiter-0", + Role: "arbiter", + BMC: baremetal.BMC{ + Username: "usr-0", + Password: "pwd-0", + }, + NetworkConfig: networkConfig("interfaces:"), + }, + }, + }, + }, + Arbiter: &types.MachinePool{ + Replicas: ptr.To(int64(1)), + Platform: types.MachinePoolPlatform{ + BareMetal: &baremetal.MachinePool{}, + }, 
+ }, + }) + + parents.Add( + &installconfig.ClusterID{ + UUID: "test-uuid", + InfraID: "test-infra-id", + }, + installConfig, + rhcos.MakeAsset("test-image"), + (*rhcos.Release)(ptr.To("412.86.202208101040-0")), + &machine.Arbiter{ + File: &asset.File{ + Filename: "arbiter-ignition", + Data: []byte("test-ignition"), + }, + }, + ) + arbiter := &Arbiter{} + assert.NoError(t, arbiter.Generate(context.Background(), parents)) + + assert.Len(t, arbiter.HostFiles, 1) + verifyHost(t, arbiter.HostFiles[0], "openshift/99_openshift-cluster-api_arbiter_hosts-0.yaml", "arbiter-0") + + assert.Len(t, arbiter.SecretFiles, 1) + verifySecret(t, arbiter.SecretFiles[0], "openshift/99_openshift-cluster-api_arbiter_host-bmc-secrets-0.yaml", "arbiter-0-bmc-secret", "map[password:[112 119 100 45 48] username:[117 115 114 45 48]]") + + assert.Len(t, arbiter.NetworkConfigSecretFiles, 1) + verifySecret(t, arbiter.NetworkConfigSecretFiles[0], "openshift/99_openshift-cluster-api_arbiter_host-network-config-secrets-0.yaml", "arbiter-0-network-config-secret", "map[nmstate:[105 110 116 101 114 102 97 99 101 115 58 32 110 117 108 108 10]]") +} diff --git a/pkg/asset/machines/baremetal/hosts.go b/pkg/asset/machines/baremetal/hosts.go index 901f07c4a5d..ec4b8484982 100644 --- a/pkg/asset/machines/baremetal/hosts.go +++ b/pkg/asset/machines/baremetal/hosts.go @@ -116,10 +116,31 @@ func Hosts(config *types.InstallConfig, machines []machineapi.Machine, userDataS return nil, fmt.Errorf("no baremetal platform in configuration") } + numRequiredArbiters := replicaCount(config.Arbiter) + numArbiters := 0 + numRequiredMasters := len(machines) numMasters := 0 for _, host := range config.Platform.BareMetal.Hosts { + // We only infer arbiter hosts if we are in an arbiter deployment + if config.IsArbiterEnabled() { + // If we know the host is an arbiter and the role isn't an empty value (i.e. 
explicitly defined by user) + // no further investigating is needed, skip creating resources + if host.IsArbiter() { + numArbiters++ + continue + } + + // Infer if host is an arbiter, if we have already satisfied the number of masters + // the next hosts will be used for the arbiter, so skip those and allow ArbiterHosts() + // to configure and create those resources. + if numMasters == numRequiredMasters && numArbiters < numRequiredArbiters { + numArbiters++ + continue + } + } + secret, bmc := createSecret(host) if secret != nil { settings.Secrets = append(settings.Secrets, *secret) @@ -172,3 +193,98 @@ func Hosts(config *types.InstallConfig, machines []machineapi.Machine, userDataS return settings, nil } + +// ArbiterHosts returns the HostSettings with details of the hardware being +// used to construct a cluster with an arbiter node. +func ArbiterHosts(config *types.InstallConfig, machines []machineapi.Machine, userDataSecret string) (*HostSettings, error) { + settings := &HostSettings{} + + if config.Platform.BareMetal == nil { + return nil, fmt.Errorf("no baremetal platform in configuration") + } + + // If Arbiter is not enabled, nothing happens + if !config.IsArbiterEnabled() { + return nil, nil + } + + numRequiredMasters := replicaCount(config.ControlPlane) + numMasters := 0 + + numRequiredArbiters := len(machines) + numArbiters := 0 + for _, host := range config.Platform.BareMetal.Hosts { + // We make sure to keep an accurate count of known masters when explicitly set + if host.IsMaster() { + numMasters++ + } + + // If we know we're not an arbiter and the role isn't an empty value + // no further investigating is needed, skip creation. + if !host.IsArbiter() && host.Role != "" { + continue + } + + // In order to account for situations where we can not determine if the host is a master or an arbiter, + // we check if we have filled our required master count and if the host role is empty. 
+ // In this scenario we increment and continue until the master count is satisfied before generating + // the arbiters since the masters should always be generated before arbiters. + if numMasters < numRequiredMasters && host.Role == "" { + numMasters++ + continue + } + + if numArbiters < numRequiredArbiters { + secret, bmc := createSecret(host) + if secret != nil { + settings.Secrets = append(settings.Secrets, *secret) + } + newHost := createBaremetalHost(host, bmc) + + if host.NetworkConfig != nil { + networkConfigSecret, err := createNetworkConfigSecret(host) + if err != nil { + return nil, err + } + settings.NetworkConfigSecrets = append(settings.NetworkConfigSecrets, *networkConfigSecret) + newHost.Spec.PreprovisioningNetworkDataName = networkConfigSecret.Name + } + + // Setting CustomDeploy early ensures that the + // corresponding Ironic node gets correctly configured + // from the beginning. + newHost.Spec.CustomDeploy = &baremetalhost.CustomDeploy{ + Method: "install_coreos", + } + + newHost.ObjectMeta.Labels = map[string]string{ + "installer.openshift.io/role": "control-plane", + } + + // Link the new host to the currently available machine + machine := machines[numArbiters] + newHost.Spec.ConsumerRef = &corev1.ObjectReference{ + APIVersion: machine.TypeMeta.APIVersion, + Kind: machine.TypeMeta.Kind, + Namespace: machine.ObjectMeta.Namespace, + Name: machine.ObjectMeta.Name, + } + newHost.Spec.Online = true + + // userDataSecret carries a reference to the arbiter ignition file + newHost.Spec.UserData = &corev1.SecretReference{Name: userDataSecret} + numArbiters++ + settings.Hosts = append(settings.Hosts, newHost) + } + } + + return settings, nil +} + +// replicaCount Given a machine pool, safely determine the replica count. 
+func replicaCount(pool *types.MachinePool) int { + if pool == nil || pool.Replicas == nil { + return 0 + } + return int(*pool.Replicas) +} diff --git a/pkg/asset/machines/baremetal/hosts_test.go b/pkg/asset/machines/baremetal/hosts_test.go index fa05a2c0f24..b27ef2ad1ba 100644 --- a/pkg/asset/machines/baremetal/hosts_test.go +++ b/pkg/asset/machines/baremetal/hosts_test.go @@ -32,6 +32,7 @@ routes: testCases := []struct { Scenario string Machines []machineapi.Machine + ArbiterMachines []machineapi.Machine Config *types.InstallConfig ExpectedSecrets []corev1.Secret ExpectedHosts []baremetalhost.BareMetalHost @@ -265,12 +266,202 @@ routes: host("master-2").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret").consumerRef("machine-2").customDeploy(), host("worker-0").annotation("baremetalhost.metal3.io/paused", "")).build(), }, + { + Scenario: "3-hosts-2-masters-no-arbiter-render", + Machines: machines( + machine("machine-0"), + machine("machine-1"), + machine("machine-2")), + Config: configHosts( + hostType("master-0").bmc("usr0", "pwd0").role("master"), + hostType("master-1").bmc("usr1", "pwd1").role("master"), + hostType("arbiter-0").bmc("usr2", "pwd2").role("arbiter")), + + ExpectedSetting: settings(). + secrets( + secret("master-0-bmc-secret").creds("usr0", "pwd0"), + secret("master-1-bmc-secret").creds("usr1", "pwd1"), + ). 
+ hosts( + host("master-0").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret").consumerRef("machine-0").customDeploy(), + host("master-1").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret").consumerRef("machine-1").customDeploy()).build(), + }, + { + Scenario: "3-hosts-2-masters-1-arbiter", + Machines: machines( + machine("machine-0"), + machine("machine-1")), + ArbiterMachines: machines( + machine("machine-2")), + Config: config().withArbiter(1).hosts( + hostType("master-0").bmc("usr0", "pwd0").role("master"), + hostType("master-1").bmc("usr1", "pwd1").role("master"), + hostType("arbiter-0").bmc("usr2", "pwd2").role("arbiter")). + build(), + + ExpectedSetting: settings(). + secrets( + secret("master-0-bmc-secret").creds("usr0", "pwd0"), + secret("master-1-bmc-secret").creds("usr1", "pwd1"), + secret("arbiter-0-bmc-secret").creds("usr2", "pwd2"), + ). + hosts( + host("master-0").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret").consumerRef("machine-0").customDeploy(), + host("master-1").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret").consumerRef("machine-1").customDeploy(), + host("arbiter-0").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret-arbiter").consumerRef("machine-2").customDeploy(), + ).build(), + }, + { + Scenario: "3-hosts-2-masters-1-arbiter-norole", + Machines: machines( + machine("machine-0"), + machine("machine-1")), + ArbiterMachines: machines( + machine("machine-2")), + Config: config().withArbiter(1).withControlPlane(2).hosts( + hostType("master-0").bmc("usr0", "pwd0"), + hostType("master-1").bmc("usr1", "pwd1"), + hostType("arbiter-0").bmc("usr2", "pwd2")). + build(), + + ExpectedSetting: settings(). 
+ secrets( + secret("master-0-bmc-secret").creds("usr0", "pwd0"), + secret("master-1-bmc-secret").creds("usr1", "pwd1"), + secret("arbiter-0-bmc-secret").creds("usr2", "pwd2"), + ). + hosts( + host("master-0").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret").consumerRef("machine-0").customDeploy(), + host("master-1").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret").consumerRef("machine-1").customDeploy(), + host("arbiter-0").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret-arbiter").consumerRef("machine-2").customDeploy(), + ).build(), + }, + { + Scenario: "3-hosts-2-masters-1-arbiter-norole-arbiter", + Machines: machines( + machine("machine-0"), + machine("machine-1")), + ArbiterMachines: machines( + machine("machine-2")), + Config: config().withArbiter(1).withControlPlane(2).hosts( + hostType("master-0").bmc("usr0", "pwd0").role("master"), + hostType("master-1").bmc("usr1", "pwd1").role("master"), + hostType("arbiter-0").bmc("usr2", "pwd2")). + build(), + + ExpectedSetting: settings(). + secrets( + secret("master-0-bmc-secret").creds("usr0", "pwd0"), + secret("master-1-bmc-secret").creds("usr1", "pwd1"), + secret("arbiter-0-bmc-secret").creds("usr2", "pwd2"), + ). 
+ hosts( + host("master-0").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret").consumerRef("machine-0").customDeploy(), + host("master-1").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret").consumerRef("machine-1").customDeploy(), + host("arbiter-0").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret-arbiter").consumerRef("machine-2").customDeploy(), + ).build(), + }, + { + Scenario: "3-hosts-2-masters-1-arbiter-norole-master", + Machines: machines( + machine("machine-0"), + machine("machine-1")), + ArbiterMachines: machines( + machine("machine-2")), + Config: config().withArbiter(1).withControlPlane(2).hosts( + hostType("master-0").bmc("usr0", "pwd0"), + hostType("master-1").bmc("usr1", "pwd1"), + hostType("arbiter-0").bmc("usr2", "pwd2").role("arbiter")). + build(), + + ExpectedSetting: settings(). + secrets( + secret("master-0-bmc-secret").creds("usr0", "pwd0"), + secret("master-1-bmc-secret").creds("usr1", "pwd1"), + secret("arbiter-0-bmc-secret").creds("usr2", "pwd2"), + ). 
+ hosts( + host("master-0").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret").consumerRef("machine-0").customDeploy(), + host("master-1").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret").consumerRef("machine-1").customDeploy(), + host("arbiter-0").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret-arbiter").consumerRef("machine-2").customDeploy(), + ).build(), + }, + { + Scenario: "5-hosts-3-masters-2-arbiter-mixed", + Machines: machines( + machine("machine-0"), + machine("machine-1"), + machine("machine-2")), + ArbiterMachines: machines( + machine("machine-3"), + machine("machine-4")), + Config: config().withArbiter(2).withControlPlane(3).hosts( + hostType("master-0").bmc("usr0", "pwd0").role("master"), + hostType("arbiter-0").bmc("usr3", "pwd3").role("arbiter"), + hostType("master-1").bmc("usr1", "pwd1"), + hostType("master-2").bmc("usr2", "pwd2"), + hostType("arbiter-1").bmc("usr4", "pwd4")). + build(), + + ExpectedSetting: settings(). + secrets( + secret("master-0-bmc-secret").creds("usr0", "pwd0"), + secret("master-1-bmc-secret").creds("usr1", "pwd1"), + secret("master-2-bmc-secret").creds("usr2", "pwd2"), + secret("arbiter-0-bmc-secret").creds("usr3", "pwd3"), + secret("arbiter-1-bmc-secret").creds("usr4", "pwd4"), + ). 
+ hosts( + host("master-0").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret").consumerRef("machine-0").customDeploy(), + host("master-1").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret").consumerRef("machine-1").customDeploy(), + host("master-2").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret").consumerRef("machine-2").customDeploy(), + host("arbiter-0").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret-arbiter").consumerRef("machine-3").customDeploy(), + host("arbiter-1").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret-arbiter").consumerRef("machine-4").customDeploy(), + ).build(), + }, + { + Scenario: "4-hosts-2-masters-1-arbiter-1-worker-norole", + Machines: machines( + machine("machine-0"), + machine("machine-1")), + ArbiterMachines: machines( + machine("machine-2")), + Config: config().withArbiter(1).withControlPlane(2).hosts( + hostType("master-0").bmc("usr0", "pwd0"), + hostType("master-1").bmc("usr1", "pwd1"), + hostType("arbiter-0").bmc("usr2", "pwd2"), + hostType("worker-0").bmc("usr3", "pwd3")). + build(), + + ExpectedSetting: settings(). + secrets( + secret("master-0-bmc-secret").creds("usr0", "pwd0"), + secret("master-1-bmc-secret").creds("usr1", "pwd1"), + secret("worker-0-bmc-secret").creds("usr3", "pwd3"), + secret("arbiter-0-bmc-secret").creds("usr2", "pwd2"), + ). 
+ hosts( + host("master-0").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret").consumerRef("machine-0").customDeploy(), + host("master-1").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret").consumerRef("machine-1").customDeploy(), + host("worker-0").annotation("baremetalhost.metal3.io/paused", ""), + host("arbiter-0").label("installer.openshift.io/role", "control-plane").userDataRef("user-data-secret-arbiter").consumerRef("machine-2").customDeploy(), + ).build(), + }, } for _, tc := range testCases { t.Run(tc.Scenario, func(t *testing.T) { settings, err := Hosts(tc.Config, tc.Machines, "user-data-secret") + arbiterSettings, arbiterErr := ArbiterHosts(tc.Config, tc.ArbiterMachines, "user-data-secret-arbiter") + if tc.ExpectedError == "" { + assert.Nil(t, arbiterErr) + } + if settings != nil && arbiterSettings != nil { + settings.Hosts = append(settings.Hosts, arbiterSettings.Hosts...) + settings.Secrets = append(settings.Secrets, arbiterSettings.Secrets...) 
+ } + if tc.ExpectedError != "" { assert.EqualError(t, err, tc.ExpectedError) } @@ -323,6 +514,22 @@ func (ib *installConfigBuilder) hosts(builders ...*hostTypeBuilder) *installConf return ib } +func (ib *installConfigBuilder) withArbiter(count int) *installConfigBuilder { + replicas := int64(count) + ib.Arbiter = &types.MachinePool{ + Replicas: &replicas, + } + return ib +} + +func (ib *installConfigBuilder) withControlPlane(count int) *installConfigBuilder { + replicas := int64(count) + ib.ControlPlane = &types.MachinePool{ + Replicas: &replicas, + } + return ib +} + type hostTypeBuilder struct { baremetaltypes.Host } diff --git a/pkg/asset/manifests/topologies.go b/pkg/asset/manifests/topologies.go index dadd3df6984..323c2c66278 100644 --- a/pkg/asset/manifests/topologies.go +++ b/pkg/asset/manifests/topologies.go @@ -29,6 +29,11 @@ func determineTopologies(installConfig *types.InstallConfig) (controlPlaneTopolo infrastructureTopology = configv1.HighlyAvailableTopologyMode } + if installConfig.Arbiter != nil { + controlPlaneTopology = configv1.HighlyAvailableArbiterMode + infrastructureTopology = configv1.HighlyAvailableTopologyMode + } + return controlPlaneTopology, infrastructureTopology } diff --git a/pkg/asset/store/assetcreate_test.go b/pkg/asset/store/assetcreate_test.go index 6f77ee2e949..c823f36283c 100644 --- a/pkg/asset/store/assetcreate_test.go +++ b/pkg/asset/store/assetcreate_test.go @@ -116,6 +116,8 @@ func TestCreatedAssetsAreNotDirty(t *testing.T) { } emptyAssets := map[string]bool{ + "Arbiter Ignition Config": true, // no files for non arbiter cluster + "Arbiter Machines": true, // no files for the 'none' platform "Master Machines": true, // no files for the 'none' platform "Worker Machines": true, // no files for the 'none' platform "Cluster API Manifests": true, // no files for the 'none' platform and ClusterAPIInstall feature gate not set diff --git a/pkg/asset/targets/targets.go b/pkg/asset/targets/targets.go index 79b685770d7..de6433bd6ab 
100644 --- a/pkg/asset/targets/targets.go +++ b/pkg/asset/targets/targets.go @@ -27,6 +27,7 @@ var ( // Manifests are the manifests targeted assets. Manifests = []asset.WritableAsset{ &machines.Master{}, + &machines.Arbiter{}, &machines.Worker{}, &machines.ClusterAPI{}, &manifests.Manifests{}, @@ -54,6 +55,7 @@ var ( &kubeconfig.AdminClient{}, &password.KubeadminPassword{}, &machine.Master{}, + &machine.Arbiter{}, &machine.Worker{}, &bootstrap.Bootstrap{}, &cluster.Metadata{}, @@ -72,6 +74,7 @@ var ( Cluster = []asset.WritableAsset{ &cluster.Metadata{}, &machine.MasterIgnitionCustomizations{}, + &machine.ArbiterIgnitionCustomizations{}, &machine.WorkerIgnitionCustomizations{}, &tfvars.TerraformVariables{}, &kubeconfig.AdminClient{}, diff --git a/pkg/explain/printer_test.go b/pkg/explain/printer_test.go index bda110fed47..1d57c0d3d88 100644 --- a/pkg/explain/printer_test.go +++ b/pkg/explain/printer_test.go @@ -37,6 +37,10 @@ Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + arbiter + Arbiter is the configuration for the machines that comprise the +arbiter nodes. + baseDomain -required- BaseDomain is the base domain to which the cluster should belong. diff --git a/pkg/types/baremetal/platform.go b/pkg/types/baremetal/platform.go index 14f2387d110..c79c1c5c00b 100644 --- a/pkg/types/baremetal/platform.go +++ b/pkg/types/baremetal/platform.go @@ -29,8 +29,9 @@ const ( ) const ( - masterRole string = "master" - workerRole string = "worker" + masterRole string = "master" + arbiterRole string = "arbiter" + workerRole string = "worker" ) // Host stores all the configuration data for a baremetal host. @@ -50,12 +51,17 @@ func (h *Host) IsMaster() bool { return h.Role == masterRole } +// IsArbiter checks if the current host is an arbiter. 
+func (h *Host) IsArbiter() bool { + return h.Role == arbiterRole +} + // IsWorker checks if the current host is a worker func (h *Host) IsWorker() bool { return h.Role == workerRole } -var sortIndex = map[string]int{masterRole: -1, workerRole: 0, "": 1} +var sortIndex = map[string]int{masterRole: -1, arbiterRole: 0, workerRole: 1, "": 2} // CompareByRole allows to compare two hosts by the Role func (h *Host) CompareByRole(k *Host) bool { diff --git a/pkg/types/baremetal/validation/platform.go b/pkg/types/baremetal/validation/platform.go index 189d20552e0..0400608fc5e 100644 --- a/pkg/types/baremetal/validation/platform.go +++ b/pkg/types/baremetal/validation/platform.go @@ -311,18 +311,28 @@ func validateHostsCount(hosts []*baremetal.Host, installConfig *types.InstallCon } } + numRequiredArbiters := int64(0) + if installConfig.Arbiter != nil && installConfig.Arbiter.Replicas != nil { + numRequiredArbiters += *installConfig.Arbiter.Replicas + } + numMasters := int64(0) + numArbiters := int64(0) numWorkers := int64(0) for _, h := range hosts { if h.IsMaster() { numMasters++ + } else if h.IsArbiter() { + numArbiters++ } else if h.IsWorker() { numWorkers++ } else { logrus.Warn(fmt.Sprintf("Host %s hasn't any role configured", h.Name)) if numMasters < numRequiredMasters { numMasters++ + } else if numArbiters < numRequiredArbiters { + numArbiters++ } else if numWorkers < numRequiredWorkers { numWorkers++ } @@ -333,6 +343,10 @@ func validateHostsCount(hosts []*baremetal.Host, installConfig *types.InstallCon return fmt.Errorf("not enough hosts found (%v) to support all the configured ControlPlane replicas (%v)", numMasters, numRequiredMasters) } + if numArbiters < numRequiredArbiters { + return fmt.Errorf("not enough hosts found (%v) to support all the configured Arbiter replicas (%v)", numArbiters, numRequiredArbiters) + } + if numWorkers < numRequiredWorkers { return fmt.Errorf("not enough hosts found (%v) to support all the configured Compute replicas (%v)", 
numWorkers, numRequiredWorkers) } diff --git a/pkg/types/installconfig.go b/pkg/types/installconfig.go index 2291c42bfcc..4dc6b7631c0 100644 --- a/pkg/types/installconfig.go +++ b/pkg/types/installconfig.go @@ -125,6 +125,11 @@ type InstallConfig struct { // +optional ControlPlane *MachinePool `json:"controlPlane,omitempty"` + // Arbiter is the configuration for the machines that comprise the + // arbiter nodes. + // +optional + Arbiter *MachinePool `json:"arbiter,omitempty"` + // Compute is the configuration for the machines that comprise the // compute nodes. // +optional @@ -247,6 +252,13 @@ func (c *InstallConfig) IsSingleNodeOpenShift() bool { return c.BootstrapInPlace != nil } +// IsArbiterEnabled returns if arbiter is enabled based off of the install-config arbiter machine pool. +func (c *InstallConfig) IsArbiterEnabled() bool { + return c.Arbiter != nil && + c.Arbiter.Replicas != nil && + *c.Arbiter.Replicas > 0 +} + // CPUPartitioningMode defines how the nodes should be setup for partitioning the CPU Sets. // +kubebuilder:validation:Enum=None;AllNodes type CPUPartitioningMode string diff --git a/pkg/types/machinepools.go b/pkg/types/machinepools.go index f98f76aa0d7..2283ba0b1d5 100644 --- a/pkg/types/machinepools.go +++ b/pkg/types/machinepools.go @@ -20,6 +20,8 @@ const ( MachinePoolEdgeRoleName = "edge" // MachinePoolControlPlaneRoleName name associated with the control plane machinepool. MachinePoolControlPlaneRoleName = "master" + // MachinePoolArbiterRoleName name associated with the control plane machinepool for smaller sized limited nodes. + MachinePoolArbiterRoleName = "arbiter" ) // HyperthreadingMode is the mode of hyperthreading for a machine. @@ -53,6 +55,7 @@ type MachinePool struct { // Name is the name of the machine pool. // For the control plane machine pool, the name will always be "master". // For the compute machine pools, the only valid name is "worker". + // For the arbiter machine pools, the only valid name is "arbiter". 
Name string `json:"name"` // Replicas is the machine count for the machine pool. diff --git a/pkg/types/validation/installconfig.go b/pkg/types/validation/installconfig.go index 7593bb0bdd8..54ca4a25317 100644 --- a/pkg/types/validation/installconfig.go +++ b/pkg/types/validation/installconfig.go @@ -126,6 +126,13 @@ func ValidateInstallConfig(c *types.InstallConfig, usingAgentMethod bool) field. allErrs = append(allErrs, field.Required(field.NewPath("controlPlane"), "controlPlane is required")) } + if c.Arbiter != nil { + if c.EnabledFeatureGates().Enabled(features.FeatureGateHighlyAvailableArbiter) { + allErrs = append(allErrs, validateArbiter(&c.Platform, c.Arbiter, c.ControlPlane, field.NewPath("arbiter"))...) + } else { + allErrs = append(allErrs, field.Forbidden(field.NewPath("arbiter"), fmt.Sprintf("%s feature must be enabled in order to use arbiter cluster deployment", features.FeatureGateHighlyAvailableArbiter))) + } + } multiArchEnabled := types.MultiArchFeatureGateEnabled(c.Platform.Name(), c.EnabledFeatureGates()) allErrs = append(allErrs, validateCompute(&c.Platform, c.ControlPlane, c.Compute, field.NewPath("compute"), multiArchEnabled)...) 
@@ -749,6 +756,24 @@ func validateControlPlane(platform *types.Platform, pool *types.MachinePool, fld return allErrs } +func validateArbiter(platform *types.Platform, arbiterPool, masterPool *types.MachinePool, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if platform != nil && platform.BareMetal == nil { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("platform"), platform.Name(), []string{baremetal.Name})) + } + if arbiterPool.Name != types.MachinePoolArbiterRoleName { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("name"), arbiterPool.Name, []string{types.MachinePoolArbiterRoleName})) + } + if arbiterPool.Replicas != nil && *arbiterPool.Replicas == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("replicas"), arbiterPool.Replicas, "number of arbiter replicas must be positive")) + } + if masterPool == nil || masterPool.Replicas == nil || *masterPool.Replicas < 2 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("replicas"), masterPool.Replicas, "number of controlPlane replicas must be at least 2 for arbiter deployments")) + } + allErrs = append(allErrs, ValidateMachinePool(platform, arbiterPool, fldPath)...) + return allErrs +} + +func validateComputeEdge(platform *types.Platform, pName string, fldPath *field.Path, pfld *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if platform.Name() != aws.Name {