From 8dfac7acf50063ab5233cc1d81fd05841b393ed6 Mon Sep 17 00:00:00 2001
From: Eric Fried
Date: Tue, 22 Oct 2024 17:33:47 -0500
Subject: [PATCH 1/2] Support patching installer manifests

With this change, you can use the new API field
`ClusterDeployment.Spec.Provisioning.CustomizationRef` to point to a
ClusterDeploymentCustomization (hereinafter "CDC") object in the same
namespace as the ClusterDeployment (CD).

ClusterDeploymentCustomizations:
CDC accepts a new subfield, `Spec.InstallerManifestPatches`, a list whose
entries consist of:
- `Glob`: a string representing a file glob, relative to the installer
  working directory, matching one or more manifest files.
- `Patches`: a list of `PatchEntity` representing RFC6902 JSON patches to
  apply to the matched manifest(s).

Also, I got really annoyed having to type out
`clusterdeploymentcustomizations` on the CLI, so I added the short name
`cdc` to the schema.

ClusterPools:
CDC was already being used by ClusterPool-owned CDs to allow patching the
install-config generated from the template referred to by
`ClusterPool.Spec.InstallConfigSecretTemplateRef`. With this change,
ClusterPool-owned CDs can start using manifest patches in two ways (not
mutually exclusive):
- Patches specific to the CD can be included in the
  `InstallerManifestPatches` field of the existing Inventory CDCs.
- Patches applicable to all CDs in the pool can be provided by a CDC
  referenced via the new `ClusterPool.Spec.CustomizationRef` field.

HIVE-1793
---
 apis/hive/v1/clusterdeployment_types.go | 7 +
 .../clusterdeploymentcustomization_types.go | 23 ++-
 apis/hive/v1/clusterpool_types.go | 7 +
 apis/hive/v1/zz_generated.deepcopy.go | 55 ++++++
 ...ft.io_clusterdeploymentcustomizations.yaml | 68 ++++++++
 .../hive.openshift.io_clusterdeployments.yaml | 18 ++
 .../crds/hive.openshift.io_clusterpools.yaml | 18 ++
 docs/enhancements/clusterpool-inventory.md | 53 +++---
 hack/app-sre/saas-template.yaml | 134 +++++++++++++++
 .../clusterdeployment_controller.go | 58 +++++++
 .../clusterdeployment_controller_test.go | 161 ++++++++++++++++++
 .../clusterpool/clusterpool_controller.go | 77 ++++++++-
 .../clusterpool_controller_test.go | 127 +++++++++++---
 pkg/controller/clusterpool/collections.go | 27 ++-
 pkg/controller/utils/clusterdeployment.go | 62 +++++++
 pkg/controller/utils/sa.go | 5 +
 pkg/installmanager/installmanager.go | 69 +++++++-
 .../clusterdeploymentcustomization.go | 19 ++-
 pkg/test/clusterpool/clusterpool.go | 14 +-
 .../apis/hive/v1/clusterdeployment_types.go | 7 +
 .../clusterdeploymentcustomization_types.go | 23 ++-
 .../hive/apis/hive/v1/clusterpool_types.go | 7 +
 .../apis/hive/v1/zz_generated.deepcopy.go | 55 ++++++
 23 files changed, 1013 insertions(+), 81 deletions(-)

diff --git a/apis/hive/v1/clusterdeployment_types.go b/apis/hive/v1/clusterdeployment_types.go
index 8f143b18399..0d5eb328f72 100644
--- a/apis/hive/v1/clusterdeployment_types.go
+++ b/apis/hive/v1/clusterdeployment_types.go
@@ -218,6 +218,13 @@ type Provisioning struct {
 	// +optional
 	InstallConfigSecretRef *corev1.LocalObjectReference `json:"installConfigSecretRef,omitempty"`
 
+	// CustomizationRef is a reference to a ClusterDeploymentCustomization containing
+	// InstallerManifestPatches to be applied to the manifests generated by openshift-install prior
+	// to starting the installation. (InstallConfigPatches will be ignored -- those changes should
+	// be made directly to the install-config.yaml referenced by InstallConfigSecretRef.)
+ // +optional + CustomizationRef *corev1.LocalObjectReference `json:"customizationRef,omitempty"` + // ReleaseImage is the image containing metadata for all components that run in the cluster, and // is the primary and best way to specify what specific version of OpenShift you wish to install. ReleaseImage string `json:"releaseImage,omitempty"` diff --git a/apis/hive/v1/clusterdeploymentcustomization_types.go b/apis/hive/v1/clusterdeploymentcustomization_types.go index 5cf721f7c2b..1692d8089bc 100644 --- a/apis/hive/v1/clusterdeploymentcustomization_types.go +++ b/apis/hive/v1/clusterdeploymentcustomization_types.go @@ -29,7 +29,7 @@ const ( // ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations API. // +kubebuilder:subresource:status // +k8s:openapi-gen=true -// +kubebuilder:resource:scope=Namespaced +// +kubebuilder:resource:shortName=cdc,scope=Namespaced type ClusterDeploymentCustomization struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -42,6 +42,27 @@ type ClusterDeploymentCustomization struct { type ClusterDeploymentCustomizationSpec struct { // InstallConfigPatches is a list of patches to be applied to the install-config. InstallConfigPatches []PatchEntity `json:"installConfigPatches,omitempty"` + + // InstallerManifestPatches is a list of patches to be applied to installer-generated manifests. + InstallerManifestPatches []InstallerManifestPatch `json:"installerManifestPatches,omitempty"` +} + +type InstallerManifestPatch struct { + // ManifestSelector identifies one or more manifests to patch + ManifestSelector ManifestSelector `json:"manifestSelector"` + + // Patches is a list of RFC6902 patches to apply to manifests identified by manifestSelector. + Patches []PatchEntity `json:"patches"` +} + +type ManifestSelector struct { + // Glob is a file glob (per https://pkg.go.dev/path/filepath#Glob) identifying one or more + // manifests. Paths should be relative to the installer's working directory. Examples: + // - openshift/99_role-cloud-creds-secret-reader.yaml + // - openshift/99_openshift-cluster-api_worker-machineset-*.yaml + // - */*secret* + // It is an error if a glob matches zero manifests. + Glob string `json:"glob"` } // PatchEntity represents a json patch (RFC 6902) to be applied diff --git a/apis/hive/v1/clusterpool_types.go b/apis/hive/v1/clusterpool_types.go index 8b1cdbaca8e..eca37072a75 100644 --- a/apis/hive/v1/clusterpool_types.go +++ b/apis/hive/v1/clusterpool_types.go @@ -102,6 +102,13 @@ type ClusterPoolSpec struct { // additional features of the installer. // +optional InstallerEnv []corev1.EnvVar `json:"installerEnv,omitempty"` + + // CustomizationRef refers to a ClusterDeploymentCustomization object whose InstallerManifestPatches should + // be applied to *all* ClusterDeployments created by this ClusterPool. This is in addition to any CDC from + // Inventory. The CDC must exist in the ClusterPool's namespace. It will be copied to the namespace of each + // ClusterDeployment generated by the ClusterPool. 
+ // +optional + CustomizationRef *corev1.LocalObjectReference `json:"customizationRef,omitempty"` } type HibernationConfig struct { diff --git a/apis/hive/v1/zz_generated.deepcopy.go b/apis/hive/v1/zz_generated.deepcopy.go index d5fb7d8adea..146ee2b9fa9 100644 --- a/apis/hive/v1/zz_generated.deepcopy.go +++ b/apis/hive/v1/zz_generated.deepcopy.go @@ -742,6 +742,13 @@ func (in *ClusterDeploymentCustomizationSpec) DeepCopyInto(out *ClusterDeploymen *out = make([]PatchEntity, len(*in)) copy(*out, *in) } + if in.InstallerManifestPatches != nil { + in, out := &in.InstallerManifestPatches, &out.InstallerManifestPatches + *out = make([]InstallerManifestPatch, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -1578,6 +1585,11 @@ func (in *ClusterPoolSpec) DeepCopyInto(out *ClusterPoolSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.CustomizationRef != nil { + in, out := &in.CustomizationRef, &out.CustomizationRef + *out = new(corev1.LocalObjectReference) + **out = **in + } return } @@ -2778,6 +2790,28 @@ func (in *IdentityProviderStatus) DeepCopy() *IdentityProviderStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstallerManifestPatch) DeepCopyInto(out *InstallerManifestPatch) { + *out = *in + out.ManifestSelector = in.ManifestSelector + if in.Patches != nil { + in, out := &in.Patches, &out.Patches + *out = make([]PatchEntity, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallerManifestPatch. +func (in *InstallerManifestPatch) DeepCopy() *InstallerManifestPatch { + if in == nil { + return nil + } + out := new(InstallerManifestPatch) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InventoryEntry) DeepCopyInto(out *InventoryEntry) { *out = *in @@ -3261,6 +3295,22 @@ func (in *ManageDNSGCPConfig) DeepCopy() *ManageDNSGCPConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManifestSelector) DeepCopyInto(out *ManifestSelector) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManifestSelector. +func (in *ManifestSelector) DeepCopy() *ManifestSelector { + if in == nil { + return nil + } + out := new(ManifestSelector) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *OpenStackClusterDeprovision) DeepCopyInto(out *OpenStackClusterDeprovision) { *out = *in @@ -3442,6 +3492,11 @@ func (in *Provisioning) DeepCopyInto(out *Provisioning) { *out = new(corev1.LocalObjectReference) **out = **in } + if in.CustomizationRef != nil { + in, out := &in.CustomizationRef, &out.CustomizationRef + *out = new(corev1.LocalObjectReference) + **out = **in + } if in.ImageSetRef != nil { in, out := &in.ImageSetRef, &out.ImageSetRef *out = new(ClusterImageSetReference) diff --git a/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml index 8c47276d5ca..b0ed5b1f78b 100644 --- a/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml +++ b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml @@ -10,6 +10,8 @@ spec: kind: ClusterDeploymentCustomization listKind: ClusterDeploymentCustomizationList plural: clusterdeploymentcustomizations + shortNames: + - cdc singular: clusterdeploymentcustomization scope: Namespaced versions: @@ -79,6 +81,72 @@ spec: - path type: object type: array + installerManifestPatches: + description: InstallerManifestPatches is a list of patches to be applied + to installer-generated manifests. + items: + properties: + manifestSelector: + description: ManifestSelector identifies one or more manifests + to patch + properties: + glob: + description: |- + Glob is a file glob (per https://pkg.go.dev/path/filepath#Glob) identifying one or more + manifests. Paths should be relative to the installer's working directory. Examples: + - openshift/99_role-cloud-creds-secret-reader.yaml + - openshift/99_openshift-cluster-api_worker-machineset-*.yaml + - */*secret* + It is an error if a glob matches zero manifests. + type: string + required: + - glob + type: object + patches: + description: Patches is a list of RFC6902 patches to apply to + manifests identified by manifestSelector. + items: + description: PatchEntity represents a json patch (RFC 6902) + to be applied + properties: + from: + description: From is the json path to copy or move the + value from + type: string + op: + description: Op is the operation to perform. + enum: + - add + - remove + - replace + - move + - copy + - test + type: string + path: + description: Path is the json path to the value to be + modified + type: string + value: + description: |- + Value is the *string* value to be used in the operation. For more complex values, use + ValueJSON. + type: string + valueJSON: + description: |- + ValueJSON is a string representing a JSON object to be used in the operation. As such, + internal quotes must be escaped. If nonempty, Value is ignored. + type: string + required: + - op + - path + type: object + type: array + required: + - manifestSelector + - patches + type: object + type: array type: object status: description: ClusterDeploymentCustomizationStatus defines the observed diff --git a/config/crds/hive.openshift.io_clusterdeployments.yaml b/config/crds/hive.openshift.io_clusterdeployments.yaml index 361543a7d8d..3a363f78000 100644 --- a/config/crds/hive.openshift.io_clusterdeployments.yaml +++ b/config/crds/hive.openshift.io_clusterdeployments.yaml @@ -1017,6 +1017,24 @@ spec: Provisioning contains settings used only for initial cluster provisioning. May be unset in the case of adopted clusters. 
properties: + customizationRef: + description: |- + CustomizationRef is a reference to a ClusterDeploymentCustomization containing + InstallerManifestPatches to be applied to the manifests generated by openshift-install prior + to starting the installation. (InstallConfigPatches will be ignored -- those changes should + be made directly to the install-config.yaml referenced by InstallConfigSecretRef.) + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic imageSetRef: description: |- ImageSetRef is a reference to a ClusterImageSet. If a value is specified for ReleaseImage, diff --git a/config/crds/hive.openshift.io_clusterpools.yaml b/config/crds/hive.openshift.io_clusterpools.yaml index 0fec2ca6457..0f53a44d228 100644 --- a/config/crds/hive.openshift.io_clusterpools.yaml +++ b/config/crds/hive.openshift.io_clusterpools.yaml @@ -96,6 +96,24 @@ spec: pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ type: string type: object + customizationRef: + description: |- + CustomizationRef refers to a ClusterDeploymentCustomization object whose InstallerManifestPatches should + be applied to *all* ClusterDeployments created by this ClusterPool. This is in addition to any CDC from + Inventory. The CDC must exist in the ClusterPool's namespace. It will be copied to the namespace of each + ClusterDeployment generated by the ClusterPool. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic hibernateAfter: description: |- HibernateAfter will be applied to new ClusterDeployments created for the pool. 
HibernateAfter will transition diff --git a/docs/enhancements/clusterpool-inventory.md b/docs/enhancements/clusterpool-inventory.md index af3e1d0e472..8cebaa519a0 100644 --- a/docs/enhancements/clusterpool-inventory.md +++ b/docs/enhancements/clusterpool-inventory.md @@ -2,28 +2,27 @@ [HIVE-1367](https://issues.redhat.com/browse/HIVE-1367) -- [Clusterpool for on-prem cloud providers](#clusterpool-for-on-prem-cloud-providers) - - [Summary](#summary) - - [Problem Statement](#problem-statement) - - [Proposal](#proposal) - - [Summary](#summary-1) - - [`ClusterPool.Spec.Inventory`](#clusterpoolspecinventory) - - [How To Use](#how-to-use) - - [Validation](#validation) - - [`Size` and `MaxSize`](#size-and-maxsize) - - [Pool Version](#pool-version) - - [Handling Inventory Updates](#handling-inventory-updates) - - [Adding An Inventory](#adding-an-inventory) - - [Adding An Entry to the Inventory](#adding-an-entry-to-the-inventory) - - [Removing An Entry from the Inventory](#removing-an-entry-from-the-inventory) - - [Deleting The Inventory](#deleting-the-inventory) - - [Maintaining the lease of the ClusterDeploymentCustomization](#maintaining-the-lease-of-the-clusterdeploymentcustomization) - - [Fairness](#fairness) - - [Future](#future) - - [Alternatives](#alternatives) - - [Bespoke Inventory Definition](#bespoke-inventory-definition) - - [Full Spec](#full-spec) - - [Hooks](#hooks) +- [Summary](#summary) +- [Problem Statement](#problem-statement) +- [Proposal](#proposal) + - [Summary](#summary-1) + - [`ClusterPool.Spec.Inventory`](#clusterpoolspecinventory) + - [How To Use](#how-to-use) + - [Validation](#validation) + - [`Size` and `MaxSize`](#size-and-maxsize) + - [Pool Version](#pool-version) + - [Handling Inventory Updates](#handling-inventory-updates) + - [Adding An Inventory](#adding-an-inventory) + - [Adding An Entry to the Inventory](#adding-an-entry-to-the-inventory) + - [Removing An Entry from the Inventory](#removing-an-entry-from-the-inventory) + - [Deleting The Inventory](#deleting-the-inventory) + - [Maintaining the lease of the ClusterDeploymentCustomization](#maintaining-the-lease-of-the-clusterdeploymentcustomization) + - [Fairness](#fairness) +- [Future](#future) +- [Alternatives](#alternatives) + - [Bespoke Inventory Definition](#bespoke-inventory-definition) + - [Full Spec](#full-spec) + - [Hooks](#hooks) ## Summary @@ -59,7 +58,7 @@ spec: and ClusterDeploymentCustomization CR will look like ```yaml -apiVersion: v1 +apiVersion: hive.openshift.io/v1 kind: ClusterDeploymentCustomization metadata: name: foo-cluster-deployment-customization @@ -115,10 +114,10 @@ For the VSphere case, this allows the administrator to: - Create a ClusterDeploymentCustomization CR to patch `spec.metadata.name` field of the default install config generated by clusterpool controller. Please refer the section above of a sample CR. The content in `spec.installConfigPatches` field should be as follows ```yaml spec: - installConfigPatches: - - op: replace - path: metadata/name - value: foo + installConfigPatches: + - op: replace + path: metadata/name + value: foo ``` - Add the name of ClusterDeploymentCustomization CR to `clusterPool.spec.inventory.ClusterDeploymentCustomizations` list. 
For ClusterDeploymentCustomization with a name `foo-cluster-deployment-customization` the clusterpool should be configured as follows ```yaml diff --git a/hack/app-sre/saas-template.yaml b/hack/app-sre/saas-template.yaml index 95c02e24966..9f60798bcc0 100644 --- a/hack/app-sre/saas-template.yaml +++ b/hack/app-sre/saas-template.yaml @@ -296,6 +296,8 @@ objects: kind: ClusterDeploymentCustomization listKind: ClusterDeploymentCustomizationList plural: clusterdeploymentcustomizations + shortNames: + - cdc singular: clusterdeploymentcustomization scope: Namespaced versions: @@ -376,6 +378,81 @@ objects: - path type: object type: array + installerManifestPatches: + description: InstallerManifestPatches is a list of patches to be + applied to installer-generated manifests. + items: + properties: + manifestSelector: + description: ManifestSelector identifies one or more manifests + to patch + properties: + glob: + description: 'Glob is a file glob (per https://pkg.go.dev/path/filepath#Glob) + identifying one or more + + manifests. Paths should be relative to the installer''s + working directory. Examples: + + - openshift/99_role-cloud-creds-secret-reader.yaml + + - openshift/99_openshift-cluster-api_worker-machineset-*.yaml + + - */*secret* + + It is an error if a glob matches zero manifests.' + type: string + required: + - glob + type: object + patches: + description: Patches is a list of RFC6902 patches to apply + to manifests identified by manifestSelector. + items: + description: PatchEntity represents a json patch (RFC 6902) + to be applied + properties: + from: + description: From is the json path to copy or move the + value from + type: string + op: + description: Op is the operation to perform. + enum: + - add + - remove + - replace + - move + - copy + - test + type: string + path: + description: Path is the json path to the value to be + modified + type: string + value: + description: 'Value is the *string* value to be used + in the operation. For more complex values, use + + ValueJSON.' + type: string + valueJSON: + description: 'ValueJSON is a string representing a JSON + object to be used in the operation. As such, + + internal quotes must be escaped. If nonempty, Value + is ignored.' + type: string + required: + - op + - path + type: object + type: array + required: + - manifestSelector + - patches + type: object + type: array type: object status: description: ClusterDeploymentCustomizationStatus defines the observed @@ -1762,6 +1839,35 @@ objects: May be unset in the case of adopted clusters.' properties: + customizationRef: + description: 'CustomizationRef is a reference to a ClusterDeploymentCustomization + containing + + InstallerManifestPatches to be applied to the manifests generated + by openshift-install prior + + to starting the installation. (InstallConfigPatches will be + ignored -- those changes should + + be made directly to the install-config.yaml referenced by + InstallConfigSecretRef.)' + properties: + name: + default: '' + description: 'Name of the referent. + + This field is effectively required, but due to backwards + compatibility is + + allowed to be empty. Instances of this type with an empty + value here are + + almost certainly wrong. + + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + type: object + x-kubernetes-map-type: atomic imageSetRef: description: 'ImageSetRef is a reference to a ClusterImageSet. 
If a value is specified for ReleaseImage, @@ -2968,6 +3074,34 @@ objects: pattern: "^([0-9]+(\\.[0-9]+)?(ns|us|\xB5s|ms|s|m|h))+$" type: string type: object + customizationRef: + description: 'CustomizationRef refers to a ClusterDeploymentCustomization + object whose InstallerManifestPatches should + + be applied to *all* ClusterDeployments created by this ClusterPool. + This is in addition to any CDC from + + Inventory. The CDC must exist in the ClusterPool''s namespace. + It will be copied to the namespace of each + + ClusterDeployment generated by the ClusterPool.' + properties: + name: + default: '' + description: 'Name of the referent. + + This field is effectively required, but due to backwards compatibility + is + + allowed to be empty. Instances of this type with an empty + value here are + + almost certainly wrong. + + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + type: object + x-kubernetes-map-type: atomic hibernateAfter: description: 'HibernateAfter will be applied to new ClusterDeployments created for the pool. HibernateAfter will transition diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller.go b/pkg/controller/clusterdeployment/clusterdeployment_controller.go index 9ec0b42af09..7c203d20d36 100644 --- a/pkg/controller/clusterdeployment/clusterdeployment_controller.go +++ b/pkg/controller/clusterdeployment/clusterdeployment_controller.go @@ -95,6 +95,8 @@ const ( clusterImageSetNotFoundReason = "ClusterImageSetNotFound" clusterImageSetFoundReason = "ClusterImageSetFound" + custRefNotFoundReason = "CustomizationRefNotAvailable" + defaultDNSNotReadyTimeout = 10 * time.Minute dnsNotReadyReason = "DNSNotReady" dnsNotReadyTimedoutReason = "DNSNotReadyTimedOut" @@ -242,6 +244,43 @@ func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconci return errors.Wrap(err, "cannot start watch on ClusterSyncs") } + // Watch for changes to ClusterDeploymentCustomizations. This covers the situation where + // CustomizationRef is in play, but the referenced CDC is created "after" the CD. We'll list + // all the CDs in the same namespace as the CDC and return Request{}s for those with a + // CustomizationRef pointing to this CDC (which may be none). + cl := r.(*ReconcileClusterDeployment).Client + if err := c.Watch( + source.Kind(mgr.GetCache(), &hivev1.ClusterDeploymentCustomization{}, + handler.TypedEnqueueRequestsFromMapFunc( + func(ctx context.Context, cdc *hivev1.ClusterDeploymentCustomization) []reconcile.Request { + cds := &hivev1.ClusterDeploymentList{} + if err := cl.List(context.Background(), cds, client.InNamespace(cdc.Namespace)); err != nil { + logger. + WithError(err). + WithField("namespace", cdc.Namespace). 
+ Log(controllerutils.LogLevel(err), "failed to list ClusterDeployments in namespace") + return nil + } + matchingCDs := []reconcile.Request{} + for _, cd := range cds.Items { + if (cd.Spec.Provisioning != nil && + cd.Spec.Provisioning.CustomizationRef != nil && + cd.Spec.Provisioning.CustomizationRef.Name == cdc.Name) || + (cd.Spec.ClusterPoolRef != nil && + cd.Spec.ClusterPoolRef.CustomizationRef != nil && + cd.Spec.ClusterPoolRef.CustomizationRef.Name == cdc.Name) { + matchingCDs = append( + matchingCDs, + reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&cd)}, + ) + } + } + return matchingCDs + })), + ); err != nil { + return errors.Wrap(err, "cannot start watch on ClusterDeploymentCustomizations") + } + return nil } @@ -817,6 +856,25 @@ func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hi return reconcile.Result{}, nil } + // Preflight check: make sure any referenced customizations exist + if _, err := controllerutils.LoadManifestPatches(r, cd, cdLog); err != nil { + cdLog.Info("CustomizationRef specified, but failed to load ClusterDeploymentCustomization") + conditions, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck( + cd.Status.Conditions, + hivev1.RequirementsMetCondition, + corev1.ConditionFalse, + custRefNotFoundReason, + "Customization reference could not be loaded", + controllerutils.UpdateConditionIfReasonOrMessageChange) + if changed { + cd.Status.Conditions = conditions + // This will trigger another reconcile whether it succeeds (update) or fails (error). + return reconcile.Result{}, r.Status().Update(context.TODO(), cd) + } + // It is important that we requeue, since we may just be waiting for the CDC to exist + return reconcile.Result{}, err + } + // If we made it this far, RequirementsMet condition should be True: // // TODO: when https://github.com/openshift/hive/pull/1413 is implemented diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go b/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go index f8bfe5165e2..1c8d20ad872 100644 --- a/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go +++ b/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go @@ -26,6 +26,7 @@ import ( remoteclientmock "github.com/openshift/hive/pkg/remoteclient/mock" testassert "github.com/openshift/hive/pkg/test/assert" testclusterdeployment "github.com/openshift/hive/pkg/test/clusterdeployment" + testcdc "github.com/openshift/hive/pkg/test/clusterdeploymentcustomization" testclusterdeprovision "github.com/openshift/hive/pkg/test/clusterdeprovision" tcp "github.com/openshift/hive/pkg/test/clusterprovision" testdnszone "github.com/openshift/hive/pkg/test/dnszone" @@ -2219,6 +2220,166 @@ platform: }) }, }, + { + name: "set RequirementsMet condition to False: CustomizationRefNotAvailable (Provisioning)", + existing: []runtime.Object{ + testInstallConfigSecretAWS(), + func() *hivev1.ClusterDeployment { + cd := testClusterDeploymentWithInitializedConditions(testClusterDeployment()) + cd.Spec.Provisioning.CustomizationRef = &corev1.LocalObjectReference{Name: "doesntexist"} + return cd + }(), + testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"), + testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"), + }, + validate: func(c client.Client, t *testing.T) { + cd := getCD(c) + testassert.AssertConditions(t, cd, 
[]hivev1.ClusterDeploymentCondition{ + { + Type: hivev1.RequirementsMetCondition, + Status: corev1.ConditionFalse, + Reason: custRefNotFoundReason, + }, + }) + }, + }, + { + name: "do not set RequirementsMet condition for installed cluster with missing CustomizationRef (Provisioning)", + existing: []runtime.Object{ + testInstallConfigSecretAWS(), + func() *hivev1.ClusterDeployment { + cd := testClusterDeploymentWithInitializedConditions( + testInstalledClusterDeployment(time.Now())) + cd.Spec.Provisioning.CustomizationRef = &corev1.LocalObjectReference{Name: "doesntexist"} + return cd + }(), + testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"), + testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"), + }, + expectErr: true, + validate: func(c client.Client, t *testing.T) { + cd := getCD(c) + testassert.AssertConditions(t, cd, []hivev1.ClusterDeploymentCondition{ + { + Type: hivev1.RequirementsMetCondition, + Status: corev1.ConditionUnknown, + Reason: "Initialized", + }, + }) + }, + }, + { + name: "clear RequirementsMet condition when CustomizationRef vivifies (Provisioning)", + existing: []runtime.Object{ + testInstallConfigSecretAWS(), + func() *hivev1.ClusterDeployment { + cd := testClusterDeploymentWithDefaultConditions(testClusterDeploymentWithInitializedConditions(testClusterDeployment())) + cd.Spec.Provisioning.CustomizationRef = &corev1.LocalObjectReference{Name: "cdc"} + cd.Status.Conditions = addOrUpdateClusterDeploymentCondition(*cd, hivev1.RequirementsMetCondition, + corev1.ConditionFalse, custRefNotFoundReason, "test-message") + return cd + }(), + testcdc.FullBuilder(testNamespace, "cdc", scheme.GetScheme()).Build( + testcdc.WithManifestPatch("openshift/*.yaml", "/metadata/labels/foo", "add", "bar"), + ), + testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"), + testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"), + }, + expectPendingCreation: true, + validate: func(c client.Client, t *testing.T) { + cd := getCD(c) + testassert.AssertConditions(t, cd, []hivev1.ClusterDeploymentCondition{ + { + Type: hivev1.RequirementsMetCondition, + Status: corev1.ConditionTrue, + Reason: "AllRequirementsMet", + }, + }) + }, + }, + { + name: "set RequirementsMet condition to False: CustomizationRefNotAvailable (ClusterPoolRef)", + existing: []runtime.Object{ + testInstallConfigSecretAWS(), + func() *hivev1.ClusterDeployment { + cd := testClusterDeploymentWithInitializedConditions(testClusterDeployment()) + cd.Spec.ClusterPoolRef = &hivev1.ClusterPoolReference{ + CustomizationRef: &corev1.LocalObjectReference{Name: "doesntexist"}, + } + return cd + }(), + testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"), + testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"), + }, + validate: func(c client.Client, t *testing.T) { + cd := getCD(c) + testassert.AssertConditions(t, cd, []hivev1.ClusterDeploymentCondition{ + { + Type: hivev1.RequirementsMetCondition, + Status: corev1.ConditionFalse, + Reason: custRefNotFoundReason, + }, + }) + }, + }, + { + name: "do not set RequirementsMet condition for installed cluster with missing CustomizationRef (ClusterPoolRef)", + existing: []runtime.Object{ + 
testInstallConfigSecretAWS(), + func() *hivev1.ClusterDeployment { + cd := testClusterDeploymentWithInitializedConditions( + testInstalledClusterDeployment(time.Now())) + cd.Spec.ClusterPoolRef = &hivev1.ClusterPoolReference{ + CustomizationRef: &corev1.LocalObjectReference{Name: "doesntexist"}, + } + return cd + }(), + testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"), + testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"), + }, + expectErr: true, + validate: func(c client.Client, t *testing.T) { + cd := getCD(c) + testassert.AssertConditions(t, cd, []hivev1.ClusterDeploymentCondition{ + { + Type: hivev1.RequirementsMetCondition, + Status: corev1.ConditionUnknown, + Reason: "Initialized", + }, + }) + }, + }, + { + name: "clear RequirementsMet condition when CustomizationRef vivifies (ClusterPoolRef)", + existing: []runtime.Object{ + testInstallConfigSecretAWS(), + func() *hivev1.ClusterDeployment { + cd := testClusterDeploymentWithDefaultConditions(testClusterDeploymentWithInitializedConditions(testClusterDeployment())) + cd.Spec.ClusterPoolRef = &hivev1.ClusterPoolReference{ + CustomizationRef: &corev1.LocalObjectReference{Name: "cdc"}, + } + cd.Status.Conditions = addOrUpdateClusterDeploymentCondition(*cd, hivev1.RequirementsMetCondition, + corev1.ConditionFalse, custRefNotFoundReason, "test-message") + return cd + }(), + testcdc.FullBuilder(testNamespace, "cdc", scheme.GetScheme()).Build( + testcdc.WithManifestPatch("openshift/*.yaml", "/metadata/labels/foo", "add", "bar"), + ), + testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"), + testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"), + }, + expectPendingCreation: true, + validate: func(c client.Client, t *testing.T) { + cd := getCD(c) + testassert.AssertConditions(t, cd, []hivev1.ClusterDeploymentCondition{ + { + Type: hivev1.RequirementsMetCondition, + Status: corev1.ConditionTrue, + Reason: "AllRequirementsMet", + }, + }) + }, + }, { name: "clear legacy conditions", existing: []runtime.Object{ diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go index 993e567899c..9f1e141ebc7 100644 --- a/pkg/controller/clusterpool/clusterpool_controller.go +++ b/pkg/controller/clusterpool/clusterpool_controller.go @@ -11,6 +11,7 @@ import ( "github.com/pkg/errors" log "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/runtime" utilerrors "k8s.io/apimachinery/pkg/util/errors" "github.com/davegardnerisme/deephash" @@ -196,7 +197,19 @@ func requestsForCDCResources(c client.Client, logger log.FieldLogger) handler.Ty } var requests []reconcile.Request + addRequest := func(cpl *hivev1.ClusterPool) { + requests = append(requests, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: cpl.Namespace, + Name: cpl.Name, + }, + }) + } for _, cpl := range cpList.Items { + if cpl.Spec.CustomizationRef != nil && cpl.Spec.CustomizationRef.Name == cdc.Name { + addRequest(&cpl) + continue + } if cpl.Spec.Inventory == nil { continue } @@ -204,12 +217,7 @@ func requestsForCDCResources(c client.Client, logger log.FieldLogger) handler.Ty if entry.Name != cdc.Name { continue } - requests = append(requests, reconcile.Request{ - NamespacedName: types.NamespacedName{ - Namespace: cpl.Namespace, - Name: cpl.Name, - }, - 
}) + addRequest(&cpl) break } } @@ -535,6 +543,7 @@ func calculatePoolVersion(clp *hivev1.ClusterPool) string { ba = append(ba, deephash.Hash(clp.Spec.BaseDomain)...) ba = append(ba, deephash.Hash(clp.Spec.ImageSetRef)...) ba = append(ba, deephash.Hash(clp.Spec.InstallConfigSecretTemplateRef)...) + ba = append(ba, deephash.Hash(clp.Spec.CustomizationRef)...) // Inventory changes the behavior of cluster pool, thus it needs to be in the pool version. // But to avoid redployment of clusters if inventory changes, a fixed string is added to pool version. // https://github.com/openshift/hive/blob/master/docs/enhancements/clusterpool-inventory.md#pool-version @@ -686,6 +695,13 @@ func (r *ReconcileClusterPool) addClusters( errs = append(errs, fmt.Errorf("%s: %w", credentialsSecretDependent, err)) } + if clp.Spec.CustomizationRef != nil && clp.Spec.CustomizationRef.Name != "" { + custCDC := clp.Spec.CustomizationRef.Name + if cdcs.nonInventory == nil || cdcs.nonInventory.Name != custCDC { + errs = append(errs, fmt.Errorf("pool-wide customizationRef %s not found", custCDC)) + } + } + dependenciesError := utilerrors.NewAggregate(errs) if err := r.setMissingDependenciesCondition(clp, dependenciesError, logger); err != nil { @@ -772,6 +788,35 @@ func (r *ReconcileClusterPool) createCluster( var cd *hivev1.ClusterDeployment var secret *corev1.Secret var cdPos int + cdcCopies := []runtime.Object{} + copyCDC := func(cdc *hivev1.ClusterDeploymentCustomization, ref **corev1.LocalObjectReference, category string) { + // We could use DeepCopy(), but then we would have to scrub out things like resourceVersion. + // Sixes :shrug: + newCDC := &hivev1.ClusterDeploymentCustomization{ + TypeMeta: cdc.TypeMeta, + ObjectMeta: metav1.ObjectMeta{ + Name: cdc.Name, + Namespace: builder.Namespace, + Labels: cdc.Labels, + Annotations: cdc.Annotations, + }, + Spec: cdc.Spec, + } + + // These will be tacked onto `objs` later + cdcCopies = append(cdcCopies, newCDC) + logger.WithFields(log.Fields{ + "cdc": newCDC.Name, + "targetNamespace": newCDC.Namespace, + }).Debugf("Copying %s CDC into builder namespace", category) + + // Register the reference in the requested spot + *ref = &corev1.LocalObjectReference{Name: cdc.Name} + logger.WithFields(log.Fields{ + "cdc": newCDC.Name, + }).Debugf("Registered %s CDC", category) + } + // Add the ClusterPoolRef to the ClusterDeployment, and move it to the end of the slice. for i, obj := range objs { if cdTmp, ok := obj.(*hivev1.ClusterDeployment); ok { @@ -780,17 +825,27 @@ func (r *ReconcileClusterPool) createCluster( poolRef := poolReference(clp) cd.Spec.ClusterPoolRef = &poolRef if clp.Spec.Inventory != nil { + // Copy the CDC to the new cluster's namespace. If it contains manifest patches, + // they will be applied by the provisioner. cdc := cdcs.unassigned[0] + copyCDC(cdc, &cd.Spec.ClusterPoolRef.CustomizationRef, "inventory") if cdcs.reserved[cdc.Name] != nil || cdc.Status.ClusterDeploymentRef != nil || cdc.Status.ClusterPoolRef != nil { return nil, errors.Errorf("ClusterDeploymentCustomization %s is already reserved", cdc.Name) } - cd.Spec.ClusterPoolRef.CustomizationRef = &corev1.LocalObjectReference{Name: cdc.Name} + } + // If a non-inventory CDC was requested via pool.Spec.CustomizationRef, copy it to the + // new cluster's namespace, and reference it from the CD, so it can be applied by the + // provisioner. 
+ if cdcs.nonInventory != nil { + copyCDC(cdcs.nonInventory, &cd.Spec.Provisioning.CustomizationRef, "non-inventory") } } else if secretTmp := isInstallConfigSecret(obj); secretTmp != nil { secret = secretTmp } } + // Copy in any CDCs + objs = append(objs, cdcCopies...) if err := r.patchInstallConfig(clp, cd, secret, cdcs); err != nil { return nil, err } @@ -800,7 +855,13 @@ func (r *ReconcileClusterPool) createCluster( objs[cdPos], objs[lastIndex] = objs[lastIndex], objs[cdPos] // Create the resources. for _, obj := range objs { - if err := r.Client.Create(context.Background(), obj.(client.Object)); err != nil { + cobj := obj.(client.Object) + logger.WithFields(log.Fields{ + "kind": cobj.GetObjectKind(), + "namespace": cobj.GetNamespace(), + "name": cobj.GetName(), + }).Debug("Creating clusterresource object") + if err := r.Client.Create(context.Background(), cobj); err != nil { r.expectations.CreationObserved(poolKey) return nil, err } diff --git a/pkg/controller/clusterpool/clusterpool_controller_test.go b/pkg/controller/clusterpool/clusterpool_controller_test.go index 7861bbf1638..fc1a236ffdf 100644 --- a/pkg/controller/clusterpool/clusterpool_controller_test.go +++ b/pkg/controller/clusterpool/clusterpool_controller_test.go @@ -9,19 +9,19 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" hivev1 "github.com/openshift/hive/apis/hive/v1" @@ -51,9 +51,11 @@ func TestReconcileClusterPool(t *testing.T) { // See calculatePoolVersion. If these change, the easiest way to figure out the new value is // to pull it from the test failure :) - initialPoolVersion := "ac2dc91b241dd33d" - inventoryPoolVersion := "06983eaafac7f695" - openstackPoolVersion := "0a55303c49151e0d" + initialPoolVersion := "04a8f79a4a2e9733" + inventoryPoolVersion := "a6dcbd6776a3c92b" + inventoryAndCustomizationPoolVersion := "f1a16535dbc27559" + customizationPoolVersion := "564f99a732a771e9" + openstackPoolVersion := "0be50b7ba396d313" poolBuilder := testcp.FullBuilder(testNamespace, testLeasePoolName, scheme). GenericOptions( @@ -137,8 +139,13 @@ func TestReconcileClusterPool(t *testing.T) { expectedUnassignedClaims int expectedAssignedCDs int expectedAssignedCDCs map[string]string - expectedRunning int - expectedLabels map[string]string // Tested on all clusters, so will not work if your test has pre-existing cds in the pool. + // This will be: + // 2 if using inventory AND pool.Spec.CustomizationRef + // 1 if using one xor the other + // 0 if using neither + expectedCDCsInCDNamespaces int + expectedRunning int + expectedLabels map[string]string // Tested on all clusters, so will not work if your test has pre-existing cds in the pool. // Map, keyed by claim name, of expected Status.Conditions['Pending'].Reason. // (The clusterpool controller always sets this condition's Status to True.) // Not checked if nil. 
@@ -226,8 +233,42 @@ func TestReconcileClusterPool(t *testing.T) { expectedInventoryValidStatus: corev1.ConditionTrue, expectedPoolVersion: inventoryPoolVersion, expectedAssignedCDCs: map[string]string{"test-cdc-1": testLeasePoolName}, + expectedCDCsInCDNamespaces: 1, expectedCDCReason: map[string]string{"test-cdc-1": hivev1.CustomizationApplyReasonInstallationPending}, }, + { + name: "cp with inventory and customizationRef", + existing: []runtime.Object{ + inventoryPoolBuilder().Build( + testcp.WithSize(1), + testcp.WithCustomizationRef("non-inventory-cdc")), + testcdc.FullBuilder(testNamespace, "test-cdc-1", scheme).Build(), + testcdc.FullBuilder(testNamespace, "non-inventory-cdc", scheme).Build(), + }, + expectedTotalClusters: 1, + expectedObservedSize: 0, + expectedObservedReady: 0, + expectedInventoryValidStatus: corev1.ConditionTrue, + expectedPoolVersion: inventoryAndCustomizationPoolVersion, + expectedAssignedCDCs: map[string]string{"test-cdc-1": testLeasePoolName}, + expectedCDCsInCDNamespaces: 2, + expectedCDCReason: map[string]string{"test-cdc-1": hivev1.CustomizationApplyReasonInstallationPending}, + }, + { + name: "cp with customizationRef", + existing: []runtime.Object{ + initializedPoolBuilder.Build( + testcp.WithSize(1), + testcp.WithCustomizationRef("non-inventory-cdc")), + testcdc.FullBuilder(testNamespace, "non-inventory-cdc", scheme).Build(), + }, + expectedTotalClusters: 1, + expectedObservedSize: 0, + expectedObservedReady: 0, + expectedInventoryValidStatus: corev1.ConditionUnknown, + expectedPoolVersion: customizationPoolVersion, + expectedCDCsInCDNamespaces: 1, + }, // TODO: Revise once https://issues.redhat.com/browse/HIVE-2284 solved /*{ name: "cp with inventory and available cdc deleted without hold", @@ -310,9 +351,10 @@ func TestReconcileClusterPool(t *testing.T) { "test-cdc-1": {fmt.Sprintf("hive.openshift.io/%s", testLeasePoolName)}, "test-cdc-2": {"hive.openshift.io/test-cp-2"}, }, - expectedTotalClusters: 1, - expectedPoolVersion: inventoryPoolVersion, - expectError: false, + expectedCDCsInCDNamespaces: 1, + expectedTotalClusters: 1, + expectedPoolVersion: inventoryPoolVersion, + expectError: false, }, { name: "cp with inventory and cdc doesn't exist is not valid - missing", @@ -384,6 +426,7 @@ func TestReconcileClusterPool(t *testing.T) { expectedInventoryValidStatus: corev1.ConditionTrue, expectedPoolVersion: inventoryPoolVersion, expectedCDCurrentStatus: corev1.ConditionUnknown, + expectedCDCsInCDNamespaces: 1, expectError: true, }, { @@ -391,7 +434,7 @@ func TestReconcileClusterPool(t *testing.T) { existing: []runtime.Object{ inventoryPoolBuilder().Build(testcp.WithSize(1)), testcdc.FullBuilder(testNamespace, "test-cdc-1", scheme).Build( - testcdc.WithPatch("/broken/path", "replace", "x"), + testcdc.WithInstallConfigPatch("/broken/path", "replace", "x"), ), }, expectedTotalClusters: 0, @@ -543,6 +586,7 @@ func TestReconcileClusterPool(t *testing.T) { expectedPoolVersion: inventoryPoolVersion, expectedInventoryAssignmentOrder: []string{"test-cdc-successful-old"}, expectedAssignedCDCs: map[string]string{"test-cdc-successful-old": ""}, + expectedCDCsInCDNamespaces: 1, }, { name: "cp with inventory - correct prioritization - mix and multiple deployments", @@ -567,6 +611,7 @@ func TestReconcileClusterPool(t *testing.T) { "test-cdc-successful-old": "", "test-cdc-unused-new": "", }, + expectedCDCsInCDNamespaces: 1, }, { @@ -588,6 +633,7 @@ func TestReconcileClusterPool(t *testing.T) { expectedPoolVersion: inventoryPoolVersion, expectedInventoryAssignmentOrder: 
[]string{"test-cdc-successful-new"}, expectedAssignedCDCs: map[string]string{"test-cdc-successful-new": ""}, + expectedCDCsInCDNamespaces: 1, }, { name: "cp with inventory - release cdc when cd is missing", @@ -603,9 +649,10 @@ func TestReconcileClusterPool(t *testing.T) { testcdc.Reserved(), ), }, - expectedTotalClusters: 1, - expectedPoolVersion: inventoryPoolVersion, - expectedAssignedCDCs: map[string]string{"test-cdc-1": ""}, + expectedTotalClusters: 1, + expectedPoolVersion: inventoryPoolVersion, + expectedAssignedCDCs: map[string]string{"test-cdc-1": ""}, + expectedCDCsInCDNamespaces: 1, }, { name: "cp with inventory - fix cdc when cd reference exists", @@ -1285,17 +1332,28 @@ func TestReconcileClusterPool(t *testing.T) { expectedMissingDependenciesMessage: `credentials secret: secrets "aws-creds" not found`, expectedCDCurrentStatus: corev1.ConditionUnknown, }, - + { + name: "missing pool-wide CDC", + existing: []runtime.Object{ + initializedPoolBuilder.Build(testcp.WithSize(1), testcp.WithCustomizationRef("non-inventory-cdc")), + }, + expectError: true, + expectedMissingDependenciesStatus: corev1.ConditionTrue, + expectedMissingDependenciesMessage: `pool-wide customizationRef non-inventory-cdc not found`, + expectPoolVersionChanged: true, + expectedCDCurrentStatus: corev1.ConditionUnknown, + }, { name: "multiple missing dependents", existing: []runtime.Object{ - initializedPoolBuilder.Build(testcp.WithSize(1)), + initializedPoolBuilder.Build(testcp.WithSize(1), testcp.WithCustomizationRef("non-inventory-cdc")), }, noClusterImageSet: true, noCredsSecret: true, expectError: true, expectedMissingDependenciesStatus: corev1.ConditionTrue, - expectedMissingDependenciesMessage: `[cluster image set: clusterimagesets.hive.openshift.io "test-image-set" not found, credentials secret: secrets "aws-creds" not found]`, + expectedMissingDependenciesMessage: `[cluster image set: clusterimagesets.hive.openshift.io "test-image-set" not found, credentials secret: secrets "aws-creds" not found, pool-wide customizationRef non-inventory-cdc not found]`, + expectPoolVersionChanged: true, expectedCDCurrentStatus: corev1.ConditionUnknown, }, { @@ -2120,7 +2178,10 @@ func TestReconcileClusterPool(t *testing.T) { } var actualAssignedCDs, actualUnassignedCDs, actualRunning, actualHibernating int + cdNamespaces := sets.Set[string]{} for _, cd := range cds.Items { + cdNamespaces.Insert(cd.Namespace) + poolRef := cd.Spec.ClusterPoolRef if poolRef == nil || poolRef.PoolName != testLeasePoolName || poolRef.ClaimName == "" { // TODO: Calling these "unassigned" isn't great. Some may not even belong to the pool. 
@@ -2168,7 +2229,8 @@ func TestReconcileClusterPool(t *testing.T) { err = fakeClient.List(context.Background(), claims) require.NoError(t, err) - actualAssignedClaims := 0 + // Set of namespaces of claimed clusters + actualAssignedClaims := sets.Set[string]{} actualUnassignedClaims := 0 for _, claim := range claims.Items { if test.expectedClaimPendingReasons != nil { @@ -2179,27 +2241,39 @@ func TestReconcileClusterPool(t *testing.T) { } } } - if claim.Spec.Namespace == "" { + if ns := claim.Spec.Namespace; ns == "" { actualUnassignedClaims++ } else { - actualAssignedClaims++ + actualAssignedClaims.Insert(ns) } } - assert.Equal(t, test.expectedAssignedClaims, actualAssignedClaims, "unexpected number of assigned claims") + assert.Equal(t, test.expectedAssignedClaims, len(actualAssignedClaims), "unexpected number of assigned claims") assert.Equal(t, test.expectedUnassignedClaims, actualUnassignedClaims, "unexpected number of unassigned claims") cdcs := &hivev1.ClusterDeploymentCustomizationList{} + // Get CDCs from all namespaces err = fakeClient.List(context.Background(), cdcs) require.NoError(t, err) cdcMap := make(map[string]hivev1.ClusterDeploymentCustomization, len(cdcs.Items)) + // Keep track of the number of CDCs in each CD namespace. In the end, these should all + // be the same, equal to expectedCDCsInCDNamespaces + cdcsInCDNamespaces := map[string]int{} for _, cdc := range cdcs.Items { + // Ensure CDCs were copied to pool CD namespaces + if cdc.Namespace != pool.Namespace { + assert.Contains(t, cdNamespaces, cdc.Namespace, "expected CDC to have been copied to CD namespace") + cdcsInCDNamespaces[cdc.Namespace]++ + continue + } + cdcMap[cdc.Name] = cdc condition := meta.FindStatusCondition(cdc.Status.Conditions, hivev1.ApplySucceededCondition) if test.expectedCDCReason != nil { if reason, ok := test.expectedCDCReason[cdc.Name]; ok { - assert.NotNil(t, condition) - assert.Equal(t, reason, condition.Reason, "expected CDC status to match") + if assert.NotNil(t, condition) { + assert.Equal(t, reason, condition.Reason, "expected CDC status to match") + } } } @@ -2217,6 +2291,9 @@ func TestReconcileClusterPool(t *testing.T) { } } } + for ns, actualCDCsInCDNamespace := range cdcsInCDNamespaces { + assert.Equal(t, test.expectedCDCsInCDNamespaces, actualCDCsInCDNamespace, "unexpected number of CDCs in namespace %s", ns) + } if order := test.expectedInventoryAssignmentOrder; len(order) > 0 { lastTime := metav1.NewTime(nowish.Add(24 * -time.Hour)) diff --git a/pkg/controller/clusterpool/collections.go b/pkg/controller/clusterpool/collections.go index 9791c5bc2bd..3b3ccb3849f 100644 --- a/pkg/controller/clusterpool/collections.go +++ b/pkg/controller/clusterpool/collections.go @@ -640,12 +640,14 @@ type cdcCollection struct { byCDCName map[string]*hivev1.ClusterDeploymentCustomization // Namespace are all the CDC in the namespace mapped by name namespace map[string]*hivev1.ClusterDeploymentCustomization + // nonInventory is the (single) CDC referenced by ClusterPool.Spec.CustomizationRef, if any. + nonInventory *hivev1.ClusterDeploymentCustomization } // getAllCustomizationsForPool is the constructor for a cdcCollection for all of the // ClusterDeploymentCustomizations that are related to specified pool. 
func getAllCustomizationsForPool(c client.Client, pool *hivev1.ClusterPool, logger log.FieldLogger) (*cdcCollection, error) { - if pool.Spec.Inventory == nil { + if pool.Spec.Inventory == nil && (pool.Spec.CustomizationRef == nil || pool.Spec.CustomizationRef.Name == "") { return &cdcCollection{}, nil } cdcList := &hivev1.ClusterDeploymentCustomizationList{} @@ -657,19 +659,28 @@ func getAllCustomizationsForPool(c client.Client, pool *hivev1.ClusterPool, logg } cdcCol := cdcCollection{ - unassigned: make([]*hivev1.ClusterDeploymentCustomization, 0), - missing: make([]string, 0), - reserved: make(map[string]*hivev1.ClusterDeploymentCustomization), - cloud: make(map[string]*hivev1.ClusterDeploymentCustomization), - syntax: make(map[string]*hivev1.ClusterDeploymentCustomization), - byCDCName: make(map[string]*hivev1.ClusterDeploymentCustomization), - namespace: make(map[string]*hivev1.ClusterDeploymentCustomization), + unassigned: make([]*hivev1.ClusterDeploymentCustomization, 0), + missing: make([]string, 0), + reserved: make(map[string]*hivev1.ClusterDeploymentCustomization), + cloud: make(map[string]*hivev1.ClusterDeploymentCustomization), + syntax: make(map[string]*hivev1.ClusterDeploymentCustomization), + byCDCName: make(map[string]*hivev1.ClusterDeploymentCustomization), + namespace: make(map[string]*hivev1.ClusterDeploymentCustomization), + nonInventory: nil, } for i, cdc := range cdcList.Items { cdcCol.namespace[cdc.Name] = &cdcList.Items[i] } + // Validate the non-inventory CDC, if any. + if pool.Spec.CustomizationRef != nil && pool.Spec.CustomizationRef.Name != "" { + niName := pool.Spec.CustomizationRef.Name + if niCDC, ok := cdcCol.namespace[niName]; ok { + cdcCol.nonInventory = niCDC + } + } + for _, item := range pool.Spec.Inventory { if cdc, ok := cdcCol.namespace[item.Name]; ok { cdcCol.byCDCName[item.Name] = cdc diff --git a/pkg/controller/utils/clusterdeployment.go b/pkg/controller/utils/clusterdeployment.go index 02610b3bebc..6b2b7d99c8e 100644 --- a/pkg/controller/utils/clusterdeployment.go +++ b/pkg/controller/utils/clusterdeployment.go @@ -1,6 +1,7 @@ package utils import ( + "context" "errors" "fmt" "strconv" @@ -8,6 +9,8 @@ import ( log "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" hivev1 "github.com/openshift/hive/apis/hive/v1" @@ -209,3 +212,62 @@ func GCPNetworkProjectID(cd *hivev1.ClusterDeployment) *string { // may still be nil return cd.Spec.ClusterMetadata.Platform.GCP.NetworkProjectID } + +// LoadManifestPatches attempts to load the ClusterDeploymentCustomization(s) referenced by the +// ClusterDeployment and return their (possibly merged) InstallerManifestPatches. (ClusterPool +// clusters may have two CDC references: one from the Inventory CDC and one from +// ClusterPool.Spec.CustomizationRef. In this case we apply the latter first.) +// If there are no such references, or if neither reference contains InstallerManifestPatches, we +// return a nil object and a nil error (this is not considered an error condition). +// Any other error -- including if the referenced CDC doesn't exist -- is bubbled up. 
+func LoadManifestPatches(c client.Client, cd *hivev1.ClusterDeployment, log log.FieldLogger) ([]hivev1.InstallerManifestPatch, error) { + // Leetle helper to avoid chains of conditionals checking for nils along object paths + safeGetField := func(accessor func() string) string { + defer func() { recover() }() + return accessor() + } + // Helper to DRY retrieving CDC and extracting its InstallerManifestPatches + getCDCAndIMPs := func(cdcName string) ([]hivev1.InstallerManifestPatch, error) { + cdc := &hivev1.ClusterDeploymentCustomization{} + if err := c.Get(context.Background(), types.NamespacedName{Namespace: cd.Namespace, Name: cdcName}, cdc); err != nil { + log.WithField("cdc", cdcName).WithError(err).Error("failed to retrieve ClusterDeploymentCustomization") + return nil, err + } + return cdc.Spec.InstallerManifestPatches, nil + } + + imps := []hivev1.InstallerManifestPatch{} + found := false + + if cdcName := safeGetField(func() string { return cd.Spec.Provisioning.CustomizationRef.Name }); cdcName != "" { + i, err := getCDCAndIMPs(cdcName) + if err != nil { + return nil, err + } + if numIMPs := len(i); numIMPs > 0 { + log.Infof("Found %d InstallerManifestPatch entries from ClusterDeployment.Spec.CustomizationRef %s", numIMPs, cdcName) + imps = append(imps, i...) + found = true + } + } + + // We want to apply ClusterPool inventory patches last, so they "win" in case of any conflicts. + if cdcName := safeGetField(func() string { return cd.Spec.ClusterPoolRef.CustomizationRef.Name }); cdcName != "" { + i, err := getCDCAndIMPs(cdcName) + if err != nil { + return nil, err + } + if numIMPs := len(i); numIMPs > 0 { + log.Infof("Found %d InstallerManifestPatch entries from ClusterPool Inventory CDC %s", numIMPs, cdcName) + imps = append(imps, i...) + found = true + } + } + + if !found { + log.Info("no manifest customizations to apply") + return nil, nil + } + + return imps, nil +} diff --git a/pkg/controller/utils/sa.go b/pkg/controller/utils/sa.go index 1dfc1eb8734..70740cd9469 100644 --- a/pkg/controller/utils/sa.go +++ b/pkg/controller/utils/sa.go @@ -56,6 +56,11 @@ var ( Resources: []string{"machinepools"}, Verbs: []string{"get", "list", "update"}, }, + { + APIGroups: []string{"hive.openshift.io"}, + Resources: []string{"clusterdeploymentcustomizations"}, + Verbs: []string{"get"}, + }, } uninstallRoleRules = []rbacv1.PolicyRule{ diff --git a/pkg/installmanager/installmanager.go b/pkg/installmanager/installmanager.go index e0a10856acc..eb3fa9fb3bd 100644 --- a/pkg/installmanager/installmanager.go +++ b/pkg/installmanager/installmanager.go @@ -37,7 +37,7 @@ import ( "k8s.io/client-go/tools/cache" clientwatch "k8s.io/client-go/tools/watch" "k8s.io/client-go/util/retry" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" @@ -413,7 +413,7 @@ func (m *InstallManager) Run() error { if err := m.updateClusterProvision( m, func(provision *hivev1.ClusterProvision) { - provision.Spec.InstallLog = pointer.String(installLog) + provision.Spec.InstallLog = ptr.To(installLog) }, ); err != nil { m.log.WithError(err).Error("error updating cluster provision with asset generation log") @@ -447,8 +447,8 @@ func (m *InstallManager) Run() error { m, func(provision *hivev1.ClusterProvision) { provision.Spec.MetadataJSON = metadataBytes - provision.Spec.InfraID = pointer.String(metadata.InfraID) - provision.Spec.ClusterID = pointer.String(metadata.ClusterID) + provision.Spec.InfraID = ptr.To(metadata.InfraID) + 
provision.Spec.ClusterID = ptr.To(metadata.ClusterID) provision.Spec.AdminKubeconfigSecretRef = &corev1.LocalObjectReference{ Name: kubeconfigSecret.Name, @@ -506,7 +506,7 @@ func (m *InstallManager) Run() error { if err := m.updateClusterProvision( m, func(provision *hivev1.ClusterProvision) { - provision.Spec.InstallLog = pointer.String(installLog) + provision.Spec.InstallLog = ptr.To(installLog) }, ); err != nil { m.log.WithError(err).Warning("error updating cluster provision with installer log") @@ -944,6 +944,14 @@ func (m *InstallManager) generateAssets(cd *hivev1.ClusterDeployment, mapPoolsBy } } + // This needs to be done before `create ignition-configs`, as that thing + // consumes and removes manifests. + m.log.Info("applying customizations") + if err := m.applyCustomizations(cd); err != nil { + m.log.WithError(err).Error("error applying customizations") + return err + } + m.log.Info("running openshift-install create ignition-configs") if err := m.runOpenShiftInstallCommand("create", "ignition-configs"); err != nil { m.log.WithError(err).Error("error generating installer assets") @@ -1596,7 +1604,7 @@ func uploadAdminKubeconfig(m *InstallManager) (*corev1.Secret, error) { Kind: provisionGVK.Kind, Name: m.ClusterProvision.Name, UID: m.ClusterProvision.UID, - BlockOwnerDeletion: pointer.BoolPtr(true), + BlockOwnerDeletion: ptr.To(true), }} if err := createWithRetries(kubeconfigSecret, m); err != nil { @@ -1663,7 +1671,7 @@ func uploadAdminPassword(m *InstallManager) (*corev1.Secret, error) { Kind: provisionGVK.Kind, Name: m.ClusterProvision.Name, UID: m.ClusterProvision.UID, - BlockOwnerDeletion: pointer.BoolPtr(true), + BlockOwnerDeletion: ptr.To(true), }} if err := createWithRetries(s, m); err != nil { @@ -2045,3 +2053,50 @@ func patchAzureOverrideCreds(overrideCredsBytes, clusterInfraConfigBytes []byte, } return &modifiedBytes, nil } + +func (m *InstallManager) applyCustomizations(cd *hivev1.ClusterDeployment) error { + imps, err := utils.LoadManifestPatches(m.DynamicClient, cd, m.log) + if err != nil || imps == nil || len(imps) == 0 { + // loadCustomization logged + // err may be nil if the CD doesn't reference a CDC. 
+ return err + } + + for i, imp := range imps { + glob := imp.ManifestSelector.Glob + matches, err := filepath.Glob(filepath.Join(m.WorkDir, glob)) + if err != nil { + return fmt.Errorf("manifest patch glob %d (%s) failed: %w", i, glob, err) + } + if len(matches) == 0 { + return fmt.Errorf("manifest patch glob %d (%s) matched zero manifests", i, glob) + } + m.log.Infof("manifest patch glob %d (%s) matched %d files", i, glob, len(matches)) + for _, p := range matches { + path, err := filepath.Abs(p) + if err != nil { + return fmt.Errorf("could not determine absolute path for %s: %w", p, err) + } + // m.WorkDir is already absolute + if !strings.HasPrefix(path, m.WorkDir) { + return fmt.Errorf( + "manifest patch glob %d (%s) yielded path %s which is not relative to working directory %s -- possible hack attempt", + i, glob, path, m.WorkDir) + } + m.log.Infof("patching manifest %s", path) + manifestBytes, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("failed to load manifest %s: %w", path, err) + } + modifiedBytes, err := yamlutils.ApplyPatches(manifestBytes, imp.Patches) + if err != nil { + return fmt.Errorf("failed to apply patches to %s: %w", path, err) + } + // Perm arg is ignored for existing file + if err := os.WriteFile(path, modifiedBytes, 0); err != nil { + return fmt.Errorf("failed to write patched manifest %s to disk: %w", path, err) + } + } + } + return nil +} diff --git a/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go b/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go index 0826a4b9e12..b463207406f 100644 --- a/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go +++ b/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go @@ -99,7 +99,7 @@ func Reserved() Option { } } -func WithPatch(path, op, value string) Option { +func WithInstallConfigPatch(path, op, value string) Option { return func(cdc *hivev1.ClusterDeploymentCustomization) { cdc.Spec.InstallConfigPatches = append(cdc.Spec.InstallConfigPatches, hivev1.PatchEntity{ Path: path, @@ -109,6 +109,23 @@ func WithPatch(path, op, value string) Option { } } +func WithManifestPatch(glob, path, op, value string) Option { + return func(cdc *hivev1.ClusterDeploymentCustomization) { + cdc.Spec.InstallerManifestPatches = append(cdc.Spec.InstallerManifestPatches, hivev1.InstallerManifestPatch{ + ManifestSelector: hivev1.ManifestSelector{ + Glob: glob, + }, + Patches: []hivev1.PatchEntity{ + { + Path: path, + Op: op, + Value: value, + }, + }, + }) + } +} + func WithApplySucceeded(reason string, change time.Time) Option { return func(cdc *hivev1.ClusterDeploymentCustomization) { status := metav1.ConditionTrue diff --git a/pkg/test/clusterpool/clusterpool.go b/pkg/test/clusterpool/clusterpool.go index 15bd9e38626..47f3f2270dc 100644 --- a/pkg/test/clusterpool/clusterpool.go +++ b/pkg/test/clusterpool/clusterpool.go @@ -6,7 +6,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" hivev1 "github.com/openshift/hive/apis/hive/v1" hivev1aws "github.com/openshift/hive/apis/hive/v1/aws" @@ -154,13 +154,13 @@ func WithInstallerEnv(ie []corev1.EnvVar) Option { func WithMaxSize(size int) Option { return func(clusterPool *hivev1.ClusterPool) { - clusterPool.Spec.MaxSize = pointer.Int32Ptr(int32(size)) + clusterPool.Spec.MaxSize = ptr.To(int32(size)) } } func WithMaxConcurrent(size int) Option { return func(clusterPool 
*hivev1.ClusterPool) { - clusterPool.Spec.MaxConcurrent = pointer.Int32Ptr(int32(size)) + clusterPool.Spec.MaxConcurrent = ptr.To(int32(size)) } } @@ -214,3 +214,11 @@ func WithInventory(cdcs []string) Option { } } } + +func WithCustomizationRef(cdcName string) Option { + return func(clusterPool *hivev1.ClusterPool) { + clusterPool.Spec.CustomizationRef = &corev1.LocalObjectReference{ + Name: cdcName, + } + } +} diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go index 8f143b18399..0d5eb328f72 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go @@ -218,6 +218,13 @@ type Provisioning struct { // +optional InstallConfigSecretRef *corev1.LocalObjectReference `json:"installConfigSecretRef,omitempty"` + // CustomizationRef is a reference to a ClusterDeploymentCustomization containing + // InstallerManifestPatches to be applied to the manifests generated by openshift-install prior + // to starting the installation. (InstallConfigPatches will be ignored -- those changes should + // be made directly to the install-config.yaml referenced by InstallConfigSecretRef.) + // +optional + CustomizationRef *corev1.LocalObjectReference `json:"customizationRef,omitempty"` + // ReleaseImage is the image containing metadata for all components that run in the cluster, and // is the primary and best way to specify what specific version of OpenShift you wish to install. ReleaseImage string `json:"releaseImage,omitempty"` diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go index 5cf721f7c2b..1692d8089bc 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go @@ -29,7 +29,7 @@ const ( // ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations API. // +kubebuilder:subresource:status // +k8s:openapi-gen=true -// +kubebuilder:resource:scope=Namespaced +// +kubebuilder:resource:shortName=cdc,scope=Namespaced type ClusterDeploymentCustomization struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -42,6 +42,27 @@ type ClusterDeploymentCustomization struct { type ClusterDeploymentCustomizationSpec struct { // InstallConfigPatches is a list of patches to be applied to the install-config. InstallConfigPatches []PatchEntity `json:"installConfigPatches,omitempty"` + + // InstallerManifestPatches is a list of patches to be applied to installer-generated manifests. + InstallerManifestPatches []InstallerManifestPatch `json:"installerManifestPatches,omitempty"` +} + +type InstallerManifestPatch struct { + // ManifestSelector identifies one or more manifests to patch + ManifestSelector ManifestSelector `json:"manifestSelector"` + + // Patches is a list of RFC6902 patches to apply to manifests identified by manifestSelector. + Patches []PatchEntity `json:"patches"` +} + +type ManifestSelector struct { + // Glob is a file glob (per https://pkg.go.dev/path/filepath#Glob) identifying one or more + // manifests. Paths should be relative to the installer's working directory. 
Examples: + // - openshift/99_role-cloud-creds-secret-reader.yaml + // - openshift/99_openshift-cluster-api_worker-machineset-*.yaml + // - */*secret* + // It is an error if a glob matches zero manifests. + Glob string `json:"glob"` } // PatchEntity represents a json patch (RFC 6902) to be applied diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go index 8b1cdbaca8e..eca37072a75 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go @@ -102,6 +102,13 @@ type ClusterPoolSpec struct { // additional features of the installer. // +optional InstallerEnv []corev1.EnvVar `json:"installerEnv,omitempty"` + + // CustomizationRef refers to a ClusterDeploymentCustomization object whose InstallerManifestPatches should + // be applied to *all* ClusterDeployments created by this ClusterPool. This is in addition to any CDC from + // Inventory. The CDC must exist in the ClusterPool's namespace. It will be copied to the namespace of each + // ClusterDeployment generated by the ClusterPool. + // +optional + CustomizationRef *corev1.LocalObjectReference `json:"customizationRef,omitempty"` } type HibernationConfig struct { diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go index d5fb7d8adea..146ee2b9fa9 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go @@ -742,6 +742,13 @@ func (in *ClusterDeploymentCustomizationSpec) DeepCopyInto(out *ClusterDeploymen *out = make([]PatchEntity, len(*in)) copy(*out, *in) } + if in.InstallerManifestPatches != nil { + in, out := &in.InstallerManifestPatches, &out.InstallerManifestPatches + *out = make([]InstallerManifestPatch, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -1578,6 +1585,11 @@ func (in *ClusterPoolSpec) DeepCopyInto(out *ClusterPoolSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.CustomizationRef != nil { + in, out := &in.CustomizationRef, &out.CustomizationRef + *out = new(corev1.LocalObjectReference) + **out = **in + } return } @@ -2778,6 +2790,28 @@ func (in *IdentityProviderStatus) DeepCopy() *IdentityProviderStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstallerManifestPatch) DeepCopyInto(out *InstallerManifestPatch) { + *out = *in + out.ManifestSelector = in.ManifestSelector + if in.Patches != nil { + in, out := &in.Patches, &out.Patches + *out = make([]PatchEntity, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallerManifestPatch. +func (in *InstallerManifestPatch) DeepCopy() *InstallerManifestPatch { + if in == nil { + return nil + } + out := new(InstallerManifestPatch) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InventoryEntry) DeepCopyInto(out *InventoryEntry) { *out = *in @@ -3261,6 +3295,22 @@ func (in *ManageDNSGCPConfig) DeepCopy() *ManageDNSGCPConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
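To make the new schema concrete, the following is a hedged sketch of a ClusterDeploymentCustomization that adds a label to every installer-generated worker MachineSet manifest. The object name, namespace, and the specific patch are illustrative only; the types (InstallerManifestPatch, ManifestSelector, PatchEntity) are the ones introduced above. A ClusterPool points at such a CDC via spec.customizationRef; a standalone ClusterDeployment via spec.provisioning.customizationRef.

    package example

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

        hivev1 "github.com/openshift/hive/apis/hive/v1"
    )

    // Illustrative only: a CDC whose InstallerManifestPatches label all worker
    // MachineSet manifests. Names and the patch itself are made up for the example.
    var workerLabelCDC = &hivev1.ClusterDeploymentCustomization{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "worker-labels",
            Namespace: "my-pool-namespace",
        },
        Spec: hivev1.ClusterDeploymentCustomizationSpec{
            InstallerManifestPatches: []hivev1.InstallerManifestPatch{
                {
                    ManifestSelector: hivev1.ManifestSelector{
                        // Relative to the installer working directory; must match
                        // at least one file or provisioning fails.
                        Glob: "openshift/99_openshift-cluster-api_worker-machineset-*.yaml",
                    },
                    Patches: []hivev1.PatchEntity{
                        {
                            Op:    "add",
                            Path:  "/metadata/labels/example.openshift.io~1patched",
                            Value: "true",
                        },
                    },
                },
            },
        },
    }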
+func (in *ManifestSelector) DeepCopyInto(out *ManifestSelector) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManifestSelector. +func (in *ManifestSelector) DeepCopy() *ManifestSelector { + if in == nil { + return nil + } + out := new(ManifestSelector) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OpenStackClusterDeprovision) DeepCopyInto(out *OpenStackClusterDeprovision) { *out = *in @@ -3442,6 +3492,11 @@ func (in *Provisioning) DeepCopyInto(out *Provisioning) { *out = new(corev1.LocalObjectReference) **out = **in } + if in.CustomizationRef != nil { + in, out := &in.CustomizationRef, &out.CustomizationRef + *out = new(corev1.LocalObjectReference) + **out = **in + } if in.ImageSetRef != nil { in, out := &in.ImageSetRef, &out.ImageSetRef *out = new(ClusterImageSetReference) From 75f958545939a9afc582c08c33a7c5b65a466206 Mon Sep 17 00:00:00 2001 From: Eric Fried Date: Mon, 10 Mar 2025 17:43:24 -0500 Subject: [PATCH 2/2] HIVE-2797: Forbid editing CD ClusterPoolRef CustomizationRef This was missed in the original ClusterPool inventory work. --- ...terdeployment_validating_admission_hook.go | 1 + ...ployment_validating_admission_hook_test.go | 56 ++++++++++++++----- 2 files changed, 42 insertions(+), 15 deletions(-) diff --git a/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook.go b/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook.go index b12ce436796..e0410ef0b53 100644 --- a/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook.go +++ b/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook.go @@ -727,6 +727,7 @@ func (a *ClusterDeploymentValidatingAdmissionHook) validateUpdate(admissionSpec case oldPoolRef != nil && newPoolRef != nil: allErrs = append(allErrs, apivalidation.ValidateImmutableField(newPoolRef.Namespace, oldPoolRef.Namespace, specPath.Child("clusterPoolRef", "namespace"))...) allErrs = append(allErrs, apivalidation.ValidateImmutableField(newPoolRef.PoolName, oldPoolRef.PoolName, specPath.Child("clusterPoolRef", "poolName"))...) + allErrs = append(allErrs, apivalidation.ValidateImmutableField(newPoolRef.CustomizationRef, oldPoolRef.CustomizationRef, specPath.Child("clusterPoolRef", "customizationRef"))...) if oldClaim := oldPoolRef.ClaimName; oldClaim != "" { allErrs = append(allErrs, apivalidation.ValidateImmutableField(newPoolRef.ClaimName, oldClaim, specPath.Child("clusterPoolRef", "claimName"))...) 
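For context on the immutability check above: apivalidation.ValidateImmutableField reports a "field is immutable" error whenever the old and new values are not semantically equal, which is what turns any attempt to add, remove, or change clusterPoolRef.customizationRef on an existing ClusterDeployment into a rejected update. A minimal, self-contained sketch with made-up values:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        apivalidation "k8s.io/apimachinery/pkg/api/validation"
        "k8s.io/apimachinery/pkg/util/validation/field"
    )

    func main() {
        oldRef := &corev1.LocalObjectReference{Name: "foo"}
        newRef := &corev1.LocalObjectReference{Name: "bar"}

        // A non-empty result is what causes the webhook to deny the update.
        errs := apivalidation.ValidateImmutableField(
            newRef, oldRef,
            field.NewPath("spec", "clusterPoolRef", "customizationRef"))
        fmt.Println(errs) // one "field is immutable" error for the changed name
    }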
} diff --git a/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook_test.go b/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook_test.go index a820d3becdd..2c43abc4bf9 100644 --- a/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook_test.go +++ b/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook_test.go @@ -90,7 +90,7 @@ func validAWSClusterDeployment() *hivev1.ClusterDeployment { return cd } -func validAWSClusterDeploymentFromPool(poolNS, poolName, claimName string) *hivev1.ClusterDeployment { +func validAWSClusterDeploymentFromPool(poolNS, poolName, claimName, custName string) *hivev1.ClusterDeployment { cd := clusterDeploymentTemplate() cd.Spec.Platform.AWS = &hivev1aws.Platform{ CredentialsSecretRef: corev1.LocalObjectReference{Name: "fake-creds-secret"}, @@ -101,6 +101,11 @@ func validAWSClusterDeploymentFromPool(poolNS, poolName, claimName string) *hive PoolName: poolName, ClaimName: claimName, } + if custName != "" { + cd.Spec.ClusterPoolRef.CustomizationRef = &corev1.LocalObjectReference{ + Name: custName, + } + } return cd } @@ -296,20 +301,20 @@ func TestClusterDeploymentValidate(t *testing.T) { }, { name: "Test create with ClusterPoolReference", - newObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", ""), + newObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", "", ""), operation: admissionv1beta1.Create, expectedAllowed: true, }, { name: "Test create with Claimed ClusterPoolReference", - newObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", "test-claim"), + newObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", "test-claim", ""), operation: admissionv1beta1.Create, expectedAllowed: false, }, { name: "Test DR create (restore) with Claimed ClusterPoolReference", newObject: func() *hivev1.ClusterDeployment { - cd := validAWSClusterDeploymentFromPool("pool-ns", "mypool", "test-claim") + cd := validAWSClusterDeploymentFromPool("pool-ns", "mypool", "test-claim", "") cd.Labels = map[string]string{constants.DisableCreationWebHookForDisasterRecovery: "true"} return cd }(), @@ -318,7 +323,7 @@ func TestClusterDeploymentValidate(t *testing.T) { }, { name: "Test update with removed ClusterPoolReference", - oldObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", ""), + oldObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", "", ""), newObject: validAWSClusterDeployment(), operation: admissionv1beta1.Update, expectedAllowed: false, @@ -326,35 +331,56 @@ func TestClusterDeploymentValidate(t *testing.T) { { name: "Test update with added ClusterPoolReference", oldObject: validAWSClusterDeployment(), - newObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", ""), + newObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", "", "foo"), + operation: admissionv1beta1.Update, + expectedAllowed: false, + }, + { + name: "Test update with modified ClusterPoolReference Namespace", + oldObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", "", ""), + newObject: validAWSClusterDeploymentFromPool("new-pool-ns", "mypool", "", ""), + operation: admissionv1beta1.Update, + expectedAllowed: false, + }, + { + name: "Test update with modified ClusterPoolReference PoolName", + oldObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", "", "foo"), + newObject: validAWSClusterDeploymentFromPool("pool-ns", "new-mypool", "", "foo"), + operation: admissionv1beta1.Update, + expectedAllowed: false, + }, + { + name: "Test update 
with added ClusterPoolReference CustomizationRef", + oldObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", "", ""), + newObject: validAWSClusterDeploymentFromPool("new-pool-ns", "new-mypool", "", "foo"), operation: admissionv1beta1.Update, expectedAllowed: false, }, { - name: "Test update with modified ClusterPoolReference", - oldObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", ""), - newObject: validAWSClusterDeploymentFromPool("new-pool-ns", "new-mypool", ""), + name: "Test update with modified ClusterPoolReference CustomizationRef", + oldObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", "", "foo"), + newObject: validAWSClusterDeploymentFromPool("new-pool-ns", "new-mypool", "", "bar"), operation: admissionv1beta1.Update, expectedAllowed: false, }, { name: "Test update with claimed ClusterPoolReference", - oldObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", ""), - newObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", "test-claim"), + oldObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", "", ""), + newObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", "test-claim", ""), operation: admissionv1beta1.Update, expectedAllowed: true, }, { name: "Test update with unclaimed ClusterPoolReference", - oldObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", "test-claim"), - newObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", ""), + oldObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", "test-claim", "foo"), + newObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", "", "foo"), operation: admissionv1beta1.Update, expectedAllowed: false, }, { name: "Test update with changed claim", - oldObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", "test-claim"), - newObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", "other-claim"), + oldObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", "test-claim", ""), + newObject: validAWSClusterDeploymentFromPool("pool-ns", "mypool", "other-claim", ""), operation: admissionv1beta1.Update, expectedAllowed: false, },
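To tie the ClusterPool-side pieces together, here is a hedged sketch using the test builders added by this patch. The import aliases (testcp, testcdc), object names, and patch values are assumptions for illustration; WithManifestPatch, WithCustomizationRef, and WithInventory are the helpers introduced above.

    package example

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

        hivev1 "github.com/openshift/hive/apis/hive/v1"
        testcdc "github.com/openshift/hive/pkg/test/clusterdeploymentcustomization"
        testcp "github.com/openshift/hive/pkg/test/clusterpool"
    )

    // buildPoolFixtures returns a pool-wide CDC plus a ClusterPool that references
    // it via spec.customizationRef and also carries a two-entry Inventory.
    func buildPoolFixtures() (*hivev1.ClusterDeploymentCustomization, *hivev1.ClusterPool) {
        cdc := &hivev1.ClusterDeploymentCustomization{
            ObjectMeta: metav1.ObjectMeta{Name: "pool-wide-patches", Namespace: "pools"},
        }
        // Patch every worker MachineSet manifest; glob and patch values are illustrative.
        testcdc.WithManifestPatch(
            "openshift/99_openshift-cluster-api_worker-machineset-*.yaml",
            "/metadata/labels/example.openshift.io~1patched", "add", "true",
        )(cdc)

        pool := &hivev1.ClusterPool{
            ObjectMeta: metav1.ObjectMeta{Name: "my-pool", Namespace: "pools"},
        }
        for _, opt := range []testcp.Option{
            testcp.WithCustomizationRef(cdc.Name),            // applied to every CD in the pool
            testcp.WithInventory([]string{"cdc-a", "cdc-b"}), // per-CD customizations
        } {
            opt(pool)
        }
        return cdc, pool
    }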