diff --git a/Dockerfile b/Dockerfile index d66b107334..24b2bb45cd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -38,7 +38,13 @@ RUN --mount=type=cache,target=/var/cache/dnf,z \ # rewrite image names for scos sed -i 's/rhel-coreos/stream-coreos/g' /manifests/*; fi && \ dnf --setopt=keepcache=true -y install 'nmstate >= 2.2.10' && \ - if ! rpm -q util-linux; then dnf install --setopt=keepcache=true -y util-linux; fi + if ! rpm -q util-linux; then dnf install --setopt=keepcache=true -y util-linux; fi && \ + # We also need to install fuse-overlayfs and cpp for Buildah to work correctly. + if ! rpm -q buildah; then dnf install --setopt=keepcache=true -y buildah fuse-overlayfs cpp --exclude container-selinux; fi && \ + # Create the build user which will be used for doing OS image builds. We + # use the username "build" and the uid 1000 since this matches what is in + # the official Buildah image. + useradd --uid 1000 build # Copy the binaries *after* we install nmstate so we don't invalidate our cache for local builds. COPY --from=rhel9-builder /go/src/github.com/openshift/machine-config-operator/instroot-rhel9.tar /tmp/instroot-rhel9.tar RUN cd / && tar xf /tmp/instroot-rhel9.tar && rm -f /tmp/instroot-rhel9.tar diff --git a/Dockerfile.rhel7 b/Dockerfile.rhel7 index 36b2935c69..340de1a79c 100644 --- a/Dockerfile.rhel7 +++ b/Dockerfile.rhel7 @@ -39,7 +39,13 @@ RUN --mount=type=cache,target=/var/cache/dnf,z \ # rewrite image names for scos sed -i 's/rhel-coreos/stream-coreos/g' /manifests/*; fi && \ dnf --setopt=keepcache=true -y install 'nmstate >= 2.2.10' && \ - if ! rpm -q util-linux; then dnf install --setopt=keepcache=true -y util-linux; fi + if ! rpm -q util-linux; then dnf install --setopt=keepcache=true -y util-linux; fi && \ + # We also need to install fuse-overlayfs and cpp for Buildah to work correctly. + if ! 
rpm -q buildah; then dnf install --setopt=keepcache=true -y buildah fuse-overlayfs cpp --exclude container-selinux; fi && \ + # Create the build user which will be used for doing OS image builds. We + # use the username "build" and the uid 1000 since this matches what is in + # the official Buildah image. + useradd --uid 1000 build # Copy the binaries *after* we install nmstate so we don't invalidate our cache for local builds. COPY --from=rhel9-builder /go/src/github.com/openshift/machine-config-operator/instroot-rhel9.tar /tmp/instroot-rhel9.tar RUN cd / && tar xf /tmp/instroot-rhel9.tar && rm -f /tmp/instroot-rhel9.tar diff --git a/cmd/machine-config-operator/bootstrap.go b/cmd/machine-config-operator/bootstrap.go index 384961c2e7..2c75f685aa 100644 --- a/cmd/machine-config-operator/bootstrap.go +++ b/cmd/machine-config-operator/bootstrap.go @@ -10,6 +10,7 @@ import ( imagev1 "github.com/openshift/api/image/v1" "github.com/openshift/library-go/pkg/operator/resource/resourceread" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" "github.com/openshift/machine-config-operator/pkg/operator" "github.com/openshift/machine-config-operator/pkg/version" ) @@ -151,8 +152,8 @@ func runBootstrapCmd(_ *cobra.Command, _ []string) { } } - imgs := operator.Images{ - RenderConfigImages: operator.RenderConfigImages{ + imgs := ctrlcommon.Images{ + RenderConfigImages: ctrlcommon.RenderConfigImages{ MachineConfigOperator: bootstrapOpts.mcoImage, KeepalivedBootstrap: bootstrapOpts.keepalivedImage, CorednsBootstrap: bootstrapOpts.corednsImage, @@ -162,7 +163,7 @@ func runBootstrapCmd(_ *cobra.Command, _ []string) { BaseOSContainerImage: bootstrapOpts.baseOSContainerImage, BaseOSExtensionsContainerImage: bootstrapOpts.baseOSExtensionsContainerImage, }, - ControllerConfigImages: operator.ControllerConfigImages{ + ControllerConfigImages: ctrlcommon.ControllerConfigImages{ InfraImage: bootstrapOpts.infraImage, Keepalived: bootstrapOpts.keepalivedImage, 
Coredns: bootstrapOpts.corednsImage, diff --git a/pkg/controller/build/assets/wait.sh b/pkg/controller/build/assets/wait.sh index e90418f186..5cd7862b7a 100644 --- a/pkg/controller/build/assets/wait.sh +++ b/pkg/controller/build/assets/wait.sh @@ -6,13 +6,27 @@ # Wait until the digestfile file appears. The presence of this file indicates # that the build operation is complete. +set -euo pipefail + +# Wait until the done file appears. while [ ! -f "/tmp/done/digestfile" ] do sleep 1 done # Inject the contents of the digestfile into a ConfigMap. +set -x + +# Create the digestfile ConfigMap oc create configmap \ "$DIGEST_CONFIGMAP_NAME" \ --namespace openshift-machine-config-operator \ --from-file=digest=/tmp/done/digestfile + +# Label the digestfile ConfigMap +# shellcheck disable=SC2086 +oc label configmap \ + "$DIGEST_CONFIGMAP_NAME" \ + --namespace openshift-machine-config-operator \ + --overwrite=true \ + $DIGEST_CONFIGMAP_LABELS diff --git a/pkg/controller/build/build_controller.go b/pkg/controller/build/build_controller.go index c571d8e03d..7fa1de410c 100644 --- a/pkg/controller/build/build_controller.go +++ b/pkg/controller/build/build_controller.go @@ -16,7 +16,6 @@ import ( "k8s.io/apimachinery/pkg/labels" aggerrors "k8s.io/apimachinery/pkg/util/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" coreclientsetv1 "k8s.io/client-go/kubernetes/typed/core/v1" @@ -55,62 +54,6 @@ import ( "github.com/openshift/machine-config-operator/internal/clients" ) -const ( - targetMachineConfigPoolLabel = "machineconfiguration.openshift.io/targetMachineConfigPool" - // TODO(zzlotnik): Is there a constant for this someplace else? - desiredConfigLabel = "machineconfiguration.openshift.io/desiredConfig" -) - -// on-cluster-build-custom-dockerfile ConfigMap name. 
-const ( - customDockerfileConfigMapName = "on-cluster-build-custom-dockerfile" -) - -// on-cluster-build-config ConfigMap keys. -const ( - // Name of ConfigMap which contains knobs for configuring the build controller. - OnClusterBuildConfigMapName = "on-cluster-build-config" - - // The on-cluster-build-config ConfigMap key which contains a K8s secret capable of pulling of the base OS image. - BaseImagePullSecretNameConfigKey = "baseImagePullSecretName" - - // The on-cluster-build-config ConfigMap key which contains a K8s secret capable of pushing the final OS image. - FinalImagePushSecretNameConfigKey = "finalImagePushSecretName" - - // The on-cluster-build-config ConfigMap key which contains the pullspec of where to push the final OS image (e.g., registry.hostname.com/org/repo:tag). - FinalImagePullspecConfigKey = "finalImagePullspec" -) - -// machine-config-osimageurl ConfigMap keys. -const ( - // TODO: Is this a constant someplace else? - machineConfigOSImageURLConfigMapName = "machine-config-osimageurl" - - // The machine-config-osimageurl ConfigMap key which contains the pullspec of the base OS image (e.g., registry.hostname.com/org/repo:tag). - baseOSContainerImageConfigKey = "baseOSContainerImage" - - // The machine-config-osimageurl ConfigMap key which contains the pullspec of the base OS image (e.g., registry.hostname.com/org/repo:tag). - baseOSExtensionsContainerImageConfigKey = "baseOSExtensionsContainerImage" - - // The machine-config-osimageurl ConfigMap key which contains the current OpenShift release version. - releaseVersionConfigKey = "releaseVersion" - - // The machine-config-osimageurl ConfigMap key which contains the osImageURL - // value. I don't think we actually use this anywhere though. - osImageURLConfigKey = "osImageURL" -) - -const ( - // Name of the etc-pki-entitlement secret from the openshift-config-managed namespace. - etcPkiEntitlementSecretName = "etc-pki-entitlement" - - // Name of the etc-pki-rpm-gpg secret. 
- etcPkiRpmGpgSecretName = "etc-pki-rpm-gpg" - - // Name of the etc-yum-repos-d ConfigMap. - etcYumReposDConfigMapName = "etc-yum-repos-d" -) - type ErrInvalidImageBuilder struct { Message string InvalidType string @@ -120,20 +63,10 @@ func (e *ErrInvalidImageBuilder) Error() string { return e.Message } -// Image builder constants. -type ImageBuilderType string - -const ( - - // CustomPodImageBuilder is the constant indicating use of the custom pod image builder. - CustomPodImageBuilder ImageBuilderType = "CustomPodBuilder" -) - var ( // controllerKind contains the schema.GroupVersionKind for this controller type. //nolint:varcheck,deadcode // This will be used eventually - controllerKind = mcfgv1.SchemeGroupVersion.WithKind("MachineConfigPool") - validImageBuilderTypes = sets.New[ImageBuilderType](CustomPodImageBuilder) + controllerKind = mcfgv1.SchemeGroupVersion.WithKind("MachineConfigPool") ) //nolint:revive // If I name this ControllerConfig, that name will be overloaded :P @@ -184,7 +117,7 @@ type Controller struct { config BuildControllerConfig imageBuilder ImageBuilder - imageBuilderType ImageBuilderType + imageBuilderType mcfgv1alpha1.MachineOSImageBuilderType } // Creates a BuildControllerConfig with sensible production defaults. @@ -391,7 +324,7 @@ func (ctrl *Controller) processNextMosWorkItem() bool { // Reconciles the MachineConfigPool state with the state of a custom pod object. 
func (ctrl *Controller) customBuildPodUpdater(pod *corev1.Pod) error { - pool, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), pod.Labels[targetMachineConfigPoolLabel], metav1.GetOptions{}) + pool, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), pod.Labels[TargetMachineConfigPoolLabelKey], metav1.GetOptions{}) if err != nil { return err } @@ -967,42 +900,67 @@ func (ctrl *Controller) updateConfigAndBuild(mosc *mcfgv1alpha1.MachineOSConfig, return ctrl.syncAvailableStatus(newMosb) } +func (ctrl *Controller) getOSImageURLConfig() (*ctrlcommon.OSImageURLConfig, error) { + cm, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), ctrlcommon.MachineConfigOSImageURLConfigMapName, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("could not get ConfigMap %s: %w", ctrlcommon.MachineConfigOSImageURLConfigMapName, err) + } + + return ctrlcommon.ParseOSImageURLConfigMap(cm) +} + +func (ctrl *Controller) getImagesConfig() (*ctrlcommon.Images, error) { + cm, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), ctrlcommon.MachineConfigOperatorImagesConfigMapName, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("could not get configmap %s: %w", ctrlcommon.MachineConfigOperatorImagesConfigMapName, err) + } + + return ctrlcommon.ParseImagesFromConfigMap(cm) +} + // Prepares all of the objects needed to perform an image build. 
func (ctrl *Controller) prepareForBuild(mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig) (ImageBuildRequest, error) { ibr := newImageBuildRequestFromBuildInputs(mosb, mosc) + imagesConfig, err := ctrl.getImagesConfig() + if err != nil { + return ibr, fmt.Errorf("could not get images.json config: %w", err) + } + + ibr.MCOImagePullspec = imagesConfig.MachineConfigOperator + // populate the "optional" fields, if the user did not specify them - osImageURL, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), machineConfigOSImageURLConfigMapName, metav1.GetOptions{}) + osImageURLConfig, err := ctrl.getOSImageURLConfig() if err != nil { - return ibr, fmt.Errorf("could not get OS image URL: %w", err) + return ibr, fmt.Errorf("could not get osImageURL config: %w", err) } + moscNew := mosc.DeepCopy() - url := newExtensionsImageInfo(osImageURL, mosc) if moscNew.Spec.BuildInputs.BaseOSExtensionsImagePullspec == "" { - moscNew.Spec.BuildInputs.BaseOSExtensionsImagePullspec = url.Pullspec + moscNew.Spec.BuildInputs.BaseOSExtensionsImagePullspec = osImageURLConfig.BaseOSExtensionsContainerImage } - url = newBaseImageInfo(osImageURL, mosc) + if moscNew.Spec.BuildInputs.BaseOSImagePullspec == "" { - moscNew.Spec.BuildInputs.BaseOSImagePullspec = url.Pullspec - moscNew.Spec.BuildInputs.ReleaseVersion = osImageURL.Data[releaseVersionConfigKey] + moscNew.Spec.BuildInputs.BaseOSImagePullspec = osImageURLConfig.BaseOSContainerImage + moscNew.Spec.BuildInputs.ReleaseVersion = osImageURLConfig.ReleaseVersion } - etcPkiEntitlements, err := ctrl.getOptionalSecret(etcPkiEntitlementSecretName) + etcPkiEntitlements, err := ctrl.getOptionalSecret(EtcPkiEntitlementSecretName) if err != nil { return ImageBuildRequest{}, err } ibr.HasEtcPkiEntitlementKeys = etcPkiEntitlements != nil - etcPkiRpmGpgKeys, err := ctrl.getOptionalSecret(etcPkiRpmGpgSecretName) + etcPkiRpmGpgKeys, err := ctrl.getOptionalSecret(EtcPkiRpmGpgSecretName) if err != 
nil { return ImageBuildRequest{}, err } ibr.HasEtcPkiRpmGpgKeys = etcPkiRpmGpgKeys != nil - etcYumReposDConfigs, err := ctrl.getOptionalConfigMap(etcYumReposDConfigMapName) + etcYumReposDConfigs, err := ctrl.getOptionalConfigMap(EtcYumReposDConfigMapName) if err != nil { return ImageBuildRequest{}, err } @@ -1192,7 +1150,7 @@ func (ctrl *Controller) validatePullSecret(name string) (*corev1.Secret, error) // If we have a canonicalized secret, get the original secret name from the // label, then retry validation with the original secret. This will cause the // canonicalized secret to be updated if the original secret has changed. - return ctrl.validatePullSecret(canonicalized.Labels[originalSecretNameLabel]) + return ctrl.validatePullSecret(canonicalized.Labels[OriginalSecretNameLabelKey]) } // Attempt to create a canonicalized pull secret. If the secret already exsits, we should update it. @@ -1249,7 +1207,7 @@ func (ctrl *Controller) createCanonicalizedSecret(secret *corev1.Secret) (*corev // have been updated and we should handle that more gracefully. if k8serrors.IsAlreadyExists(err) { klog.Infof("Canonicalized secret %s already exists", secret.Name) - return ctrl.validatePullSecret(secret.Labels[originalSecretNameLabel]) + return ctrl.validatePullSecret(secret.Labels[OriginalSecretNameLabelKey]) } return nil, fmt.Errorf("could not create canonicalized secret %q: %w", secret.Name, err) @@ -1439,21 +1397,14 @@ func shouldWeDoABuild(builder interface { return false, nil } -// Determines if a pod or build is managed by this controller by examining its labels. -func hasAllRequiredOSBuildLabels(labels map[string]string) bool { - requiredLabels := []string{ - ctrlcommon.OSImageBuildPodLabel, - targetMachineConfigPoolLabel, - desiredConfigLabel, - } - - for _, label := range requiredLabels { - if _, ok := labels[label]; !ok { - return false - } - } +// Determines if an object is an ephemeral build object by examining its labels. 
+func isEphemeralBuildObject(obj metav1.Object) bool { + return EphemeralBuildObjectSelector().Matches(labels.Set(obj.GetLabels())) +} - return true +// Determines if an object is managed by this controller by examining its labels. +func hasAllRequiredOSBuildLabels(inLabels map[string]string) bool { + return OSBuildSelector().Matches(labels.Set(inLabels)) } func (ctrl *Controller) doesMOSBExist(mosc *mcfgv1alpha1.MachineOSConfig) (*mcfgv1alpha1.MachineOSBuild, bool) { diff --git a/pkg/controller/build/build_controller_test.go b/pkg/controller/build/build_controller_test.go index bd561b0de3..97c4e399ff 100644 --- a/pkg/controller/build/build_controller_test.go +++ b/pkg/controller/build/build_controller_test.go @@ -173,7 +173,8 @@ func TestCanonicalizedSecrets(t *testing.T) { assert.Contains(t, s.Name, canonicalSecretSuffix) assert.True(t, isCanonicalizedSecret(s)) assert.True(t, hasCanonicalizedSecretLabels(s)) - assert.Equal(t, s.Labels[originalSecretNameLabel], legacyPullSecretName) + assert.Equal(t, s.Labels[OriginalSecretNameLabelKey], legacyPullSecretName) + assert.True(t, IsObjectCreatedByBuildController(s)) } testFunc := func(t *testing.T) { @@ -288,6 +289,8 @@ func getClientsForTest() *Clients { }, }) + imagesConfigMap := getImagesConfigMap() + osImageURLConfigMap := getOSImageURLConfigMap() legacyPullSecret := `{"registry.hostname.com": {"username": "user", "password": "s3kr1t", "auth": "s00pers3kr1t", "email": "user@hostname.com"}}` @@ -297,6 +300,7 @@ func getClientsForTest() *Clients { return &Clients{ mcfgclient: fakeclientmachineconfigv1.NewSimpleClientset(objects...), kubeclient: fakecorev1client.NewSimpleClientset( + imagesConfigMap, osImageURLConfigMap, &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/controller/build/constants.go b/pkg/controller/build/constants.go new file mode 100644 index 0000000000..db2264b97b --- /dev/null +++ b/pkg/controller/build/constants.go @@ -0,0 +1,56 @@ +package build + +// Label that associates any 
objects with on-cluster layering. Should be added +// to every object that BuildController creates or manages, ephemeral or not. +const ( + OnClusterLayeringLabelKey = "machineconfiguration.openshift.io/on-cluster-layering" +) + +// Labels to add to all ephemeral build objects the BuildController creates. +const ( + EphemeralBuildObjectLabelKey = "machineconfiguration.openshift.io/ephemeral-build-object" + RenderedMachineConfigLabelKey = "machineconfiguration.openshift.io/rendered-machine-config" + TargetMachineConfigPoolLabelKey = "machineconfiguration.openshift.io/target-machine-config-pool" +) + +// Annotations to add to all ephemeral build objects BuildController creates. +const ( + machineOSBuildNameAnnotationKey = "machineconfiguration.openshift.io/machine-os-build" + machineOSConfigNameAnnotationKey = "machineconfiguration.openshift.io/machine-os-config" +) + +// Entitled build secret names +const ( + // Name of the etc-pki-entitlement secret from the openshift-config-managed namespace. + EtcPkiEntitlementSecretName = "etc-pki-entitlement" + + // Name of the etc-pki-rpm-gpg secret. + EtcPkiRpmGpgSecretName = "etc-pki-rpm-gpg" + + // Name of the etc-yum-repos-d ConfigMap. + EtcYumReposDConfigMapName = "etc-yum-repos-d" +) + +// Canonical secrets +const ( + canonicalSecretSuffix string = "-canonical" + // This label gets applied to all secrets that we've canonicalized as a way + // to indicate that we created and own them. + CanonicalSecretLabelKey string = "machineconfiguration.openshift.io/canonicalizedSecret" + // This label is applied to all canonicalized secrets. Its value should + // contain the original name of the secret that has been canonicalized. 
+ OriginalSecretNameLabelKey string = "machineconfiguration.openshift.io/originalSecretName" +) + +const ( + // Filename for the machineconfig JSON tarball expected by the build pod + machineConfigJSONFilename string = "machineconfig.json.gz" +) + +// Entitled build annotation keys +const ( + entitlementsAnnotationKeyBase = "machineconfiguration.openshift.io/has-" + EtcPkiEntitlementAnnotationKey = entitlementsAnnotationKeyBase + EtcPkiEntitlementSecretName + EtcYumReposDAnnotationKey = entitlementsAnnotationKeyBase + EtcYumReposDConfigMapName + EtcPkiRpmGpgAnnotationKey = entitlementsAnnotationKeyBase + EtcPkiRpmGpgSecretName +) diff --git a/pkg/controller/build/fixtures_test.go b/pkg/controller/build/fixtures_test.go index 5d65e184ab..553c1fe6d1 100644 --- a/pkg/controller/build/fixtures_test.go +++ b/pkg/controller/build/fixtures_test.go @@ -29,16 +29,6 @@ const ( testUpdateDelay time.Duration = time.Millisecond * 5 ) -func getCustomDockerfileConfigMap(poolToDockerfile map[string]string) *corev1.ConfigMap { - return &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: customDockerfileConfigMapName, - Namespace: ctrlcommon.MCONamespace, - }, - Data: poolToDockerfile, - } -} - func newMachineOSConfig(pool *mcfgv1.MachineConfigPool) *mcfgv1alpha1.MachineOSConfig { return &mcfgv1alpha1.MachineOSConfig{ TypeMeta: metav1.TypeMeta{ @@ -156,29 +146,27 @@ func getMachineOSBuild(ctx context.Context, cs *Clients, config *mcfgv1alpha1.Ma func getOSImageURLConfigMap() *corev1.ConfigMap { return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: machineConfigOSImageURLConfigMapName, + Name: ctrlcommon.MachineConfigOSImageURLConfigMapName, Namespace: ctrlcommon.MCONamespace, }, Data: map[string]string{ - baseOSContainerImageConfigKey: "registry.ci.openshift.org/ocp/4.14-2023-05-29-125629@sha256:12e89d631c0ca1700262583acfb856b6e7dbe94800cb38035d68ee5cc912411c", - baseOSExtensionsContainerImageConfigKey: 
"registry.ci.openshift.org/ocp/4.14-2023-05-29-125629@sha256:5b6d901069e640fc53d2e971fa1f4802bf9dea1a4ffba67b8a17eaa7d8dfa336", - // osImageURLConfigKey: "registry.ci.openshift.org/ocp/4.14-2023-05-29-125629@sha256:4f7792412d1559bf0a996edeff5e836e210f6d77df94b552a3866144d043bce1", - releaseVersionConfigKey: "4.14.0-0.ci-2023-05-29-125629", + "baseOSContainerImage": "registry.ci.openshift.org/ocp/4.14-2023-05-29-125629@sha256:12e89d631c0ca1700262583acfb856b6e7dbe94800cb38035d68ee5cc912411c", + "baseOSExtensionsContainerImage": "registry.ci.openshift.org/ocp/4.14-2023-05-29-125629@sha256:5b6d901069e640fc53d2e971fa1f4802bf9dea1a4ffba67b8a17eaa7d8dfa336", + "osImageURL": "", + "releaseVersion": "4.14.0-0.ci-2023-05-29-125629", }, } } -// Gets an example on-cluster-build-config ConfigMap. -func getOnClusterBuildConfigMap() *corev1.ConfigMap { +// Gets an example machine.config.operator-images ConfigMap. +func getImagesConfigMap() *corev1.ConfigMap { return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: OnClusterBuildConfigMapName, + Name: ctrlcommon.MachineConfigOperatorImagesConfigMapName, Namespace: ctrlcommon.MCONamespace, }, Data: map[string]string{ - BaseImagePullSecretNameConfigKey: "base-image-pull-secret", - FinalImagePushSecretNameConfigKey: "final-image-push-secret", - FinalImagePullspecConfigKey: expectedImagePullspecWithTag, + "images.json": `{"machineConfigOperator": "mco.image.pullspec"}`, }, } } diff --git a/pkg/controller/build/helpers.go b/pkg/controller/build/helpers.go index b1778b3884..94f6232132 100644 --- a/pkg/controller/build/helpers.go +++ b/pkg/controller/build/helpers.go @@ -5,7 +5,6 @@ import ( "compress/gzip" "context" "encoding/base64" - "encoding/json" "fmt" "io" "strings" @@ -21,21 +20,61 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" + 
"k8s.io/apimachinery/pkg/selection" k8stypes "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" corelisterv1 "k8s.io/client-go/listers/core/v1" ) -const ( - canonicalSecretSuffix string = "-canonical" - // This label gets applied to all secrets that we've canonicalized as a way - // to indicate that we created and own them. - canonicalSecretLabel string = "machineconfiguration.openshift.io/canonicalizedSecret" - // This label is applied to all canonicalized secrets. Its value should - // contain the original name of the secret that has been canonicalized. - originalSecretNameLabel string = "machineconfiguration.openshift.io/originalSecretName" -) +// Returns a selector with the appropriate labels for an OS build object label +// query. +func OSBuildSelector() labels.Selector { + return labelsToSelector([]string{ + OnClusterLayeringLabelKey, + RenderedMachineConfigLabelKey, + TargetMachineConfigPoolLabelKey, + }) +} + +// Returns a selector with the appropriate labels for an ephemeral build object +// label query. +func EphemeralBuildObjectSelector() labels.Selector { + return labelsToSelector([]string{ + EphemeralBuildObjectLabelKey, + OnClusterLayeringLabelKey, + RenderedMachineConfigLabelKey, + TargetMachineConfigPoolLabelKey, + }) +} + +// Returns a selector with the appropriate labels for a canonicalized secret +// label query. +func CanonicalizedSecretSelector() labels.Selector { + return labelsToSelector([]string{ + CanonicalSecretLabelKey, + OriginalSecretNameLabelKey, + OnClusterLayeringLabelKey, + }) +} + +// Takes a list of label keys and converts them into a Selector object that +// will require all label keys to be present. 
+func labelsToSelector(requiredLabels []string) labels.Selector { + reqs := []labels.Requirement{} + + for _, label := range requiredLabels { + req, err := labels.NewRequirement(label, selection.Exists, []string{}) + if err != nil { + panic(err) + } + + reqs = append(reqs, *req) + } + + return labels.NewSelector().Add(reqs...) +} // Determines if a secret has been canonicalized by us by checking both for the // suffix as well as the labels that we add to the canonicalized secret. @@ -45,22 +84,7 @@ func isCanonicalizedSecret(secret *corev1.Secret) bool { // Determines if a secret has our canonicalized secret label. func hasCanonicalizedSecretLabels(secret *corev1.Secret) bool { - if secret.Labels == nil { - return false - } - - labels := []string{ - canonicalSecretLabel, - originalSecretNameLabel, - } - - for _, label := range labels { - if _, ok := secret.Labels[label]; !ok { - return false - } - } - - return true + return CanonicalizedSecretSelector().Matches(labels.Set(secret.Labels)) } // Validates whether a secret is canonicalized. Returns an error if not. @@ -161,42 +185,6 @@ func ParseImagePullspec(pullspec, imageSHA string) (string, error) { return parseImagePullspecWithDigest(pullspec, imageDigest) } -// Converts a legacy Docker pull secret into a more modern representation. -// Essentially, it converts {"registry.hostname.com": {"username": "user"...}} -// into {"auths": {"registry.hostname.com": {"username": "user"...}}}. If it -// encounters a pull secret already in this configuration, it will return the -// input secret as-is. Returns either the supplied data or the newly-configured -// representation of said data, a boolean to indicate whether it was converted, -// and any errors resulting from the conversion process. 
-func canonicalizePullSecretBytes(secretBytes []byte) ([]byte, bool, error) { - type newStyleAuth struct { - Auths map[string]interface{} `json:"auths,omitempty"` - } - - // Try marshaling the new-style secret first: - newStyleDecoded := &newStyleAuth{} - if err := json.Unmarshal(secretBytes, newStyleDecoded); err != nil { - return nil, false, fmt.Errorf("could not decode new-style pull secret: %w", err) - } - - // We have an new-style secret, so we can just return here. - if len(newStyleDecoded.Auths) != 0 { - return secretBytes, false, nil - } - - // We need to convert the legacy-style secret to the new-style. - oldStyleDecoded := map[string]interface{}{} - if err := json.Unmarshal(secretBytes, &oldStyleDecoded); err != nil { - return nil, false, fmt.Errorf("could not decode legacy-style pull secret: %w", err) - } - - out, err := json.Marshal(&newStyleAuth{ - Auths: oldStyleDecoded, - }) - - return out, err == nil, err -} - // Performs the above operation upon a given secret, potentially creating a new // secret for insertion with the suffix '-canonical' on its name and a label // indicating that we've canonicalized it. @@ -222,22 +210,28 @@ func canonicalizePullSecret(secret *corev1.Secret) (*corev1.Secret, error) { return secret, nil } - out := &corev1.Secret{ + return newCanonicalSecret(secret, canonicalizedSecretBytes), nil +} + +// Creates a new canonicalized secret with the appropriate suffix, labels, etc. +// Does *not* validate whether the inputted secret bytes are in the correct +// format. 
+func newCanonicalSecret(secret *corev1.Secret, secretBytes []byte) *corev1.Secret { + return &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s%s", secret.Name, canonicalSecretSuffix), Namespace: secret.Namespace, Labels: map[string]string{ - canonicalSecretLabel: "", - originalSecretNameLabel: secret.Name, + CanonicalSecretLabelKey: "", + OriginalSecretNameLabelKey: secret.Name, + OnClusterLayeringLabelKey: "", }, }, Data: map[string][]byte{ - corev1.DockerConfigJsonKey: canonicalizedSecretBytes, + corev1.DockerConfigJsonKey: secretBytes, }, Type: corev1.SecretTypeDockerConfigJson, } - - return out, nil } // Looks up a given secret key for a given secret type and validates that the @@ -394,3 +388,30 @@ func validateSecret(secretGetter func(string) (*corev1.Secret, error), mosc *mcf return nil } + +// Determines if a given object was created by BuildController. This is mostly +// useful for tests and other helpers that may need to clean up after a failed +// run. It first determines if the object is an ephemeral build object, next it +// checks whether the object has all of the required labels, next it checks if +// the object is a canonicalized secret, and finally, it checks whether the +// object is a MachineOSBuild. 
+func IsObjectCreatedByBuildController(obj metav1.Object) bool { + if isEphemeralBuildObject(obj) { + return true + } + + if hasAllRequiredOSBuildLabels(obj.GetLabels()) { + return true + } + + secret, ok := obj.(*corev1.Secret) + if ok && isCanonicalizedSecret(secret) { + return true + } + + if _, ok := obj.(*mcfgv1alpha1.MachineOSBuild); ok { + return true + } + + return false +} diff --git a/pkg/controller/build/helpers_test.go b/pkg/controller/build/helpers_test.go index d4700d3191..742d102c4d 100644 --- a/pkg/controller/build/helpers_test.go +++ b/pkg/controller/build/helpers_test.go @@ -14,11 +14,9 @@ import ( ) func TestValidateImagePullspecHasDigest(t *testing.T) { - cm := getOSImageURLConfigMap() - validPullspecs := []string{ - cm.Data[baseOSContainerImageConfigKey], - cm.Data[baseOSExtensionsContainerImageConfigKey], + "registry.ci.openshift.org/ocp/4.14-2023-05-29-125629@sha256:12e89d631c0ca1700262583acfb856b6e7dbe94800cb38035d68ee5cc912411c", + "registry.ci.openshift.org/ocp/4.14-2023-05-29-125629@sha256:5b6d901069e640fc53d2e971fa1f4802bf9dea1a4ffba67b8a17eaa7d8dfa336", } for _, pullspec := range validPullspecs { @@ -167,6 +165,7 @@ func TestCanonicalizePullSecret(t *testing.T) { assert.Contains(t, out.Name, "canonical") assert.True(t, isCanonicalizedSecret(out)) assert.True(t, hasCanonicalizedSecretLabels(out)) + assert.True(t, IsObjectCreatedByBuildController(out)) } for _, val := range out.Data { @@ -244,3 +243,53 @@ func TestValidateOnClusterBuildConfig(t *testing.T) { }) } } + +func TestIsObjectCreatedByBuildController(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + obj metav1.Object + expected bool + }{ + { + name: "MachineOSBuild", + obj: &mcfgv1alpha1.MachineOSBuild{}, + expected: true, + }, + { + name: "Canonical Secret", + obj: newCanonicalSecret(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pull-secret", + }, + }, []byte{}), + expected: true, + }, + { + name: "Non-canonical secret", + obj: 
&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pull-secret", + }, + }, + }, + { + name: "Build pod", + obj: newImageBuildRequest(&mcfgv1alpha1.MachineOSConfig{}, &mcfgv1alpha1.MachineOSBuild{}).toBuildPod(), + expected: true, + }, + { + name: "Normal pod", + obj: &corev1.Pod{}, + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + assert.Equal(t, IsObjectCreatedByBuildController(testCase.obj), testCase.expected) + }) + } +} diff --git a/pkg/controller/build/image_build_request.go b/pkg/controller/build/image_build_request.go index 2cc999f79c..92452aa52e 100644 --- a/pkg/controller/build/image_build_request.go +++ b/pkg/controller/build/image_build_request.go @@ -12,12 +12,7 @@ import ( ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - mcPoolAnnotation string = "machineconfiguration.openshift.io/pool" - machineConfigJSONFilename string = "machineconfig.json.gz" - buildahImagePullspec string = "quay.io/buildah/stable:latest" + "k8s.io/apimachinery/pkg/labels" ) //go:embed assets/Containerfile.on-cluster-build-template @@ -42,6 +37,8 @@ type ImageInfo struct { // Represents the request to build a layered OS image. type ImageBuildRequest struct { + // The MCO image pullspec + MCOImagePullspec string // The target Build object MachineOSBuild *mcfgv1alpha1.MachineOSBuild // the cofig the build is based off of @@ -74,28 +71,6 @@ func newImageBuildRequest(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1 return ibr } -// Populates the base image info from both the on-cluster-build-config and -// machine-config-osimageurl ConfigMaps. 
-func newBaseImageInfo(osImageURL *corev1.ConfigMap, mosc *mcfgv1alpha1.MachineOSConfig) ImageInfo { - return ImageInfo{ - Pullspec: osImageURL.Data[baseOSContainerImageConfigKey], - PullSecret: corev1.LocalObjectReference{ - Name: mosc.Spec.BuildInputs.BaseImagePullSecret.Name, - }, - } -} - -// Populates the extensions image info from both the on-cluster-build-config -// and machine-config-osimageurl ConfigMaps. -func newExtensionsImageInfo(osImageURL *corev1.ConfigMap, mosc *mcfgv1alpha1.MachineOSConfig) ImageInfo { - return ImageInfo{ - Pullspec: osImageURL.Data[baseOSExtensionsContainerImageConfigKey], - PullSecret: corev1.LocalObjectReference{ - Name: mosc.Spec.BuildInputs.BaseImagePullSecret.Name, - }, - } -} - // Constructs an ImageBuildRequest with all of the images populated from ConfigMaps func newImageBuildRequestFromBuildInputs(mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig) ImageBuildRequest { ibr := &ImageBuildRequest{ @@ -212,6 +187,13 @@ func (i ImageBuildRequest) toBuildahPod() *corev1.Pod { Name: "DIGEST_CONFIGMAP_NAME", Value: i.getDigestConfigMapName(), }, + { + Name: "DIGEST_CONFIGMAP_LABELS", + // Gets the labels for all objects created by ImageBuildRequest, converts + // them into a string representation, and replaces the separating commas + // with spaces. 
+ Value: strings.ReplaceAll(labels.Set(i.getLabelsForObjectMeta()).String(), ",", " "), + }, { Name: "HOME", Value: "/home/build", @@ -228,6 +210,10 @@ func (i ImageBuildRequest) toBuildahPod() *corev1.Pod { Name: "FINAL_IMAGE_PUSH_CREDS", Value: "/tmp/final-image-push-creds/config.json", }, + { + Name: "BUILDAH_ISOLATION", + Value: "chroot", + }, } var uid int64 = 1000 @@ -354,16 +340,16 @@ func (i ImageBuildRequest) toBuildahPod() *corev1.Pod { }) volumeMounts = append(volumeMounts, corev1.VolumeMount{ - Name: etcPkiEntitlementSecretName, + Name: EtcPkiEntitlementSecretName, MountPath: mountPoint, }) volumes = append(volumes, corev1.Volume{ - Name: etcPkiEntitlementSecretName, + Name: EtcPkiEntitlementSecretName, VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ DefaultMode: &mountMode, - SecretName: etcPkiEntitlementSecretName, + SecretName: EtcPkiEntitlementSecretName, }, }, }) @@ -379,17 +365,17 @@ func (i ImageBuildRequest) toBuildahPod() *corev1.Pod { }) volumeMounts = append(volumeMounts, corev1.VolumeMount{ - Name: etcYumReposDConfigMapName, + Name: EtcYumReposDConfigMapName, MountPath: mountPoint, }) volumes = append(volumes, corev1.Volume{ - Name: etcYumReposDConfigMapName, + Name: EtcYumReposDConfigMapName, VolumeSource: corev1.VolumeSource{ ConfigMap: &corev1.ConfigMapVolumeSource{ DefaultMode: &mountMode, LocalObjectReference: corev1.LocalObjectReference{ - Name: etcYumReposDConfigMapName, + Name: EtcYumReposDConfigMapName, }, }, }, @@ -406,16 +392,16 @@ func (i ImageBuildRequest) toBuildahPod() *corev1.Pod { }) volumeMounts = append(volumeMounts, corev1.VolumeMount{ - Name: etcPkiRpmGpgSecretName, + Name: EtcPkiRpmGpgSecretName, MountPath: mountPoint, }) volumes = append(volumes, corev1.Volume{ - Name: etcPkiRpmGpgSecretName, + Name: EtcPkiRpmGpgSecretName, VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ DefaultMode: &mountMode, - SecretName: etcPkiRpmGpgSecretName, + SecretName: EtcPkiRpmGpgSecretName, }, 
}, }) @@ -437,9 +423,8 @@ func (i ImageBuildRequest) toBuildahPod() *corev1.Pod { Containers: []corev1.Container{ { // This container performs the image build / push process. - Name: "image-build", - // TODO: Figure out how to not hard-code this here. - Image: buildahImagePullspec, + Name: "image-build", + Image: i.MCOImagePullspec, Env: env, Command: append(command, buildahBuildScript), ImagePullPolicy: corev1.PullAlways, @@ -457,9 +442,9 @@ func (i ImageBuildRequest) toBuildahPod() *corev1.Pod { // ConfigMap from the digestfile that Buildah creates, which allows // us to avoid parsing log files. Name: "wait-for-done", - Env: env, Command: append(command, waitScript), Image: i.MachineOSConfig.Spec.BuildInputs.BaseOSImagePullspec, + Env: env, ImagePullPolicy: corev1.PullAlways, SecurityContext: securityContext, VolumeMounts: volumeMounts, @@ -471,36 +456,48 @@ func (i ImageBuildRequest) toBuildahPod() *corev1.Pod { } } -// Constructs a common metav1.ObjectMeta object with the namespace, labels, and annotations set. 
-func (i ImageBuildRequest) getObjectMeta(name string) metav1.ObjectMeta { - objectMeta := metav1.ObjectMeta{ - Name: name, - Namespace: ctrlcommon.MCONamespace, - Labels: map[string]string{ - ctrlcommon.OSImageBuildPodLabel: "", - targetMachineConfigPoolLabel: i.MachineOSConfig.Spec.MachineConfigPool.Name, - desiredConfigLabel: i.MachineOSBuild.Spec.DesiredConfig.Name, - }, - Annotations: map[string]string{ - mcPoolAnnotation: "", - }, +// Populates the labels map for all objects created by ImageBuildRequest +func (i ImageBuildRequest) getLabelsForObjectMeta() map[string]string { + return map[string]string{ + EphemeralBuildObjectLabelKey: "", + OnClusterLayeringLabelKey: "", + RenderedMachineConfigLabelKey: i.MachineOSBuild.Spec.DesiredConfig.Name, + TargetMachineConfigPoolLabelKey: i.MachineOSConfig.Spec.MachineConfigPool.Name, } +} - hasOptionalBuildInputTemplate := "machineconfiguration.openshift.io/has-%s" +// Populates the annotations map for all objects created by ImageBuildRequest. +// Conditionally sets annotations for entitled builds if the appropriate +// secrets / ConfigMaps are present. 
+func (i ImageBuildRequest) getAnnotationsForObjectMeta() map[string]string { + annos := map[string]string{ + machineOSConfigNameAnnotationKey: i.MachineOSConfig.Name, + machineOSBuildNameAnnotationKey: i.MachineOSBuild.Name, + } if i.HasEtcPkiEntitlementKeys { - objectMeta.Annotations[fmt.Sprintf(hasOptionalBuildInputTemplate, etcPkiEntitlementSecretName)] = "" + annos[EtcPkiEntitlementAnnotationKey] = "" } if i.HasEtcYumReposDConfigs { - objectMeta.Annotations[fmt.Sprintf(hasOptionalBuildInputTemplate, etcYumReposDConfigMapName)] = "" + annos[EtcYumReposDAnnotationKey] = "" } if i.HasEtcPkiRpmGpgKeys { - objectMeta.Annotations[fmt.Sprintf(hasOptionalBuildInputTemplate, etcPkiRpmGpgSecretName)] = "" + annos[EtcPkiRpmGpgAnnotationKey] = "" } - return objectMeta + return annos +} + +// Constructs a common metav1.ObjectMeta object with the namespace, labels, and annotations set. +func (i ImageBuildRequest) getObjectMeta(name string) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Name: name, + Namespace: ctrlcommon.MCONamespace, + Labels: i.getLabelsForObjectMeta(), + Annotations: i.getAnnotationsForObjectMeta(), + } } // Computes the Dockerfile ConfigMap name based upon the MachineConfigPool name. 
diff --git a/pkg/controller/build/image_build_request_test.go b/pkg/controller/build/image_build_request_test.go index 06cfbad837..fc5f87ea02 100644 --- a/pkg/controller/build/image_build_request_test.go +++ b/pkg/controller/build/image_build_request_test.go @@ -3,7 +3,9 @@ package build import ( "testing" + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // Tests that Image Build Requests is constructed as expected and does a @@ -40,6 +42,23 @@ func TestImageBuildRequest(t *testing.T) { assert.Equal(t, "dockerfile-rendered-worker-1", ibr.getDockerfileConfigMapName()) assert.Equal(t, "build-rendered-worker-1", ibr.getBuildName()) assert.Equal(t, "mc-rendered-worker-1", ibr.getMCConfigMapName()) + + buildPod := ibr.toBuildPod() + + mcConfigMap, err := ibr.toConfigMap(&mcfgv1.MachineConfig{}) + assert.NoError(t, err) + + objects := []metav1.Object{ + mcConfigMap, + dockerfileConfigmap, + buildPod, + } + + for _, object := range objects { + assert.True(t, isEphemeralBuildObject(object)) + assert.True(t, hasAllRequiredOSBuildLabels(object.GetLabels())) + assert.True(t, IsObjectCreatedByBuildController(object)) + } } // Tests that the Dockerfile is correctly rendered in the absence of the diff --git a/pkg/controller/common/constants.go b/pkg/controller/common/constants.go index f101b9e090..3a55246441 100644 --- a/pkg/controller/common/constants.go +++ b/pkg/controller/common/constants.go @@ -61,8 +61,6 @@ const ( // TODO(zzlotnik): Determine if we should use this still. ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey = "machineconfiguration.openshift.io/newestImageEquivalentConfig" - OSImageBuildPodLabel = "machineconfiguration.openshift.io/buildPod" - // InternalMCOIgnitionVersion is the ignition version that the MCO converts everything to internally. 
The intent here is that // we should be able to update this constant when we bump the internal ignition version instead of having to hunt down all of // the version references and figure out "was this supposed to be explicitly 3.4.0 or just the default version which happens @@ -90,3 +88,11 @@ const ( ServiceCARotateTrue = "true" ServiceCARotateFalse = "false" ) + +// Commonly-used MCO ConfigMap names +const ( + // The name of the machine-config-operator-images ConfigMap. + MachineConfigOperatorImagesConfigMapName string = "machine-config-operator-images" + // The name of the machine-config-osimageurl ConfigMap. + MachineConfigOSImageURLConfigMapName string = "machine-config-osimageurl" +) diff --git a/pkg/controller/common/images.go b/pkg/controller/common/images.go new file mode 100644 index 0000000000..9ac5e6620f --- /dev/null +++ b/pkg/controller/common/images.go @@ -0,0 +1,125 @@ +package common + +import ( + "fmt" + + "encoding/json" + + corev1 "k8s.io/api/core/v1" +) + +// Images contain data derived from what github.com/openshift/installer's +// bootkube.sh provides. 
If you want to add a new image, you need +// to "ratchet" the change as follows: +// +// Add the image here and also a CLI option with a default value +// Change the installer to pass that arg with the image from the CVO +// (some time later) Change the option to required and drop the default +type Images struct { + ReleaseVersion string `json:"releaseVersion,omitempty"` + RenderConfigImages + ControllerConfigImages +} + +// RenderConfigImages are image names used to render templates under ./manifests/ +type RenderConfigImages struct { + MachineConfigOperator string `json:"machineConfigOperator"` + // The new format image + BaseOSContainerImage string `json:"baseOSContainerImage"` + // The matching extensions container for the new format image + BaseOSExtensionsContainerImage string `json:"baseOSExtensionsContainerImage"` + // These have to be named differently from the ones in ControllerConfigImages + // or we get errors about ambiguous selectors because both structs are + // combined in the Images struct. + KeepalivedBootstrap string `json:"keepalived"` + CorednsBootstrap string `json:"coredns"` + BaremetalRuntimeCfgBootstrap string `json:"baremetalRuntimeCfg"` + OauthProxy string `json:"oauthProxy"` + KubeRbacProxy string `json:"kubeRbacProxy"` +} + +// ControllerConfigImages are image names used to render templates under ./templates/ +type ControllerConfigImages struct { + InfraImage string `json:"infraImage"` + Keepalived string `json:"keepalivedImage"` + Coredns string `json:"corednsImage"` + Haproxy string `json:"haproxyImage"` + BaremetalRuntimeCfg string `json:"baremetalRuntimeCfgImage"` +} + +// Parses the JSON blob containing the images information into an Images struct. 
+func ParseImagesFromBytes(in []byte) (*Images, error) { + img := &Images{} + + if err := json.Unmarshal(in, img); err != nil { + return nil, fmt.Errorf("could not parse images.json bytes: %w", err) + } + + return img, nil +} + +// Reads the contents of the provided ConfigMap into an Images struct. +func ParseImagesFromConfigMap(cm *corev1.ConfigMap) (*Images, error) { + if err := validateMCOConfigMap(cm, MachineConfigOperatorImagesConfigMapName, []string{"images.json"}, nil); err != nil { + return nil, err + } + + return ParseImagesFromBytes([]byte(cm.Data["images.json"])) +} + +// Holds the contents of the machine-config-osimageurl ConfigMap. +type OSImageURLConfig struct { + BaseOSContainerImage string + BaseOSExtensionsContainerImage string + OSImageURL string + ReleaseVersion string +} + +// Reads the contents of the provided ConfigMap into an OSImageURLConfig struct. +func ParseOSImageURLConfigMap(cm *corev1.ConfigMap) (*OSImageURLConfig, error) { + reqKeys := []string{"baseOSContainerImage", "baseOSExtensionsContainerImage", "osImageURL", "releaseVersion"} + + if err := validateMCOConfigMap(cm, MachineConfigOSImageURLConfigMapName, reqKeys, nil); err != nil { + return nil, err + } + + return &OSImageURLConfig{ + BaseOSContainerImage: cm.Data["baseOSContainerImage"], + BaseOSExtensionsContainerImage: cm.Data["baseOSExtensionsContainerImage"], + OSImageURL: cm.Data["osImageURL"], + ReleaseVersion: cm.Data["releaseVersion"], + }, nil +} + +// Validates a given ConfigMap in the MCO namespace. Valid in this case means the following: +// 1. The name matches what was provided. +// 2. The namespace is set to the MCO's namespace. +// 3. The data field has all of the expected keys. +// 4. The BinaryData field has all of the expected keys. 
+func validateMCOConfigMap(cm *corev1.ConfigMap, name string, reqDataKeys, reqBinaryKeys []string) error { + if cm.Name != name { + return fmt.Errorf("invalid ConfigMap, expected %s", name) + } + + if cm.Namespace != MCONamespace { + return fmt.Errorf("invalid namespace, expected %s", MCONamespace) + } + + if reqDataKeys != nil { + for _, reqKey := range reqDataKeys { + if _, ok := cm.Data[reqKey]; !ok { + return fmt.Errorf("expected missing data key %q to be present in ConfigMap %s", reqKey, cm.Name) + } + } + } + + if reqBinaryKeys != nil { + for _, reqKey := range reqBinaryKeys { + if _, ok := cm.BinaryData[reqKey]; !ok { + return fmt.Errorf("expecting missing binary data key %s to be present in ConfigMap %s", reqKey, cm.Name) + } + } + } + + return nil +} diff --git a/pkg/operator/bootstrap.go b/pkg/operator/bootstrap.go index 8ba6d64616..65fe57f32a 100644 --- a/pkg/operator/bootstrap.go +++ b/pkg/operator/bootstrap.go @@ -15,6 +15,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/scheme" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" templatectrl "github.com/openshift/machine-config-operator/pkg/controller/template" ) @@ -32,7 +33,7 @@ func RenderBootstrap( infraFile, networkFile, dnsFile, cloudConfigFile, cloudProviderCAFile, mcsCAFile, kubeAPIServerServingCA, pullSecretFile string, - imgs *Images, + imgs *ctrlcommon.Images, destinationDir, releaseImage string, ) error { filesData := map[string][]byte{} diff --git a/pkg/operator/images.go b/pkg/operator/images.go deleted file mode 100644 index 42bc7876cd..0000000000 --- a/pkg/operator/images.go +++ /dev/null @@ -1,40 +0,0 @@ -package operator - -// Images contain data derived from what github.com/openshift/installer's -// bootkube.sh provides. 
If you want to add a new image, you need -// to "ratchet" the change as follows: -// -// Add the image here and also a CLI option with a default value -// Change the installer to pass that arg with the image from the CVO -// (some time later) Change the option to required and drop the default -type Images struct { - ReleaseVersion string `json:"releaseVersion,omitempty"` - RenderConfigImages - ControllerConfigImages -} - -// RenderConfigImages are image names used to render templates under ./manifests/ -type RenderConfigImages struct { - MachineConfigOperator string `json:"machineConfigOperator"` - // The new format image - BaseOSContainerImage string `json:"baseOSContainerImage"` - // The matching extensions container for the new format image - BaseOSExtensionsContainerImage string `json:"baseOSExtensionsContainerImage"` - // These have to be named differently from the ones in ControllerConfigImages - // or we get errors about ambiguous selectors because both structs are - // combined in the Images struct. 
- KeepalivedBootstrap string `json:"keepalived"` - CorednsBootstrap string `json:"coredns"` - BaremetalRuntimeCfgBootstrap string `json:"baremetalRuntimeCfg"` - OauthProxy string `json:"oauthProxy"` - KubeRbacProxy string `json:"kubeRbacProxy"` -} - -// ControllerConfigImages are image names used to render templates under ./templates/ -type ControllerConfigImages struct { - InfraImage string `json:"infraImage"` - Keepalived string `json:"keepalivedImage"` - Coredns string `json:"corednsImage"` - Haproxy string `json:"haproxyImage"` - BaremetalRuntimeCfg string `json:"baremetalRuntimeCfgImage"` -} diff --git a/pkg/operator/operator.go b/pkg/operator/operator.go index c7d5847b6e..80ca257d58 100644 --- a/pkg/operator/operator.go +++ b/pkg/operator/operator.go @@ -57,9 +57,6 @@ const ( // // 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s maxRetries = 15 - - // osImageConfigMapName is the name of our configmap for the osImageURL - osImageConfigMapName = "machine-config-osimageurl" ) // Operator defines machince config operator. 
diff --git a/pkg/operator/render.go b/pkg/operator/render.go index bcf65461d7..190f2d7c6a 100644 --- a/pkg/operator/render.go +++ b/pkg/operator/render.go @@ -38,7 +38,7 @@ type renderConfig struct { ControllerConfig mcfgv1.ControllerConfigSpec KubeAPIServerServingCA string APIServerURL string - Images *RenderConfigImages + Images *ctrlcommon.RenderConfigImages Infra configv1.Infrastructure Constants map[string]string PointerConfig string diff --git a/pkg/operator/render_test.go b/pkg/operator/render_test.go index c06b3a7ad9..d161db8172 100644 --- a/pkg/operator/render_test.go +++ b/pkg/operator/render_test.go @@ -12,6 +12,7 @@ import ( "github.com/openshift/library-go/pkg/operator/resource/resourceread" mcfgv1resourceread "github.com/openshift/machine-config-operator/lib/resourceread" "github.com/openshift/machine-config-operator/manifests" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/util/sets" @@ -105,7 +106,7 @@ func TestRenderAllManifests(t *testing.T) { renderConfig := &renderConfig{ TargetNamespace: "testing-namespace", - Images: &RenderConfigImages{ + Images: &ctrlcommon.RenderConfigImages{ MachineConfigOperator: "mco-operator-image", KubeRbacProxy: "kube-rbac-proxy-image", KeepalivedBootstrap: "keepalived-bootstrap-image", @@ -141,7 +142,7 @@ func TestRenderAllManifests(t *testing.T) { Role string PointerConfig string ControllerConfig mcfgv1.ControllerConfigSpec - Images *RenderConfigImages + Images *ctrlcommon.RenderConfigImages }{ Role: "control-plane", PointerConfig: "cG9pbnRlci1jb25maWctZGF0YQo=", // This must be Base64-encoded @@ -236,7 +237,7 @@ func TestRenderAsset(t *testing.T) { RenderConfig: &renderConfig{ TargetNamespace: "testing-namespace", ReleaseVersion: "4.8.0-rc.0", - Images: &RenderConfigImages{ + Images: &ctrlcommon.RenderConfigImages{ MachineConfigOperator: "mco-operator-image", }, }, @@ -261,7 +262,7 
@@ func TestRenderAsset(t *testing.T) { MachineOSConfigs: nil, TargetNamespace: "testing-namespace", ReleaseVersion: "4.8.0-rc.0", - Images: &RenderConfigImages{ + Images: &ctrlcommon.RenderConfigImages{ MachineConfigOperator: "mco-operator-image", KubeRbacProxy: "kube-rbac-proxy-image", }, @@ -296,7 +297,7 @@ func TestRenderAsset(t *testing.T) { RenderConfig: &renderConfig{ TargetNamespace: "testing-namespace", ReleaseVersion: "4.8.0-rc.0", - Images: &RenderConfigImages{ + Images: &ctrlcommon.RenderConfigImages{ MachineConfigOperator: "mco-operator-image", }, }, @@ -310,7 +311,7 @@ func TestRenderAsset(t *testing.T) { RenderConfig: &renderConfig{ TargetNamespace: "testing-namespace", ReleaseVersion: "4.16.0-rc.1", - Images: &RenderConfigImages{ + Images: &ctrlcommon.RenderConfigImages{ MachineConfigOperator: "mco-operator-image", KubeRbacProxy: "kube-rbac-proxy-image", }, @@ -565,7 +566,7 @@ func TestRenderCloudAltDNSManifests(t *testing.T) { IngressLBIP2 := configv1.IP("10.10.10.5") renderConfig := &renderConfig{ TargetNamespace: "testing-namespace", - Images: &RenderConfigImages{ + Images: &ctrlcommon.RenderConfigImages{ MachineConfigOperator: "mco-operator-image", KubeRbacProxy: "kube-rbac-proxy-image", KeepalivedBootstrap: "keepalived-bootstrap-image", @@ -613,7 +614,7 @@ func TestRenderCloudAltDNSManifests(t *testing.T) { Role string PointerConfig string ControllerConfig mcfgv1.ControllerConfigSpec - Images *RenderConfigImages + Images *ctrlcommon.RenderConfigImages }{ Role: "control-plane", PointerConfig: "cG9pbnRlci1jb25maWctZGF0YQo=", // This must be Base64-encoded diff --git a/pkg/operator/sync.go b/pkg/operator/sync.go index a26d3ce203..920c5c67cc 100644 --- a/pkg/operator/sync.go +++ b/pkg/operator/sync.go @@ -296,8 +296,9 @@ func (optr *Operator) syncRenderConfig(_ *renderConfig, _ *configv1.ClusterOpera if err != nil { return err } - imgs := Images{} - if err := json.Unmarshal(imgsRaw, &imgs); err != nil { + + imgs, err := 
ctrlcommon.ParseImagesFromBytes(imgsRaw) + if err != nil { return err } @@ -1768,20 +1769,22 @@ func (optr *Operator) waitForControllerConfigToBeCompleted(resource *mcfgv1.Cont // getOsImageURLs returns (new type, new extensions, old type) for operating system update images. func (optr *Operator) getOsImageURLs(namespace string) (string, string, error) { - cm, err := optr.mcoCmLister.ConfigMaps(namespace).Get(osImageConfigMapName) + cm, err := optr.mcoCmLister.ConfigMaps(namespace).Get(ctrlcommon.MachineConfigOSImageURLConfigMapName) if err != nil { return "", "", err } - releaseVersion := cm.Data["releaseVersion"] - optrVersion, _ := optr.vStore.Get("operator") - if releaseVersion != optrVersion { - return "", "", fmt.Errorf("refusing to read osImageURL version %q, operator version %q", releaseVersion, optrVersion) + + cfg, err := ctrlcommon.ParseOSImageURLConfigMap(cm) + if err != nil { + return "", "", err } - baseOSImage := cm.Data["baseOSContainerImage"] - extensionsImage := cm.Data["baseOSExtensionsContainerImage"] + optrVersion, _ := optr.vStore.Get("operator") + if cfg.ReleaseVersion != optrVersion { + return "", "", fmt.Errorf("refusing to read osImageURL version %q, operator version %q", cfg.ReleaseVersion, optrVersion) + } - return baseOSImage, extensionsImage, nil + return cfg.BaseOSContainerImage, cfg.BaseOSExtensionsContainerImage, nil } func (optr *Operator) getCAsFromConfigMap(namespace, name, key string) ([]byte, error) { @@ -1937,7 +1940,7 @@ func setGVK(obj runtime.Object, scheme *runtime.Scheme) error { return nil } -func getRenderConfig(tnamespace, kubeAPIServerServingCA string, ccSpec *mcfgv1.ControllerConfigSpec, imgs *RenderConfigImages, apiServerURL string, pointerConfigData []byte, moscs []*mcfgv1alpha1.MachineOSConfig, apiServer *configv1.APIServer) *renderConfig { +func getRenderConfig(tnamespace, kubeAPIServerServingCA string, ccSpec *mcfgv1.ControllerConfigSpec, imgs *ctrlcommon.RenderConfigImages, apiServerURL string, 
pointerConfigData []byte, moscs []*mcfgv1alpha1.MachineOSConfig, apiServer *configv1.APIServer) *renderConfig { tlsMinVersion, tlsCipherSuites := ctrlcommon.GetSecurityProfileCiphersFromAPIServer(apiServer) return &renderConfig{ TargetNamespace: tnamespace, diff --git a/test/e2e-techpreview/helpers_test.go b/test/e2e-techpreview/helpers_test.go index 4a2632687d..330072c26a 100644 --- a/test/e2e-techpreview/helpers_test.go +++ b/test/e2e-techpreview/helpers_test.go @@ -17,6 +17,7 @@ import ( imagev1 "github.com/openshift/api/image/v1" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + "github.com/openshift/machine-config-operator/pkg/controller/build" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" "github.com/openshift/machine-config-operator/test/framework" "github.com/openshift/machine-config-operator/test/helpers" @@ -30,7 +31,13 @@ import ( "sigs.k8s.io/yaml" ) +const ( + clonedSecretLabelKey string = "machineconfiguration.openshift.io/cloned-secret" +) + func createMachineOSConfig(t *testing.T, cs *framework.ClientSet, mosc *mcfgv1alpha1.MachineOSConfig) func() { + helpers.SetMetadataOnObject(t, mosc) + _, err := cs.MachineconfigurationV1alpha1Interface.MachineOSConfigs().Create(context.TODO(), mosc, metav1.CreateOptions{}) require.NoError(t, err) @@ -42,109 +49,239 @@ func createMachineOSConfig(t *testing.T, cs *framework.ClientSet, mosc *mcfgv1al }) } -// Identifies a secret in the MCO namespace that has permissions to push to the ImageStream used for the test. -func getBuilderPushSecretName(cs *framework.ClientSet) (string, error) { - secrets, err := cs.CoreV1Interface.Secrets(ctrlcommon.MCONamespace).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return "", err +// Sets up the ImageStream in the desired namesspace. 
If in a different +// namespace than the MCO, it will create the namespace and clone the pull +// secret into the MCO namespace. Returns the name of the push secret used, the +// image pullspec, and an idempotent cleanup function. +func setupImageStream(t *testing.T, cs *framework.ClientSet, objMeta metav1.ObjectMeta) (string, string, func()) { + t.Helper() + + cleanups := helpers.NewCleanupFuncs() + + pushSecretName := "" + + // If no namespace is provided, default to the MCO namespace. + if objMeta.Namespace == "" { + objMeta.Namespace = ctrlcommon.MCONamespace } - for _, secret := range secrets.Items { - if strings.HasPrefix(secret.Name, "builder-dockercfg") { - return secret.Name, nil + builderSAObjMeta := metav1.ObjectMeta{ + Namespace: objMeta.Namespace, + Name: "builder", + } + + // If we're told to use a different namespace than the MCO namespace, we need + // to do some additional steps. + if objMeta.Namespace != ctrlcommon.MCONamespace { + // Create the namespace. + cleanups.Add(createNamespace(t, cs, objMeta)) + + // Get the builder secret name. + origPushSecretName, err := getBuilderPushSecretName(cs, builderSAObjMeta) + require.NoError(t, err) + + src := metav1.ObjectMeta{ + Name: origPushSecretName, + Namespace: objMeta.Namespace, + } + + dst := metav1.ObjectMeta{ + Name: "builder-push-secret", + Namespace: ctrlcommon.MCONamespace, } + + // Clone the builder secret into the MCO namespace. + cleanups.Add(cloneSecret(t, cs, src, dst)) + + pushSecretName = dst.Name + } else { + // If we're running in the MCO namespace, all we need to do is get the + // builder push secret name. + origPushSecretName, err := getBuilderPushSecretName(cs, builderSAObjMeta) + require.NoError(t, err) + + pushSecretName = origPushSecretName } - return "", fmt.Errorf("could not find matching secret name in namespace %s", ctrlcommon.MCONamespace) + // Create the Imagestream. 
+ pullspec, isCleanupFunc := createImagestream(t, cs, objMeta) + cleanups.Add(isCleanupFunc) + + return pushSecretName, pullspec, makeIdempotentAndRegister(t, cleanups.Run) } -// Gets the ImageStream pullspec for the ImageStream used for the test. -func getImagestreamPullspec(cs *framework.ClientSet, name string) (string, error) { - is, err := cs.ImageV1Interface.ImageStreams(ctrlcommon.MCONamespace).Get(context.TODO(), name, metav1.GetOptions{}) +// Gets the builder service account for a given namespace so we can get the +// image push secret that is bound to it. We poll for it because if this is +// done in a new namespace, there could be a slight delay between the time that +// the namespace is created and all of the various service accounts, etc. are +// created as well. This image push secret allows us to push to the +// ImageStream. +func getBuilderPushSecretName(cs *framework.ClientSet, objMeta metav1.ObjectMeta) (string, error) { + name := "" + + err := wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { + builderSA, err := cs.CoreV1Interface.ServiceAccounts(objMeta.Namespace).Get(context.TODO(), objMeta.Name, metav1.GetOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + return false, err + } + + if builderSA == nil { + return false, nil + } + + if len(builderSA.ImagePullSecrets) == 0 { + return false, nil + } + + if builderSA.ImagePullSecrets[0].Name == "" { + return false, nil + } + + name = builderSA.ImagePullSecrets[0].Name + return true, nil + }) + if err != nil { return "", err } - return fmt.Sprintf("%s:latest", is.Status.DockerImageRepository), nil + return name, nil } -// Creates an OpenShift ImageStream in the MCO namespace for the test and -// registers a cleanup function. -func createImagestream(t *testing.T, cs *framework.ClientSet, name string) func() { - is := &imagev1.ImageStream{ +// Creates a namespace. Returns an idempotent cleanup function. 
+func createNamespace(t *testing.T, cs *framework.ClientSet, objMeta metav1.ObjectMeta) func() { + ns := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ctrlcommon.MCONamespace, + Name: objMeta.Namespace, }, } - _, err := cs.ImageV1Interface.ImageStreams(ctrlcommon.MCONamespace).Create(context.TODO(), is, metav1.CreateOptions{}) - require.NoError(t, err) + helpers.SetMetadataOnObject(t, ns) - t.Logf("Created ImageStream %q", name) + _, err := cs.CoreV1Interface.Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) + require.NoError(t, err) + t.Logf("Created namespace %q", ns.Name) return makeIdempotentAndRegister(t, func() { - require.NoError(t, cs.ImageV1Interface.ImageStreams(ctrlcommon.MCONamespace).Delete(context.TODO(), name, metav1.DeleteOptions{})) - t.Logf("Deleted ImageStream %q", name) + require.NoError(t, cs.CoreV1Interface.Namespaces().Delete(context.TODO(), ns.Name, metav1.DeleteOptions{})) + t.Logf("Deleted namespace %q", ns.Name) }) } -// Creates the on-cluster-build-custom-dockerfile ConfigMap and registers a cleanup function. -func createCustomDockerfileConfigMap(t *testing.T, cs *framework.ClientSet, customDockerfiles map[string]string) func() { - return createConfigMap(t, cs, &corev1.ConfigMap{ +// Creates an OpenShift ImageStream in the MCO namespace for the test and +// registers a cleanup function. Returns the pullspec with the latest tag for +// the newly-created ImageStream. 
+func createImagestream(t *testing.T, cs *framework.ClientSet, objMeta metav1.ObjectMeta) (string, func()) { + is := &imagev1.ImageStream{ ObjectMeta: metav1.ObjectMeta{ - Name: "on-cluster-build-custom-dockerfile", - Namespace: ctrlcommon.MCONamespace, + Name: objMeta.Name, + Namespace: objMeta.Namespace, }, - Data: customDockerfiles, + } + + helpers.SetMetadataOnObject(t, is) + + created, err := cs.ImageV1Interface.ImageStreams(is.Namespace).Create(context.TODO(), is, metav1.CreateOptions{}) + require.NoError(t, err) + require.NotEmpty(t, created.Status.DockerImageRepository) + + pullspec := fmt.Sprintf("%s:latest", created.Status.DockerImageRepository) + t.Logf("Created ImageStream \"%s/%s\", has pullspec %q", is.Namespace, is.Name, pullspec) + + return pullspec, makeIdempotentAndRegister(t, func() { + require.NoError(t, cs.ImageV1Interface.ImageStreams(is.Namespace).Delete(context.TODO(), is.Name, metav1.DeleteOptions{})) + t.Logf("Deleted ImageStream \"%s/%s\"", is.Namespace, is.Name) }) } // Creates a given ConfigMap and registers a cleanup function to delete it. 
func createConfigMap(t *testing.T, cs *framework.ClientSet, cm *corev1.ConfigMap) func() { - _, err := cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Create(context.TODO(), cm, metav1.CreateOptions{}) + helpers.SetMetadataOnObject(t, cm) + + _, err := cs.CoreV1Interface.ConfigMaps(cm.Namespace).Create(context.TODO(), cm, metav1.CreateOptions{}) require.NoError(t, err) - t.Logf("Created ConfigMap %q", cm.Name) + t.Logf("Created ConfigMap \"%s/%s\"", cm.Namespace, cm.Name) return makeIdempotentAndRegister(t, func() { - require.NoError(t, cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Delete(context.TODO(), cm.Name, metav1.DeleteOptions{})) - t.Logf("Deleted ConfigMap %q", cm.Name) + require.NoError(t, cs.CoreV1Interface.ConfigMaps(cm.Namespace).Delete(context.TODO(), cm.Name, metav1.DeleteOptions{})) + t.Logf("Deleted ConfigMap \"%s/%s\"", cm.Namespace, cm.Name) }) } // Creates a given Secret and registers a cleanup function to delete it. func createSecret(t *testing.T, cs *framework.ClientSet, secret *corev1.Secret) func() { - _, err := cs.CoreV1Interface.Secrets(ctrlcommon.MCONamespace).Create(context.TODO(), secret, metav1.CreateOptions{}) + helpers.SetMetadataOnObject(t, secret) + + _, err := cs.CoreV1Interface.Secrets(secret.Namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) require.NoError(t, err) - t.Logf("Created secret %q", secret.Name) + t.Logf("Created secret \"%s/%s\"", secret.Namespace, secret.Name) return makeIdempotentAndRegister(t, func() { require.NoError(t, cs.CoreV1Interface.Secrets(ctrlcommon.MCONamespace).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{})) - t.Logf("Deleted secret %q", secret.Name) + t.Logf("Deleted secret \"%s/%s\"", secret.Namespace, secret.Name) }) } // Copies the global pull secret from openshift-config/pull-secret into the MCO // namespace so that it can be used by the build processes. 
func copyGlobalPullSecret(t *testing.T, cs *framework.ClientSet) func() { - return cloneSecret(t, cs, "pull-secret", "openshift-config", globalPullSecretCloneName, ctrlcommon.MCONamespace) + src := metav1.ObjectMeta{ + Name: "pull-secret", + Namespace: "openshift-config", + } + + dst := metav1.ObjectMeta{ + Name: globalPullSecretCloneName, + Namespace: ctrlcommon.MCONamespace, + } + + return cloneSecret(t, cs, src, dst) } -func waitForMachineOSBuildToReachState(t *testing.T, cs *framework.ClientSet, poolName string, condFunc func(*mcfgv1alpha1.MachineOSBuild, error) (bool, error)) { +// Computes the name of the currently-running MachineOSBuild given the name of a MachineConfigPool. +func getMachineOSBuildNameForPool(cs *framework.ClientSet, poolName string) (string, error) { mcp, err := cs.MachineconfigurationV1Interface.MachineConfigPools().Get(context.TODO(), poolName, metav1.GetOptions{}) - require.NoError(t, err) + if err != nil { + return "", err + } + + return fmt.Sprintf("%s-%s-builder", poolName, mcp.Spec.Configuration.Name), nil +} - mosbName := fmt.Sprintf("%s-%s-builder", poolName, mcp.Spec.Configuration.Name) +// Waits for the provided MachineOSBuild to reach the desired state as +// determined by the condFunc provided by the caller. Will return the +// MachineOSBuild object in the final state. 
+func waitForMachineOSBuildToReachState(t *testing.T, cs *framework.ClientSet, mosb *mcfgv1alpha1.MachineOSBuild, condFunc func(*mcfgv1alpha1.MachineOSBuild, error) (bool, error)) *mcfgv1alpha1.MachineOSBuild { + mosbName := mosb.Name - err = wait.PollImmediate(1*time.Second, 10*time.Minute, func() (bool, error) { + var finalMosb *mcfgv1alpha1.MachineOSBuild + + start := time.Now() + + err := wait.PollImmediate(1*time.Second, 10*time.Minute, func() (bool, error) { mosb, err := cs.MachineconfigurationV1alpha1Interface.MachineOSBuilds().Get(context.TODO(), mosbName, metav1.GetOptions{}) + // Pass in a DeepCopy of the object so that the caller cannot inadvertently + // mutate it and cause false results. + reached, err := condFunc(mosb.DeepCopy(), err) + if reached { + // If we've reached the desired state, grab the MachineOSBuild to return + // to the caller. + finalMosb = mosb + } - return condFunc(mosb, err) + // Return these as-is. + return reached, err }) + if err == nil { + t.Logf("MachineOSBuild %q reached desired state after %s", mosbName, time.Since(start)) + } + require.NoError(t, err, "MachineOSBuild %q did not reach desired state", mosbName) + + return finalMosb } // Waits for the target MachineConfigPool to reach a state defined in a supplied function. @@ -175,8 +312,15 @@ func makeIdempotentAndRegister(t *testing.T, cleanupFunc func()) func() { // TOOD: Refactor into smaller functions. func cleanupEphemeralBuildObjects(t *testing.T, cs *framework.ClientSet) { - // TODO: Instantiate this by using the label selector library. - labelSelector := "machineconfiguration.openshift.io/desiredConfig,machineconfiguration.openshift.io/buildPod,machineconfiguration.openshift.io/targetMachineConfigPool" + labelSelector := build.OSBuildSelector().String() + + // Any secrets that get created by BuildController should have different + // label selectors since they're produced differently. 
+ secretList, err := cs.CoreV1Interface.Secrets(ctrlcommon.MCONamespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: build.CanonicalizedSecretSelector().String(), + }) + + require.NoError(t, err) cmList, err := cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).List(context.TODO(), metav1.ListOptions{ LabelSelector: labelSelector, @@ -196,6 +340,10 @@ func cleanupEphemeralBuildObjects(t *testing.T, cs *framework.ClientSet) { moscList, err := cs.MachineconfigurationV1alpha1Interface.MachineOSConfigs().List(context.TODO(), metav1.ListOptions{}) require.NoError(t, err) + if len(secretList.Items) == 0 { + t.Logf("No build-time secrets to clean up") + } + if len(cmList.Items) == 0 { t.Logf("No ephemeral ConfigMaps to clean up") } @@ -212,6 +360,11 @@ func cleanupEphemeralBuildObjects(t *testing.T, cs *framework.ClientSet) { t.Logf("No MachineOSConfigs to clean up") } + for _, item := range secretList.Items { + t.Logf("Cleaning up build-time Secret %s", item.Name) + require.NoError(t, cs.CoreV1Interface.Secrets(ctrlcommon.MCONamespace).Delete(context.TODO(), item.Name, metav1.DeleteOptions{})) + } + for _, item := range cmList.Items { t.Logf("Cleaning up ephemeral ConfigMap %q", item.Name) require.NoError(t, cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Delete(context.TODO(), item.Name, metav1.DeleteOptions{})) @@ -251,15 +404,15 @@ func getBuildArtifactDir(t *testing.T) string { return cwd } -// Writes any ephemeral ConfigMaps that got created as part of the build -// process to a file. Also writes the build pod spec. +// Writes any ephemeral build objects to disk as YAML files. 
func writeBuildArtifactsToFiles(t *testing.T, cs *framework.ClientSet, poolName string) { - pool, err := cs.MachineconfigurationV1Interface.MachineConfigPools().Get(context.TODO(), poolName, metav1.GetOptions{}) - require.NoError(t, err) + lo := metav1.ListOptions{ + LabelSelector: build.OSBuildSelector().String(), + } - err = aggerrs.NewAggregate([]error{ - writeConfigMapsToFile(t, cs, pool), - writePodSpecToFile(t, cs, pool), + err := aggerrs.NewAggregate([]error{ + writeConfigMapsToFile(t, cs, lo), + writeBuildPodsToFile(t, cs, lo), writeMachineOSBuildsToFile(t, cs), writeMachineOSConfigsToFile(t, cs), }) @@ -279,16 +432,7 @@ func writeMachineOSBuildsToFile(t *testing.T, cs *framework.ClientSet) error { return nil } - for _, mosb := range mosbList.Items { - mosb := mosb - filename := fmt.Sprintf("%s-%s-MachineOSBuild.yaml", t.Name(), mosb.Name) - t.Logf("Writing MachineOSBuild %s to %s", mosb.Name, filename) - if err := dumpObjectToYAMLFile(t, &mosb, filename); err != nil { - return err - } - } - - return nil + return dumpObjectToYAMLFile(t, mosbList, "machineosbuilds") } // Writes all MachineOSConfigs to a file. @@ -303,97 +447,60 @@ func writeMachineOSConfigsToFile(t *testing.T, cs *framework.ClientSet) error { return nil } - for _, mosc := range moscList.Items { - mosc := mosc - filename := fmt.Sprintf("%s-%s-MachineOSConfig.yaml", t.Name(), mosc.Name) - t.Logf("Writing MachineOSConfig %s to %s", mosc.Name, filename) - if err := dumpObjectToYAMLFile(t, &mosc, filename); err != nil { - return err - } - } - - return nil + return dumpObjectToYAMLFile(t, moscList, "machineosconfigs") } -// Writes the ephemeral ConfigMaps to a file, if found. 
-func writeConfigMapsToFile(t *testing.T, cs *framework.ClientSet, pool *mcfgv1.MachineConfigPool) error { - configmaps := []string{ - fmt.Sprintf("dockerfile-%s", pool.Spec.Configuration.Name), - fmt.Sprintf("mc-%s", pool.Spec.Configuration.Name), - fmt.Sprintf("digest-%s", pool.Spec.Configuration.Name), - } +// Writes all ConfigMaps that match the OS Build labels to files. +func writeConfigMapsToFile(t *testing.T, cs *framework.ClientSet, lo metav1.ListOptions) error { + cmList, err := cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).List(context.TODO(), lo) - dirPath := getBuildArtifactDir(t) - - for _, configmap := range configmaps { - if err := writeConfigMapToFile(t, cs, configmap, dirPath); err != nil { - return err - } + if err != nil { + return err } - return nil -} - -// Writes a given ConfigMap to a file. -func writeConfigMapToFile(t *testing.T, cs *framework.ClientSet, configmapName, dirPath string) error { - cm, err := cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), configmapName, metav1.GetOptions{}) - if k8serrors.IsNotFound(err) { - t.Logf("ConfigMap %q not found, skipping retrieval", configmapName) + if len(cmList.Items) == 0 { + t.Logf("No ConfigMaps matching label selector %q found", lo.LabelSelector) return nil } - if err != nil && !k8serrors.IsNotFound(err) { - return fmt.Errorf("could not get configmap %s: %w", configmapName, err) - } - - filename := filepath.Join(dirPath, fmt.Sprintf("%s-%s-configmap.yaml", t.Name(), configmapName)) - t.Logf("Writing configmap (%s) contents to %s", configmapName, filename) - return dumpObjectToYAMLFile(t, cm, filename) + return dumpObjectToYAMLFile(t, cmList, "configmaps") } -// Wrttes a pod spec to a file. 
-func writePodSpecToFile(t *testing.T, cs *framework.ClientSet, pool *mcfgv1.MachineConfigPool) error { - dirPath := getBuildArtifactDir(t) - - podName := fmt.Sprintf("build-%s", pool.Spec.Configuration.Name) - - pod, err := cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace).Get(context.TODO(), podName, metav1.GetOptions{}) - if err == nil { - podFilename := filepath.Join(dirPath, fmt.Sprintf("%s-%s-pod.yaml", t.Name(), pod.Name)) - t.Logf("Writing spec for pod %s to %s", pod.Name, podFilename) - return dumpObjectToYAMLFile(t, pod, podFilename) + // Writes all pod specs that match the OS Build labels to files. +func writeBuildPodsToFile(t *testing.T, cs *framework.ClientSet, lo metav1.ListOptions) error { + podList, err := cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace).List(context.TODO(), lo) + if err != nil { + return err } - if k8serrors.IsNotFound(err) { - t.Logf("Pod spec for %s not found, skipping", pod.Name) + if len(podList.Items) == 0 { + t.Logf("No pods matching label selector %q found", lo.LabelSelector) return nil } - return err + return dumpObjectToYAMLFile(t, podList, "pods") } // Dumps a struct to the provided filename in YAML format, while ensuring that // the filename contains both the name of the current-running test as well as // the destination directory path. -func dumpObjectToYAMLFile(t *testing.T, obj interface{ GetName() string }, filename string) error { +func dumpObjectToYAMLFile(t *testing.T, obj interface{}, kind string) error { dirPath := getBuildArtifactDir(t) - // If we don't have the name of the test embedded in the filename, add it. - if !strings.Contains(filename, t.Name()) { - filename = fmt.Sprintf("%s-%s", t.Name(), filename) - } - - // If we don't have the destination directory in the filename, add it. 
- if !strings.Contains(filename, dirPath) { - filename = filepath.Join(dirPath, filename) - } + filename := fmt.Sprintf("%s-%s.yaml", t.Name(), kind) + filename = filepath.Join(dirPath, filename) out, err := yaml.Marshal(obj) if err != nil { return err } - return os.WriteFile(filename, out, 0o755) + err = os.WriteFile(filename, out, 0o755) + if err == nil { + t.Logf("Wrote %s", filename) + } + + return err } // Streams the logs from the Machine OS Builder pod containers to a set of @@ -412,8 +519,8 @@ func streamMachineOSBuilderPodLogsToFile(ctx context.Context, t *testing.T, cs * // Streams the logs for all of the containers running in the build pod. The pod // logs can provide a valuable window into how / why a given build failed. -func streamBuildPodLogsToFile(ctx context.Context, t *testing.T, cs *framework.ClientSet, pool *mcfgv1.MachineConfigPool) error { - podName := fmt.Sprintf("build-%s", pool.Spec.Configuration.Name) +func streamBuildPodLogsToFile(ctx context.Context, t *testing.T, cs *framework.ClientSet, mosb *mcfgv1alpha1.MachineOSBuild) error { + podName := mosb.Status.BuilderReference.PodImageBuilder.Name pod, err := cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace).Get(ctx, podName, metav1.GetOptions{}) if err != nil { @@ -554,21 +661,28 @@ func convertFilesFromContainerImageToBytesMap(t *testing.T, pullspec, containerF // // Registers and returns a cleanup function to remove the certificate(s) after test completion. 
func copyEntitlementCerts(t *testing.T, cs *framework.ClientSet) func() { - namespace := "openshift-config-managed" - name := "etc-pki-entitlement" + src := metav1.ObjectMeta{ + Name: "etc-pki-entitlement", + Namespace: "openshift-config-managed", + } + + dst := metav1.ObjectMeta{ + Name: src.Name, + Namespace: ctrlcommon.MCONamespace, + } - _, err := cs.CoreV1Interface.Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + _, err := cs.CoreV1Interface.Secrets(src.Namespace).Get(context.TODO(), src.Name, metav1.GetOptions{}) if err == nil { - return cloneSecret(t, cs, name, namespace, name, ctrlcommon.MCONamespace) + return cloneSecret(t, cs, src, dst) } if k8serrors.IsNotFound(err) { - t.Logf("Secret %q not found in %q, skipping test", name, namespace) + t.Logf("Secret %q not found in %q, skipping test", src.Name, src.Namespace) t.Skip() return func() {} } - t.Fatalf("could not get %q from %q: %s", name, namespace, err) + t.Fatalf("could not get %q from %q: %s", src.Name, src.Namespace, err) return func() {} } @@ -617,24 +731,27 @@ func injectYumRepos(t *testing.T, cs *framework.ClientSet) func() { // Clones a given secret from a given namespace into the MCO namespace. // Registers and returns a cleanup function to delete the secret upon test // completion. 
-func cloneSecret(t *testing.T, cs *framework.ClientSet, srcName, srcNamespace, dstName, dstNamespace string) func() { - secret, err := cs.CoreV1Interface.Secrets(srcNamespace).Get(context.TODO(), srcName, metav1.GetOptions{}) +func cloneSecret(t *testing.T, cs *framework.ClientSet, src, dst metav1.ObjectMeta) func() { + secret, err := cs.CoreV1Interface.Secrets(src.Namespace).Get(context.TODO(), src.Name, metav1.GetOptions{}) require.NoError(t, err) secretCopy := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: dstName, - Namespace: dstNamespace, + Name: dst.Name, + Namespace: dst.Namespace, + Labels: map[string]string{ + clonedSecretLabelKey: "", + }, }, Data: secret.Data, Type: secret.Type, } cleanup := createSecret(t, cs, secretCopy) - t.Logf("Cloned \"%s/%s\" to \"%s/%s\"", srcNamespace, srcName, dstNamespace, dstName) + t.Logf("Cloned \"%s/%s\" to \"%s/%s\"", src.Namespace, src.Name, dst.Namespace, dst.Name) return makeIdempotentAndRegister(t, func() { cleanup() - t.Logf("Deleted cloned secret \"%s/%s\"", dstNamespace, dstName) + t.Logf("Deleted cloned secret \"%s/%s\"", dst.Namespace, dst.Name) }) } diff --git a/test/e2e-techpreview/onclusterbuild_test.go b/test/e2e-techpreview/onclusterlayering_test.go similarity index 79% rename from test/e2e-techpreview/onclusterbuild_test.go rename to test/e2e-techpreview/onclusterlayering_test.go index bdfd478890..193a8115d8 100644 --- a/test/e2e-techpreview/onclusterbuild_test.go +++ b/test/e2e-techpreview/onclusterlayering_test.go @@ -15,9 +15,9 @@ import ( mcfgv1 "github.com/openshift/api/machineconfiguration/v1" mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" - "github.com/openshift/machine-config-operator/pkg/controller/build" "github.com/openshift/machine-config-operator/test/framework" "github.com/openshift/machine-config-operator/test/helpers" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ctrlcommon 
"github.com/openshift/machine-config-operator/pkg/controller/common" @@ -31,9 +31,6 @@ const ( // The MachineConfigPool to create for the tests. layeredMCPName string = "layered" - // The ImageStream name to use for the tests. - imagestreamName string = "os-image" - // The name of the global pull secret copy to use for the tests. globalPullSecretCloneName string = "global-pull-secret-copy" ) @@ -68,9 +65,9 @@ func init() { } // Holds elements common for each on-cluster build tests. -type onClusterBuildTestOpts struct { +type onClusterLayeringTestOpts struct { // Which image builder type to use for the test. - imageBuilderType build.ImageBuilderType + imageBuilderType mcfgv1alpha1.MachineOSImageBuilderType // The custom Dockerfiles to use for the test. This is a map of MachineConfigPool name to Dockerfile content. customDockerfiles map[string]string @@ -91,7 +88,7 @@ type onClusterBuildTestOpts struct { func TestOnClusterBuildsOnOKD(t *testing.T) { skipOnOCP(t) - runOnClusterBuildTest(t, onClusterBuildTestOpts{ + runOnClusterLayeringTest(t, onClusterLayeringTestOpts{ poolName: layeredMCPName, customDockerfiles: map[string]string{ layeredMCPName: okdFcosDockerfile, @@ -101,7 +98,7 @@ func TestOnClusterBuildsOnOKD(t *testing.T) { // Tests tha an on-cluster build can be performed with the Custom Pod Builder. func TestOnClusterBuildsCustomPodBuilder(t *testing.T) { - runOnClusterBuildTest(t, onClusterBuildTestOpts{ + runOnClusterLayeringTest(t, onClusterLayeringTestOpts{ poolName: layeredMCPName, customDockerfiles: map[string]string{ layeredMCPName: cowsayDockerfile, @@ -112,7 +109,7 @@ func TestOnClusterBuildsCustomPodBuilder(t *testing.T) { // Tests that an on-cluster build can be performed and that the resulting image // is rolled out to an opted-in node. 
func TestOnClusterBuildRollsOutImage(t *testing.T) { - imagePullspec := runOnClusterBuildTest(t, onClusterBuildTestOpts{ + imagePullspec := runOnClusterLayeringTest(t, onClusterLayeringTestOpts{ poolName: layeredMCPName, customDockerfiles: map[string]string{ layeredMCPName: cowsayDockerfile, @@ -138,7 +135,7 @@ func TestOnClusterBuildRollsOutImage(t *testing.T) { // simulating a build where someone has added this content; usually a Red Hat // Satellite user. func TestYumReposBuilds(t *testing.T) { - runOnClusterBuildTest(t, onClusterBuildTestOpts{ + runOnClusterLayeringTest(t, onClusterLayeringTestOpts{ poolName: layeredMCPName, customDockerfiles: map[string]string{ layeredMCPName: yumReposDockerfile, @@ -153,7 +150,7 @@ func TestYumReposBuilds(t *testing.T) { func TestEntitledBuilds(t *testing.T) { skipOnOKD(t) - runOnClusterBuildTest(t, onClusterBuildTestOpts{ + runOnClusterLayeringTest(t, onClusterLayeringTestOpts{ poolName: layeredMCPName, customDockerfiles: map[string]string{ layeredMCPName: entitledDockerfile, @@ -176,7 +173,8 @@ func TestMCDGetsMachineOSConfigSecrets(t *testing.T) { // we just need to make sure it lands on the node. t.Cleanup(createSecret(t, cs, &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: secretName, + Name: secretName, + Namespace: ctrlcommon.MCONamespace, }, Type: corev1.SecretTypeDockerConfigJson, Data: map[string][]byte{ @@ -191,7 +189,7 @@ func TestMCDGetsMachineOSConfigSecrets(t *testing.T) { // Set up all of the objects needed for the build, including getting (but not // yet applying) the MachineOSConfig. - mosc := prepareForTest(t, cs, onClusterBuildTestOpts{ + mosc := prepareForOnClusterLayeringTest(t, cs, onClusterLayeringTestOpts{ poolName: layeredMCPName, customDockerfiles: map[string]string{ layeredMCPName: yumReposDockerfile, @@ -362,7 +360,7 @@ func isMcdPodRunning(pod *corev1.Pod) bool { // Sets up and performs an on-cluster build for a given set of parameters. 
// Returns the built image pullspec for later consumption. -func runOnClusterBuildTest(t *testing.T, testOpts onClusterBuildTestOpts) string { +func runOnClusterLayeringTest(t *testing.T, testOpts onClusterLayeringTestOpts) string { ctx, cancel := context.WithCancel(context.Background()) cancel = makeIdempotentAndRegister(t, cancel) @@ -370,13 +368,15 @@ func runOnClusterBuildTest(t *testing.T, testOpts onClusterBuildTestOpts) string imageBuilder := testOpts.imageBuilderType if testOpts.imageBuilderType == "" { - imageBuilder = build.CustomPodImageBuilder + imageBuilder = mcfgv1alpha1.PodBuilder } t.Logf("Running with ImageBuilder type: %s", imageBuilder) - mosc := prepareForTest(t, cs, testOpts) + mosc := prepareForOnClusterLayeringTest(t, cs, testOpts) + // Create our MachineOSConfig and ensure that it is deleted after the test is + // finished. t.Cleanup(createMachineOSConfig(t, cs, mosc)) // Create a child context for the machine-os-builder pod log streamer. We @@ -385,7 +385,12 @@ func runOnClusterBuildTest(t *testing.T, testOpts onClusterBuildTestOpts) string mobPodStreamerCtx, mobPodStreamerCancel := context.WithCancel(ctx) t.Cleanup(mobPodStreamerCancel) - waitForBuildToStart(t, cs, testOpts.poolName) + // Wait for the build to start + startedBuild := waitForBuildToStart(t, cs, testOpts.poolName) + t.Logf("MachineOSBuild %q has started", startedBuild.Name) + + // Assert that the build pod has certain properties and configuration. + assertBuildPodIsAsExpected(t, cs, startedBuild) t.Logf("Waiting for build completion...") @@ -400,6 +405,8 @@ func runOnClusterBuildTest(t *testing.T, testOpts onClusterBuildTestOpts) string buildPodWatcherShutdown := makeIdempotentAndRegister(t, buildPodStreamerCancel) defer buildPodWatcherShutdown() + // In the event of a test failure, we want to dump all of the build artifacts + // to files for easy reference later. 
t.Cleanup(func() { if t.Failed() { writeBuildArtifactsToFiles(t, cs, testOpts.poolName) @@ -410,9 +417,7 @@ func runOnClusterBuildTest(t *testing.T, testOpts onClusterBuildTestOpts) string // for each container in the build pod. So they must run in a separate // Goroutine so that the rest of the test can continue. go func() { - pool, err := cs.MachineconfigurationV1Interface.MachineConfigPools().Get(buildPodStreamerCtx, testOpts.poolName, metav1.GetOptions{}) - require.NoError(t, err) - err = streamBuildPodLogsToFile(buildPodStreamerCtx, t, cs, pool) + err := streamBuildPodLogsToFile(buildPodStreamerCtx, t, cs, startedBuild) require.NoError(t, err, "expected no error, got %s", err) }() @@ -425,36 +430,33 @@ func runOnClusterBuildTest(t *testing.T, testOpts onClusterBuildTestOpts) string require.NoError(t, err, "expected no error, got: %s", err) }() - var build *mcfgv1alpha1.MachineOSBuild - waitForMachineOSBuildToReachState(t, cs, testOpts.poolName, func(mosb *mcfgv1alpha1.MachineOSBuild, err error) (bool, error) { - if err != nil { - return false, err - } - - build = mosb - - state := ctrlcommon.NewMachineOSBuildState(mosb) - - if state.IsBuildFailure() { - t.Fatalf("MachineOSBuild %q unexpectedly failed", mosb.Name) - } + // Wait for the build to complete. + finishedBuild := waitForBuildToComplete(t, cs, startedBuild) - return state.IsBuildSuccess(), nil - }) + t.Logf("MachineOSBuild %q has completed and produced image: %s", finishedBuild.Name, finishedBuild.Status.FinalImagePushspec) - t.Logf("MachineOSBuild %q has finished building. Got image: %s", build.Name, build.Status.FinalImagePushspec) - - return build.Status.FinalImagePushspec + return finishedBuild.Status.FinalImagePushspec } -func waitForBuildToStart(t *testing.T, cs *framework.ClientSet, poolName string) { +// Waits for the build to start and returns the started MachineOSBuild object. 
+func waitForBuildToStart(t *testing.T, cs *framework.ClientSet, poolName string) *mcfgv1alpha1.MachineOSBuild { t.Helper() t.Logf("Wait for build to start") - start := time.Now() + // Get the name for the MachineOSBuild based upon the MachineConfigPool name. + mosbName, err := getMachineOSBuildNameForPool(cs, poolName) + require.NoError(t, err) - waitForMachineOSBuildToReachState(t, cs, poolName, func(mosb *mcfgv1alpha1.MachineOSBuild, err error) (bool, error) { + // Create a "dummy" MachineOSBuild object with just the name field set so + // that waitForMachineOSBuildToReachState() can use it. + mosb := &mcfgv1alpha1.MachineOSBuild{ + ObjectMeta: metav1.ObjectMeta{ + Name: mosbName, + }, + } + + return waitForMachineOSBuildToReachState(t, cs, mosb, func(mosb *mcfgv1alpha1.MachineOSBuild, err error) (bool, error) { // If we had any errors retrieving the build, (other than it not existing yet), stop here. if err != nil && !k8serrors.IsNotFound(err) { return false, err @@ -467,10 +469,78 @@ func waitForBuildToStart(t *testing.T, cs *framework.ClientSet, poolName string) } // At this point, the build object exists and we want to ensure that it is running. - return ctrlcommon.NewMachineOSBuildState(mosb).IsBuilding(), nil + if ctrlcommon.NewMachineOSBuildState(mosb).IsBuilding() { + return true, nil + } + + return false, nil + }) +} + +// Waits for the given MachineOSBuild to complete and returns the completed +// MachineOSBuild object. 
+func waitForBuildToComplete(t *testing.T, cs *framework.ClientSet, startedBuild *mcfgv1alpha1.MachineOSBuild) *mcfgv1alpha1.MachineOSBuild { + return waitForMachineOSBuildToReachState(t, cs, startedBuild, func(mosb *mcfgv1alpha1.MachineOSBuild, err error) (bool, error) { + if err != nil { + return false, err + } + + state := ctrlcommon.NewMachineOSBuildState(mosb) + + if state.IsBuildFailure() { + t.Fatalf("MachineOSBuild %q unexpectedly failed", mosb.Name) + } + + return state.IsBuildSuccess(), nil }) +} + +// Validates that the build pod is configured correctly. In this case, +// "correctly" means that it has the correct container images. Future +// assertions could include things like ensuring that the proper volume mounts +// are present, etc. +func assertBuildPodIsAsExpected(t *testing.T, cs *framework.ClientSet, mosb *mcfgv1alpha1.MachineOSBuild) { + t.Helper() + + osImageURLConfig, err := getMachineConfigOSImageURL(cs) + require.NoError(t, err) + + mcoImages, err := getMachineConfigOperatorImages(cs) + require.NoError(t, err) + + buildPod, err := cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace).Get(context.TODO(), mosb.Status.BuilderReference.PodImageBuilder.Name, metav1.GetOptions{}) + require.NoError(t, err) - t.Logf("Build started in %s!", time.Since(start)) + assertContainerIsUsingExpectedImage := func(c corev1.Container, containerName, expectedImage string) { + if c.Name == containerName { + assert.Equal(t, c.Image, expectedImage) + } + } + + for _, container := range buildPod.Spec.Containers { + assertContainerIsUsingExpectedImage(container, "image-build", mcoImages.MachineConfigOperator) + assertContainerIsUsingExpectedImage(container, "wait-for-done", osImageURLConfig.BaseOSContainerImage) + } +} + +// Gets and parses the Images data from the machine-config-operator-images configmap. 
+func getMachineConfigOperatorImages(cs *framework.ClientSet) (*ctrlcommon.Images, error) { + cm, err := cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), ctrlcommon.MachineConfigOperatorImagesConfigMapName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + return ctrlcommon.ParseImagesFromConfigMap(cm) +} + +// Gets and parses the OSImageURL data from the machine-config-osimageurl configmap. +func getMachineConfigOSImageURL(cs *framework.ClientSet) (*ctrlcommon.OSImageURLConfig, error) { + cm, err := cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), ctrlcommon.MachineConfigOSImageURLConfigMapName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + return ctrlcommon.ParseOSImageURLConfigMap(cm) } // Prepares for an on-cluster build test by performing the following: @@ -487,7 +557,7 @@ func waitForBuildToStart(t *testing.T, cs *framework.ClientSet, poolName string) // // Returns a MachineOSConfig object for the caller to create to begin the build // process. -func prepareForTest(t *testing.T, cs *framework.ClientSet, testOpts onClusterBuildTestOpts) *mcfgv1alpha1.MachineOSConfig { +func prepareForOnClusterLayeringTest(t *testing.T, cs *framework.ClientSet, testOpts onClusterLayeringTestOpts) *mcfgv1alpha1.MachineOSConfig { // If the test requires RHEL entitlements, clone them from // "etc-pki-entitlement" in the "openshift-config-managed" namespace. if testOpts.useEtcPkiEntitlement { @@ -502,21 +572,20 @@ func prepareForTest(t *testing.T, cs *framework.ClientSet, testOpts onClusterBui t.Cleanup(injectYumRepos(t, cs)) } - pushSecretName, err := getBuilderPushSecretName(cs) - require.NoError(t, err) - // Register ephemeral object cleanup function. 
t.Cleanup(func() { cleanupEphemeralBuildObjects(t, cs) }) - imagestreamName := "os-image" - t.Cleanup(createImagestream(t, cs, imagestreamName)) + imagestreamObjMeta := metav1.ObjectMeta{ + Name: "os-image", + Namespace: strings.ToLower(t.Name()), + } - t.Cleanup(copyGlobalPullSecret(t, cs)) + pushSecretName, finalPullspec, imagestreamCleanupFunc := setupImageStream(t, cs, imagestreamObjMeta) + t.Cleanup(imagestreamCleanupFunc) - finalPullspec, err := getImagestreamPullspec(cs, imagestreamName) - require.NoError(t, err) + t.Cleanup(copyGlobalPullSecret(t, cs)) if testOpts.targetNode != nil { t.Cleanup(makeIdempotentAndRegister(t, helpers.CreatePoolWithNode(t, cs, testOpts.poolName, *testOpts.targetNode))) @@ -524,10 +593,10 @@ func prepareForTest(t *testing.T, cs *framework.ClientSet, testOpts onClusterBui t.Cleanup(makeIdempotentAndRegister(t, helpers.CreateMCP(t, cs, testOpts.poolName))) } - _, err = helpers.WaitForRenderedConfig(t, cs, testOpts.poolName, "00-worker") + _, err := helpers.WaitForRenderedConfig(t, cs, testOpts.poolName, "00-worker") require.NoError(t, err) - return &mcfgv1alpha1.MachineOSConfig{ + mosc := &mcfgv1alpha1.MachineOSConfig{ ObjectMeta: metav1.ObjectMeta{ Name: testOpts.poolName, }, @@ -560,6 +629,10 @@ func prepareForTest(t *testing.T, cs *framework.ClientSet, testOpts onClusterBui }, }, } + + helpers.SetMetadataOnObject(t, mosc) + + return mosc } func TestSSHKeyAndPasswordForOSBuilder(t *testing.T) { @@ -572,7 +645,7 @@ func TestSSHKeyAndPasswordForOSBuilder(t *testing.T) { osNode := helpers.GetSingleNodeByRole(t, cs, layeredMCPName) // prepare for on cluster build test - prepareForTest(t, cs, onClusterBuildTestOpts{ + prepareForOnClusterLayeringTest(t, cs, onClusterLayeringTestOpts{ poolName: layeredMCPName, customDockerfiles: map[string]string{}, }) @@ -605,6 +678,8 @@ func TestSSHKeyAndPasswordForOSBuilder(t *testing.T) { }, } + helpers.SetMetadataOnObject(t, testConfig) + // Create the MachineConfig and wait for the configuration 
to be applied _, err := cs.MachineConfigs().Create(context.TODO(), testConfig, metav1.CreateOptions{}) require.Nil(t, err, "failed to create MC") diff --git a/test/e2e/mob_test.go b/test/e2e/mob_test.go deleted file mode 100644 index 582581f461..0000000000 --- a/test/e2e/mob_test.go +++ /dev/null @@ -1,209 +0,0 @@ -package e2e_test - -import ( - "context" - "encoding/json" - "strings" - "testing" - "time" - - "github.com/openshift/machine-config-operator/test/framework" - "github.com/openshift/machine-config-operator/test/helpers" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/openshift/machine-config-operator/pkg/controller/build" - ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" - corev1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/jsonmergepatch" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/util/retry" -) - -func patchConfigMapForTest(t *testing.T, cs *framework.ClientSet) { - secrets, err := cs.CoreV1Interface.Secrets(ctrlcommon.MCONamespace).List(context.TODO(), metav1.ListOptions{}) - require.NoError(t, err) - secretName := "" - for _, secret := range secrets.Items { - if strings.HasPrefix(secret.Name, "builder-dockercfg") { - secretName = secret.Name - break - } - } - - t.Logf("Using secret name %q for test", secretName) - - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: build.OnClusterBuildConfigMapName, - Namespace: ctrlcommon.MCONamespace, - }, - Data: map[string]string{ - build.BaseImagePullSecretNameConfigKey: secretName, - build.FinalImagePushSecretNameConfigKey: secretName, - build.FinalImagePullspecConfigKey: "registry.host.com/org/repo:tag", - }, - } - - oldCM, err := cs.CoreV1Interface.ConfigMaps("openshift-machine-config-operator").Get(context.TODO(), build.OnClusterBuildConfigMapName, 
metav1.GetOptions{}) - require.Nil(t, err) - - oldCMJson, _ := json.Marshal(oldCM) - newCMJson, _ := json.Marshal(cm) - - patch, err := jsonmergepatch.CreateThreeWayJSONMergePatch(oldCMJson, newCMJson, oldCMJson) - require.Nil(t, err) - - _, err = cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Patch(context.TODO(), build.OnClusterBuildConfigMapName, types.MergePatchType, patch, metav1.PatchOptions{}) - require.NoError(t, err) - - t.Logf("ConfigMap %q patched for test", cm.Name) -} - -func createConfigMapForTest(t *testing.T, cs *framework.ClientSet) func() { - secrets, err := cs.CoreV1Interface.Secrets(ctrlcommon.MCONamespace).List(context.TODO(), metav1.ListOptions{}) - require.NoError(t, err) - secretName := "" - for _, secret := range secrets.Items { - if strings.HasPrefix(secret.Name, "builder-dockercfg") { - secretName = secret.Name - break - } - } - - t.Logf("Using secret name %q for test", secretName) - - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: build.OnClusterBuildConfigMapName, - Namespace: ctrlcommon.MCONamespace, - }, - Data: map[string]string{ - build.BaseImagePullSecretNameConfigKey: secretName, - build.FinalImagePushSecretNameConfigKey: secretName, - build.FinalImagePullspecConfigKey: "registry.host.com/org/repo:tag", - }, - } - - _, err = cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Create(context.TODO(), cm, metav1.CreateOptions{}) - require.NoError(t, err) - - t.Logf("ConfigMap %q created for test", cm.Name) - - cleanup := helpers.MakeIdempotent(func() { - err := cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Delete(context.TODO(), cm.Name, metav1.DeleteOptions{}) - require.NoError(t, err) - t.Logf("ConfigMap %q deleted", cm.Name) - }) - - t.Cleanup(cleanup) - return cleanup -} - -func TestMachineOSBuilder(t *testing.T) { - //add an assertion to see if deployment object is present or absent - //correlate that with pod detection - - cs := framework.NewClientSet("") - mcpName := "test-mcp" - 
namespace := "openshift-machine-config-operator" - mobPodNamePrefix := "machine-os-builder" - - t.Cleanup(createConfigMapForTest(t, cs)) - - // get the feature gates because we're gating this for now - featureGates, err := cs.ConfigV1Interface.FeatureGates().Get(context.TODO(), "cluster", metav1.GetOptions{}) - require.NoError(t, err, "Failed to retrieve feature gates") - - // TODO(jkyros): this should be a helper or we should use whatever the "best practice" way is - // for retrieving the gates during a test, but this works for now - var featureGateEnabled bool - for _, featureGateDetails := range featureGates.Status.FeatureGates { - for _, enabled := range featureGateDetails.Enabled { - if enabled.Name == "OnClusterBuild" { - featureGateEnabled = true - - } - } - } - - t.Logf("Feature gate OnClusterBuiild enabled: %t", featureGateEnabled) - - cleanup := helpers.MakeIdempotent(helpers.CreateMCP(t, cs, mcpName)) - t.Cleanup(cleanup) - time.Sleep(10 * time.Second) // Wait a bit to ensure MCP is fully created - - // added retry because another process was modifying the MCP concurrently - retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { - // Get the latest MCP - mcp, err := cs.MachineConfigPools().Get(context.TODO(), mcpName, metav1.GetOptions{}) - if err != nil { - return err - } - - // Set the label - mcp.ObjectMeta.Labels[ctrlcommon.LayeringEnabledPoolLabel] = "" - - // Try to update the MCP - _, err = cs.MachineConfigPools().Update(context.TODO(), mcp, metav1.UpdateOptions{}) - return err - }) - require.Nil(t, retryErr) - - // assertion to see if the deployment object is present after setting the label - ctx := context.TODO() - - err = wait.PollUntilContextTimeout(ctx, 2*time.Second, time.Minute, true, func(ctx context.Context) (bool, error) { - exists, err := helpers.CheckDeploymentExists(cs, "machine-os-builder", namespace) - return exists, err - }) - if featureGateEnabled { - require.NoError(t, err, "Failed to check the existence of the 
Machine OS Builder deployment") - - // wait for Machine OS Builder pod to start - err = helpers.WaitForPodStart(cs, mobPodNamePrefix, namespace) - require.NoError(t, err, "Failed to start the Machine OS Builder pod") - t.Logf("machine-os-builder deployment exists") - - patchConfigMapForTest(t, cs) - err = helpers.WaitForPodStop(cs, mobPodNamePrefix, namespace) - require.NoError(t, err, "Failed to stop the Machine OS Builder pod") - - // assertion to see if the deployment object is present after setting the label - ctx = context.TODO() - err = wait.PollUntilContextTimeout(ctx, 2*time.Second, time.Minute, true, func(ctx context.Context) (bool, error) { - exists, err := helpers.CheckDeploymentExists(cs, "machine-os-builder", namespace) - return exists, err - }) - require.NoError(t, err, "Failed to check the existence of the Machine OS Builder deployment") - - // wait for Machine OS Builder pod to start - err = helpers.WaitForPodStart(cs, mobPodNamePrefix, namespace) - require.NoError(t, err, "Failed to start the Machine OS Builder pod") - t.Logf("machine-os-builder deployment exists") - - // delete the MachineConfigPool - cleanup() - time.Sleep(20 * time.Second) - - // assertion to see if the deployment object is absent after deleting the MCP - err = wait.PollUntilContextTimeout(ctx, 2*time.Second, time.Minute, true, func(ctx context.Context) (bool, error) { - exists, err := helpers.CheckDeploymentExists(cs, "machine-os-builder", namespace) - return !exists, err - }) - require.NoError(t, err, "Failed to check the absence of the Machine OS Builder deployment") - - // wait for Machine OS Builder pod to stop - err = helpers.WaitForPodStop(cs, mobPodNamePrefix, namespace) - require.NoError(t, err, "Failed to stop the Machine OS Builder pod") - t.Logf("machine-os-builder deployment no longer exists") - - _, err = cs.AppsV1Interface.Deployments(ctrlcommon.MCONamespace).Get(context.TODO(), "machine-os-builder", metav1.GetOptions{}) - assert.True(t, apierrs.IsNotFound(err), 
"machine-os-builder deployment still present") - } else { - require.Error(t, err, "Machine OS Builder deployment exists and it should not, because the feature gate is disabled") - } -} diff --git a/test/helpers/utils.go b/test/helpers/utils.go index d4808e5518..e90552b870 100644 --- a/test/helpers/utils.go +++ b/test/helpers/utils.go @@ -48,6 +48,17 @@ import ( mcoac "github.com/openshift/client-go/operator/applyconfigurations/operator/v1" ) +const ( + // Annotation which should be added to all objects created by an e2e test such as + // ConfigMaps, MachineConfigs, MachineConfigPools, etc. + UsedByE2ETestAnnoKey string = "machineconfiguration.openshift.io/used-by-e2e-test" + + // We have a separate label key so that we can do a label selector query. We + // don't attach the test name to this label key because the test name may + // contain invalid characters such as slashes or underscores. + UsedByE2ETestLabelKey string = UsedByE2ETestAnnoKey +) + type CleanupFuncs struct { funcs []func() } @@ -69,6 +80,8 @@ func NewCleanupFuncs() CleanupFuncs { } func ApplyMC(t *testing.T, cs *framework.ClientSet, mc *mcfgv1.MachineConfig) func() { + SetMetadataOnObject(t, mc) + _, err := cs.MachineConfigs().Create(context.TODO(), mc, metav1.CreateOptions{}) require.Nil(t, err) @@ -611,6 +624,7 @@ func LabelNode(t *testing.T, cs *framework.ClientSet, node corev1.Node, label st return err } n.Labels[label] = "" + SetMetadataOnObject(t, n) _, err = cs.CoreV1Interface.Nodes().Update(ctx, n, metav1.UpdateOptions{}) return err }) @@ -627,6 +641,8 @@ func LabelNode(t *testing.T, cs *framework.ClientSet, node corev1.Node, label st return err } delete(n.Labels, label) + delete(n.Labels, UsedByE2ETestLabelKey) + delete(n.Annotations, UsedByE2ETestAnnoKey) _, err = cs.CoreV1Interface.Nodes().Update(ctx, n, metav1.UpdateOptions{}) return err }) @@ -696,6 +712,8 @@ func CreateMCP(t *testing.T, cs *framework.ClientSet, mcpName string) func() { infraMCP.ObjectMeta.Labels = 
make(map[string]string) infraMCP.ObjectMeta.Labels[mcpName] = "" + SetMetadataOnObject(t, infraMCP) + _, err := cs.MachineConfigPools().Create(context.TODO(), infraMCP, metav1.CreateOptions{}) switch { case err == nil: @@ -1322,3 +1340,33 @@ func GetActionApplyConfiguration(action opv1.NodeDisruptionPolicySpecAction) *mc } return mcoac.NodeDisruptionPolicySpecAction().WithType(action.Type) } + +// SetMetadataOnObject sets an e2e-test annotation and label on any object +// created for this test; the annotation carries the name of the currently +// running test. This is intended to aid debugging failed tests. +func SetMetadataOnObject(t *testing.T, obj metav1.Object) { + setAnnotationsOnObject(t, obj) + setLabelsOnObject(obj) +} + +func setAnnotationsOnObject(t *testing.T, obj metav1.Object) { + annos := obj.GetAnnotations() + if annos == nil { + annos = map[string]string{} + } + + annos[UsedByE2ETestAnnoKey] = t.Name() + + obj.SetAnnotations(annos) +} + +func setLabelsOnObject(obj metav1.Object) { + labels := obj.GetLabels() + if labels == nil { + labels = map[string]string{} + } + + labels[UsedByE2ETestLabelKey] = "" + + obj.SetLabels(labels) +}