diff --git a/cmd/machine-config-controller/start.go b/cmd/machine-config-controller/start.go index 432577f80b..26a84a666a 100644 --- a/cmd/machine-config-controller/start.go +++ b/cmd/machine-config-controller/start.go @@ -195,6 +195,7 @@ func createControllers(ctx *ctrlcommon.ControllerContext) []ctrlcommon.Controlle ctx.InformerFactory.Machineconfiguration().V1().MachineConfigPools(), ctx.KubeInformerFactory.Core().V1().Nodes(), ctx.KubeInformerFactory.Core().V1().Pods(), + ctx.InformerFactory.Machineconfiguration().V1alpha1().MachineOSBuilds(), ctx.ConfigInformerFactory.Config().V1().Schedulers(), ctx.ClientBuilder.KubeClientOrDie("node-update-controller"), ctx.ClientBuilder.MachineConfigClientOrDie("node-update-controller"), diff --git a/cmd/machine-os-builder/start.go b/cmd/machine-os-builder/start.go index f9348f8593..fd7ff210b5 100644 --- a/cmd/machine-os-builder/start.go +++ b/cmd/machine-os-builder/start.go @@ -7,6 +7,8 @@ import ( "fmt" "os" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + "github.com/openshift/machine-config-operator/internal/clients" "github.com/openshift/machine-config-operator/pkg/controller/build" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" @@ -37,6 +39,12 @@ func init() { startCmd.PersistentFlags().StringVar(&startOpts.kubeconfig, "kubeconfig", "", "Kubeconfig file to access a remote cluster (testing only)") } +func getMachineOSConfigs(ctx context.Context, cb *clients.Builder) (*mcfgv1alpha1.MachineOSConfigList, error) { + mcfgClient := cb.MachineConfigClientOrDie(componentName) + return mcfgClient.MachineconfigurationV1alpha1().MachineOSConfigs().List(ctx, metav1.ListOptions{}) + +} + // Checks if the on-cluster-build-config ConfigMap exists. If it exists, return the ConfigMap. // If not, return an error. func getBuildControllerConfigMap(ctx context.Context, cb *clients.Builder) (*corev1.ConfigMap, error) { @@ -57,13 +65,8 @@ func getBuildControllerConfigMap(ctx context.Context, cb *clients.Builder) (*cor // Creates a new BuildController configured for a certain image builder based // upon the imageBuilderType key in the on-cluster-build-config ConfigMap. -func getBuildController(ctx context.Context, cb *clients.Builder) (*build.Controller, error) { - onClusterBuildConfigMap, err := getBuildControllerConfigMap(ctx, cb) - if err != nil { - return nil, err - } - - imageBuilderType, err := build.GetImageBuilderType(onClusterBuildConfigMap) +func getBuildController(ctx context.Context, cb *clients.Builder) ([]*build.Controller, error) { + machineOSConfigs, err := getMachineOSConfigs(ctx, cb) if err != nil { return nil, err } @@ -72,11 +75,12 @@ func getBuildController(ctx context.Context, cb *clients.Builder) (*build.Contro buildClients := build.NewClientsFromControllerContext(ctrlCtx) cfg := build.DefaultBuildControllerConfig() - if imageBuilderType == build.OpenshiftImageBuilder { - return build.NewWithImageBuilder(cfg, buildClients), nil - } + controllersToStart := []*build.Controller{} - return build.NewWithCustomPodBuilder(cfg, buildClients), nil + for range machineOSConfigs.Items { + controllersToStart = append(controllersToStart, build.NewWithCustomPodBuilder(cfg, buildClients)) + } + return controllersToStart, nil } func runStartCmd(_ *cobra.Command, _ []string) { @@ -90,13 +94,12 @@ func runStartCmd(_ *cobra.Command, _ []string) { klog.Infof("Version: %+v (%s)", version.Raw, version.Hash) ctx, cancel := context.WithCancel(context.Background()) - cb, err := clients.NewBuilder("") if err != nil { klog.Fatalln(err) } - ctrl, err := getBuildController(ctx, cb) + controllers, err := getBuildController(ctx, cb) if err != nil { klog.Fatalln(err) var invalidImageBuiler *build.ErrInvalidImageBuilder @@ -107,7 +110,14 @@ func runStartCmd(_ *cobra.Command, _ []string) { } } - go ctrl.Run(ctx, 5) - <-ctx.Done() + // is this... allowed? + // since users can specify different settings per pool, we need to run a controller PER pool. Otherwise, settings will be conflated, as will failures and builds. + for _, ctrl := range controllers { + go ctrl.Run(ctx, 3) + <-ctx.Done() + cancel() + } + cancel() + } diff --git a/install/0000_80_machine-config_00_clusterreader_clusterrole.yaml b/install/0000_80_machine-config_00_clusterreader_clusterrole.yaml index fc831ee40f..2712cca95b 100644 --- a/install/0000_80_machine-config_00_clusterreader_clusterrole.yaml +++ b/install/0000_80_machine-config_00_clusterreader_clusterrole.yaml @@ -56,3 +56,23 @@ rules: - get - list - watch + - apiGroups: + - machineconfiguration.openshift.io + resources: + - machineosconfigs + - machineosconfigs/status + verbs: + - create + - update + - patch + - get + - apiGroups: + - machineconfiguration.openshift.io + resources: + - machineosbuilds + - machineosbuilds/status + verbs: + - create + - update + - patch + - get diff --git a/manifests/machineconfigcontroller/clusterrole.yaml b/manifests/machineconfigcontroller/clusterrole.yaml index 9bd6ac4260..98f0820848 100644 --- a/manifests/machineconfigcontroller/clusterrole.yaml +++ b/manifests/machineconfigcontroller/clusterrole.yaml @@ -42,6 +42,12 @@ rules: - apiGroups: ["operator.openshift.io"] resources: ["machineconfigurations"] verbs: ["get","list","watch"] +- apiGroups: ["machineconfiguration.openshift.io"] + resources: ["machineosconfigs", "machineosconfigs/status"] + verbs: ["create", "update", "patch", "get"] +- apiGroups: ["machineconfiguration.openshift.io"] + resources: ["machineosbuilds", "machineosbuilds/status"] + verbs: ["create", "update", "patch", "get"] - apiGroups: - authentication.k8s.io resources: diff --git a/pkg/apihelpers/machineosbuild_apihelpers.go b/pkg/apihelpers/machineosbuild_apihelpers.go new file mode 100644 index 0000000000..31367cb5b4 --- /dev/null +++ b/pkg/apihelpers/machineosbuild_apihelpers.go @@ -0,0 +1,79 @@ +package apihelpers + +import ( + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NewMachineOSBuildCondition creates a new MMachineOSBuild condition. +func NewMachineOSBuildCondition(condType string, status metav1.ConditionStatus, reason, message string) *metav1.Condition { + return &metav1.Condition{ + Type: condType, + Status: status, + LastTransitionTime: metav1.Now(), + Reason: reason, + Message: message, + } +} + +func GetMachineOSBuildCondition(status mcfgv1alpha1.MachineOSBuildStatus, condType mcfgv1alpha1.BuildProgress) *metav1.Condition { + // in case of sync errors, return the last condition that matches, not the first + // this exists for redundancy and potential race conditions. + var LatestState *metav1.Condition + for i := range status.Conditions { + c := status.Conditions[i] + if mcfgv1alpha1.BuildProgress(c.Type) == condType { + LatestState = &c + } + } + return LatestState +} + +// SetMachineConfigPoolCondition updates the MachineConfigPool to include the provided condition. If the condition that +// we are about to add already exists and has the same status and reason then we are not going to update. +func SetMachineOSBuildCondition(status *mcfgv1alpha1.MachineOSBuildStatus, condition metav1.Condition) { + currentCond := GetMachineOSBuildCondition(*status, mcfgv1alpha1.BuildProgress(condition.Type)) + if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason && currentCond.Message == condition.Message { + return + } + // Do not update lastTransitionTime if the status of the condition doesn't change. + if currentCond != nil && currentCond.Status == condition.Status { + condition.LastTransitionTime = currentCond.LastTransitionTime + } + + // this may not be necessary + newConditions := filterOutMachineOSBuildCondition(status.Conditions, mcfgv1alpha1.BuildProgress(condition.Type)) + status.Conditions = append(newConditions, condition) +} + +// RemoveMachineOSBuildCondition removes the MachineOSBuild condition with the provided type. +func RemoveMachineOSBuildCondition(status *mcfgv1alpha1.MachineOSBuildStatus, condType mcfgv1alpha1.BuildProgress) { + status.Conditions = filterOutMachineOSBuildCondition(status.Conditions, condType) +} + +// filterOutMachineOSBuildCondition returns a new slice of MachineOSBuild conditions without conditions with the provided type. +func filterOutMachineOSBuildCondition(conditions []metav1.Condition, condType mcfgv1alpha1.BuildProgress) []metav1.Condition { + var newConditions []metav1.Condition + for _, c := range conditions { + if mcfgv1alpha1.BuildProgress(c.Type) == condType { + continue + } + newConditions = append(newConditions, c) + } + return newConditions +} + +func IsMachineOSBuildConditionTrue(conditions []metav1.Condition, conditionType mcfgv1alpha1.BuildProgress) bool { + return IsMachineOSBuildConditionPresentAndEqual(conditions, conditionType, metav1.ConditionTrue) +} + +// IsMachineOSBuildConditionPresentAndEqual returns true when conditionType is present and equal to status. +func IsMachineOSBuildConditionPresentAndEqual(conditions []metav1.Condition, conditionType mcfgv1alpha1.BuildProgress, status metav1.ConditionStatus) bool { + for _, condition := range conditions { + if mcfgv1alpha1.BuildProgress(condition.Type) == conditionType { + return condition.Status == status + } + } + return false +} diff --git a/pkg/controller/build/assets/Dockerfile.on-cluster-build-template b/pkg/controller/build/assets/Dockerfile.on-cluster-build-template index 26420b4931..49471ab139 100644 --- a/pkg/controller/build/assets/Dockerfile.on-cluster-build-template +++ b/pkg/controller/build/assets/Dockerfile.on-cluster-build-template @@ -5,22 +5,22 @@ # Decode and extract the MachineConfig from the gzipped ConfigMap and move it # into position. We do this in a separate stage so that we don't have the # gzipped MachineConfig laying around. -FROM {{.BaseImage.Pullspec}} AS extract +FROM {{.MachineOSConfig.Spec.BuildInputs.BaseOSImagePullspec}} AS extract COPY ./machineconfig/machineconfig.json.gz /tmp/machineconfig.json.gz RUN mkdir -p /etc/machine-config-daemon && \ cat /tmp/machineconfig.json.gz | base64 -d | gunzip - > /etc/machine-config-daemon/currentconfig -{{if .ExtensionsImage.Pullspec}} +{{if .MachineOSConfig.Spec.BuildInputs.BaseOSExtensionsImagePullspec}} # Pull our extensions image. Not sure yet what / how this should be wired up # though. Ideally, I'd like to use some Buildah tricks to have the extensions # directory mounted into the container at build-time so that I don't have to # copy the RPMs into the container, configure the repo, and do the # installation. Alternatively, I'd have to start a pod with an HTTP server. -FROM {{.ExtensionsImage.Pullspec}} AS extensions +FROM {{.MachineOSConfig.Spec.BuildInputs.BaseOSExtensionsImagePullspec}} AS extensions {{end}} -FROM {{.BaseImage.Pullspec}} AS configs +FROM {{.MachineOSConfig.Spec.BuildInputs.BaseOSImagePullspec}} AS configs # Copy the extracted MachineConfig into the expected place in the image. COPY --from=extract /etc/machine-config-daemon/currentconfig /etc/machine-config-daemon/currentconfig # Do the ignition live-apply, extracting the Ignition config from the MachineConfig. @@ -29,11 +29,11 @@ COPY --from=extract /etc/machine-config-daemon/currentconfig /etc/machine-config RUN container="oci" exec -a ignition-apply /usr/lib/dracut/modules.d/30ignition/ignition --ignore-unsupported <(cat /etc/machine-config-daemon/currentconfig | jq '.spec.config') && \ ostree container commit -LABEL machineconfig={{.Pool.Spec.Configuration.Name}} -LABEL machineconfigpool={{.Pool.Name}} -LABEL releaseversion={{.ReleaseVersion}} -LABEL baseOSContainerImage={{.BaseImage.Pullspec}} +LABEL machineconfig={{.MachineOSBuild.Spec.DesiredConfig.Name}} +LABEL machineconfigpool={{.MachineOSConfig.Spec.MachineConfigPool.Name}} +LABEL releaseversion={{.MachineOSConfig.Spec.BuildInputs.ReleaseVersion}} +LABEL baseOSContainerImage={{.MachineOSConfig.Spec.BuildInputs.BaseOSImagePullspec}} -{{if .CustomDockerfile}} -{{.CustomDockerfile}} +{{if .Containerfile}} +{{.Containerfile}} {{end}} diff --git a/pkg/controller/build/assets/buildah-build.sh b/pkg/controller/build/assets/buildah-build.sh index e57bdcdad9..0c77101ee9 100644 --- a/pkg/controller/build/assets/buildah-build.sh +++ b/pkg/controller/build/assets/buildah-build.sh @@ -5,6 +5,10 @@ # custom build pod. set -xeuo +ETC_PKI_ENTITLEMENT_MOUNTPOINT="${ETC_PKI_ENTITLEMENT_MOUNTPOINT:-}" +ETC_PKI_RPM_GPG_MOUNTPOINT="${ETC_PKI_RPM_GPG_MOUNTPOINT:-}" +ETC_YUM_REPOS_D_MOUNTPOINT="${ETC_YUM_REPOS_D_MOUNTPOINT:-}" + build_context="$HOME/context" # Create a directory to hold our build context. @@ -14,12 +18,58 @@ mkdir -p "$build_context/machineconfig" cp /tmp/dockerfile/Dockerfile "$build_context" cp /tmp/machineconfig/machineconfig.json.gz "$build_context/machineconfig/" -# Build our image using Buildah. -buildah bud \ - --storage-driver vfs \ - --authfile="$BASE_IMAGE_PULL_CREDS" \ - --tag "$TAG" \ - --file="$build_context/Dockerfile" "$build_context" +build_args=( + --log-level=DEBUG + --storage-driver vfs + --authfile="$BASE_IMAGE_PULL_CREDS" + --tag "$TAG" + --file="$build_context/Dockerfile" +) + +mount_opts="z,rw" + +# If we have RHSM certs, copy them into a tempdir to avoid SELinux issues, and +# tell Buildah about them. +rhsm_path="/var/run/secrets/rhsm" +if [[ -d "$rhsm_path" ]]; then + rhsm_certs="$(mktemp -d)" + cp -r -v "$rhsm_path/." "$rhsm_certs" + chmod -R 0755 "$rhsm_certs" + build_args+=("--volume=$rhsm_certs:/run/secrets/rhsm:$mount_opts") +fi + +# If we have /etc/pki/entitlement certificates, commonly used with RHEL +# entitlements, copy them into a tempdir to avoid SELinux issues, and tell +# Buildah about them. +if [[ -n "$ETC_PKI_ENTITLEMENT_MOUNTPOINT" ]] && [[ -d "$ETC_PKI_ENTITLEMENT_MOUNTPOINT" ]]; then + configs="$(mktemp -d)" + cp -r -v "$ETC_PKI_ENTITLEMENT_MOUNTPOINT/." "$configs" + chmod -R 0755 "$configs" + build_args+=("--volume=$configs:$ETC_PKI_ENTITLEMENT_MOUNTPOINT:$mount_opts") +fi + +# If we have /etc/yum.repos.d configs, commonly used with Red Hat Satellite +# subscriptions, copy them into a tempdir to avoid SELinux issues, and tell +# Buildah about them. +if [[ -n "$ETC_YUM_REPOS_D_MOUNTPOINT" ]] && [[ -d "$ETC_YUM_REPOS_D_MOUNTPOINT" ]]; then + configs="$(mktemp -d)" + cp -r -v "$ETC_YUM_REPOS_D_MOUNTPOINT/." "$configs" + chmod -R 0755 "$configs" + build_args+=("--volume=$configs:$ETC_YUM_REPOS_D_MOUNTPOINT:$mount_opts") +fi + +# If we have /etc/pki/rpm-gpg configs, commonly used with Red Hat Satellite +# subscriptions, copy them into a tempdir to avoid SELinux issues, and tell +# Buildah about them. +if [[ -n "$ETC_PKI_RPM_GPG_MOUNTPOINT" ]] && [[ -d "$ETC_PKI_RPM_GPG_MOUNTPOINT" ]]; then + configs="$(mktemp -d)" + cp -r -v "$ETC_PKI_RPM_GPG_MOUNTPOINT/." "$configs" + chmod -R 0755 "$configs" + build_args+=("--volume=$configs:$ETC_PKI_RPM_GPG_MOUNTPOINT:$mount_opts") +fi + +# Build our image. +buildah bud "${build_args[@]}" "$build_context" # Push our built image. buildah push \ diff --git a/pkg/controller/build/build_controller.go b/pkg/controller/build/build_controller.go index be416aefa6..7cde8fcd9c 100644 --- a/pkg/controller/build/build_controller.go +++ b/pkg/controller/build/build_controller.go @@ -4,13 +4,14 @@ import ( "bytes" "context" "fmt" - "os" "strings" "time" "github.com/containers/image/v5/docker/reference" - buildv1 "github.com/openshift/api/build/v1" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + + mcfginformersv1alpha1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" aggerrors "k8s.io/apimachinery/pkg/util/errors" @@ -38,6 +39,7 @@ import ( mcfginformersv1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1" mcfglistersv1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1" + mcfglistersv1alpha1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1alpha1" corelistersv1 "k8s.io/client-go/listers/core/v1" coreinformers "k8s.io/client-go/informers" @@ -47,6 +49,7 @@ import ( ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/api/equality" k8serrors "k8s.io/apimachinery/pkg/api/errors" "github.com/openshift/machine-config-operator/internal/clients" @@ -97,6 +100,17 @@ const ( osImageURLConfigKey = "osImageURL" ) +const ( + // Name of the etc-pki-entitlement secret from the openshift-config-managed namespace. + etcPkiEntitlementSecretName = "etc-pki-entitlement" + + // Name of the etc-pki-rpm-gpg secret. + etcPkiRpmGpgSecretName = "etc-pki-rpm-gpg" + + // Name of the etc-yum-repos-d ConfigMap. + etcYumReposDConfigMapName = "etc-yum-repos-d" +) + type ErrInvalidImageBuilder struct { Message string InvalidType string @@ -109,23 +123,22 @@ func (e *ErrInvalidImageBuilder) Error() string { // Image builder constants. type ImageBuilderType string +type BuildProgress string const ( - // ImageBuilderTypeConfigMapKey is the key in the ConfigMap that determines which type of image builder to use. - ImageBuilderTypeConfigMapKey string = "imageBuilderType" - - // OpenshiftImageBuilder is the constant indicating use of the OpenShift image builder. - OpenshiftImageBuilder ImageBuilderType = "openshift-image-builder" // CustomPodImageBuilder is the constant indicating use of the custom pod image builder. - CustomPodImageBuilder ImageBuilderType = "custom-pod-builder" + CustomPodImageBuilder ImageBuilderType = "CustomPodBuilder" + // Missing build progress constants from the API + MachineOSBuildReady BuildProgress = "Ready" + MachineOSBuildRestarted BuildProgress = "Restarted" ) var ( // controllerKind contains the schema.GroupVersionKind for this controller type. //nolint:varcheck,deadcode // This will be used eventually controllerKind = mcfgv1.SchemeGroupVersion.WithKind("MachineConfigPool") - validImageBuilderTypes = sets.New[ImageBuilderType](OpenshiftImageBuilder, CustomPodImageBuilder) + validImageBuilderTypes = sets.New[ImageBuilderType](CustomPodImageBuilder) ) //nolint:revive // If I name this ControllerConfig, that name will be overloaded :P @@ -147,9 +160,8 @@ type BuildControllerConfig struct { type ImageBuilder interface { Run(context.Context, int) StartBuild(ImageBuildRequest) (*corev1.ObjectReference, error) - IsBuildRunning(*mcfgv1.MachineConfigPool) (bool, error) - DeleteBuildObject(*mcfgv1.MachineConfigPool) error - FinalPullspec(*mcfgv1.MachineConfigPool) (string, error) + IsBuildRunning(*mcfgv1alpha1.MachineOSBuild, *mcfgv1alpha1.MachineOSConfig) (bool, error) + DeleteBuildObject(*mcfgv1alpha1.MachineOSBuild, *mcfgv1alpha1.MachineOSConfig) error } // Controller defines the build controller. @@ -159,18 +171,21 @@ type Controller struct { eventRecorder record.EventRecorder - syncHandler func(mcp string) error - enqueueMachineConfigPool func(*mcfgv1.MachineConfigPool) + syncHandler func(build string) error - cmLister corelistersv1.ConfigMapLister - ccLister mcfglistersv1.ControllerConfigLister - mcpLister mcfglistersv1.MachineConfigPoolLister + cmLister corelistersv1.ConfigMapLister + ccLister mcfglistersv1.ControllerConfigLister + mcpLister mcfglistersv1.MachineConfigPoolLister + machineOSBuildLister mcfglistersv1alpha1.MachineOSBuildLister + machineOSConfigLister mcfglistersv1alpha1.MachineOSConfigLister - ccListerSynced cache.InformerSynced - mcpListerSynced cache.InformerSynced - podListerSynced cache.InformerSynced + machineOSConfigListerSynced cache.InformerSynced + machineOSBuildListerSynced cache.InformerSynced + ccListerSynced cache.InformerSynced + mcpListerSynced cache.InformerSynced + podListerSynced cache.InformerSynced - queue workqueue.RateLimitingInterface + mosQueue workqueue.RateLimitingInterface config BuildControllerConfig imageBuilder ImageBuilder @@ -206,12 +221,14 @@ func NewClients(cb *clients.Builder) *Clients { // Holds and starts each of the infomrers used by the Build Controller and its subcontrollers. type informers struct { - ccInformer mcfginformersv1.ControllerConfigInformer - mcpInformer mcfginformersv1.MachineConfigPoolInformer - buildInformer buildinformersv1.BuildInformer - podInformer coreinformersv1.PodInformer - cmInformer coreinformersv1.ConfigMapInformer - toStart []interface{ Start(<-chan struct{}) } + ccInformer mcfginformersv1.ControllerConfigInformer + mcpInformer mcfginformersv1.MachineConfigPoolInformer + buildInformer buildinformersv1.BuildInformer + podInformer coreinformersv1.PodInformer + cmInformer coreinformersv1.ConfigMapInformer + machineOSBuildInformer mcfginformersv1alpha1.MachineOSBuildInformer + machineOSConfigInformer mcfginformersv1alpha1.MachineOSConfigInformer + toStart []interface{ Start(<-chan struct{}) } } // Starts the informers, wiring them up to the provided context. @@ -228,19 +245,26 @@ func newInformers(bcc *Clients) *informers { cmInformer := coreinformers.NewFilteredSharedInformerFactory(bcc.kubeclient, 0, ctrlcommon.MCONamespace, nil) buildInformer := buildinformers.NewSharedInformerFactoryWithOptions(bcc.buildclient, 0, buildinformers.WithNamespace(ctrlcommon.MCONamespace)) podInformer := coreinformers.NewSharedInformerFactoryWithOptions(bcc.kubeclient, 0, coreinformers.WithNamespace(ctrlcommon.MCONamespace)) + // this may not work, might need a new mcfg client and or a new informer pkg + machineOSBuildInformer := mcfginformers.NewSharedInformerFactory(bcc.mcfgclient, 0) + machineOSConfigInformer := mcfginformers.NewSharedInformerFactory(bcc.mcfgclient, 0) return &informers{ - ccInformer: ccInformer.Machineconfiguration().V1().ControllerConfigs(), - mcpInformer: mcpInformer.Machineconfiguration().V1().MachineConfigPools(), - cmInformer: cmInformer.Core().V1().ConfigMaps(), - buildInformer: buildInformer.Build().V1().Builds(), - podInformer: podInformer.Core().V1().Pods(), + ccInformer: ccInformer.Machineconfiguration().V1().ControllerConfigs(), + mcpInformer: mcpInformer.Machineconfiguration().V1().MachineConfigPools(), + cmInformer: cmInformer.Core().V1().ConfigMaps(), + buildInformer: buildInformer.Build().V1().Builds(), + podInformer: podInformer.Core().V1().Pods(), + machineOSBuildInformer: machineOSBuildInformer.Machineconfiguration().V1alpha1().MachineOSBuilds(), + machineOSConfigInformer: machineOSConfigInformer.Machineconfiguration().V1alpha1().MachineOSConfigs(), toStart: []interface{ Start(<-chan struct{}) }{ ccInformer, mcpInformer, buildInformer, cmInformer, podInformer, + machineOSBuildInformer, + machineOSConfigInformer, }, } } @@ -258,26 +282,35 @@ func newBuildController( informers: newInformers(clients), Clients: clients, eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "machineosbuilder-buildcontroller"}), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineosbuilder-buildcontroller"), + mosQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineosbuilder"), config: ctrlConfig, } - ctrl.mcpInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: ctrl.addMachineConfigPool, - UpdateFunc: ctrl.updateMachineConfigPool, - DeleteFunc: ctrl.deleteMachineConfigPool, - }) + ctrl.syncHandler = ctrl.syncMachineOSBuilder + + ctrl.ccLister = ctrl.ccInformer.Lister() + ctrl.mcpLister = ctrl.mcpInformer.Lister() + + ctrl.machineOSConfigLister = ctrl.machineOSConfigInformer.Lister() + ctrl.machineOSBuildLister = ctrl.machineOSBuildInformer.Lister() - ctrl.cmInformer.Informer().AddEventHandler(cache.ResourceEventHandlerDetailedFuncs{ - UpdateFunc: ctrl.updateConfigMap, + ctrl.machineOSBuildInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + UpdateFunc: ctrl.updateMachineOSBuild, + DeleteFunc: ctrl.deleteMachineOSBuild, }) - ctrl.syncHandler = ctrl.syncMachineConfigPool - ctrl.enqueueMachineConfigPool = ctrl.enqueueDefault + ctrl.machineOSConfigInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + UpdateFunc: ctrl.updateMachineOSConfig, + AddFunc: ctrl.addMachineOSConfig, + DeleteFunc: ctrl.deleteMachineOSConfig, + }) - ctrl.ccLister = ctrl.ccInformer.Lister() - ctrl.mcpLister = ctrl.mcpInformer.Lister() + ctrl.mcpInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + UpdateFunc: ctrl.updateMachineConfigPool, + }) + ctrl.machineOSConfigListerSynced = ctrl.machineOSConfigInformer.Informer().HasSynced + ctrl.machineOSBuildListerSynced = ctrl.machineOSBuildInformer.Informer().HasSynced ctrl.ccListerSynced = ctrl.ccInformer.Informer().HasSynced ctrl.mcpListerSynced = ctrl.mcpInformer.Informer().HasSynced @@ -295,17 +328,6 @@ func NewWithCustomPodBuilder( return ctrl } -// Creates a Build Controller instance with an OpenShift Image Builder -// implementation for the ImageBuilder. -func NewWithImageBuilder( - ctrlConfig BuildControllerConfig, - clients *Clients, -) *Controller { - ctrl := newBuildController(ctrlConfig, clients) - ctrl.imageBuilder = newImageBuildController(ctrlConfig, clients, ctrl.imageBuildUpdater) - return ctrl -} - // Run executes the render controller. // TODO: Make this use a context instead of a stop channel. func (ctrl *Controller) Run(parentCtx context.Context, workers int) { @@ -315,7 +337,7 @@ func (ctrl *Controller) Run(parentCtx context.Context, workers int) { // Not sure if I actually need a child context here or not. ctx, cancel := context.WithCancel(parentCtx) defer utilruntime.HandleCrash() - defer ctrl.queue.ShutDown() + defer ctrl.mosQueue.ShutDown() defer cancel() ctrl.informers.start(ctx) @@ -327,61 +349,44 @@ func (ctrl *Controller) Run(parentCtx context.Context, workers int) { go ctrl.imageBuilder.Run(ctx, workers) for i := 0; i < workers; i++ { - go wait.Until(ctrl.worker, time.Second, ctx.Done()) + go wait.Until(ctrl.mosWorker, time.Second, ctx.Done()) } <-ctx.Done() } -func (ctrl *Controller) enqueue(pool *mcfgv1.MachineConfigPool) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pool) - if err != nil { - utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", pool, err)) - return - } - - ctrl.queue.Add(key) -} - -func (ctrl *Controller) enqueueRateLimited(pool *mcfgv1.MachineConfigPool) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pool) +func (ctrl *Controller) enqueueMachineOSConfig(mosc *mcfgv1alpha1.MachineOSConfig) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(mosc) if err != nil { - utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", pool, err)) + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", mosc, err)) return } - - ctrl.queue.AddRateLimited(key) + ctrl.mosQueue.Add(key) } -// enqueueAfter will enqueue a pool after the provided amount of time. -func (ctrl *Controller) enqueueAfter(pool *mcfgv1.MachineConfigPool, after time.Duration) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pool) +func (ctrl *Controller) enqueueMachineOSBuild(mosb *mcfgv1alpha1.MachineOSBuild) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(mosb) if err != nil { - utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", pool, err)) + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", mosb, err)) return } - ctrl.queue.AddAfter(key, after) -} - -// enqueueDefault calls a default enqueue function -func (ctrl *Controller) enqueueDefault(pool *mcfgv1.MachineConfigPool) { - ctrl.enqueueAfter(pool, ctrl.config.UpdateDelay) + ctrl.mosQueue.Add(key) } // worker runs a worker thread that just dequeues items, processes them, and marks them done. // It enforces that the syncHandler is never invoked concurrently with the same key. -func (ctrl *Controller) worker() { - for ctrl.processNextWorkItem() { +func (ctrl *Controller) mosWorker() { + for ctrl.processNextMosWorkItem() { } } -func (ctrl *Controller) processNextWorkItem() bool { - key, quit := ctrl.queue.Get() +func (ctrl *Controller) processNextMosWorkItem() bool { + key, quit := ctrl.mosQueue.Get() if quit { return false } - defer ctrl.queue.Done(key) + defer ctrl.mosQueue.Done(key) err := ctrl.syncHandler(key.(string)) ctrl.handleErr(err, key) @@ -389,109 +394,74 @@ func (ctrl *Controller) processNextWorkItem() bool { return true } -// Checks for new Data in the on-cluster-build-config configmap -// if the imageBuilderType has changed, we need to restart the controller -func (ctrl *Controller) updateConfigMap(old, new interface{}) { - oldCM := old.(*corev1.ConfigMap).DeepCopy() - newCM := new.(*corev1.ConfigMap).DeepCopy() - if newCM.Name == OnClusterBuildConfigMapName && oldCM.Data[ImageBuilderTypeConfigMapKey] != newCM.Data[ImageBuilderTypeConfigMapKey] { - // restart ctrl and re-init - mcps, _ := ctrl.mcpLister.List(labels.Everything()) - impactedPools := []*mcfgv1.MachineConfigPool{} - for _, mcp := range mcps { - if ctrlcommon.IsLayeredPool(mcp) { - if running, _ := ctrl.imageBuilder.IsBuildRunning(mcp); running { - // building on this pool, we have changed img builder type. Need to stop build - impactedPools = append(impactedPools, mcp) - ps := newPoolState(mcp) - ctrl.imageBuilder.DeleteBuildObject(mcp) - ctrl.markBuildInterrupted(ps) - } - } - } - if ImageBuilderType(newCM.Data[ImageBuilderTypeConfigMapKey]) != OpenshiftImageBuilder && ImageBuilderType(newCM.Data[ImageBuilderTypeConfigMapKey]) != CustomPodImageBuilder { - ctrl.handleConfigMapError(impactedPools, &ErrInvalidImageBuilder{Message: "Invalid Image Builder Type found in configmap", InvalidType: newCM.Data[ImageBuilderTypeConfigMapKey]}, new) - os.Exit(255) - } - os.Exit(0) - } -} - -// Reconciles the MachineConfigPool state with the state of an OpenShift Image -// Builder object. -func (ctrl *Controller) imageBuildUpdater(build *buildv1.Build) error { - pool, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), build.Labels[targetMachineConfigPoolLabel], metav1.GetOptions{}) +// Reconciles the MachineConfigPool state with the state of a custom pod object. +func (ctrl *Controller) customBuildPodUpdater(pod *corev1.Pod) error { + pool, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), pod.Labels[targetMachineConfigPoolLabel], metav1.GetOptions{}) if err != nil { return err } - klog.Infof("Build (%s) is %s", build.Name, build.Status.Phase) - - objRef := toObjectRef(build) - - ps := newPoolState(pool) - - switch build.Status.Phase { - case buildv1.BuildPhaseNew, buildv1.BuildPhasePending: - if !ps.IsBuildPending() { - err = ctrl.markBuildPendingWithObjectRef(ps, *objRef) - } - case buildv1.BuildPhaseRunning: - // If we're running, then there's nothing to do right now. - if !ps.IsBuilding() { - err = ctrl.markBuildInProgress(ps) - } - case buildv1.BuildPhaseComplete: - // If we've succeeded, we need to update the pool to indicate that. - if !ps.IsBuildSuccess() { - err = ctrl.markBuildSucceeded(ps) - } - case buildv1.BuildPhaseFailed, buildv1.BuildPhaseError, buildv1.BuildPhaseCancelled: - // If we've failed, errored, or cancelled, we need to update the pool to indicate that. - if !ps.IsBuildFailure() { - err = ctrl.markBuildFailed(ps) - } - } + klog.V(4).Infof("Build pod (%s) is %s", pod.Name, pod.Status.Phase) + mosbs, err := ctrl.machineOSBuildLister.List(labels.Everything()) if err != nil { return err } - - ctrl.enqueueMachineConfigPool(pool) - return nil -} - -// Reconciles the MachineConfigPool state with the state of a custom pod object. -func (ctrl *Controller) customBuildPodUpdater(pod *corev1.Pod) error { - pool, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), pod.Labels[targetMachineConfigPoolLabel], metav1.GetOptions{}) + moscs, err := ctrl.machineOSConfigLister.List(labels.Everything()) if err != nil { return err } - klog.Infof("Build pod (%s) is %s", pod.Name, pod.Status.Phase) + var mosb *mcfgv1alpha1.MachineOSBuild + var mosc *mcfgv1alpha1.MachineOSConfig + for _, config := range moscs { + if config.Spec.MachineConfigPool.Name == pool.Name { + mosc = config + break + } + } + for _, build := range mosbs { + if build.Spec.MachineOSConfig.Name == mosc.Name { + mosb = build + break + } + } - ps := newPoolState(pool) + // We cannot solely rely upon the pod phase to determine whether the build + // pod is in an error state. This is because it is possible for the build + // container to enter an error state while the wait-for-done container is + // still running. The pod phase in this state will still be "Running" as + // opposed to error. + if isBuildPodError(pod) { + if err := ctrl.markBuildFailed(mosc, mosb); err != nil { + return err + } + ctrl.enqueueMachineOSBuild(mosb) + return nil + } + + mosbState := ctrlcommon.NewMachineOSBuildState(mosb) switch pod.Status.Phase { case corev1.PodPending: - if !ps.IsBuildPending() { + if !mosbState.IsBuildPending() { objRef := toObjectRef(pod) - err = ctrl.markBuildPendingWithObjectRef(ps, *objRef) + err = ctrl.markBuildPendingWithObjectRef(mosc, mosb, *objRef) } case corev1.PodRunning: // If we're running, then there's nothing to do right now. - if !ps.IsBuilding() { - err = ctrl.markBuildInProgress(ps) + if !mosbState.IsBuilding() { + err = ctrl.markBuildInProgress(mosb) } case corev1.PodSucceeded: // If we've succeeded, we need to update the pool to indicate that. - if !ps.IsBuildSuccess() { - err = ctrl.markBuildSucceeded(ps) + if !mosbState.IsBuildSuccess() { + err = ctrl.markBuildSucceeded(mosc, mosb) } case corev1.PodFailed: // If we've failed, we need to update the pool to indicate that. - if !ps.IsBuildFailure() { - err = ctrl.markBuildFailed(ps) + if !mosbState.IsBuildFailure() { + err = ctrl.markBuildFailed(mosc, mosb) } } @@ -499,248 +469,338 @@ func (ctrl *Controller) customBuildPodUpdater(pod *corev1.Pod) error { return err } - ctrl.enqueueMachineConfigPool(pool) + ctrl.enqueueMachineOSBuild(mosb) return nil } func (ctrl *Controller) handleConfigMapError(pools []*mcfgv1.MachineConfigPool, err error, key interface{}) { klog.V(2).Infof("Error syncing configmap %v: %v", key, err) utilruntime.HandleError(err) + // get mosb assoc. with pool for _, pool := range pools { klog.V(2).Infof("Dropping machineconfigpool %q out of the queue: %v", pool.Name, err) - ctrl.queue.Forget(pool.Name) - ctrl.queue.AddAfter(pool.Name, 1*time.Minute) + ctrl.mosQueue.Forget(pool.Name) + ctrl.mosQueue.AddAfter(pool.Name, 1*time.Minute) } } func (ctrl *Controller) handleErr(err error, key interface{}) { if err == nil { - ctrl.queue.Forget(key) + ctrl.mosQueue.Forget(key) return } - if ctrl.queue.NumRequeues(key) < ctrl.config.MaxRetries { - klog.V(2).Infof("Error syncing machineconfigpool %v: %v", key, err) - ctrl.queue.AddRateLimited(key) + if ctrl.mosQueue.NumRequeues(key) < ctrl.config.MaxRetries { + klog.V(2).Infof("Error syncing machineosbuild %v: %v", key, err) + ctrl.mosQueue.AddRateLimited(key) return } utilruntime.HandleError(err) - klog.V(2).Infof("Dropping machineconfigpool %q out of the queue: %v", key, err) - ctrl.queue.Forget(key) - ctrl.queue.AddAfter(key, 1*time.Minute) + klog.V(2).Infof("Dropping machineosbuild %q out of the queue: %v", key, err) + ctrl.mosQueue.Forget(key) + ctrl.mosQueue.AddAfter(key, 1*time.Minute) } -// syncMachineConfigPool will sync the machineconfig pool with the given key. -// This function is not meant to be invoked concurrently with the same key. -func (ctrl *Controller) syncMachineConfigPool(key string) error { - startTime := time.Now() - klog.V(4).Infof("Started syncing machineconfigpool %q (%v)", key, startTime) - defer func() { - klog.V(4).Infof("Finished syncing machineconfigpool %q (%v)", key, time.Since(startTime)) - }() +func (ctrl *Controller) syncMachineOSBuilder(key string) error { + // startTime := time.Now() + // klog.V(4).Infof("Started syncing build %q (%v)", key, startTime) + // defer func() { + // klog.V(4).Infof("Finished syncing machineOSBuilder %q (%v)", key, time.Since(startTime)) + // }() _, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { return err } - machineconfigpool, err := ctrl.mcpLister.Get(name) + isConfig := false + var machineOSConfig *mcfgv1alpha1.MachineOSConfig + machineOSBuild, err := ctrl.machineOSBuildLister.Get(name) if k8serrors.IsNotFound(err) { - klog.V(2).Infof("MachineConfigPool %v has been deleted", key) - return nil - } - if err != nil { - return err + // if this is not an existing build. This means our machineOsConfig changed + isConfig = true + machineOSConfig, err = ctrl.machineOSConfigLister.Get(name) + if k8serrors.IsNotFound(err) { + return nil + } } + if !isConfig { + for _, cond := range machineOSBuild.Status.Conditions { + if cond.Status == metav1.ConditionTrue { + switch mcfgv1alpha1.BuildProgress(cond.Type) { + case mcfgv1alpha1.MachineOSBuildPrepared: + klog.V(4).Infof("Build %s is build prepared and pending", name) + return nil + case mcfgv1alpha1.MachineOSBuilding: + klog.V(4).Infof("Build %s is building", name) + return nil + case mcfgv1alpha1.MachineOSBuildFailed: + klog.V(4).Infof("Build %s is failed", name) + return nil + case mcfgv1alpha1.MachineOSBuildInterrupted: + klog.V(4).Infof("Build %s is interrupted, requeueing", name) + ctrl.enqueueMachineOSBuild(machineOSBuild) + case mcfgv1alpha1.MachineOSBuildSucceeded: + klog.V(4).Infof("Build %s has successfully built", name) + return nil + default: + machineOSConfig, err := ctrl.machineOSConfigLister.Get(machineOSBuild.Spec.MachineOSConfig.Name) + if err != nil { + return err + } + doABuild, err := shouldWeDoABuild(ctrl.imageBuilder, machineOSConfig, machineOSBuild, machineOSBuild) + if err != nil { + return err + } + if doABuild { + ctrl.startBuildForMachineConfigPool(machineOSConfig, machineOSBuild) + } - // TODO: Doing a deep copy of this pool object from our cache and using it to - // determine our next course of action sometimes causes a race condition. I'm - // not sure if it's better to get a current copy from the API server or what. - // pool := machineconfigpool.DeepCopy() - pool, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), machineconfigpool.Name, metav1.GetOptions{}) - if err != nil { - return err + } + + } + } + } else { + // this is a config change or a config CREATION. We need to possibly make a mosb for this build. The updated config is handlded in the updateMachineOSConfig function + // if ctrl.imageBuilder. + var buildExists bool + var status *mcfgv1alpha1.MachineOSBuildStatus + machineOSBuild, buildExists = ctrl.doesMOSBExist(machineOSConfig) + if !buildExists { + machineOSBuild, status, err = ctrl.CreateBuildFromConfig(machineOSConfig) + if err != nil { + return err + } + machineOSBuild.Status = *status + if err := ctrl.startBuildForMachineConfigPool(machineOSConfig, machineOSBuild); err != nil { + ctrl.syncAvailableStatus(machineOSBuild) + return err + } + return nil + } } + return ctrl.syncAvailableStatus(machineOSBuild) +} - ps := newPoolState(pool) +func (ctrl *Controller) updateMachineConfigPool(old, cur interface{}) { + oldPool := old.(*mcfgv1.MachineConfigPool).DeepCopy() + curPool := cur.(*mcfgv1.MachineConfigPool).DeepCopy() + klog.V(4).Infof("Updating MachineConfigPool %s", oldPool.Name) - // Not a layered pool, so stop here. - if !ps.IsLayered() { - klog.V(4).Infof("MachineConfigPool %s is not opted-in for layering, ignoring", pool.Name) - return nil + moscOld, mosbOld, _ := ctrl.getConfigAndBuildForPool(oldPool) + moscNew, mosbNew, _ := ctrl.getConfigAndBuildForPool(curPool) + + doABuild, err := ctrlcommon.BuildDueToPoolChange(ctrl.imageBuilder, oldPool, curPool, moscNew, mosbNew) + if err != nil { + klog.Errorln(err) + ctrl.handleErr(err, curPool.Name) + return } + //reBuild := shouldWeRebuild(oldPool, curPool) + switch { - case ps.IsInterrupted(): - klog.V(4).Infof("MachineConfigPool %s is build interrupted, requeueing", pool.Name) - ctrl.enqueueMachineConfigPool(pool) - return nil - case ps.IsDegraded(): - klog.V(4).Infof("MachineConfigPool %s is degraded, requeueing", pool.Name) - ctrl.enqueueMachineConfigPool(pool) - return nil - case ps.IsRenderDegraded(): - klog.V(4).Infof("MachineConfigPool %s is render degraded, requeueing", pool.Name) - ctrl.enqueueMachineConfigPool(pool) - return nil - case ps.IsBuildPending(): - klog.V(4).Infof("MachineConfigPool %s is build pending", pool.Name) - return nil - case ps.IsBuilding(): - klog.V(4).Infof("MachineConfigPool %s is building", pool.Name) - return nil - case ps.IsBuildSuccess(): - klog.V(4).Infof("MachineConfigPool %s has successfully built", pool.Name) - return nil - default: - shouldBuild, err := shouldWeDoABuild(ctrl.imageBuilder, pool, pool) + // We've transitioned from a layered pool to a non-layered pool. + case ctrlcommon.IsLayeredPool(moscOld, mosbOld) && !ctrlcommon.IsLayeredPool(moscNew, mosbNew): + klog.V(4).Infof("MachineConfigPool %s has opted out of layering", curPool.Name) + if err := ctrl.finalizeOptOut(moscNew, mosbNew, curPool); err != nil { + klog.Errorln(err) + ctrl.handleErr(err, curPool.Name) + return + } + // We need to do a build. + case doABuild: + klog.V(4).Infof("MachineConfigPool %s has changed, requiring a build", curPool.Name) + var status *mcfgv1alpha1.MachineOSBuildStatus + mosbNew, status, err = ctrl.CreateBuildFromConfig(moscNew) if err != nil { - return fmt.Errorf("could not determine if a build is required for MachineConfigPool %q: %w", pool.Name, err) + klog.Errorln(err) + ctrl.handleErr(err, curPool.Name) + return } + mosbNew.Status = *status - if shouldBuild { - return ctrl.startBuildForMachineConfigPool(ps) + if err := ctrl.startBuildForMachineConfigPool(moscNew, mosbNew); err != nil { + ctrl.syncAvailableStatus(mosbNew) + klog.Errorln(err) + ctrl.handleErr(err, curPool.Name) + return } - - klog.V(4).Infof("Nothing to do for pool %q", pool.Name) + // // We need to restart build due to config change + // case reBuild: + // klog.V(4).Infof("MachineConfigPool %s requires a build restart", curPool.Name) + // if err := ctrl.restartBuildForMachineConfigPool(newPoolState(oldPool), newPoolState(curPool)); err != nil { + // klog.Errorln(err) + // ctrl.handleErr(err, curPool.Name) + // } + // Everything else. + default: + klog.V(4).Infof("MachineConfigPool %s up-to-date", curPool.Name) } - - // For everything else - return ctrl.syncAvailableStatus(pool) } -func (ctrl *Controller) markBuildInterrupted(ps *poolState) error { - klog.Errorf("Build interrupted for pool %s", ps.Name()) +func (ctrl *Controller) markBuildInterrupted(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) error { + klog.Errorf("Build %s interrupted for pool %s", mosb.Name, mosc.Spec.MachineConfigPool.Name) return retry.RetryOnConflict(retry.DefaultRetry, func() error { - mcp, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), ps.Name(), metav1.GetOptions{}) - if err != nil { - return err - } - ps := newPoolState(mcp) - ps.SetBuildConditions([]mcfgv1.MachineConfigPoolCondition{ + bs := ctrlcommon.NewMachineOSBuildState(mosb) + bs.SetBuildConditions([]metav1.Condition{ { - Type: mcfgv1.MachineConfigPoolBuildInterrupted, - Reason: "BuildInterrupted", - Status: corev1.ConditionTrue, + Type: string(mcfgv1alpha1.MachineOSBuildPrepared), + Status: metav1.ConditionFalse, + Reason: "Prepared", + Message: "Build Prepared and Pending", }, { - Type: mcfgv1.MachineConfigPoolBuildSuccess, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuilding), + Status: metav1.ConditionFalse, + Reason: "Running", + Message: "Image Build In Progress", }, { - Type: mcfgv1.MachineConfigPoolBuilding, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildFailed), + Status: metav1.ConditionFalse, + Reason: "Failed", + Message: "Build Failed", }, { - Type: mcfgv1.MachineConfigPoolBuildPending, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildInterrupted), + Status: metav1.ConditionTrue, + Reason: "Interrupted", + Message: "Build Interrupted", }, { - Type: mcfgv1.MachineConfigPoolDegraded, - Status: corev1.ConditionTrue, + Type: string(MachineOSBuildReady), + Status: metav1.ConditionFalse, + Reason: "Ready", + Message: "Build Ready", + }, + { + Type: string(mcfgv1alpha1.MachineOSBuildSucceeded), + Status: metav1.ConditionFalse, + Reason: "Ready", + Message: "Build Ready", }, }) - ps.pool.Spec.Configuration.Source = ps.pool.Spec.Configuration.Source[:len(ps.pool.Spec.Configuration.Source)-1] - - return ctrl.updatePoolAndSyncAvailableStatus(ps.MachineConfigPool()) + // update mosc status + return ctrl.syncAvailableStatus(bs.Build) }) } // Marks a given MachineConfigPool as a failed build. -func (ctrl *Controller) markBuildFailed(ps *poolState) error { - klog.Errorf("Build failed for pool %s", ps.Name()) +func (ctrl *Controller) markBuildFailed(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) error { + klog.Errorf("Build %s failed for pool %s", mosb.Name, mosc.Spec.MachineConfigPool.Name) return retry.RetryOnConflict(retry.DefaultRetry, func() error { - mcp, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), ps.Name(), metav1.GetOptions{}) - if err != nil { - return err - } - ps := newPoolState(mcp) - ps.SetBuildConditions([]mcfgv1.MachineConfigPoolCondition{ + bs := ctrlcommon.NewMachineOSBuildState(mosb) + bs.SetBuildConditions([]metav1.Condition{ { - Type: mcfgv1.MachineConfigPoolBuildInterrupted, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildPrepared), + Status: metav1.ConditionFalse, + Reason: "Prepared", + Message: "Build Prepared and Pending", }, { - Type: mcfgv1.MachineConfigPoolBuildFailed, - Reason: "BuildFailed", - Status: corev1.ConditionTrue, + Type: string(mcfgv1alpha1.MachineOSBuilding), + Status: metav1.ConditionFalse, + Reason: "Building", + Message: "Image Build In Progress", }, { - Type: mcfgv1.MachineConfigPoolBuildSuccess, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildFailed), + Status: metav1.ConditionTrue, + Reason: "Failed", + Message: "Build Failed", }, { - Type: mcfgv1.MachineConfigPoolBuilding, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildInterrupted), + Status: metav1.ConditionFalse, + Reason: "Interrupted", + Message: "Build Interrupted", }, { - Type: mcfgv1.MachineConfigPoolBuildPending, - Status: corev1.ConditionFalse, + Type: string(MachineOSBuildReady), + Status: metav1.ConditionFalse, + Reason: "Ready", + Message: "Build Ready", }, { - Type: mcfgv1.MachineConfigPoolDegraded, - Status: corev1.ConditionTrue, + Type: string(mcfgv1alpha1.MachineOSBuildSucceeded), + Status: metav1.ConditionFalse, + Reason: "Ready", + Message: "Build Ready", }, }) - return ctrl.syncFailingStatus(ps.MachineConfigPool(), fmt.Errorf("build failed")) + return ctrl.syncFailingStatus(mosc, bs.Build, fmt.Errorf("BuildFailed")) }) + } // Marks a given MachineConfigPool as the build is in progress. -func (ctrl *Controller) markBuildInProgress(ps *poolState) error { - klog.Infof("Build in progress for MachineConfigPool %s, config %s", ps.Name(), ps.CurrentMachineConfig()) +func (ctrl *Controller) markBuildInProgress(mosb *mcfgv1alpha1.MachineOSBuild) error { + klog.V(4).Infof("Build %s in progress for config %s", mosb.Name, mosb.Spec.DesiredConfig.Name) return retry.RetryOnConflict(retry.DefaultRetry, func() error { - mcp, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), ps.Name(), metav1.GetOptions{}) - if err != nil { - return err - } - ps := newPoolState(mcp) - ps.SetBuildConditions([]mcfgv1.MachineConfigPoolCondition{ + bs := ctrlcommon.NewMachineOSBuildState(mosb) + + bs.SetBuildConditions([]metav1.Condition{ { - Type: mcfgv1.MachineConfigPoolBuildInterrupted, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildPrepared), + Status: metav1.ConditionFalse, + Reason: "Prepared", + Message: "Build Prepared and Pending", }, { - Type: mcfgv1.MachineConfigPoolBuildFailed, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuilding), + Status: metav1.ConditionTrue, + Reason: "Building", + Message: "Image Build In Progress", }, { - Type: mcfgv1.MachineConfigPoolBuildSuccess, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildFailed), + Status: metav1.ConditionFalse, + Reason: "Failed", + Message: "Build Failed", }, { - Type: mcfgv1.MachineConfigPoolBuilding, - Reason: "BuildRunning", - Status: corev1.ConditionTrue, + Type: string(mcfgv1alpha1.MachineOSBuildInterrupted), + Status: metav1.ConditionFalse, + Reason: "Interrupted", + Message: "Build Interrupted", }, { - Type: mcfgv1.MachineConfigPoolBuildPending, - Status: corev1.ConditionFalse, + Type: string(MachineOSBuildReady), + Status: metav1.ConditionFalse, + Reason: "Ready", + Message: "Build Ready", + }, + { + Type: string(mcfgv1alpha1.MachineOSBuildSucceeded), + Status: metav1.ConditionFalse, + Reason: "Ready", + Message: "Build Ready", }, }) - return ctrl.syncAvailableStatus(ps.MachineConfigPool()) + return ctrl.syncAvailableStatus(mosb) }) } // Deletes the ephemeral objects we created to perform this specific build. -func (ctrl *Controller) postBuildCleanup(pool *mcfgv1.MachineConfigPool, ignoreMissing bool) error { +func (ctrl *Controller) postBuildCleanup(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild, ignoreMissing bool) error { // Delete the actual build object itself. deleteBuildObject := func() error { - err := ctrl.imageBuilder.DeleteBuildObject(pool) + err := ctrl.imageBuilder.DeleteBuildObject(mosb, mosc) if err == nil { - klog.Infof("Deleted build object %s", newImageBuildRequest(pool).getBuildName()) + klog.Infof("Deleted build object %s", newImageBuildRequest(mosc, mosb).getBuildName()) } return err @@ -748,7 +808,7 @@ func (ctrl *Controller) postBuildCleanup(pool *mcfgv1.MachineConfigPool, ignoreM // Delete the ConfigMap containing the MachineConfig. deleteMCConfigMap := func() error { - ibr := newImageBuildRequest(pool) + ibr := newImageBuildRequest(mosc, mosb) err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Delete(context.TODO(), ibr.getMCConfigMapName(), metav1.DeleteOptions{}) @@ -761,7 +821,7 @@ func (ctrl *Controller) postBuildCleanup(pool *mcfgv1.MachineConfigPool, ignoreM // Delete the ConfigMap containing the rendered Dockerfile. deleteDockerfileConfigMap := func() error { - ibr := newImageBuildRequest(pool) + ibr := newImageBuildRequest(mosc, mosb) err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Delete(context.TODO(), ibr.getDockerfileConfigMapName(), metav1.DeleteOptions{}) @@ -791,183 +851,228 @@ func (ctrl *Controller) postBuildCleanup(pool *mcfgv1.MachineConfigPool, ignoreM ) } -// Marks a given MachineConfigPool as build successful and cleans up after itself. -func (ctrl *Controller) markBuildSucceeded(ps *poolState) error { - klog.Infof("Build succeeded for MachineConfigPool %s, config %s", ps.Name(), ps.CurrentMachineConfig()) - - pool := ps.MachineConfigPool() - - // Get the final image pullspec. - imagePullspec, err := ctrl.imageBuilder.FinalPullspec(pool) - if err != nil { - return fmt.Errorf("could not get final image pullspec for pool %s: %w", ps.Name(), err) - } - - if imagePullspec == "" { - return fmt.Errorf("image pullspec empty for pool %s", ps.Name()) +// If one wants to opt out, this removes all of the statuses and object +// references from a given MachineConfigPool. +func (ctrl *Controller) finalizeOptOut(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild, pool *mcfgv1.MachineConfigPool) error { + if err := ctrl.postBuildCleanup(mosc, mosb, true); err != nil { + return err } + return nil +} - // Perform the post-build cleanup. - if err := ctrl.postBuildCleanup(pool, false); err != nil { - return fmt.Errorf("could not do post-build cleanup: %w", err) - } +// Marks a given MachineConfigPool as build successful and cleans up after itself. +func (ctrl *Controller) markBuildSucceeded(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) error { + klog.V(4).Infof("Build %s succeeded for MachineConfigPool %s, config %s", mosb.Name, mosc.Spec.MachineConfigPool.Name, mosb.Spec.DesiredConfig.Name) - // Perform the MachineConfigPool update. return retry.RetryOnConflict(retry.DefaultRetry, func() error { - mcp, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), ps.Name(), metav1.GetOptions{}) + // REPLACE FINAL PULLSPEC WITH SHA HERE USING ctrl.imagebuilder.FinalPullspec + ibr := newImageBuildRequest(mosc, mosb) + digestConfigMap, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), ibr.getDigestConfigMapName(), metav1.GetOptions{}) if err != nil { return err } - ps := newPoolState(mcp) + sha, err := ParseImagePullspec(mosb.Spec.RenderedImagePushspec, digestConfigMap.Data["digest"]) + if err != nil { + return err + } - // Set the annotation or field to point to the newly-built container image. - klog.V(4).Infof("Setting new image pullspec for %s to %s", ps.Name(), imagePullspec) - ps.SetImagePullspec(imagePullspec) + // now, all we need is to make sure this is used all around. (node controller, getters, etc) + mosc.Status.CurrentImagePullspec = sha + mosb.Status.FinalImagePushspec = sha - // Remove the build object reference from the MachineConfigPool since we're - // not using it anymore. - ps.DeleteBuildRefForCurrentMachineConfig() + if err := ctrl.postBuildCleanup(mosc, mosb, false); err != nil { + return fmt.Errorf("could not do post-build cleanup: %w", err) + } + + bs := ctrlcommon.NewMachineOSBuildState(mosb) - // Adjust the MachineConfigPool status to indicate success. - ps.SetBuildConditions([]mcfgv1.MachineConfigPoolCondition{ + bs.SetBuildConditions([]metav1.Condition{ + { + Type: string(mcfgv1alpha1.MachineOSBuildPrepared), + Status: metav1.ConditionFalse, + Reason: "Prepared", + Message: "Build Prepared and Pending", + }, + { + Type: string(mcfgv1alpha1.MachineOSBuilding), + Status: metav1.ConditionFalse, + Reason: "Building", + Message: "Image Build In Progress", + }, { - Type: mcfgv1.MachineConfigPoolBuildFailed, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildFailed), + Status: metav1.ConditionFalse, + Reason: "Failed", + Message: "Build Failed", }, { - Type: mcfgv1.MachineConfigPoolBuildSuccess, - Reason: "BuildSucceeded", - Status: corev1.ConditionTrue, + Type: string(mcfgv1alpha1.MachineOSBuildInterrupted), + Status: metav1.ConditionFalse, + Reason: "Interrupted", + Message: "Build Interrupted", }, { - Type: mcfgv1.MachineConfigPoolBuilding, - Status: corev1.ConditionFalse, + Type: string(MachineOSBuildReady), + Status: metav1.ConditionTrue, + Reason: "Ready", + Message: "Build Ready", }, { - Type: mcfgv1.MachineConfigPoolDegraded, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildSucceeded), + Status: metav1.ConditionTrue, + Reason: "Ready", + Message: "Build Ready", }, }) - return ctrl.updatePoolAndSyncAvailableStatus(ps.MachineConfigPool()) + return ctrl.updateConfigAndBuild(mosc, bs.Build) }) } // Marks a given MachineConfigPool as build pending. -func (ctrl *Controller) markBuildPendingWithObjectRef(ps *poolState, objRef corev1.ObjectReference) error { - return retry.RetryOnConflict(retry.DefaultRetry, func() error { - mcp, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), ps.Name(), metav1.GetOptions{}) - if err != nil { - return err - } - - ps := newPoolState(mcp) +func (ctrl *Controller) markBuildPendingWithObjectRef(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild, objRef corev1.ObjectReference) error { + klog.V(4).Infof("Build %s for pool %s marked pending with object reference %v", mosb.Name, mosc.Spec.MachineConfigPool.Name, objRef) - klog.Infof("Build for %s marked pending with object reference %v", ps.Name(), objRef) + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + bs := ctrlcommon.NewMachineOSBuildState(mosb) - ps.SetBuildConditions([]mcfgv1.MachineConfigPoolCondition{ + bs.SetBuildConditions([]metav1.Condition{ + { + Type: string(mcfgv1alpha1.MachineOSBuildPrepared), + Status: metav1.ConditionTrue, + Reason: "Prepared", + Message: "Build Prepared and Pending", + }, { - Type: mcfgv1.MachineConfigPoolBuildInterrupted, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuilding), + Status: metav1.ConditionFalse, + Reason: "Building", + Message: "Image Build In Progress", }, { - Type: mcfgv1.MachineConfigPoolBuildFailed, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildFailed), + Status: metav1.ConditionFalse, + Reason: "Failed", + Message: "Build Failed", }, { - Type: mcfgv1.MachineConfigPoolBuildSuccess, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildInterrupted), + Status: metav1.ConditionFalse, + Reason: "Interrupted", + Message: "Build Interrupted", }, { - Type: mcfgv1.MachineConfigPoolBuilding, - Status: corev1.ConditionFalse, + Type: string(MachineOSBuildReady), + Status: metav1.ConditionFalse, + Reason: "Ready", + Message: "Build Ready", }, { - Type: mcfgv1.MachineConfigPoolBuildPending, - Reason: "BuildPending", - Status: corev1.ConditionTrue, + Type: string(mcfgv1alpha1.MachineOSBuildSucceeded), + Status: metav1.ConditionFalse, + Reason: "Ready", + Message: "Build Ready", }, }) - // If the MachineConfigPool has the build object reference, we just want to - // update the MachineConfigPool's status. - if ps.HasBuildObjectRef(objRef) { - return ctrl.syncAvailableStatus(ps.MachineConfigPool()) + mcp, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), mosc.Spec.MachineConfigPool.Name, metav1.GetOptions{}) + if err != nil { + return err } - // If we added the build object reference, we need to update both the - // MachineConfigPool itself and its status. - if err := ps.AddBuildObjectRef(objRef); err != nil { - return err + mcp.Spec.Configuration.Source = append(mcp.Spec.Configuration.Source, objRef) + ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Update(context.TODO(), mcp, metav1.UpdateOptions{}) + // add obj ref to mosc + + if bs.Build.Status.BuilderReference == nil { + mosb.Status.BuilderReference = &mcfgv1alpha1.MachineOSBuilderReference{ImageBuilderType: mosc.Spec.BuildInputs.ImageBuilder.ImageBuilderType, PodImageBuilder: &mcfgv1alpha1.ObjectReference{ + Name: objRef.Name, + Group: objRef.GroupVersionKind().Group, + Namespace: objRef.Namespace, + Resource: objRef.ResourceVersion, + }} } + return ctrl.syncAvailableStatus(bs.Build) - return ctrl.updatePoolAndSyncAvailableStatus(ps.MachineConfigPool()) }) } -func (ctrl *Controller) updatePoolAndSyncAvailableStatus(pool *mcfgv1.MachineConfigPool) error { - // We need to do an API server round-trip to ensure all of our mutations get - // propagated. - updatedPool, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Update(context.TODO(), pool, metav1.UpdateOptions{}) +func (ctrl *Controller) updateConfigSpec(mosc *mcfgv1alpha1.MachineOSConfig) error { + _, err := ctrl.mcfgclient.MachineconfigurationV1alpha1().MachineOSConfigs().Update(context.TODO(), mosc, metav1.UpdateOptions{}) if err != nil { - return fmt.Errorf("could not update MachineConfigPool %q: %w", pool.Name, err) + return fmt.Errorf("could not update MachineOSConfig %q: %w", mosc.Name, err) } - - updatedPool.Status = pool.Status - - return ctrl.syncAvailableStatus(updatedPool) + return nil } +func (ctrl *Controller) updateConfigAndBuild(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) error { + _, err := ctrl.mcfgclient.MachineconfigurationV1alpha1().MachineOSConfigs().UpdateStatus(context.TODO(), mosc, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("could not update MachineOSConfig%q: %w", mosc.Name, err) + } + newMosb, err := ctrl.mcfgclient.MachineconfigurationV1alpha1().MachineOSBuilds().Update(context.TODO(), mosb, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("could not update MachineOSBuild %q: %w", mosb.Name, err) + } -// Machine Config Pools + newMosb.Status = mosb.Status -func (ctrl *Controller) addMachineConfigPool(obj interface{}) { - pool := obj.(*mcfgv1.MachineConfigPool).DeepCopy() - klog.V(4).Infof("Adding MachineConfigPool %s", pool.Name) - ctrl.enqueueMachineConfigPool(pool) + return ctrl.syncAvailableStatus(newMosb) } -func (ctrl *Controller) getBuildInputs(ps *poolState) (*buildInputs, error) { +// Prepares all of the objects needed to perform an image build. +func (ctrl *Controller) prepareForBuild(mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig) (ImageBuildRequest, error) { + ibr := newImageBuildRequestFromBuildInputs(mosb, mosc) + + // populate the "optional" fields, if the user did not specify them osImageURL, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), machineConfigOSImageURLConfigMapName, metav1.GetOptions{}) if err != nil { - return nil, fmt.Errorf("could not get OS image URL: %w", err) + return ibr, fmt.Errorf("could not get OS image URL: %w", err) } + moscNew := mosc.DeepCopy() - onClusterBuildConfig, err := ctrl.getOnClusterBuildConfig(ps) - if err != nil { - return nil, fmt.Errorf("could not get configmap %q: %w", OnClusterBuildConfigMapName, err) + url := newExtensionsImageInfo(osImageURL, mosc) + if len(moscNew.Spec.BuildInputs.BaseOSExtensionsImagePullspec) == 0 { + moscNew.Spec.BuildInputs.BaseOSExtensionsImagePullspec = url.Pullspec + } + url = newBaseImageInfo(osImageURL, mosc) + if len(moscNew.Spec.BuildInputs.BaseOSImagePullspec) == 0 { + moscNew.Spec.BuildInputs.BaseOSImagePullspec = url.Pullspec + moscNew.Spec.BuildInputs.ReleaseVersion = osImageURL.Data[releaseVersionConfigKey] } - customDockerfiles, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), customDockerfileConfigMapName, metav1.GetOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - return nil, fmt.Errorf("could not retrieve %s ConfigMap: %w", customDockerfileConfigMapName, err) + etcPkiEntitlements, err := ctrl.getOptionalSecret(etcPkiEntitlementSecretName) + if err != nil { + return ImageBuildRequest{}, err } - currentMC := ps.CurrentMachineConfig() + ibr.HasEtcPkiEntitlementKeys = etcPkiEntitlements != nil - mc, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigs().Get(context.TODO(), currentMC, metav1.GetOptions{}) + etcPkiRpmGpgKeys, err := ctrl.getOptionalSecret(etcPkiRpmGpgSecretName) if err != nil { - return nil, fmt.Errorf("could not get MachineConfig %s: %w", currentMC, err) + return ImageBuildRequest{}, err } - inputs := &buildInputs{ - onClusterBuildConfig: onClusterBuildConfig, - osImageURL: osImageURL, - customDockerfiles: customDockerfiles, - pool: ps.MachineConfigPool(), - machineConfig: mc, + ibr.HasEtcPkiRpmGpgKeys = etcPkiRpmGpgKeys != nil + + etcYumReposDConfigs, err := ctrl.getOptionalConfigMap(etcYumReposDConfigMapName) + if err != nil { + return ImageBuildRequest{}, err } - return inputs, nil -} + ibr.HasEtcYumReposDConfigs = etcYumReposDConfigs != nil -// Prepares all of the objects needed to perform an image build. -func (ctrl *Controller) prepareForBuild(inputs *buildInputs) (ImageBuildRequest, error) { - ibr := newImageBuildRequestFromBuildInputs(inputs) + // make sure to get these new settings + ibr.MachineOSConfig = moscNew - mcConfigMap, err := ibr.toConfigMap(inputs.machineConfig) + mc, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigs().Get(context.TODO(), mosb.Spec.DesiredConfig.Name, metav1.GetOptions{}) if err != nil { - return ImageBuildRequest{}, fmt.Errorf("could not convert MachineConfig %s into ConfigMap: %w", inputs.machineConfig.Name, err) + return ibr, err + } + + mcConfigMap, err := ibr.toConfigMap(mc) // ?????? + if err != nil { + return ImageBuildRequest{}, fmt.Errorf("could not convert MachineConfig %s into ConfigMap: %w", mosb.Spec.DesiredConfig.Name, err) // ???? } _, err = ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Create(context.TODO(), mcConfigMap, metav1.CreateOptions{}) @@ -975,7 +1080,7 @@ func (ctrl *Controller) prepareForBuild(inputs *buildInputs) (ImageBuildRequest, return ImageBuildRequest{}, fmt.Errorf("could not load rendered MachineConfig %s into configmap: %w", mcConfigMap.Name, err) } - klog.Infof("Stored MachineConfig %s in ConfigMap %s for build", inputs.machineConfig.Name, mcConfigMap.Name) + klog.Infof("Stored MachineConfig %s in ConfigMap %s for build", mosb.Spec.DesiredConfig.Name, mcConfigMap.Name) dockerfileConfigMap, err := ibr.dockerfileToConfigMap() if err != nil { @@ -992,121 +1097,117 @@ func (ctrl *Controller) prepareForBuild(inputs *buildInputs) (ImageBuildRequest, return ibr, nil } -// Determines if we should run a build, then starts a build pod to perform the -// build, and updates the MachineConfigPool with an object reference for the -// build pod. -func (ctrl *Controller) startBuildForMachineConfigPool(ps *poolState) error { - - if ctrlcommon.DoARebuild(ps.pool) { - // delete FAILED build attempts and builds - // delete rendered containerfile, MC, configmaps etc. - // Delete the actual build object itself. - // if we are rebuilding, we cannot ignore DNE. All of these objects should exist. - err := ctrl.postBuildCleanup(ps.pool, false) - if err != nil { - return fmt.Errorf("Could not update pool when triggering a rebuild: %v", err) - } - // remove annotation - delete(ps.pool.Labels, ctrlcommon.RebuildPoolLabel) - err = ctrl.updatePoolAndSyncAvailableStatus(ps.MachineConfigPool()) - if err != nil { - return fmt.Errorf("Could not update pool when triggering a rebuild: %v", err) - } +// Fetches an optional secret to inject into the build. Returns a nil error if +// the secret is not found. +func (ctrl *Controller) getOptionalSecret(secretName string) (*corev1.Secret, error) { + optionalSecret, err := ctrl.kubeclient.CoreV1().Secrets(ctrlcommon.MCONamespace).Get(context.TODO(), secretName, metav1.GetOptions{}) + if err == nil { + klog.Infof("Optional build secret %q found, will include in build", secretName) + return optionalSecret, nil + } + if k8serrors.IsNotFound(err) { + klog.Infof("Could not find optional secret %q, will not include in build", secretName) + return nil, nil } - inputs, err := ctrl.getBuildInputs(ps) - if err != nil { - return fmt.Errorf("could not fetch build inputs: %w", err) + + return nil, fmt.Errorf("could not retrieve optional secret: %s: %w", secretName, err) +} + +// Fetches an optional ConfigMap to inject into the build. Returns a nil error if +// the ConfigMap is not found. +func (ctrl *Controller) getOptionalConfigMap(configmapName string) (*corev1.ConfigMap, error) { + optionalConfigMap, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), configmapName, metav1.GetOptions{}) + if err == nil { + klog.Infof("Optional build ConfigMap %q found, will include in build", configmapName) + return optionalConfigMap, nil } - ibr, err := ctrl.prepareForBuild(inputs) - if err != nil { - return fmt.Errorf("could not start build for MachineConfigPool %s: %w", ps.Name(), err) + if k8serrors.IsNotFound(err) { + klog.Infof("Could not find ConfigMap %q, will not include in build", configmapName) + return nil, nil } - objRef, err := ctrl.imageBuilder.StartBuild(ibr) + return nil, fmt.Errorf("could not retrieve optional ConfigMap: %s: %w", configmapName, err) +} + +// Determines if we should run a build, then starts a build pod to perform the +// build, and updates the MachineConfigPool with an object reference for the +// build pod. +func (ctrl *Controller) startBuildForMachineConfigPool(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) error { + + // we need to add osImageURL to mosbuild, will reduce api calls to configmaps + // ocb config will live in th mosb + // pool will live in the mosb + // mc we can get based off the pool specified in the mosb.... though, given how we could use this in two places + ourConfig, err := ctrl.machineOSConfigLister.Get(mosb.Spec.MachineOSConfig.Name) if err != nil { return err } - - return ctrl.markBuildPendingWithObjectRef(ps, *objRef) -} - -// Gets the ConfigMap which specifies the name of the base image pull secret, final image pull secret, and final image pullspec. -func (ctrl *Controller) getOnClusterBuildConfig(ps *poolState) (*corev1.ConfigMap, error) { - onClusterBuildConfigMap, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), OnClusterBuildConfigMapName, metav1.GetOptions{}) + // Replace the user-supplied tag (if present) with the name of the + // rendered MachineConfig for uniqueness. This will also allow us to + // eventually do a pre-build registry query to determine if we need to + // perform a build. + named, err := reference.ParseNamed(ourConfig.Spec.BuildInputs.RenderedImagePushspec) if err != nil { - return nil, fmt.Errorf("could not get build controller config %q: %w", OnClusterBuildConfigMapName, err) + return err } - requiredKeys := []string{ - BaseImagePullSecretNameConfigKey, - FinalImagePushSecretNameConfigKey, - FinalImagePullspecConfigKey, + tagged, err := reference.WithTag(named, mosb.Spec.DesiredConfig.Name) + if err != nil { + return fmt.Errorf("could not add tag %s to image pullspec %s: %w", mosb.Spec.DesiredConfig.Name, ourConfig.Spec.BuildInputs.RenderedImagePushspec, err) } - needToUpdateConfigMap := false - finalImagePullspecWithTag := "" - - currentMC := ps.CurrentMachineConfig() + ourConfig.Status.CurrentImagePullspec = tagged.String() - for _, key := range requiredKeys { - val, ok := onClusterBuildConfigMap.Data[key] - if !ok { - return nil, fmt.Errorf("missing required key %q in configmap %s", key, OnClusterBuildConfigMapName) + secrets := make(map[string]string) + secrets["push"] = ourConfig.Spec.BuildInputs.RenderedImagePushSecret.Name + secrets["pull"] = ourConfig.Spec.BuildInputs.BaseImagePullSecret.Name + updateMOSC := false + for key, s := range secrets { + if s == "" { + continue } - - if key == BaseImagePullSecretNameConfigKey || key == FinalImagePushSecretNameConfigKey { - secret, err := ctrl.validatePullSecret(val) - if err != nil { - return nil, err - } - - if strings.Contains(secret.Name, "canonical") { - klog.Infof("Updating build controller config %s to indicate we have a canonicalized secret %s", OnClusterBuildConfigMapName, secret.Name) - onClusterBuildConfigMap.Data[key] = secret.Name - needToUpdateConfigMap = true - } + newS, err := ctrl.validatePullSecret(s) + if err != nil { + return err } - if key == FinalImagePullspecConfigKey { - // Replace the user-supplied tag (if present) with the name of the - // rendered MachineConfig for uniqueness. This will also allow us to - // eventually do a pre-build registry query to determine if we need to - // perform a build. - named, err := reference.ParseNamed(val) - if err != nil { - return nil, fmt.Errorf("could not parse %s with %q: %w", key, val, err) - } - - tagged, err := reference.WithTag(named, currentMC) - if err != nil { - return nil, fmt.Errorf("could not add tag %s to image pullspec %s: %w", currentMC, val, err) + if strings.Contains(newS.Name, "canonical") { + updateMOSC = true + klog.Infof("Updating build controller config to indicate we have a canonicalized secret %s", newS.Name) + switch key { + case "push": + ourConfig.Spec.BuildInputs.RenderedImagePushSecret.Name = newS.Name + case "pull": + ourConfig.Spec.BuildInputs.BaseImagePullSecret.Name = newS.Name } - - finalImagePullspecWithTag = tagged.String() } } - // If we had to canonicalize a secret, that means the ConfigMap no longer - // points to the expected secret. So let's update the ConfigMap in the API - // server for the sake of consistency. - if needToUpdateConfigMap { - klog.Infof("Updating build controller config") - // TODO: Figure out why this causes failures with resourceVersions. - onClusterBuildConfigMap, err = ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Update(context.TODO(), onClusterBuildConfigMap, metav1.UpdateOptions{}) - if err != nil { - return nil, fmt.Errorf("could not update configmap %q: %w", OnClusterBuildConfigMapName, err) - } + // ok + // we need to 1) replace tag + + ibr, err := ctrl.prepareForBuild(mosb, ourConfig) + if err != nil { + return fmt.Errorf("could not start build for MachineConfigPool %s: %w", ourConfig.Spec.MachineConfigPool.Name, err) } - // We don't want to write this back to the API server since it's only useful - // for this specific build. TODO: Migrate this to the ImageBuildRequest - // object so that it's generated on-demand instead. - onClusterBuildConfigMap.Data[FinalImagePullspecConfigKey] = finalImagePullspecWithTag + objRef, err := ctrl.imageBuilder.StartBuild(ibr) + + if err != nil { + return err + } - return onClusterBuildConfigMap, err + err = ctrl.markBuildPendingWithObjectRef(mosc, mosb, *objRef) + if err != nil { + return err + } + if updateMOSC { + return ctrl.updateConfigSpec(ourConfig) + } + return nil } // Ensure that the supplied pull secret exists, is in the correct format, etc. @@ -1174,206 +1275,268 @@ func (ctrl *Controller) handleCanonicalizedPullSecret(secret *corev1.Secret) (*c return out, nil } -// If one wants to opt out, this removes all of the statuses and object -// references from a given MachineConfigPool. -func (ctrl *Controller) finalizeOptOut(ps *poolState) error { - if err := ctrl.postBuildCleanup(ps.MachineConfigPool(), true); err != nil { - return err - } +func (ctrl *Controller) addMachineOSConfig(cur interface{}) { + m := cur.(*mcfgv1alpha1.MachineOSConfig).DeepCopy() + ctrl.enqueueMachineOSConfig(m) + klog.V(4).Infof("Adding MachineOSConfig %s", m.Name) - return retry.RetryOnConflict(retry.DefaultRetry, func() error { - mcp, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), ps.MachineConfigPool().Name, metav1.GetOptions{}) - if err != nil { - return err - } - - ps := newPoolState(mcp) - ps.DeleteBuildRefForCurrentMachineConfig() - ps.ClearImagePullspec() - ps.ClearAllBuildConditions() - - return ctrl.updatePoolAndSyncAvailableStatus(ps.MachineConfigPool()) - }) } -// Fires whenever a MachineConfigPool is updated. -func (ctrl *Controller) updateMachineConfigPool(old, cur interface{}) { - oldPool := old.(*mcfgv1.MachineConfigPool).DeepCopy() - curPool := cur.(*mcfgv1.MachineConfigPool).DeepCopy() +func (ctrl *Controller) updateMachineOSConfig(old, cur interface{}) { + oldMOSC := old.(*mcfgv1alpha1.MachineOSConfig).DeepCopy() + curMOSC := cur.(*mcfgv1alpha1.MachineOSConfig).DeepCopy() - klog.V(4).Infof("Updating MachineConfigPool %s", oldPool.Name) - - doABuild, err := shouldWeDoABuild(ctrl.imageBuilder, oldPool, curPool) - if err != nil { - klog.Errorln(err) - ctrl.handleErr(err, curPool.Name) + if equality.Semantic.DeepEqual(oldMOSC.Spec.BuildInputs, curMOSC.Spec.BuildInputs) { + // we do not want to trigger an update func just for MOSC status, we dont act on the status return } - switch { - // We've transitioned from a layered pool to a non-layered pool. - case ctrlcommon.IsLayeredPool(oldPool) && !ctrlcommon.IsLayeredPool(curPool): - klog.V(4).Infof("MachineConfigPool %s has opted out of layering", curPool.Name) - if err := ctrl.finalizeOptOut(newPoolState(curPool)); err != nil { - klog.Errorln(err) - ctrl.handleErr(err, curPool.Name) + klog.Infof("Updating MachineOSConfig %s", oldMOSC.Name) + + doABuild := configChangeCauseBuild(oldMOSC, curMOSC) + if doABuild { + build, exists := ctrl.doesMOSBExist(curMOSC) + if exists { + ctrl.startBuildForMachineConfigPool(curMOSC, build) // ? + } + // if the mosb does not exist, lets just enqueue the mosc and let the sync handler take care of the new object creation + } + ctrl.enqueueMachineOSConfig(curMOSC) +} + +func (ctrl *Controller) deleteMachineOSConfig(cur interface{}) { + m, ok := cur.(*mcfgv1alpha1.MachineOSConfig) + // first, we need to stop and delete any existing builds. + mosb, err := ctrl.machineOSBuildLister.Get(fmt.Sprintf("%s-builder", m.Spec.MachineConfigPool.Name)) + if err == nil { + if running, _ := ctrl.imageBuilder.IsBuildRunning(mosb, m); running { + // we need to stop the build. + ctrl.imageBuilder.DeleteBuildObject(mosb, m) + ctrl.markBuildInterrupted(m, mosb) + } + ctrl.mcfgclient.MachineconfigurationV1alpha1().MachineOSBuilds().Delete(context.TODO(), mosb.Name, metav1.DeleteOptions{}) + } + if !ok { + tombstone, ok := cur.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", cur)) return } - // We need to do a build. - case doABuild || (ctrlcommon.IsLayeredPool(curPool) && ctrlcommon.DoARebuild(curPool)): - klog.V(4).Infof("MachineConfigPool %s has changed, requiring a build", curPool.Name) - if err := ctrl.startBuildForMachineConfigPool(newPoolState(curPool)); err != nil { - klog.Errorln(err) - ctrl.handleErr(err, curPool.Name) + m, ok = tombstone.Obj.(*mcfgv1alpha1.MachineOSConfig) + if !ok { + utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a MachineOSConfig %#v", cur)) return } - // Everything else. - default: - klog.V(4).Infof("MachineConfigPool %s up-to-date", curPool.Name) + } + klog.V(4).Infof("Deleting MachineOSBuild %s", m.Name) +} + +func (ctrl *Controller) updateMachineOSBuild(old, cur interface{}) { + oldMOSB := old.(*mcfgv1alpha1.MachineOSBuild).DeepCopy() + curMOSB := cur.(*mcfgv1alpha1.MachineOSBuild).DeepCopy() + + if equality.Semantic.DeepEqual(oldMOSB.Status, oldMOSB.Status) { + // we do not want to trigger an update func just for MOSB spec, we dont act on the spec + return + } + + klog.Infof("Updating MachineOSBuild %s", oldMOSB.Name) + ourConfig, err := ctrl.machineOSConfigLister.Get(curMOSB.Spec.MachineOSConfig.Name) + if err != nil { + return } - ctrl.enqueueMachineConfigPool(curPool) + doABuild, err := shouldWeDoABuild(ctrl.imageBuilder, ourConfig, oldMOSB, curMOSB) + if err != nil { + return + } + if doABuild { + ctrl.startBuildForMachineConfigPool(ourConfig, curMOSB) + } + ctrl.enqueueMachineOSBuild(curMOSB) } -// Fires whenever a MachineConfigPool is deleted. TODO: Wire up checks for -// deleting any in-progress builds. -func (ctrl *Controller) deleteMachineConfigPool(obj interface{}) { - pool, ok := obj.(*mcfgv1.MachineConfigPool) +func (ctrl *Controller) deleteMachineOSBuild(mosb interface{}) { + m, ok := mosb.(*mcfgv1alpha1.MachineOSBuild) if !ok { - tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + tombstone, ok := mosb.(cache.DeletedFinalStateUnknown) if !ok { - utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj)) + utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", mosb)) return } - pool, ok = tombstone.Obj.(*mcfgv1.MachineConfigPool) + m, ok = tombstone.Obj.(*mcfgv1alpha1.MachineOSBuild) if !ok { - utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a MachineConfigPool %#v", obj)) + utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a MachineOSBuild %#v", mosb)) return } } - klog.V(4).Infof("Deleting MachineConfigPool %s", pool.Name) + klog.V(4).Infof("Deleting MachineOSBuild %s", m.Name) } -func (ctrl *Controller) syncAvailableStatus(pool *mcfgv1.MachineConfigPool) error { +func (ctrl *Controller) syncAvailableStatus(mosb *mcfgv1alpha1.MachineOSBuild) error { // I'm not sure what the consequences are of not doing this. //nolint:gocritic // Leaving this here for review purposes. - /* - if apihelpers.IsMachineConfigPoolConditionFalse(pool.Status.Conditions, mcfgv1.MachineConfigPoolRenderDegraded) { - return nil - } - */ - sdegraded := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolRenderDegraded, corev1.ConditionFalse, "", "") - apihelpers.SetMachineConfigPoolCondition(&pool.Status, *sdegraded) - if _, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), pool, metav1.UpdateOptions{}); err != nil { + sdegraded := apihelpers.NewMachineOSBuildCondition(string(mcfgv1alpha1.MachineOSBuildFailed), metav1.ConditionFalse, "MOSCAvailable", "MOSC") + apihelpers.SetMachineOSBuildCondition(&mosb.Status, *sdegraded) + + if _, err := ctrl.mcfgclient.MachineconfigurationV1alpha1().MachineOSBuilds().UpdateStatus(context.TODO(), mosb, metav1.UpdateOptions{}); err != nil { return err } return nil } -func (ctrl *Controller) syncFailingStatus(pool *mcfgv1.MachineConfigPool, err error) error { - sdegraded := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolRenderDegraded, corev1.ConditionTrue, "", fmt.Sprintf("Failed to build configuration for pool %s: %v", pool.Name, err)) - apihelpers.SetMachineConfigPoolCondition(&pool.Status, *sdegraded) - if _, updateErr := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), pool, metav1.UpdateOptions{}); updateErr != nil { - klog.Errorf("Error updating MachineConfigPool %s: %v", pool.Name, updateErr) +func (ctrl *Controller) syncFailingStatus(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild, err error) error { + sdegraded := apihelpers.NewMachineOSBuildCondition(string(mcfgv1alpha1.MachineOSBuildFailed), metav1.ConditionTrue, "BuildFailed", fmt.Sprintf("Failed to build configuration for pool %s: %v", mosc.Spec.MachineConfigPool.Name, err)) + apihelpers.SetMachineOSBuildCondition(&mosb.Status, *sdegraded) + if _, updateErr := ctrl.mcfgclient.MachineconfigurationV1alpha1().MachineOSBuilds().UpdateStatus(context.TODO(), mosb, metav1.UpdateOptions{}); updateErr != nil { + klog.Errorf("Error updating MachineOSBuild %s: %v", mosb.Name, updateErr) } return err } -// Determine if we have a config change. -func isPoolConfigChange(oldPool, curPool *mcfgv1.MachineConfigPool) bool { - return oldPool.Spec.Configuration.Name != curPool.Spec.Configuration.Name +func configChangeCauseBuild(old, cur *mcfgv1alpha1.MachineOSConfig) bool { + return equality.Semantic.DeepEqual(old.Spec.BuildInputs, cur.Spec.BuildInputs) } -// Checks our pool to see if we can do a build. We base this off of a few criteria: -// 1. Is the pool opted into layering? -// 2. Do we have an object reference to an in-progress build? -// 3. Is the pool degraded? -// 4. Is our build in a specific state? -// -// Returns true if we are able to build. -func canPoolBuild(ps *poolState) bool { - // If we don't have a layered pool, we should not build. - if !ps.IsLayered() { - return false - } +// Determines if we should do a build based upon the state of our +// MachineConfigPool, the presence of a build pod, etc. +func shouldWeDoABuild(builder interface { + IsBuildRunning(*mcfgv1alpha1.MachineOSBuild, *mcfgv1alpha1.MachineOSConfig) (bool, error) +}, mosc *mcfgv1alpha1.MachineOSConfig, oldMOSB, curMOSB *mcfgv1alpha1.MachineOSBuild) (bool, error) { + // get desired and current. If desired != current, + // assume we are doing a build. remove the whole layered pool annotation workflow - // If we have a reference to an in-progress build, we should not build. - if ps.HasBuildObjectForCurrentMachineConfig() { - return false - } + if oldMOSB.Spec.DesiredConfig != curMOSB.Spec.DesiredConfig { + // the desiredConfig changed. We need to do an update + // but check that there isn't already a build. + // If a build is found running, we should not do a build. + isRunning, err := builder.IsBuildRunning(curMOSB, mosc) - // If the pool is degraded, we should not build. - if ps.IsAnyDegraded() { - return false - } + return !isRunning, err - // If the pool is in any of these states, we should not build. - if ps.IsBuilding() { - return false + // check for image pull sped changing? } + return false, nil +} - if ps.IsBuildPending() { - return false +// Determines if a pod or build is managed by this controller by examining its labels. +func hasAllRequiredOSBuildLabels(labels map[string]string) bool { + requiredLabels := []string{ + ctrlcommon.OSImageBuildPodLabel, + targetMachineConfigPoolLabel, + desiredConfigLabel, } - if ps.IsBuildFailure() { - return false + for _, label := range requiredLabels { + if _, ok := labels[label]; !ok { + return false + } } return true } -// Determines if we should do a build based upon the state of our -// MachineConfigPool, the presence of a build pod, etc. -func shouldWeDoABuild(builder interface { - IsBuildRunning(*mcfgv1.MachineConfigPool) (bool, error) -}, oldPool, curPool *mcfgv1.MachineConfigPool) (bool, error) { - ps := newPoolState(curPool) +func (ctrl *Controller) doesMOSBExist(config *mcfgv1alpha1.MachineOSConfig) (*mcfgv1alpha1.MachineOSBuild, bool) { + mosb, err := ctrl.machineOSBuildLister.Get(fmt.Sprintf("%s-builder", config.Spec.MachineConfigPool.Name)) + if err != nil && k8serrors.IsNotFound(err) { + return nil, false + } else if mosb != nil { + return mosb, true + } + return nil, false +} - // If we don't have a layered pool, we should not build. - poolStateSuggestsBuild := canPoolBuild(ps) && - // If we have a config change or we're missing an image pullspec label, we - // should do a build. - (isPoolConfigChange(oldPool, curPool) || !ps.HasOSImage()) && - // If we're missing a build pod reference, it likely means we don't need to - // do a build. - !ps.HasBuildObjectRefName(newImageBuildRequest(curPool).getBuildName()) +func (ctrl *Controller) CreateBuildFromConfig(config *mcfgv1alpha1.MachineOSConfig) (*mcfgv1alpha1.MachineOSBuild, *mcfgv1alpha1.MachineOSBuildStatus, error) { + mcp, err := ctrl.mcpLister.Get(config.Spec.MachineConfigPool.Name) + if err != nil { + return nil, nil, err + } + now := metav1.Now() + build := mcfgv1alpha1.MachineOSBuild{ + TypeMeta: metav1.TypeMeta{ + Kind: "MachineOSBuild", + APIVersion: "machineconfiguration.openshift.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-builder", config.Spec.MachineConfigPool.Name, mcp.Spec.Configuration.Name), + }, + Spec: mcfgv1alpha1.MachineOSBuildSpec{ + RenderedImagePushspec: config.Spec.BuildInputs.RenderedImagePushspec, + Version: 1, + ConfigGeneration: 1, + DesiredConfig: mcfgv1alpha1.RenderedMachineConfigReference{ + Name: mcp.Spec.Configuration.Name, + }, + MachineOSConfig: mcfgv1alpha1.MachineOSConfigReference{ + Name: config.Name, + }, + }, + Status: mcfgv1alpha1.MachineOSBuildStatus{ + BuildStart: &now, + }, + } + mosb, err := ctrl.mcfgclient.MachineconfigurationV1alpha1().MachineOSBuilds().Create(context.TODO(), &build, metav1.CreateOptions{}) + return mosb, &build.Status, err +} + +func (ctrl *Controller) getConfigAndBuildForPool(pool *mcfgv1.MachineConfigPool) (*mcfgv1alpha1.MachineOSConfig, *mcfgv1alpha1.MachineOSBuild, error) { + moscs, err := ctrl.machineOSConfigLister.List(labels.Everything()) + if err != nil { + return nil, nil, err + } - if !poolStateSuggestsBuild { - return false, nil + mosbs, err := ctrl.machineOSBuildLister.List(labels.Everything()) + if err != nil { + return nil, nil, err } - // If a build is found running, we should not do a build. - isRunning, err := builder.IsBuildRunning(curPool) + var mosb *mcfgv1alpha1.MachineOSBuild + var mosc *mcfgv1alpha1.MachineOSConfig - return !isRunning, err -} + for _, config := range moscs { + if config.Spec.MachineConfigPool.Name == pool.Name { + mosc = config + break + } + } + + if mosc == nil { + return nil, nil, nil + } -// Enumerates all of the build-related MachineConfigPool condition types. -func getMachineConfigPoolBuildConditions() []mcfgv1.MachineConfigPoolConditionType { - return []mcfgv1.MachineConfigPoolConditionType{ - mcfgv1.MachineConfigPoolBuildFailed, - mcfgv1.MachineConfigPoolBuildPending, - mcfgv1.MachineConfigPoolBuildSuccess, - mcfgv1.MachineConfigPoolBuilding, + for _, build := range mosbs { + if build.Spec.MachineOSConfig.Name == mosc.Name { + if build.Spec.DesiredConfig.Name == pool.Spec.Configuration.Name { + mosb = build + break + } + } } + + return mosc, mosb, nil } -// Determines if a pod or build is managed by this controller by examining its labels. -func hasAllRequiredOSBuildLabels(labels map[string]string) bool { - requiredLabels := []string{ - ctrlcommon.OSImageBuildPodLabel, - targetMachineConfigPoolLabel, - desiredConfigLabel, +// Determines if the build pod is in an error state by examining the individual +// container statuses. Returns true if a single container is in an error state. +func isBuildPodError(pod *corev1.Pod) bool { + errStates := map[string]struct{}{ + "ErrImagePull": struct{}{}, + "CreateContainerError": struct{}{}, } - for _, label := range requiredLabels { - if _, ok := labels[label]; !ok { - return false + for _, container := range pod.Status.ContainerStatuses { + if container.State.Waiting != nil { + if _, ok := errStates[container.State.Waiting.Reason]; ok { + return true + } + } + + if container.State.Terminated != nil && container.State.Terminated.ExitCode != 0 { + return true } } - return true + return false } diff --git a/pkg/controller/build/build_controller_test.go b/pkg/controller/build/build_controller_test.go index 462c7057cd..e372c2e6da 100644 --- a/pkg/controller/build/build_controller_test.go +++ b/pkg/controller/build/build_controller_test.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "os" - "reflect" "strings" ign3types "github.com/coreos/ignition/v2/config/v3_4/types" @@ -346,44 +345,6 @@ func (b *buildControllerTestFixture) startBuildControllerWithCustomPodBuilder() return clients } -// Helper that determines if the build is a success. -func isMCPBuildSuccess(mcp *mcfgv1.MachineConfigPool) bool { - ps := newPoolState(mcp) - - return ps.IsLayered() && - ps.HasOSImage() && - ps.GetOSImage() == expectedImagePullspecWithSHA && - ps.IsBuildSuccess() && - !ps.HasBuildObjectForCurrentMachineConfig() && - machineConfigPoolHasMachineConfigRefs(mcp) && - reflect.DeepEqual(mcp.Spec.Configuration, mcp.Status.Configuration) -} - -func isMCPBuildInProgress(mcp *mcfgv1.MachineConfigPool) bool { - ps := newPoolState(mcp) - - return ps.IsLayered() && - ps.IsBuilding() - -} - -func isMCPBuildSuccessMsg(mcp *mcfgv1.MachineConfigPool) string { - sb := &strings.Builder{} - - ps := newPoolState(mcp) - - fmt.Fprintf(sb, "Is layered? %v\n", ps.IsLayered()) - fmt.Fprintf(sb, "Has OS image? %v\n", ps.HasOSImage()) - fmt.Fprintf(sb, "Matches expected pullspec (%s)? %v\n", expectedImagePullspecWithSHA, ps.GetOSImage() == expectedImagePullspecWithSHA) - fmt.Fprintf(sb, "Is build success? %v\n", ps.IsBuildSuccess()) - fmt.Fprintf(sb, "Is degraded? %v\n", ps.IsDegraded()) - fmt.Fprintf(sb, "Has build object ref for current MachineConfig? %v. Build refs found: %v\n", ps.HasBuildObjectForCurrentMachineConfig(), ps.GetBuildObjectRefs()) - fmt.Fprintf(sb, "Has MachineConfig refs? %v\n", machineConfigPoolHasMachineConfigRefs(mcp)) - fmt.Fprintf(sb, "Spec.Configuration == Status.Configuration? %v\n", reflect.DeepEqual(mcp.Spec.Configuration, mcp.Status.Configuration)) - - return sb.String() -} - func machineConfigPoolHasMachineConfigRefs(pool *mcfgv1.MachineConfigPool) bool { expectedMCP := newMachineConfigPool(pool.Name) ps := newPoolState(pool) @@ -397,33 +358,6 @@ func machineConfigPoolHasMachineConfigRefs(pool *mcfgv1.MachineConfigPool) bool return true } -// Helper that determines if the build was a failure. -func isMCPBuildFailure(mcp *mcfgv1.MachineConfigPool) bool { - ps := newPoolState(mcp) - - return ps.IsLayered() && - ps.IsBuildFailure() && - ps.IsDegraded() && - ps.HasBuildObjectForCurrentMachineConfig() && - machineConfigPoolHasMachineConfigRefs(mcp) && - reflect.DeepEqual(mcp.Spec.Configuration, mcp.Status.Configuration) -} - -func isMCPBuildFailureMsg(mcp *mcfgv1.MachineConfigPool) string { - sb := &strings.Builder{} - - ps := newPoolState(mcp) - - fmt.Fprintf(sb, "Is layered? %v\n", ps.IsLayered()) - fmt.Fprintf(sb, "Is build failure? %v\n", ps.IsBuildFailure()) - fmt.Fprintf(sb, "Is degraded? %v\n", ps.IsDegraded()) - fmt.Fprintf(sb, "Has build object ref for current MachineConfig? %v. Build refs found: %v\n", ps.HasBuildObjectForCurrentMachineConfig(), ps.GetBuildObjectRefs()) - fmt.Fprintf(sb, "Has MachineConfig refs? %v\n", machineConfigPoolHasMachineConfigRefs(mcp)) - fmt.Fprintf(sb, "Spec.Configuration == Status.Configuration? %v\n", reflect.DeepEqual(mcp.Spec.Configuration, mcp.Status.Configuration)) - - return sb.String() -} - // Opts a given MachineConfigPool into layering and asserts that the MachineConfigPool reaches the desired state. func testOptInMCPCustomBuildPod(ctx context.Context, t *testing.T, cs *Clients, poolName string) { mcp := optInMCP(ctx, t, cs, poolName) @@ -554,13 +488,13 @@ func testOptedInMCPOptsOut(ctx context.Context, t *testing.T, cs *Clients, optIn checkFunc := func(mcp *mcfgv1.MachineConfigPool) bool { ps := newPoolState(mcp) - if ps.IsLayered() { - return false - } + //if ps.IsLayered() { + // return false + //} - if ps.HasBuildObjectForCurrentMachineConfig() { - return false - } + //if ps.HasBuildObjectForCurrentMachineConfig() { + // return false + //} if len(ps.GetAllBuildConditions()) != 0 { return false @@ -573,8 +507,8 @@ func testOptedInMCPOptsOut(ctx context.Context, t *testing.T, cs *Clients, optIn sb := &strings.Builder{} ps := newPoolState(mcp) - fmt.Fprintf(sb, "Is layered? %v\n", ps.IsLayered()) - fmt.Fprintf(sb, "Has build object for current MachineConfig? %v\n", ps.HasBuildObjectForCurrentMachineConfig()) + //fmt.Fprintf(sb, "Is layered? %v\n", ps.IsLayered()) + //fmt.Fprintf(sb, "Has build object for current MachineConfig? %v\n", ps.HasBuildObjectForCurrentMachineConfig()) fmt.Fprintf(sb, "Build objects: %v\n", ps.GetBuildObjectRefs()) buildConditions := ps.GetAllBuildConditions() fmt.Fprintf(sb, "Has no build conditions? %v. Build conditions: %v\n", len(buildConditions) == 0, buildConditions) diff --git a/pkg/controller/build/fixtures_test.go b/pkg/controller/build/fixtures_test.go index dbf6f03ffc..afcd137621 100644 --- a/pkg/controller/build/fixtures_test.go +++ b/pkg/controller/build/fixtures_test.go @@ -602,9 +602,9 @@ func getPoolFailureMsg(mcp *mcfgv1.MachineConfigPool, expectedToHaveBuildRef boo fmt.Fprintf(msg, "Is only one build condition true? %v\n", isOnlyOneBuildConditionTrue(mcp)) - hasBuildObject := ps.HasBuildObjectForCurrentMachineConfig() + //hasBuildObject := ps.HasBuildObjectForCurrentMachineConfig() buildObjectRefs := ps.GetBuildObjectRefs() - fmt.Fprintf(msg, "Has ref? %v. Expected: %v. Actual: %v.\n", hasBuildObject, expectedToHaveBuildRef, hasBuildObject == expectedToHaveBuildRef) + //fmt.Fprintf(msg, "Has ref? %v. Expected: %v. Actual: %v.\n", hasBuildObject, expectedToHaveBuildRef, hasBuildObject == expectedToHaveBuildRef) if expectedToHaveBuildRef { fmt.Fprintf(msg, "Has only one build ref? %v. Build refs found: %v\n", len(buildObjectRefs) == 1, buildObjectRefs) @@ -641,11 +641,11 @@ func poolReachesExpectedBuildState(mcp *mcfgv1.MachineConfigPool, expectedToHave return false } - ps := newPoolState(mcp) + //ps := newPoolState(mcp) - if expectedToHaveBuildRef { - return ps.HasBuildObjectForCurrentMachineConfig() && len(ps.GetBuildObjectRefs()) == 1 - } + //if expectedToHaveBuildRef { + // return ps.HasBuildObjectForCurrentMachineConfig() && len(ps.GetBuildObjectRefs()) == 1 + //} - return !ps.HasBuildObjectForCurrentMachineConfig() && len(ps.GetBuildObjectRefs()) == 0 + //return !ps.HasBuildObjectForCurrentMachineConfig() && len(ps.GetBuildObjectRefs()) == 0 } diff --git a/pkg/controller/build/helpers.go b/pkg/controller/build/helpers.go index 2fb0606d98..2571b9dfee 100644 --- a/pkg/controller/build/helpers.go +++ b/pkg/controller/build/helpers.go @@ -12,6 +12,9 @@ import ( "github.com/containers/image/v5/docker" "github.com/containers/image/v5/docker/reference" "github.com/opencontainers/go-digest" + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + "github.com/openshift/client-go/machineconfiguration/clientset/versioned" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -19,7 +22,6 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" k8stypes "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" - "k8s.io/klog/v2" ) const ( @@ -106,7 +108,7 @@ func parseImagePullspecWithDigest(pullspec string, imageDigest digest.Digest) (s // Parses an image pullspec from a string and an image SHA and replaces any // tags on the pullspec with the provided image SHA. -func parseImagePullspec(pullspec, imageSHA string) (string, error) { +func ParseImagePullspec(pullspec, imageSHA string) (string, error) { imageDigest, err := digest.Parse(imageSHA) if err != nil { return "", err @@ -241,56 +243,39 @@ func ignoreIsNotFoundErr(err error) error { return nil } -// ValidateOnClusterBuildConfig validates the existence of the on-cluster-build-config ConfigMap and the presence of the secrets it refers to. -func ValidateOnClusterBuildConfig(kubeclient clientset.Interface) error { - // Validate the presence of the on-cluster-build-config ConfigMap - cm, err := kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), OnClusterBuildConfigMapName, metav1.GetOptions{}) - if err != nil && k8serrors.IsNotFound(err) { - return fmt.Errorf("%s ConfigMap missing, did you create it?", OnClusterBuildConfigMapName) - } - +// ValidateOnClusterBuildConfig validates the existence of the MachineOSConfig and the required build inputs. +func ValidateOnClusterBuildConfig(kubeclient clientset.Interface, mcfgclient versioned.Interface, layeredMCPs []*mcfgv1.MachineConfigPool) error { + // Validate the presence of the MachineOSConfig + machineOSConfigs, err := mcfgclient.MachineconfigurationV1alpha1().MachineOSConfigs().List(context.TODO(), metav1.ListOptions{}) if err != nil { - return fmt.Errorf("could not get ConfigMap %s: %w", OnClusterBuildConfigMapName, err) + return err } - secretNames := []string{BaseImagePullSecretNameConfigKey, FinalImagePushSecretNameConfigKey} - imagePullspecs := []string{FinalImagePullspecConfigKey} - - // Validate the presence of secrets it refers to - for _, key := range secretNames { - val, ok := cm.Data[key] - if !ok { - return fmt.Errorf("missing required key %q in configmap %s", key, OnClusterBuildConfigMapName) + moscForPoolExists := false + var moscForPool *mcfgv1alpha1.MachineOSConfig + for _, pool := range layeredMCPs { + moscForPoolExists = false + for _, mosc := range machineOSConfigs.Items { + if mosc.Spec.MachineConfigPool.Name == pool.Name { + moscForPoolExists = true + moscForPool = &mosc + break + } } + if !moscForPoolExists { + return fmt.Errorf("MachineOSConfig for pool %s missing, did you create it?", pool.Name) - if val == "" { - return fmt.Errorf("key %q in configmap %s has an empty value", key, OnClusterBuildConfigMapName) } - - if err := validateSecret(kubeclient, val); err != nil { + if err := validateSecret(kubeclient, moscForPool.Spec.BuildInputs.BaseImagePullSecret.Name); err != nil { return err } - } - - // Validate the image pullspec(s) it referes to. - for _, key := range imagePullspecs { - val, ok := cm.Data[key] - if !ok { - return fmt.Errorf("missing required key %q in configmap %s", key, OnClusterBuildConfigMapName) + if err := validateSecret(kubeclient, moscForPool.Spec.BuildInputs.RenderedImagePushSecret.Name); err != nil { + return err } - - if val == "" { - return fmt.Errorf("key %q in configmap %s has an empty value", key, OnClusterBuildConfigMapName) + if _, err := reference.ParseNamed(moscForPool.Spec.BuildInputs.RenderedImagePushspec); err != nil { + return err } - if _, err := reference.ParseNamed(val); err != nil { - return fmt.Errorf("could not parse %s with %q: %w", key, val, err) - } - } - - // Validate the image builder type from the ConfigMap - if _, err := GetImageBuilderType(cm); err != nil { - return err } return nil @@ -313,28 +298,3 @@ func validateSecret(kubeclient clientset.Interface, secretName string) error { return nil } - -// Determines which image builder to start based upon the imageBuilderType key -// in the on-cluster-build-config ConfigMap. Defaults to custom-pod-builder. -func GetImageBuilderType(cm *corev1.ConfigMap) (ImageBuilderType, error) { - configMapImageBuilder, ok := cm.Data[ImageBuilderTypeConfigMapKey] - cmBuilder := ImageBuilderType(configMapImageBuilder) - defaultBuilder := OpenshiftImageBuilder - - if !ok { - klog.Infof("%s not set, defaulting to %q", ImageBuilderTypeConfigMapKey, defaultBuilder) - return defaultBuilder, nil - } - - if ok && configMapImageBuilder == "" { - klog.Infof("%s empty, defaulting to %q", ImageBuilderTypeConfigMapKey, defaultBuilder) - return defaultBuilder, nil - } - - if !validImageBuilderTypes.Has(ImageBuilderType(configMapImageBuilder)) { - return "", &ErrInvalidImageBuilder{Message: fmt.Sprintf("invalid image builder type %q, valid types: %v", configMapImageBuilder, validImageBuilderTypes.UnsortedList()), InvalidType: configMapImageBuilder} - } - - klog.Infof("%s set to %q", ImageBuilderTypeConfigMapKey, configMapImageBuilder) - return cmBuilder, nil -} diff --git a/pkg/controller/build/image_build_controller.go b/pkg/controller/build/image_build_controller.go deleted file mode 100644 index dc6b98bd85..0000000000 --- a/pkg/controller/build/image_build_controller.go +++ /dev/null @@ -1,335 +0,0 @@ -package build - -import ( - "context" - "fmt" - "strings" - "time" - - buildv1 "github.com/openshift/api/build/v1" - mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - buildlistersv1 "github.com/openshift/client-go/build/listers/build/v1" - "github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme" - ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" - corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - coreclientsetv1 "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/workqueue" - "k8s.io/klog/v2" -) - -// Controller defines the build controller. -type ImageBuildController struct { - *Clients - *informers - - eventRecorder record.EventRecorder - - // The function to call whenever we've encountered a Build. This function is - // responsible for examining the Build to determine what state its in and map - // that state to the appropriate MachineConfigPool object. - buildHandler func(*buildv1.Build) error - - syncHandler func(pod string) error - enqueueBuild func(*buildv1.Build) - - buildLister buildlistersv1.BuildLister - - buildListerSynced cache.InformerSynced - - queue workqueue.RateLimitingInterface - - config BuildControllerConfig -} - -var _ ImageBuilder = (*ImageBuildController)(nil) - -// Returns a new image build controller. -func newImageBuildController( - ctrlConfig BuildControllerConfig, - clients *Clients, - buildHandler func(*buildv1.Build) error, -) *ImageBuildController { - eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(klog.Infof) - eventBroadcaster.StartRecordingToSink(&coreclientsetv1.EventSinkImpl{Interface: clients.kubeclient.CoreV1().Events("")}) - - ctrl := &ImageBuildController{ - Clients: clients, - informers: newInformers(clients), - eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "machineosbuilder-imagebuildcontroller"}), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineosbuilder-imagebuildcontroller"), - config: ctrlConfig, - buildHandler: buildHandler, - } - - // As an aside, why doesn't the constructor here set up all the informers? - ctrl.buildInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: ctrl.addBuild, - UpdateFunc: ctrl.updateBuild, - DeleteFunc: ctrl.deleteBuild, - }) - - ctrl.buildLister = ctrl.buildInformer.Lister() - ctrl.buildListerSynced = ctrl.buildInformer.Informer().HasSynced - - ctrl.syncHandler = ctrl.syncBuild - ctrl.enqueueBuild = ctrl.enqueueDefault - - return ctrl -} - -func (ctrl *ImageBuildController) enqueue(build *buildv1.Build) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(build) - if err != nil { - utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", build, err)) - return - } - - ctrl.queue.Add(key) -} - -func (ctrl *ImageBuildController) enqueueRateLimited(build *buildv1.Build) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(build) - if err != nil { - utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", build, err)) - return - } - - ctrl.queue.AddRateLimited(key) -} - -// enqueueAfter will enqueue a build after the provided amount of time. -func (ctrl *ImageBuildController) enqueueAfter(build *buildv1.Build, after time.Duration) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(build) - if err != nil { - utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", build, err)) - return - } - - ctrl.queue.AddAfter(key, after) -} - -// enqueueDefault calls a default enqueue function -func (ctrl *ImageBuildController) enqueueDefault(build *buildv1.Build) { - ctrl.enqueueAfter(build, ctrl.config.UpdateDelay) -} - -// Syncs Builds. -func (ctrl *ImageBuildController) syncBuild(key string) error { //nolint:dupl // This does have commonality with the PodBuildController. - start := time.Now() - defer func() { - klog.Infof("Finished syncing pod %s: %s", key, time.Since(start)) - }() - klog.Infof("Started syncing pod %s", key) - - _, name, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - return err - } - // TODO: Why do I need to set the namespace here? - build, err := ctrl.buildLister.Builds(ctrlcommon.MCONamespace).Get(name) - if k8serrors.IsNotFound(err) { - klog.V(2).Infof("Build %v has been deleted", key) - return nil - } - if err != nil { - return err - } - - build, err = ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Get(context.TODO(), build.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - if !hasAllRequiredOSBuildLabels(build.Labels) { - klog.Infof("Ignoring non-OS image build %s", build.Name) - return nil - } - - if err := ctrl.buildHandler(build); err != nil { - return fmt.Errorf("unable to update with build status: %w", err) - } - - klog.Infof("Updated MachineConfigPool with build status. Build %s in %s", build.Name, build.Status.Phase) - - return nil -} - -// Starts the Image Build Controller. -func (ctrl *ImageBuildController) Run(ctx context.Context, workers int) { - defer utilruntime.HandleCrash() - defer ctrl.queue.ShutDown() - - ctrl.informers.start(ctx) - - if !cache.WaitForCacheSync(ctx.Done(), ctrl.buildListerSynced) { - return - } - - klog.Info("Starting MachineOSBuilder-ImageBuildController") - defer klog.Info("Shutting down MachineOSBuilder-ImageBuildController") - - for i := 0; i < workers; i++ { - go wait.Until(ctrl.worker, time.Second, ctx.Done()) - } - - <-ctx.Done() -} - -// Gets the final image pullspec. In this case, we can interrogate the Build -// object for this information. -func (ctrl *ImageBuildController) FinalPullspec(pool *mcfgv1.MachineConfigPool) (string, error) { - buildName := newImageBuildRequest(pool).getBuildName() - - build, err := ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Get(context.TODO(), buildName, metav1.GetOptions{}) - if err != nil { - return "", fmt.Errorf("could not get build %s for pool %s: %w", buildName, pool.Name, err) - } - - // Get the image digest from the completed build and replace the tag with - // the digest. - if build.Status.OutputDockerImageReference == "" { - return "", fmt.Errorf("no image reference outputted") - } - - if build.Status.Output.To.ImageDigest == "" { - return "", fmt.Errorf("no image digest found") - } - - return parseImagePullspec(build.Status.OutputDockerImageReference, build.Status.Output.To.ImageDigest) -} - -// Deletes the underlying Build object. -func (ctrl *ImageBuildController) DeleteBuildObject(pool *mcfgv1.MachineConfigPool) error { - buildName := newImageBuildRequest(pool).getBuildName() - return ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Delete(context.TODO(), buildName, metav1.DeleteOptions{}) -} - -// Determines if a build is currently running by looking for a corresponding Build. -func (ctrl *ImageBuildController) IsBuildRunning(pool *mcfgv1.MachineConfigPool) (bool, error) { - buildName := newImageBuildRequest(pool).getBuildName() - - // First check if we have a build in progress for this MachineConfigPool and rendered config. - _, err := ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Get(context.TODO(), buildName, metav1.GetOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - return false, err - } - - return err == nil, nil -} - -// Starts a new build, assuming one is not found first. In that case, it -// returns an object reference to the preexisting Build object. -func (ctrl *ImageBuildController) StartBuild(ibr ImageBuildRequest) (*corev1.ObjectReference, error) { - targetMC := ibr.Pool.Spec.Configuration.Name - - buildName := ibr.getBuildName() - - // TODO: Find a constant for this: - if !strings.HasPrefix(targetMC, "rendered-") { - return nil, fmt.Errorf("%s is not a rendered MachineConfig", targetMC) - } - - // First check if we have a build in progress for this MachineConfigPool and rendered config. - build, err := ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Get(context.TODO(), buildName, metav1.GetOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - return nil, err - } - - // This means we found a preexisting build build. - if build != nil && err == nil && hasAllRequiredOSBuildLabels(build.Labels) { - klog.Infof("Found preexisting OS image build (%s) for pool %s", build.Name, ibr.Pool.Name) - return toObjectRef(build), nil - } - - klog.Infof("Starting build for pool %s", ibr.Pool.Name) - klog.Infof("Build name: %s", buildName) - klog.Infof("Final image will be pushed to %q, using secret %q", ibr.FinalImage.Pullspec, ibr.FinalImage.PullSecret.Name) - - build, err = ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Create(context.TODO(), ibr.toBuild(), metav1.CreateOptions{}) - if err != nil { - return nil, fmt.Errorf("could not create OS image build: %w", err) - } - - klog.Infof("Build started for pool %s in %s!", ibr.Pool.Name, build.Name) - - return toObjectRef(build), nil -} - -// Fires whenever a Build is added. -func (ctrl *ImageBuildController) addBuild(obj interface{}) { - build := obj.(*buildv1.Build).DeepCopy() - klog.V(4).Infof("Adding Build %s. Is OS image build? %v", build.Name, hasAllRequiredOSBuildLabels(build.Labels)) - if hasAllRequiredOSBuildLabels(build.Labels) { - ctrl.enqueueBuild(build) - } -} - -// Fires whenever a Build is updated. -func (ctrl *ImageBuildController) updateBuild(_, curObj interface{}) { - curBuild := curObj.(*buildv1.Build).DeepCopy() - - isOSImageBuild := hasAllRequiredOSBuildLabels(curBuild.Labels) - - klog.Infof("Updating build %s. Is OS image build? %v", curBuild.Name, isOSImageBuild) - - // Ignore non-OS image builds. - // TODO: Figure out if we can add the filter criteria onto the lister. - if !isOSImageBuild { - return - } - - klog.Infof("Build %s updated", curBuild.Name) - - ctrl.enqueueBuild(curBuild) -} - -func (ctrl *ImageBuildController) handleErr(err error, key interface{}) { - if err == nil { - ctrl.queue.Forget(key) - return - } - - if ctrl.queue.NumRequeues(key) < ctrl.config.MaxRetries { - klog.V(2).Infof("Error syncing build %v: %v", key, err) - ctrl.queue.AddRateLimited(key) - return - } - - utilruntime.HandleError(err) - klog.V(2).Infof("Dropping build %q out of the queue: %v", key, err) - ctrl.queue.Forget(key) - ctrl.queue.AddAfter(key, 1*time.Minute) -} - -func (ctrl *ImageBuildController) deleteBuild(obj interface{}) { - build := obj.(*buildv1.Build).DeepCopy() - klog.V(4).Infof("Deleting Build %s. Is OS image build? %v", build.Name, hasAllRequiredOSBuildLabels(build.Labels)) - ctrl.enqueueBuild(build) -} - -// worker runs a worker thread that just dequeues items, processes them, and marks them done. -// It enforces that the syncHandler is never invoked concurrently with the same key. -func (ctrl *ImageBuildController) worker() { - for ctrl.processNextWorkItem() { - } -} - -func (ctrl *ImageBuildController) processNextWorkItem() bool { - key, quit := ctrl.queue.Get() - if quit { - return false - } - defer ctrl.queue.Done(key) - - err := ctrl.syncHandler(key.(string)) - ctrl.handleErr(err, key) - - return true -} diff --git a/pkg/controller/build/image_build_request.go b/pkg/controller/build/image_build_request.go index 7bd2e00a58..5881b4f528 100644 --- a/pkg/controller/build/image_build_request.go +++ b/pkg/controller/build/image_build_request.go @@ -7,8 +7,8 @@ import ( "strings" "text/template" - buildv1 "github.com/openshift/api/build/v1" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" "github.com/openshift/machine-config-operator/test/helpers" corev1 "k8s.io/api/core/v1" @@ -43,82 +43,76 @@ type ImageInfo struct { // Represents the request to build a layered OS image. type ImageBuildRequest struct { - // The target MachineConfigPool - Pool *mcfgv1.MachineConfigPool - // The base OS image (derived from the machine-config-osimageurl ConfigMap) - BaseImage ImageInfo - // The extensions image (derived from the machine-config-osimageurl ConfigMap) - ExtensionsImage ImageInfo - // The final OS image (desired from the on-cluster-build-config ConfigMap) - FinalImage ImageInfo - // The OpenShift release version (derived from the machine-config-osimageurl ConfigMap) - ReleaseVersion string - // An optional user-supplied Dockerfile that gets injected into the build. - CustomDockerfile string -} - -type buildInputs struct { - onClusterBuildConfig *corev1.ConfigMap - osImageURL *corev1.ConfigMap - customDockerfiles *corev1.ConfigMap - pool *mcfgv1.MachineConfigPool - machineConfig *mcfgv1.MachineConfig + // The target Build object + MachineOSBuild *mcfgv1alpha1.MachineOSBuild + // the cofig the build is based off of + MachineOSConfig *mcfgv1alpha1.MachineOSConfig + // the containerfile designated from the MachineOSConfig + Containerfile string + // Has /etc/pki/entitlement + HasEtcPkiEntitlementKeys bool + // Has /etc/yum.repos.d configs + HasEtcYumReposDConfigs bool + // Has /etc/pki/rpm-gpg configs + HasEtcPkiRpmGpgKeys bool } // Constructs a simple ImageBuildRequest. -func newImageBuildRequest(pool *mcfgv1.MachineConfigPool) ImageBuildRequest { - return ImageBuildRequest{ - Pool: pool.DeepCopy(), +func newImageBuildRequest(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) *ImageBuildRequest { + ibr := &ImageBuildRequest{ + MachineOSConfig: mosc, + MachineOSBuild: mosb, } -} -// Populates the final image info from the on-cluster-build-config ConfigMap. -func newFinalImageInfo(inputs *buildInputs) ImageInfo { - return ImageInfo{ - Pullspec: inputs.onClusterBuildConfig.Data[FinalImagePullspecConfigKey], - PullSecret: corev1.LocalObjectReference{ - Name: inputs.onClusterBuildConfig.Data[FinalImagePushSecretNameConfigKey], - }, + // only support noArch for now + for _, file := range mosc.Spec.BuildInputs.Containerfile { + if file.ContainerfileArch == mcfgv1alpha1.NoArch { + ibr.Containerfile = file.Content + break + } } + + return ibr } // Populates the base image info from both the on-cluster-build-config and // machine-config-osimageurl ConfigMaps. -func newBaseImageInfo(inputs *buildInputs) ImageInfo { +func newBaseImageInfo(osImageURL *corev1.ConfigMap, mosc *mcfgv1alpha1.MachineOSConfig) ImageInfo { return ImageInfo{ - Pullspec: inputs.osImageURL.Data[baseOSContainerImageConfigKey], + Pullspec: osImageURL.Data[baseOSContainerImageConfigKey], PullSecret: corev1.LocalObjectReference{ - Name: inputs.onClusterBuildConfig.Data[BaseImagePullSecretNameConfigKey], + Name: mosc.Spec.BuildInputs.BaseImagePullSecret.Name, }, } } // Populates the extensions image info from both the on-cluster-build-config // and machine-config-osimageurl ConfigMaps. -func newExtensionsImageInfo(inputs *buildInputs) ImageInfo { +func newExtensionsImageInfo(osImageURL *corev1.ConfigMap, mosc *mcfgv1alpha1.MachineOSConfig) ImageInfo { return ImageInfo{ - Pullspec: inputs.osImageURL.Data[baseOSExtensionsContainerImageConfigKey], + Pullspec: osImageURL.Data[baseOSExtensionsContainerImageConfigKey], PullSecret: corev1.LocalObjectReference{ - Name: inputs.onClusterBuildConfig.Data[BaseImagePullSecretNameConfigKey], + Name: mosc.Spec.BuildInputs.BaseImagePullSecret.Name, }, } } // Constructs an ImageBuildRequest with all of the images populated from ConfigMaps -func newImageBuildRequestFromBuildInputs(inputs *buildInputs) ImageBuildRequest { - customDockerfile := "" - if inputs.customDockerfiles != nil { - customDockerfile = inputs.customDockerfiles.Data[inputs.pool.Name] +func newImageBuildRequestFromBuildInputs(mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig) ImageBuildRequest { + ibr := &ImageBuildRequest{ + MachineOSConfig: mosc, + MachineOSBuild: mosb, } - return ImageBuildRequest{ - Pool: inputs.pool.DeepCopy(), - BaseImage: newBaseImageInfo(inputs), - FinalImage: newFinalImageInfo(inputs), - ExtensionsImage: newExtensionsImageInfo(inputs), - ReleaseVersion: inputs.osImageURL.Data[releaseVersionConfigKey], - CustomDockerfile: customDockerfile, + // only support noArch for now + for _, file := range mosc.Spec.BuildInputs.Containerfile { + if file.ContainerfileArch == mcfgv1alpha1.NoArch { + ibr.Containerfile = file.Content + break + } } + + return *ibr } // Renders our Dockerfile and injects it into a ConfigMap for consumption by the image builder. @@ -191,67 +185,6 @@ func (i ImageBuildRequest) renderDockerfile() (string, error) { return out.String(), nil } -// Creates an OpenShift Image Builder build object prewired with all ConfigMaps -// / Secrets / etc. -func (i ImageBuildRequest) toBuild() *buildv1.Build { - skipLayers := buildv1.ImageOptimizationSkipLayers - - // The Build API requires the Dockerfile field to be set, even if you - // override it via a ConfigMap. - dockerfile := "FROM scratch" - - return &buildv1.Build{ - TypeMeta: metav1.TypeMeta{ - Kind: "Build", - }, - ObjectMeta: i.getObjectMeta(i.getBuildName()), - Spec: buildv1.BuildSpec{ - CommonSpec: buildv1.CommonSpec{ - // TODO: We may need to configure a Build Input here so we can wire up - // the pull secrets for the base OS image and the extensions image. - Source: buildv1.BuildSource{ - Type: buildv1.BuildSourceDockerfile, - Dockerfile: &dockerfile, - ConfigMaps: []buildv1.ConfigMapBuildSource{ - { - // Provides the rendered MachineConfig in a gzipped / - // base64-encoded format. - ConfigMap: corev1.LocalObjectReference{ - Name: i.getMCConfigMapName(), - }, - DestinationDir: "machineconfig", - }, - { - // Provides the rendered Dockerfile. - ConfigMap: corev1.LocalObjectReference{ - Name: i.getDockerfileConfigMapName(), - }, - }, - }, - }, - Strategy: buildv1.BuildStrategy{ - DockerStrategy: &buildv1.DockerBuildStrategy{ - // Squashing layers is good as long as it doesn't cause problems with what - // the users want to do. It says "some syntax is not supported" - ImageOptimizationPolicy: &skipLayers, - }, - Type: buildv1.DockerBuildStrategyType, - }, - Output: buildv1.BuildOutput{ - To: &corev1.ObjectReference{ - Name: i.FinalImage.Pullspec, - Kind: "DockerImage", - }, - PushSecret: &i.FinalImage.PullSecret, - ImageLabels: []buildv1.ImageLabel{ - {Name: "io.openshift.machineconfig.pool", Value: i.Pool.Name}, - }, - }, - }, - }, - } -} - // Creates a custom image build pod to build the final OS image with all // ConfigMaps / Secrets / etc. wired into it. func (i ImageBuildRequest) toBuildPod() *corev1.Pod { @@ -274,7 +207,7 @@ func (i ImageBuildRequest) toPodmanPod() *corev1.Pod { }, { Name: "TAG", - Value: i.FinalImage.Pullspec, + Value: i.MachineOSConfig.Status.CurrentImagePullspec, }, { Name: "BASE_IMAGE_PULL_CREDS", @@ -344,7 +277,7 @@ func (i ImageBuildRequest) toPodmanPod() *corev1.Pod { // contains the SHA256 from the container registry, and stores this // in a ConfigMap which is consumed after the pod stops. Name: "image-build", - Image: i.BaseImage.Pullspec, + Image: i.MachineOSConfig.Spec.BuildInputs.BaseOSImagePullspec, Env: env, Command: append(command, podmanBuildScript), ImagePullPolicy: corev1.PullIfNotPresent, @@ -387,7 +320,7 @@ func (i ImageBuildRequest) toPodmanPod() *corev1.Pod { Name: "base-image-pull-creds", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: i.BaseImage.PullSecret.Name, + SecretName: i.MachineOSConfig.Spec.BuildInputs.BaseImagePullSecret.Name, Items: []corev1.KeyToPath{ { Key: corev1.DockerConfigJsonKey, @@ -402,7 +335,7 @@ func (i ImageBuildRequest) toPodmanPod() *corev1.Pod { Name: "final-image-push-creds", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: i.FinalImage.PullSecret.Name, + SecretName: i.MachineOSConfig.Spec.BuildOutputs.CurrentImagePullSecret.Name, Items: []corev1.KeyToPath{ { Key: corev1.DockerConfigJsonKey, @@ -445,7 +378,7 @@ func (i ImageBuildRequest) toBuildahPod() *corev1.Pod { }, { Name: "TAG", - Value: i.FinalImage.Pullspec, + Value: i.MachineOSConfig.Status.CurrentImagePullspec, }, { Name: "BASE_IMAGE_PULL_CREDS", @@ -490,6 +423,164 @@ func (i ImageBuildRequest) toBuildahPod() *corev1.Pod { }, } + volumes := []corev1.Volume{ + { + // Provides the rendered Dockerfile. + Name: "dockerfile", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: i.getDockerfileConfigMapName(), + }, + }, + }, + }, + { + // Provides the rendered MachineConfig in a gzipped / base64-encoded + // format. + Name: "machineconfig", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: i.getMCConfigMapName(), + }, + }, + }, + }, + { + // Provides the credentials needed to pull the base OS image. + Name: "base-image-pull-creds", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: i.MachineOSConfig.Spec.BuildInputs.BaseImagePullSecret.Name, + Items: []corev1.KeyToPath{ + { + Key: corev1.DockerConfigJsonKey, + Path: "config.json", + }, + }, + }, + }, + }, + { + // Provides the credentials needed to push the final OS image. + Name: "final-image-push-creds", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: i.MachineOSConfig.Spec.BuildInputs.RenderedImagePushSecret.Name, + Items: []corev1.KeyToPath{ + { + Key: corev1.DockerConfigJsonKey, + Path: "config.json", + }, + }, + }, + }, + }, + { + // Provides a way for the "image-build" container to signal that it + // finished so that the "wait-for-done" container can retrieve the + // iamge SHA. + Name: "done", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + }, + }, + }, + { + // This provides a dedicated place for Buildah to store / cache its + // images during the build. This seems to be required for the build-time + // volume mounts to work correctly, most likely due to an issue with + // SELinux that I have yet to figure out. Despite being called a cache + // directory, it gets removed whenever the build pod exits + Name: "buildah-cache", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + } + + // Octal: 0755. + var mountMode int32 = 493 + + // If the etc-pki-entitlement secret is found, mount it into the build pod. + if i.HasEtcPkiEntitlementKeys { + mountPoint := "/etc/pki/entitlement" + + env = append(env, corev1.EnvVar{ + Name: "ETC_PKI_ENTITLEMENT_MOUNTPOINT", + Value: mountPoint, + }) + + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: etcPkiEntitlementSecretName, + MountPath: mountPoint, + }) + + volumes = append(volumes, corev1.Volume{ + Name: etcPkiEntitlementSecretName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + DefaultMode: &mountMode, + SecretName: etcPkiEntitlementSecretName, + }, + }, + }) + } + + // If the etc-yum-repos-d ConfigMap is found, mount it into the build pod. + if i.HasEtcYumReposDConfigs { + mountPoint := "/etc/yum.repos.d" + + env = append(env, corev1.EnvVar{ + Name: "ETC_YUM_REPOS_D_MOUNTPOINT", + Value: mountPoint, + }) + + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: etcYumReposDConfigMapName, + MountPath: mountPoint, + }) + + volumes = append(volumes, corev1.Volume{ + Name: etcYumReposDConfigMapName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + DefaultMode: &mountMode, + LocalObjectReference: corev1.LocalObjectReference{ + Name: etcYumReposDConfigMapName, + }, + }, + }, + }) + } + + // If the etc-pki-rpm-gpg secret is found, mount it into the build pod. + if i.HasEtcPkiRpmGpgKeys { + mountPoint := "/etc/pki/rpm-gpg" + + env = append(env, corev1.EnvVar{ + Name: "ETC_PKI_RPM_GPG_MOUNTPOINT", + Value: mountPoint, + }) + + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: etcPkiRpmGpgSecretName, + MountPath: mountPoint, + }) + + volumes = append(volumes, corev1.Volume{ + Name: etcPkiRpmGpgSecretName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + DefaultMode: &mountMode, + SecretName: etcPkiRpmGpgSecretName, + }, + }, + }) + } + // TODO: We need pull creds with permissions to pull the base image. By // default, none of the MCO pull secrets can directly pull it. We can use the // pull-secret creds from openshift-config to do that, though we'll need to @@ -513,7 +604,11 @@ func (i ImageBuildRequest) toBuildahPod() *corev1.Pod { Command: append(command, buildahBuildScript), ImagePullPolicy: corev1.PullAlways, SecurityContext: securityContext, - VolumeMounts: volumeMounts, + // Only attach the buildah-cache volume mount to the buildah container. + VolumeMounts: append(volumeMounts, corev1.VolumeMount{ + Name: "buildah-cache", + MountPath: "/home/build/.local/share/containers", + }), }, { // This container waits for the aforementioned container to finish @@ -524,114 +619,65 @@ func (i ImageBuildRequest) toBuildahPod() *corev1.Pod { Name: "wait-for-done", Env: env, Command: append(command, waitScript), - Image: i.BaseImage.Pullspec, + Image: i.MachineOSConfig.Spec.BuildInputs.BaseOSImagePullspec, ImagePullPolicy: corev1.PullAlways, SecurityContext: securityContext, VolumeMounts: volumeMounts, }, }, ServiceAccountName: "machine-os-builder", - Volumes: []corev1.Volume{ - { - // Provides the rendered Dockerfile. - Name: "dockerfile", - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: i.getDockerfileConfigMapName(), - }, - }, - }, - }, - { - // Provides the rendered MachineConfig in a gzipped / base64-encoded - // format. - Name: "machineconfig", - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: i.getMCConfigMapName(), - }, - }, - }, - }, - { - // Provides the credentials needed to pull the base OS image. - Name: "base-image-pull-creds", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: i.BaseImage.PullSecret.Name, - Items: []corev1.KeyToPath{ - { - Key: corev1.DockerConfigJsonKey, - Path: "config.json", - }, - }, - }, - }, - }, - { - // Provides the credentials needed to push the final OS image. - Name: "final-image-push-creds", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: i.FinalImage.PullSecret.Name, - Items: []corev1.KeyToPath{ - { - Key: corev1.DockerConfigJsonKey, - Path: "config.json", - }, - }, - }, - }, - }, - { - // Provides a way for the "image-build" container to signal that it - // finished so that the "wait-for-done" container can retrieve the - // iamge SHA. - Name: "done", - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{ - Medium: corev1.StorageMediumMemory, - }, - }, - }, - }, + Volumes: volumes, }, } } // Constructs a common metav1.ObjectMeta object with the namespace, labels, and annotations set. func (i ImageBuildRequest) getObjectMeta(name string) metav1.ObjectMeta { - return metav1.ObjectMeta{ + objectMeta := metav1.ObjectMeta{ Name: name, Namespace: ctrlcommon.MCONamespace, Labels: map[string]string{ ctrlcommon.OSImageBuildPodLabel: "", - targetMachineConfigPoolLabel: i.Pool.Name, - desiredConfigLabel: i.Pool.Spec.Configuration.Name, + targetMachineConfigPoolLabel: i.MachineOSConfig.Spec.MachineConfigPool.Name, + desiredConfigLabel: i.MachineOSBuild.Spec.DesiredConfig.Name, }, Annotations: map[string]string{ mcPoolAnnotation: "", }, } + + hasOptionalBuildInputTemplate := "machineconfiguration.openshift.io/has-%s" + + if i.HasEtcPkiEntitlementKeys { + objectMeta.Annotations[fmt.Sprintf(hasOptionalBuildInputTemplate, etcPkiEntitlementSecretName)] = "" + } + + if i.HasEtcYumReposDConfigs { + objectMeta.Annotations[fmt.Sprintf(hasOptionalBuildInputTemplate, etcYumReposDConfigMapName)] = "" + } + + if i.HasEtcPkiRpmGpgKeys { + objectMeta.Annotations[fmt.Sprintf(hasOptionalBuildInputTemplate, etcPkiRpmGpgSecretName)] = "" + } + + return objectMeta } // Computes the Dockerfile ConfigMap name based upon the MachineConfigPool name. func (i ImageBuildRequest) getDockerfileConfigMapName() string { - return fmt.Sprintf("dockerfile-%s", i.Pool.Spec.Configuration.Name) + return fmt.Sprintf("dockerfile-%s", i.MachineOSBuild.Spec.DesiredConfig.Name) } // Computes the MachineConfig ConfigMap name based upon the MachineConfigPool name. func (i ImageBuildRequest) getMCConfigMapName() string { - return fmt.Sprintf("mc-%s", i.Pool.Spec.Configuration.Name) + return fmt.Sprintf("mc-%s", i.MachineOSBuild.Spec.DesiredConfig.Name) } // Computes the build name based upon the MachineConfigPool name. func (i ImageBuildRequest) getBuildName() string { - return fmt.Sprintf("build-%s", i.Pool.Spec.Configuration.Name) + return fmt.Sprintf("build-%s", i.MachineOSBuild.Spec.DesiredConfig.Name) } func (i ImageBuildRequest) getDigestConfigMapName() string { - return fmt.Sprintf("digest-%s", i.Pool.Spec.Configuration.Name) + return fmt.Sprintf("digest-%s", i.MachineOSBuild.Spec.DesiredConfig.Name) } diff --git a/pkg/controller/build/image_build_request_test.go b/pkg/controller/build/image_build_request_test.go index 0e4dc1d938..848ff6f063 100644 --- a/pkg/controller/build/image_build_request_test.go +++ b/pkg/controller/build/image_build_request_test.go @@ -15,7 +15,7 @@ func TestImageBuildRequest(t *testing.T) { osImageURLConfigMap := getOSImageURLConfigMap() onClusterBuildConfigMap := getOnClusterBuildConfigMap() - ibr := newImageBuildRequestFromBuildInputs(&buildInputs{ + ibr := newImageBuildRequestFromBuildInputs(&BuildInputs{ pool: mcp, osImageURL: osImageURLConfigMap, onClusterBuildConfig: onClusterBuildConfigMap, @@ -72,7 +72,7 @@ func TestImageBuildRequestMissingExtensionsImage(t *testing.T) { delete(osImageURLConfigMap.Data, baseOSExtensionsContainerImageConfigKey) - ibr := newImageBuildRequestFromBuildInputs(&buildInputs{ + ibr := newImageBuildRequestFromBuildInputs(&BuildInputs{ pool: mcp, osImageURL: osImageURLConfigMap, onClusterBuildConfig: onClusterBuildConfigMap, @@ -94,7 +94,7 @@ func TestImageBuildRequestWithCustomDockerfile(t *testing.T) { "worker": "FROM configs AS final\nRUN dnf install -y python3", }) - ibr := newImageBuildRequestFromBuildInputs(&buildInputs{ + ibr := newImageBuildRequestFromBuildInputs(&BuildInputs{ pool: mcp, osImageURL: osImageURLConfigMap, onClusterBuildConfig: onClusterBuildConfigMap, diff --git a/pkg/controller/build/pod_build_controller.go b/pkg/controller/build/pod_build_controller.go index 6f3692d2d5..1167066d60 100644 --- a/pkg/controller/build/pod_build_controller.go +++ b/pkg/controller/build/pod_build_controller.go @@ -6,7 +6,7 @@ import ( "strings" "time" - mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" "github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" corev1 "k8s.io/api/core/v1" @@ -186,48 +186,27 @@ func (ctrl *PodBuildController) Run(ctx context.Context, workers int) { <-ctx.Done() } -// Gets the final image pullspec by retrieving the ConfigMap that the build pod -// creates from the Buildah digestfile. -func (ctrl *PodBuildController) FinalPullspec(pool *mcfgv1.MachineConfigPool) (string, error) { - onClusterBuildConfigMap, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), OnClusterBuildConfigMapName, metav1.GetOptions{}) - if err != nil { - return "", err - } - - finalImageInfo := newFinalImageInfo(&buildInputs{ - onClusterBuildConfig: onClusterBuildConfigMap, - }) - ibr := newImageBuildRequest(pool) - - digestConfigMap, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), ibr.getDigestConfigMapName(), metav1.GetOptions{}) - if err != nil { - return "", err - } - - return parseImagePullspec(finalImageInfo.Pullspec, digestConfigMap.Data["digest"]) -} - // Deletes the underlying build pod. -func (ctrl *PodBuildController) DeleteBuildObject(pool *mcfgv1.MachineConfigPool) error { +func (ctrl *PodBuildController) DeleteBuildObject(mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig) error { // We want to ignore when a pod or ConfigMap is deleted if it is not found. // This is because when a pool is opted out of layering *after* a successful // build, no pod nor ConfigMap will remain. So we want to be able to // idempotently call this function in that case. return aggerrors.AggregateGoroutines( func() error { - ibr := newImageBuildRequest(pool) + ibr := newImageBuildRequest(mosc, mosb) return ignoreIsNotFoundErr(ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Delete(context.TODO(), ibr.getBuildName(), metav1.DeleteOptions{})) }, func() error { - ibr := newImageBuildRequest(pool) + ibr := newImageBuildRequest(mosc, mosb) return ignoreIsNotFoundErr(ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Delete(context.TODO(), ibr.getDigestConfigMapName(), metav1.DeleteOptions{})) }, ) } // Determines if a build is currently running by looking for a corresponding pod. -func (ctrl *PodBuildController) IsBuildRunning(pool *mcfgv1.MachineConfigPool) (bool, error) { - ibr := newImageBuildRequest(pool) +func (ctrl *PodBuildController) IsBuildRunning(mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig) (bool, error) { + ibr := newImageBuildRequest(mosc, mosb) // First check if we have a build in progress for this MachineConfigPool and rendered config. _, err := ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Get(context.TODO(), ibr.getBuildName(), metav1.GetOptions{}) @@ -240,7 +219,7 @@ func (ctrl *PodBuildController) IsBuildRunning(pool *mcfgv1.MachineConfigPool) ( // Starts a new build pod, assuming one is not found first. In that case, it returns an object reference to the preexisting build pod. func (ctrl *PodBuildController) StartBuild(ibr ImageBuildRequest) (*corev1.ObjectReference, error) { - targetMC := ibr.Pool.Spec.Configuration.Name + targetMC := ibr.MachineOSBuild.Spec.DesiredConfig.Name // TODO: Find a constant for this: if !strings.HasPrefix(targetMC, "rendered-") { @@ -255,20 +234,20 @@ func (ctrl *PodBuildController) StartBuild(ibr ImageBuildRequest) (*corev1.Objec // This means we found a preexisting build pod. if pod != nil && err == nil && hasAllRequiredOSBuildLabels(pod.Labels) { - klog.Infof("Found preexisting build pod (%s) for pool %s", pod.Name, ibr.Pool.Name) + klog.Infof("Found preexisting build pod (%s) for pool %s", pod.Name, ibr.MachineOSConfig.Spec.MachineConfigPool.Name) return toObjectRef(pod), nil } - klog.Infof("Starting build for pool %s", ibr.Pool.Name) + klog.Infof("Starting build for pool %s", ibr.MachineOSConfig.Spec.MachineConfigPool.Name) klog.Infof("Build pod name: %s", ibr.getBuildName()) - klog.Infof("Final image will be pushed to %q, using secret %q", ibr.FinalImage.Pullspec, ibr.FinalImage.PullSecret.Name) + klog.Infof("Final image will be pushed to %q, using secret %q", ibr.MachineOSConfig.Status.CurrentImagePullspec, ibr.MachineOSConfig.Spec.BuildOutputs.CurrentImagePullSecret.Name) pod, err = ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Create(context.TODO(), ibr.toBuildPod(), metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("could not create build pod: %w", err) } - klog.Infof("Build started for pool %s in %s!", ibr.Pool.Name, pod.Name) + klog.Infof("Build started for pool %s in %s!", ibr.MachineOSConfig.Spec.MachineConfigPool.Name, pod.Name) return toObjectRef(pod), nil } diff --git a/pkg/controller/build/pool_state.go b/pkg/controller/build/pool_state.go deleted file mode 100644 index ae085df2b9..0000000000 --- a/pkg/controller/build/pool_state.go +++ /dev/null @@ -1,243 +0,0 @@ -package build - -import ( - "fmt" - - mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - "github.com/openshift/machine-config-operator/pkg/apihelpers" - ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" - corev1 "k8s.io/api/core/v1" -) - -type poolState struct { - *ctrlcommon.LayeredPoolState - pool *mcfgv1.MachineConfigPool -} - -func newPoolState(pool *mcfgv1.MachineConfigPool) *poolState { - copied := pool.DeepCopy() - return &poolState{ - LayeredPoolState: ctrlcommon.NewLayeredPoolState(copied), - pool: copied, - } -} - -// Returns the name of the MachineConfigPool. -func (p *poolState) Name() string { - return p.pool.GetName() -} - -// Returns the name of the current MachineConfig. -func (p *poolState) CurrentMachineConfig() string { - return p.pool.Spec.Configuration.Name -} - -// Returns a deep-copy of the MachineConfigPool with any changes made, -// propagating spec.Configurtion to status.Configuration in the process. -func (p *poolState) MachineConfigPool() *mcfgv1.MachineConfigPool { - out := p.pool.DeepCopy() - out.Spec.Configuration.DeepCopyInto(&out.Status.Configuration) - return out -} - -// Sets the image pullspec annotation. -func (p *poolState) SetImagePullspec(pullspec string) { - if p.pool.Annotations == nil { - p.pool.Annotations = map[string]string{} - } - - p.pool.Annotations[ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey] = pullspec -} - -// Clears the image pullspec annotation. -func (p *poolState) ClearImagePullspec() { - if p.pool.Annotations == nil { - return - } - - delete(p.pool.Annotations, ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey) -} - -// Deletes a given build object reference by its name. -func (p *poolState) DeleteBuildRefByName(name string) { - p.pool.Spec.Configuration.Source = p.getFilteredObjectRefs(func(objRef corev1.ObjectReference) bool { - return objRef.Name != name && !p.isBuildObjectRef(objRef) - }) -} - -// Determines if a MachineConfigPool contains a reference to a Build or custom -// build pod for its current rendered MachineConfig. -func (p *poolState) HasBuildObjectForCurrentMachineConfig() bool { - return p.HasBuildObjectRefName(newImageBuildRequest(p.pool).getBuildName()) -} - -// Determines if a MachineConfigPool has a build object reference given its -// name. -func (p *poolState) HasBuildObjectRefName(name string) bool { - for _, src := range p.pool.Spec.Configuration.Source { - if src.Name == name && p.isBuildObjectRef(src) { - return true - } - } - - return false -} - -// Searches for any object reference. -func (p *poolState) HasObjectRef(objRef corev1.ObjectReference) bool { - for _, src := range p.pool.Spec.Configuration.Source { - if src == objRef { - return true - } - } - - return false -} - -// Searches only for build object references. -func (p *poolState) HasBuildObjectRef(objRef corev1.ObjectReference) bool { - for _, src := range p.pool.Spec.Configuration.Source { - if src == objRef && p.isBuildObjectRef(src) { - return true - } - } - - return false -} - -// Deletes the build object reference for the build belonging to the current MachineConfig. -func (p *poolState) DeleteBuildRefForCurrentMachineConfig() { - p.DeleteBuildRefByName(newImageBuildRequest(p.pool).getBuildName()) -} - -// Deletes all build object references. -func (p *poolState) DeleteAllBuildRefs() { - p.pool.Spec.Configuration.Source = p.getFilteredObjectRefs(func(objRef corev1.ObjectReference) bool { - return objRef.Kind == "MachineConfig" - }) -} - -// Deletes a given object reference. -func (p *poolState) DeleteObjectRef(toDelete corev1.ObjectReference) { - p.pool.Spec.Configuration.Source = p.getFilteredObjectRefs(func(objRef corev1.ObjectReference) bool { - return objRef != toDelete - }) -} - -// Gets all build object references. -func (p *poolState) GetBuildObjectRefs() []corev1.ObjectReference { - return p.getFilteredObjectRefs(func(objRef corev1.ObjectReference) bool { - return p.isBuildObjectRef(objRef) - }) -} - -// Adds a build object reference. Returns an error if a build object reference -// already exists. -func (p *poolState) AddBuildObjectRef(objRef corev1.ObjectReference) error { - if !p.isBuildObjectRef(objRef) { - return fmt.Errorf("%s not a valid build object kind", objRef.Kind) - } - - buildRefs := p.GetBuildObjectRefs() - if len(buildRefs) != 0 { - return fmt.Errorf("cannot add build ObjectReference, found: %v", buildRefs) - } - - p.pool.Spec.Configuration.Source = append(p.pool.Spec.Configuration.Source, objRef) - return nil -} - -// Clears all build object conditions. -func (p *poolState) ClearAllBuildConditions() { - p.pool.Status.Conditions = clearAllBuildConditions(p.pool.Status.Conditions) -} - -// Idempotently sets the supplied build conditions. -func (p *poolState) SetBuildConditions(conditions []mcfgv1.MachineConfigPoolCondition) { - for _, condition := range conditions { - condition := condition - currentCondition := apihelpers.GetMachineConfigPoolCondition(p.pool.Status, condition.Type) - if currentCondition != nil && isConditionEqual(*currentCondition, condition) { - continue - } - - mcpCondition := apihelpers.NewMachineConfigPoolCondition(condition.Type, condition.Status, condition.Reason, condition.Message) - apihelpers.SetMachineConfigPoolCondition(&p.pool.Status, *mcpCondition) - } -} - -// Gets all build conditions. -func (p *poolState) GetAllBuildConditions() []mcfgv1.MachineConfigPoolCondition { - buildConditions := []mcfgv1.MachineConfigPoolCondition{} - - for _, condition := range p.pool.Status.Conditions { - if p.isBuildCondition(condition) { - buildConditions = append(buildConditions, condition) - } - } - - return buildConditions -} - -func (p *poolState) isBuildCondition(cond mcfgv1.MachineConfigPoolCondition) bool { - for _, condType := range getMachineConfigPoolBuildConditions() { - if cond.Type == condType { - return true - } - } - - return false -} - -func (p *poolState) isBuildObjectRef(objRef corev1.ObjectReference) bool { - if objRef.Kind == "Pod" { - return true - } - - if objRef.Kind == "Build" { - return true - } - - return false -} - -func (p *poolState) getFilteredObjectRefs(filterFunc func(objRef corev1.ObjectReference) bool) []corev1.ObjectReference { - refs := []corev1.ObjectReference{} - - for _, src := range p.pool.Spec.Configuration.Source { - if filterFunc(src) { - refs = append(refs, src) - } - } - - return refs -} - -// Determines if two conditions are equal. Note: I purposely do not include the -// timestamp in the equality test, since we do not directly set it. -func isConditionEqual(cond1, cond2 mcfgv1.MachineConfigPoolCondition) bool { - return cond1.Type == cond2.Type && - cond1.Status == cond2.Status && - cond1.Message == cond2.Message && - cond1.Reason == cond2.Reason -} - -func clearAllBuildConditions(inConditions []mcfgv1.MachineConfigPoolCondition) []mcfgv1.MachineConfigPoolCondition { - conditions := []mcfgv1.MachineConfigPoolCondition{} - - for _, condition := range inConditions { - buildConditionFound := false - for _, buildConditionType := range getMachineConfigPoolBuildConditions() { - if condition.Type == buildConditionType { - buildConditionFound = true - break - } - } - - if !buildConditionFound { - conditions = append(conditions, condition) - } - } - - return conditions -} diff --git a/pkg/controller/build/pool_state_test.go b/pkg/controller/build/pool_state_test.go deleted file mode 100644 index b6eb3e3ac4..0000000000 --- a/pkg/controller/build/pool_state_test.go +++ /dev/null @@ -1,188 +0,0 @@ -package build - -import ( - "testing" - - mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - "github.com/openshift/machine-config-operator/test/helpers" - "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" -) - -func TestPoolState(t *testing.T) { - t.Parallel() - - mcp := helpers.NewMachineConfigPoolBuilder("worker").WithMachineConfig("rendered-worker-1").MachineConfigPool() - mcp.Spec.Configuration.Source = []corev1.ObjectReference{ - { - Name: "mc-1", - Kind: "MachineConfig", - }, - { - Name: "mc-2", - Kind: "MachineConfig", - }, - } - - assert.NotEqual(t, mcp.Spec.Configuration, mcp.Status.Configuration) - - ps := newPoolState(mcp) - - ps.SetImagePullspec("registry.host.com/org/repo:tag") - assert.True(t, ps.HasOSImage()) - assert.Equal(t, "registry.host.com/org/repo:tag", ps.GetOSImage()) -} - -func TestPoolStateBuildRefs(t *testing.T) { - t.Parallel() - - mcRefs := []corev1.ObjectReference{ - { - Name: "mc-1", - Kind: "MachineConfig", - }, - { - Name: "mc-2", - Kind: "MachineConfig", - }, - } - - mcp := helpers.NewMachineConfigPoolBuilder("worker").WithMachineConfig("rendered-worker-1").MachineConfigPool() - mcp.Spec.Configuration.Source = append(mcp.Spec.Configuration.Source, mcRefs...) - - assert.NotEqual(t, mcp.Spec.Configuration, mcp.Status.Configuration) - - buildPodRef := corev1.ObjectReference{ - Kind: "Pod", - Name: "build-pod", - } - - buildRef := corev1.ObjectReference{ - Kind: "Build", - Name: "build", - } - - buildRefTests := []struct { - buildRef corev1.ObjectReference - errExpected bool - }{ - { - buildRef: buildPodRef, - }, - { - buildRef: buildRef, - }, - { - buildRef: corev1.ObjectReference{ - Kind: "MachineConfig", - Name: "mc-1", - }, - errExpected: true, - }, - } - - for _, buildRefTest := range buildRefTests { - t.Run("BuildRefTest", func(t *testing.T) { - ps := newPoolState(mcp) - if buildRefTest.errExpected { - assert.Error(t, ps.AddBuildObjectRef(buildRefTest.buildRef)) - return - } - - assert.NoError(t, ps.AddBuildObjectRef(buildRefTest.buildRef), "initial insertion should not error") - assert.Equal(t, append(mcp.Spec.Configuration.Source, buildRefTest.buildRef), ps.MachineConfigPool().Spec.Configuration.Source) - assert.Equal(t, append(mcp.Spec.Configuration.Source, buildRefTest.buildRef), ps.MachineConfigPool().Status.Configuration.Source) - - assert.NotEqual(t, mcp.Spec.Configuration.Source, ps.MachineConfigPool().Spec.Configuration.Source, "should not mutate the underlying MCP") - assert.NotEqual(t, mcp.Status.Configuration.Source, ps.MachineConfigPool().Status.Configuration.Source, "should not mutate the underlying MCP") - - assert.Equal(t, []corev1.ObjectReference{buildRefTest.buildRef}, ps.GetBuildObjectRefs(), "expected build refs to include the inserted ref") - - assert.True(t, ps.HasBuildObjectRef(buildRefTest.buildRef)) - assert.True(t, ps.HasBuildObjectRefName(buildRefTest.buildRef.Name)) - assert.False(t, ps.HasBuildObjectRef(mcRefs[0]), "MachineConfigs should not match build objects") - assert.False(t, ps.HasBuildObjectRefName(mcRefs[0].Name), "MachineConfigs should not match build objects") - - assert.Error(t, ps.AddBuildObjectRef(buildPodRef), "should not be able to insert more than one build ref") - assert.Error(t, ps.AddBuildObjectRef(buildRef), "should not be able to insert more than one build ref") - ps.DeleteObjectRef(buildRefTest.buildRef) - assert.Equal(t, mcp.Spec.Configuration.Source, ps.pool.Spec.Configuration.Source) - - assert.NoError(t, ps.AddBuildObjectRef(buildRefTest.buildRef)) - ps.DeleteBuildRefByName(buildRefTest.buildRef.Name) - assert.False(t, ps.HasBuildObjectRef(buildRefTest.buildRef)) - assert.False(t, ps.HasBuildObjectRefName(buildRefTest.buildRef.Name)) - assert.Equal(t, mcp.Spec.Configuration.Source, ps.pool.Spec.Configuration.Source) - - assert.NoError(t, ps.AddBuildObjectRef(buildRefTest.buildRef)) - ps.DeleteAllBuildRefs() - assert.False(t, ps.HasBuildObjectRef(buildRefTest.buildRef)) - assert.False(t, ps.HasBuildObjectRefName(buildRefTest.buildRef.Name)) - assert.Equal(t, mcp.Spec.Configuration.Source, ps.pool.Spec.Configuration.Source) - - outMCP := ps.MachineConfigPool() - assert.Equal(t, outMCP.Spec.Configuration, outMCP.Status.Configuration) - }) - } -} - -func TestPoolStateConditions(t *testing.T) { - t.Parallel() - - mcp := helpers.NewMachineConfigPoolBuilder("worker").WithMachineConfig("rendered-worker-1").MachineConfigPool() - ps := newPoolState(mcp) - - conditionTests := []struct { - condType mcfgv1.MachineConfigPoolConditionType - checkFunc func() bool - }{ - { - condType: mcfgv1.MachineConfigPoolBuildFailed, - checkFunc: ps.IsBuildFailure, - }, - { - condType: mcfgv1.MachineConfigPoolBuildPending, - checkFunc: ps.IsBuildPending, - }, - { - condType: mcfgv1.MachineConfigPoolBuildSuccess, - checkFunc: ps.IsBuildSuccess, - }, - { - condType: mcfgv1.MachineConfigPoolBuilding, - checkFunc: ps.IsBuilding, - }, - } - - for _, condTest := range conditionTests { - t.Run(string(condTest.condType), func(t *testing.T) { - ps.SetBuildConditions([]mcfgv1.MachineConfigPoolCondition{ - { - Status: corev1.ConditionTrue, - Type: condTest.condType, - }, - }) - - assert.True(t, condTest.checkFunc()) - - ps.SetBuildConditions([]mcfgv1.MachineConfigPoolCondition{ - { - Status: corev1.ConditionFalse, - Type: condTest.condType, - }, - }) - - assert.False(t, condTest.checkFunc()) - }) - } - - ps.ClearAllBuildConditions() - buildConditionTypes := getMachineConfigPoolBuildConditions() - for _, condition := range mcp.Status.Conditions { - for _, conditionType := range buildConditionTypes { - if conditionType == condition.Type { - t.Fatalf("expected not to find any build conditions, found: %v", conditionType) - } - } - } -} diff --git a/pkg/controller/common/helpers.go b/pkg/controller/common/helpers.go index a094c347ea..fbf090a6ae 100644 --- a/pkg/controller/common/helpers.go +++ b/pkg/controller/common/helpers.go @@ -13,6 +13,7 @@ import ( "net/url" "os" "reflect" + "sort" "strings" "text/template" @@ -1168,13 +1169,6 @@ func (n namespacedEventRecorder) AnnotatedEventf(object runtime.Object, annotati n.delegate.AnnotatedEventf(ensureEventNamespace(object), annotations, eventtype, reason, messageFmt, args...) } -func IsLayeredPool(pool *mcfgv1.MachineConfigPool) bool { - if _, ok := pool.Labels[LayeringEnabledPoolLabel]; ok { - return true - } - return false -} - func DoARebuild(pool *mcfgv1.MachineConfigPool) bool { _, ok := pool.Labels[RebuildPoolLabel] return ok diff --git a/pkg/controller/common/layered_node_state.go b/pkg/controller/common/layered_node_state.go index 79c9e4e116..49d6a0f1e9 100644 --- a/pkg/controller/common/layered_node_state.go +++ b/pkg/controller/common/layered_node_state.go @@ -4,6 +4,7 @@ import ( "fmt" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" daemonconsts "github.com/openshift/machine-config-operator/pkg/daemon/constants" corev1 "k8s.io/api/core/v1" ) @@ -26,8 +27,8 @@ func NewLayeredNodeState(n *corev1.Node) *LayeredNodeState { // Augements the isNodeDoneAt() check with determining if the current / desired // image annotations match the pools' values. -func (l *LayeredNodeState) IsDoneAt(mcp *mcfgv1.MachineConfigPool) bool { - return isNodeDoneAt(l.node, mcp) && l.isDesiredImageEqualToPool(mcp) && l.isCurrentImageEqualToPool(mcp) +func (l *LayeredNodeState) IsDoneAt(mcp *mcfgv1.MachineConfigPool, layered bool) bool { + return isNodeDoneAt(l.node, mcp) && l.isDesiredImageEqualToPool(mcp, layered) && l.isCurrentImageEqualToPool(mcp, layered) } // The original behavior of getUnavailableMachines is: getUnavailableMachines @@ -39,14 +40,27 @@ func (l *LayeredNodeState) IsDoneAt(mcp *mcfgv1.MachineConfigPool) bool { // // This augments this check by determining if the desired iamge annotation is // equal to what the pool expects. -func (l *LayeredNodeState) IsUnavailable(mcp *mcfgv1.MachineConfigPool) bool { - return isNodeUnavailable(l.node) && l.isDesiredImageEqualToPool(mcp) +func (l *LayeredNodeState) IsUnavailable(mcp *mcfgv1.MachineConfigPool, layered bool) bool { + return isNodeUnavailable(l.node) && l.isDesiredImageEqualToPool(mcp, layered) } // Checks that the desired machineconfig and image annotations equal the ones // specified by the pool. -func (l *LayeredNodeState) IsDesiredEqualToPool(mcp *mcfgv1.MachineConfigPool) bool { - return l.isDesiredMachineConfigEqualToPool(mcp) && l.isDesiredImageEqualToPool(mcp) +func (l *LayeredNodeState) IsDesiredEqualToPool(mcp *mcfgv1.MachineConfigPool, layered bool) bool { + return l.isDesiredMachineConfigEqualToPool(mcp) && l.isDesiredImageEqualToPool(mcp, layered) +} + +func (l *LayeredNodeState) IsDesiredEqualToBuild(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) bool { + return l.isDesiredImageEqualToBuild(mosc) && l.isDesiredMachineConfigEqualToBuild(mosb) +} + +func (l *LayeredNodeState) isDesiredImageEqualToBuild(mosc *mcfgv1alpha1.MachineOSConfig) bool { + return l.isImageAnnotationEqualToBuild(daemonconsts.DesiredImageAnnotationKey, mosc) +} + +func (l *LayeredNodeState) isDesiredMachineConfigEqualToBuild(mosb *mcfgv1alpha1.MachineOSBuild) bool { + return l.node.Annotations[daemonconsts.DesiredMachineConfigAnnotationKey] == mosb.Spec.DesiredConfig.Name + } // Compares the MachineConfig specified by the MachineConfigPool to the one @@ -57,26 +71,26 @@ func (l *LayeredNodeState) isDesiredMachineConfigEqualToPool(mcp *mcfgv1.Machine // Determines if the nodes desired image is equal to the expected value from // the MachineConfigPool. -func (l *LayeredNodeState) isDesiredImageEqualToPool(mcp *mcfgv1.MachineConfigPool) bool { - return l.isImageAnnotationEqualToPool(daemonconsts.DesiredImageAnnotationKey, mcp) +func (l *LayeredNodeState) isDesiredImageEqualToPool(mcp *mcfgv1.MachineConfigPool, layered bool) bool { + return l.isImageAnnotationEqualToPool(daemonconsts.DesiredImageAnnotationKey, mcp, layered) } // Determines if the nodes current image is equal to the expected value from // the MachineConfigPool. -func (l *LayeredNodeState) isCurrentImageEqualToPool(mcp *mcfgv1.MachineConfigPool) bool { - return l.isImageAnnotationEqualToPool(daemonconsts.CurrentImageAnnotationKey, mcp) +func (l *LayeredNodeState) isCurrentImageEqualToPool(mcp *mcfgv1.MachineConfigPool, layered bool) bool { + return l.isImageAnnotationEqualToPool(daemonconsts.CurrentImageAnnotationKey, mcp, layered) } // Determines if a nodes' image annotation is equal to the expected value from // the MachineConfigPool. If the pool is layered, this value should equal the // OS image value, if the value is available. If the pool is not layered, then // any image annotations should not be present on the node. -func (l *LayeredNodeState) isImageAnnotationEqualToPool(anno string, mcp *mcfgv1.MachineConfigPool) bool { +func (l *LayeredNodeState) isImageAnnotationEqualToPool(anno string, mcp *mcfgv1.MachineConfigPool, layered bool) bool { lps := NewLayeredPoolState(mcp) val, ok := l.node.Annotations[anno] - if lps.IsLayered() && lps.HasOSImage() { + if lps.HasOSImage() { // If the pool is layered and has an OS image, check that it equals the // node annotations' value. return lps.GetOSImage() == val @@ -86,6 +100,21 @@ func (l *LayeredNodeState) isImageAnnotationEqualToPool(anno string, mcp *mcfgv1 return val == "" || !ok } +func (l *LayeredNodeState) isImageAnnotationEqualToBuild(anno string, mosc *mcfgv1alpha1.MachineOSConfig) bool { + mosbs := NewMachineOSConfigState(mosc) + + val, ok := l.node.Annotations[anno] + + if mosbs.HasOSImage() { + // If the pool is layered and has an OS image, check that it equals the + // node annotations' value. + return mosbs.GetOSImage() == val + } + + // If the pool is not layered, this annotation should not exist. + return val == "" || !ok +} + // Sets the desired annotations from the MachineConfigPool, according to the // following rules: // @@ -98,7 +127,7 @@ func (l *LayeredNodeState) isImageAnnotationEqualToPool(anno string, mcp *mcfgv1 // // Note: This will create a deep copy of the node object first to avoid // mutating any underlying caches. -func (l *LayeredNodeState) SetDesiredStateFromPool(mcp *mcfgv1.MachineConfigPool) { +func (l *LayeredNodeState) SetDesiredStateFromPool(layered bool, mcp *mcfgv1.MachineConfigPool) { node := l.Node() if node.Annotations == nil { node.Annotations = map[string]string{} @@ -108,7 +137,7 @@ func (l *LayeredNodeState) SetDesiredStateFromPool(mcp *mcfgv1.MachineConfigPool lps := NewLayeredPoolState(mcp) - if lps.IsLayered() && lps.HasOSImage() { + if lps.HasOSImage() { node.Annotations[daemonconsts.DesiredImageAnnotationKey] = lps.GetOSImage() } else { delete(node.Annotations, daemonconsts.DesiredImageAnnotationKey) @@ -117,6 +146,24 @@ func (l *LayeredNodeState) SetDesiredStateFromPool(mcp *mcfgv1.MachineConfigPool l.node = node } +func (l *LayeredNodeState) SetDesiredStateFromMachineOSConfig(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) { + node := l.Node() + if node.Annotations == nil { + node.Annotations = map[string]string{} + } + + node.Annotations[daemonconsts.DesiredMachineConfigAnnotationKey] = mosb.Spec.DesiredConfig.Name + moscs := NewMachineOSConfigState(mosc) + + if moscs.HasOSImage() { + node.Annotations[daemonconsts.DesiredImageAnnotationKey] = moscs.GetOSImage() + } else { + delete(node.Annotations, daemonconsts.DesiredImageAnnotationKey) + } + + l.node = node +} + // Returns a deep copy of the underlying node object. func (l *LayeredNodeState) Node() *corev1.Node { return l.node.DeepCopy() diff --git a/pkg/controller/common/layered_pool_state.go b/pkg/controller/common/layered_pool_state.go index ef7925e582..497ae016a8 100644 --- a/pkg/controller/common/layered_pool_state.go +++ b/pkg/controller/common/layered_pool_state.go @@ -18,20 +18,6 @@ func NewLayeredPoolState(pool *mcfgv1.MachineConfigPool) *LayeredPoolState { return &LayeredPoolState{pool: pool} } -// Determines if a MachineConfigPool is layered by looking for the layering -// enabled label. -func (l *LayeredPoolState) IsLayered() bool { - if l.pool == nil { - return false - } - - if l.pool.Labels == nil { - return false - } - - return IsLayeredPool(l.pool) -} - // Returns the OS image, if one is present. func (l *LayeredPoolState) GetOSImage() string { osImage := l.pool.Annotations[ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey] diff --git a/pkg/controller/common/layered_pool_state_test.go b/pkg/controller/common/layered_pool_state_test.go index 3c44f60168..d38ed7ecc7 100644 --- a/pkg/controller/common/layered_pool_state_test.go +++ b/pkg/controller/common/layered_pool_state_test.go @@ -10,7 +10,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -func TestLayeredPoolState(t *testing.T) { +func TestMachineOSBuildState(t *testing.T) { t.Parallel() tests := []struct { @@ -83,7 +83,7 @@ func TestLayeredPoolState(t *testing.T) { apihelpers.SetMachineConfigPoolCondition(&test.pool.Status, *cond) } - lps := NewLayeredPoolState(test.pool) + lps := NewMachineOSBuildState(test.pool) assert.Equal(t, test.isLayered, lps.IsLayered(), "is layered mismatch %s", spew.Sdump(test.pool.Labels)) assert.Equal(t, test.hasOSImage, lps.HasOSImage(), "has OS image mismatch %s", spew.Sdump(test.pool.Annotations)) diff --git a/pkg/controller/common/mos_state.go b/pkg/controller/common/mos_state.go new file mode 100644 index 0000000000..83981cb96f --- /dev/null +++ b/pkg/controller/common/mos_state.go @@ -0,0 +1,221 @@ +package common + +import ( + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + "github.com/openshift/machine-config-operator/pkg/apihelpers" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// This is intended to provide a singular way to interrogate MachineConfigPool +// objects to determine if they're in a specific state or not. The eventual +// goal is to use this to mutate the MachineConfigPool object to provide a +// single and consistent interface for that purpose. In this current state, we +// do not perform any mutations. +type MachineOSBuildState struct { + Build *mcfgv1alpha1.MachineOSBuild +} + +type MachineOSConfigState struct { + Config *mcfgv1alpha1.MachineOSConfig +} + +func NewMachineOSConfigState(mosc *mcfgv1alpha1.MachineOSConfig) *MachineOSConfigState { + return &MachineOSConfigState{ + Config: mosc, + } +} + +func NewMachineOSBuildState(mosb *mcfgv1alpha1.MachineOSBuild) *MachineOSBuildState { + return &MachineOSBuildState{ + Build: mosb, + } +} + +// Returns the OS image, if one is present. +func (l *MachineOSConfigState) GetOSImage() string { + osImage := l.Config.Status.CurrentImagePullspec + return osImage +} + +// Determines if an OS image build is a success. +func (l *MachineOSBuildState) IsBuildSuccess() bool { + return apihelpers.IsMachineOSBuildConditionTrue(l.Build.Status.Conditions, mcfgv1alpha1.MachineOSBuildSucceeded) +} + +// Determines if an OS image build is pending. +func (l *MachineOSBuildState) IsBuildPending() bool { + return apihelpers.IsMachineOSBuildConditionTrue(l.Build.Status.Conditions, mcfgv1alpha1.MachineOSBuilding) +} + +// Determines if an OS image build is in progress. +func (l *MachineOSBuildState) IsBuilding() bool { + return apihelpers.IsMachineOSBuildConditionTrue(l.Build.Status.Conditions, mcfgv1alpha1.MachineOSBuilding) +} + +// Determines if an OS image build has failed. +func (l *MachineOSBuildState) IsBuildFailure() bool { + return apihelpers.IsMachineOSBuildConditionTrue(l.Build.Status.Conditions, mcfgv1alpha1.MachineOSBuildFailed) +} + +// Determines if an OS image build has failed. +func (l *MachineOSBuildState) IsBuildInterrupted() bool { + return apihelpers.IsMachineOSBuildConditionTrue(l.Build.Status.Conditions, mcfgv1alpha1.MachineOSBuildInterrupted) +} + +func (l *MachineOSBuildState) IsAnyDegraded() bool { + condTypes := []mcfgv1alpha1.BuildProgress{ + mcfgv1alpha1.MachineOSBuildFailed, + mcfgv1alpha1.MachineOSBuildInterrupted, + } + + for _, condType := range condTypes { + if apihelpers.IsMachineOSBuildConditionTrue(l.Build.Status.Conditions, condType) { + return true + } + } + + return false +} + +// Idempotently sets the supplied build conditions. +func (b *MachineOSBuildState) SetBuildConditions(conditions []metav1.Condition) { + for _, condition := range conditions { + condition := condition + currentCondition := apihelpers.GetMachineOSBuildCondition(b.Build.Status, mcfgv1alpha1.BuildProgress(condition.Type)) + if currentCondition != nil && isConditionEqual(*currentCondition, condition) { + continue + } + + mosbCondition := apihelpers.NewMachineOSBuildCondition(condition.Type, condition.Status, condition.Reason, condition.Message) + apihelpers.SetMachineOSBuildCondition(&b.Build.Status, *mosbCondition) + } +} + +// Determines if two conditions are equal. Note: I purposely do not include the +// timestamp in the equality test, since we do not directly set it. +func isConditionEqual(cond1, cond2 metav1.Condition) bool { + return cond1.Type == cond2.Type && + cond1.Status == cond2.Status && + cond1.Message == cond2.Message && + cond1.Reason == cond2.Reason +} + +// Determines if a given MachineConfigPool has an available OS image. Returns +// false if the annotation is missing or set to an empty string. +func (m *MachineOSConfigState) HasOSImage() bool { + val := m.Config.Status.CurrentImagePullspec + return val != "" +} + +// Clears the image pullspec annotation. +func (m *MachineOSConfigState) ClearImagePullspec() { + m.Config.Spec.BuildInputs.RenderedImagePushspec = "" + m.Config.Status.CurrentImagePullspec = "" +} + +// Clears all build object conditions. +func (m *MachineOSBuildState) ClearAllBuildConditions() { + m.Build.Status.Conditions = clearAllBuildConditions(m.Build.Status.Conditions) +} + +func clearAllBuildConditions(inConditions []metav1.Condition) []metav1.Condition { + conditions := []metav1.Condition{} + + for _, condition := range inConditions { + buildConditionFound := false + for _, buildConditionType := range getMachineConfigBuildConditions() { + if condition.Type == string(buildConditionType) { + buildConditionFound = true + break + } + } + + if !buildConditionFound { + conditions = append(conditions, condition) + } + } + + return conditions +} + +func getMachineConfigBuildConditions() []mcfgv1alpha1.BuildProgress { + return []mcfgv1alpha1.BuildProgress{ + mcfgv1alpha1.MachineOSBuildFailed, + mcfgv1alpha1.MachineOSBuildInterrupted, + mcfgv1alpha1.MachineOSBuildPrepared, + mcfgv1alpha1.MachineOSBuildSucceeded, + mcfgv1alpha1.MachineOSBuilding, + } +} + +func IsPoolAnyDegraded(pool *mcfgv1.MachineConfigPool) bool { + condTypes := []mcfgv1.MachineConfigPoolConditionType{ + mcfgv1.MachineConfigPoolDegraded, + mcfgv1.MachineConfigPoolNodeDegraded, + mcfgv1.MachineConfigPoolRenderDegraded, + } + + for _, condType := range condTypes { + if apihelpers.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, condType) { + return true + } + } + + return false +} + +// Determine if we have a config change. +func IsPoolConfigChange(oldPool, curPool *mcfgv1.MachineConfigPool) bool { + return oldPool.Spec.Configuration.Name != curPool.Spec.Configuration.Name +} + +func HasBuildObjectForCurrentMachineConfig(pool *mcfgv1.MachineConfigPool, mosb *mcfgv1alpha1.MachineOSBuild) bool { + return pool.Spec.Configuration.Name == mosb.Spec.DesiredConfig.Name +} + +// Determines if we should do a build based upon the state of our +// MachineConfigPool, the presence of a build pod, etc. +func BuildDueToPoolChange(builder interface { + IsBuildRunning(*mcfgv1alpha1.MachineOSBuild, *mcfgv1alpha1.MachineOSConfig) (bool, error) +}, oldPool, curPool *mcfgv1.MachineConfigPool, moscNew *mcfgv1alpha1.MachineOSConfig, mosbNew *mcfgv1alpha1.MachineOSBuild) (bool, error) { + + moscState := NewMachineOSConfigState(moscNew) + mosbState := NewMachineOSBuildState(mosbNew) + + // If we don't have a layered pool, we should not build. + poolStateSuggestsBuild := canPoolBuild(curPool, moscState, mosbState) && + // If we have a config change or we're missing an image pullspec label, we + // should do a build. + (IsPoolConfigChange(oldPool, curPool) || !moscState.HasOSImage()) + + return poolStateSuggestsBuild, nil + +} + +// Checks our pool to see if we can do a build. We base this off of a few criteria: +// 1. Is the pool opted into layering? +// 2. Do we have an object reference to an in-progress build? +// 3. Is the pool degraded? +// 4. Is our build in a specific state? +// +// Returns true if we are able to build. +func canPoolBuild(pool *mcfgv1.MachineConfigPool, moscNewState *MachineOSConfigState, mosbNewState *MachineOSBuildState) bool { + // If we don't have a layered pool, we should not build. + if !IsLayeredPool(moscNewState.Config, mosbNewState.Build) { + return false + } + // If the pool is degraded, we should not build. + if IsPoolAnyDegraded(pool) { + return false + } + // If the new pool has an ongoing build, we should not build + if mosbNewState.Build != nil { + return false + } + return true +} + +func IsLayeredPool(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) bool { + return (mosc != nil || mosb != nil) +} diff --git a/pkg/controller/node/node_controller.go b/pkg/controller/node/node_controller.go index 78d705cb3f..b7d93b6fc9 100644 --- a/pkg/controller/node/node_controller.go +++ b/pkg/controller/node/node_controller.go @@ -13,11 +13,16 @@ import ( configv1 "github.com/openshift/api/config/v1" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + mcfginformersv1alpha1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1" + cligoinformersv1 "github.com/openshift/client-go/config/informers/externalversions/config/v1" cligolistersv1 "github.com/openshift/client-go/config/listers/config/v1" mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" "github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme" mcfginformersv1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1" + mcfglistersv1alpha1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1alpha1" + mcfglistersv1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1" "github.com/openshift/library-go/pkg/operator/v1helpers" "github.com/openshift/machine-config-operator/internal" @@ -85,11 +90,13 @@ type Controller struct { mcpLister mcfglistersv1.MachineConfigPoolLister nodeLister corelisterv1.NodeLister podLister corelisterv1.PodLister + mosbLister mcfglistersv1alpha1.MachineOSBuildLister ccListerSynced cache.InformerSynced mcListerSynced cache.InformerSynced mcpListerSynced cache.InformerSynced nodeListerSynced cache.InformerSynced + mosbListerSynced cache.InformerSynced schedulerList cligolistersv1.SchedulerLister schedulerListerSynced cache.InformerSynced @@ -109,6 +116,7 @@ func New( mcpInformer mcfginformersv1.MachineConfigPoolInformer, nodeInformer coreinformersv1.NodeInformer, podInformer coreinformersv1.PodInformer, + mosbInformer mcfginformersv1alpha1.MachineOSBuildInformer, schedulerInformer cligoinformersv1.SchedulerInformer, kubeClient clientset.Interface, mcfgClient mcfgclientset.Interface, @@ -118,6 +126,7 @@ func New( ccInformer, mcInformer, mcpInformer, + mosbInformer, nodeInformer, podInformer, schedulerInformer, @@ -134,6 +143,7 @@ func NewWithCustomUpdateDelay( mcpInformer mcfginformersv1.MachineConfigPoolInformer, nodeInformer coreinformersv1.NodeInformer, podInformer coreinformersv1.PodInformer, + mosbInformer mcfginformersv1alpha1.MachineOSBuildInformer, schedulerInformer cligoinformersv1.SchedulerInformer, kubeClient clientset.Interface, mcfgClient mcfgclientset.Interface, @@ -144,6 +154,7 @@ func NewWithCustomUpdateDelay( ccInformer, mcInformer, mcpInformer, + mosbInformer, nodeInformer, podInformer, schedulerInformer, @@ -159,6 +170,7 @@ func newController( ccInformer mcfginformersv1.ControllerConfigInformer, mcInformer mcfginformersv1.MachineConfigInformer, mcpInformer mcfginformersv1.MachineConfigPoolInformer, + mosbInformer mcfginformersv1alpha1.MachineOSBuildInformer, nodeInformer coreinformersv1.NodeInformer, podInformer coreinformersv1.PodInformer, schedulerInformer cligoinformersv1.SchedulerInformer, @@ -179,7 +191,11 @@ func newController( updateDelay: updateDelay, fgAcessor: fgAccessor, } - + mosbInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: ctrl.addMachineOSBuild, + UpdateFunc: ctrl.updateMachineOSBuild, + DeleteFunc: ctrl.deleteMachineOSBuild, + }) mcpInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: ctrl.addMachineConfigPool, UpdateFunc: ctrl.updateMachineConfigPool, @@ -208,6 +224,7 @@ func newController( ctrl.mcListerSynced = mcInformer.Informer().HasSynced ctrl.mcpListerSynced = mcpInformer.Informer().HasSynced ctrl.nodeListerSynced = nodeInformer.Informer().HasSynced + ctrl.mosbListerSynced = mosbInformer.Informer().HasSynced ctrl.schedulerList = schedulerInformer.Lister() ctrl.schedulerListerSynced = schedulerInformer.Informer().HasSynced @@ -360,6 +377,42 @@ func (ctrl *Controller) makeMasterNodeSchedulable(node *corev1.Node) error { return nil } +func (ctrl *Controller) addMachineOSBuild(obj interface{}) { + curMOSB := obj.(*mcfgv1alpha1.MachineOSBuild) + + config, _ := ctrl.client.MachineconfigurationV1alpha1().MachineOSConfigs().Get(context.TODO(), curMOSB.Spec.MachineOSConfig.Name, metav1.GetOptions{}) + + mcp, _ := ctrl.mcpLister.Get(config.Spec.MachineConfigPool.Name) + ctrl.enqueueMachineConfigPool(mcp) +} + +func (ctrl *Controller) updateMachineOSBuild(old, cur interface{}) { + curMOSB := cur.(*mcfgv1alpha1.MachineOSBuild) + + config, _ := ctrl.client.MachineconfigurationV1alpha1().MachineOSConfigs().Get(context.TODO(), curMOSB.Spec.MachineOSConfig.Name, metav1.GetOptions{}) + + mcp, _ := ctrl.mcpLister.Get(config.Spec.MachineConfigPool.Name) + ctrl.enqueueMachineConfigPool(mcp) +} + +func (ctrl *Controller) deleteMachineOSBuild(obj interface{}) { + pool, ok := obj.(*mcfgv1alpha1.MachineOSBuild) + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj)) + return + } + pool, ok = tombstone.Obj.(*mcfgv1alpha1.MachineOSBuild) + if !ok { + utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a MOSB %#v", obj)) + return + } + } + klog.V(4).Infof("Deleting MachineConfigPool %s", pool.Name) + // TODO(abhinavdahiya): handle deletes. +} + func (ctrl *Controller) addMachineConfigPool(obj interface{}) { pool := obj.(*mcfgv1.MachineConfigPool) klog.V(4).Infof("Adding MachineConfigPool %s", pool.Name) @@ -764,28 +817,81 @@ func (ctrl *Controller) handleErr(err error, key interface{}) { // 2. If a MachineConfig changes, we should wait for the OS image build to be // ready so we can update both the nodes' desired MachineConfig and desired // image annotations simultaneously. + +func (ctrl *Controller) GetConfigAndBuild(pool *mcfgv1.MachineConfigPool) (*mcfgv1alpha1.MachineOSConfig, *mcfgv1alpha1.MachineOSBuild, error) { + var ourConfig *mcfgv1alpha1.MachineOSConfig + var ourBuild *mcfgv1alpha1.MachineOSBuild + configList, err := ctrl.client.MachineconfigurationV1alpha1().MachineOSConfigs().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, nil, err + } + + for _, config := range configList.Items { + if config.Spec.MachineConfigPool.Name == pool.Name { + ourConfig = &config + break + } + } + + if ourConfig == nil { + return nil, nil, nil + } + + buildList, err := ctrl.client.MachineconfigurationV1alpha1().MachineOSBuilds().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, nil, err + } + + for _, build := range buildList.Items { + if build.Spec.MachineOSConfig.Name == ourConfig.Name { + if build.Spec.DesiredConfig.Name == pool.Spec.Configuration.Name { + ourBuild = &build + break + } + } + } + + return ourConfig, ourBuild, nil + +} + func (ctrl *Controller) canLayeredPoolContinue(pool *mcfgv1.MachineConfigPool) (string, bool, error) { - lps := ctrlcommon.NewLayeredPoolState(pool) - hasImage := lps.HasOSImage() - pullspec := lps.GetOSImage() + mosc, mosb, _ := ctrl.GetConfigAndBuild(pool) + + if mosc == nil || mosb == nil { + return "No MachineOSConfig or Build for this pool", false, nil + } + + cs := ctrlcommon.NewMachineOSConfigState(mosc) + + // annoying that we need to either a) pass aroung mosb lister and mosb lister EVERYWHERE + // or b) need to retrv one or the other depending on which is passed into the func. + // would be nice if we could a) at least centralize this code: ctrl.GetConfigAndBuild(pool) + // if we did this, we could potentially, just pass the pool around (as we used to) and shell out to the listers in the func to get the obj assoc. with the pool + //owner := mosb.OwnerReferences[0] + + bs := ctrlcommon.NewMachineOSBuildState(mosb) + + hasImage := cs.HasOSImage() + pullspec := cs.GetOSImage() if !hasImage { - return fmt.Sprintf("Image annotation %s is not set", ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey), false, nil + return "Desired Image not set in MachineOSBuild", false, nil } switch { // If the build is successful and we have the image pullspec, we can proceed // with rolling out the new OS image. - case lps.IsBuildSuccess() && hasImage: + case bs.IsBuildSuccess() && hasImage: msg := fmt.Sprintf("Image built successfully, pullspec: %s", pullspec) return msg, true, nil - case lps.IsBuildPending(): + case bs.IsBuildPending(): return "Image build pending", false, nil - case lps.IsBuilding(): + case bs.IsBuilding(): return "Image build in progress", false, nil - case lps.IsBuildFailure(): - return "Image build failed", false, fmt.Errorf("image build for MachineConfigPool %s failed", pool.Name) + case bs.IsBuildFailure(): + return "Image build failed", false, fmt.Errorf("image build for MachineConfigPool %s failed", mosb.Name) default: return "Image is not ready yet", false, nil } @@ -845,7 +951,9 @@ func (ctrl *Controller) syncMachineConfigPool(key string) error { return ctrl.syncStatusOnly(pool) } - if ctrlcommon.IsLayeredPool(pool) { + mosc, mosb, _ := ctrl.GetConfigAndBuild(pool) + + if ok := ctrl.IsLayeredPool(pool, mosc, mosb); ok { reason, canApplyUpdates, err := ctrl.canLayeredPoolContinue(pool) if err != nil { klog.Infof("Layered pool %s encountered an error: %s", pool.Name, err) @@ -892,8 +1000,10 @@ func (ctrl *Controller) syncMachineConfigPool(key string) error { hasInProgressTaint := checkIfNodeHasInProgressTaint(node) lns := ctrlcommon.NewLayeredNodeState(node) + config, build, _ := ctrl.GetConfigAndBuild(pool) - if lns.IsDesiredEqualToPool(pool) { + layered := ctrl.IsLayeredPool(pool, config, build) + if lns.IsDesiredEqualToPool(pool, layered) { if hasInProgressTaint { if err := ctrl.removeUpdateInProgressTaint(ctx, node.Name); err != nil { err = fmt.Errorf("failed removing %s taint for node %s: %w", constants.NodeUpdateInProgressTaint.Key, node.Name, err) @@ -909,7 +1019,13 @@ func (ctrl *Controller) syncMachineConfigPool(key string) error { } } } - candidates, capacity := getAllCandidateMachines(pool, nodes, maxunavail) + + // NOTE + // this needs to get triggered, the new os img needs to propogate here and be set on the candidate machines. + config, build, _ := ctrl.GetConfigAndBuild(pool) + + layered := ctrl.IsLayeredPool(pool, config, build) + candidates, capacity := getAllCandidateMachines(layered, config, build, pool, nodes, maxunavail) if len(candidates) > 0 { zones := make(map[string]bool) for _, candidate := range candidates { @@ -994,7 +1110,9 @@ func (ctrl *Controller) setClusterConfigAnnotation(nodes []*corev1.Node) error { return nil } -func (ctrl *Controller) updateCandidateNode(nodeName string, pool *mcfgv1.MachineConfigPool) error { +// updateCandidateNode needs to understand MOSB +// specifically, the LayeredNodeState probably needs to understand mosb +func (ctrl *Controller) updateCandidateNode(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild, nodeName string, pool *mcfgv1.MachineConfigPool) error { return clientretry.RetryOnConflict(constants.NodeUpdateBackoff, func() error { oldNode, err := ctrl.kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { @@ -1006,14 +1124,31 @@ func (ctrl *Controller) updateCandidateNode(nodeName string, pool *mcfgv1.Machin } lns := ctrlcommon.NewLayeredNodeState(oldNode) - if lns.IsDesiredEqualToPool(pool) { - // If the node's desired annotations match the pool, return directly without updating the node. - klog.Infof("no update is needed") - return nil + layered := ctrl.IsLayeredPool(pool, mosc, mosb) + if mosb == nil { + if lns.IsDesiredEqualToPool(pool, layered) { + // If the node's desired annotations match the pool, return directly without updating the node. + klog.Infof("no update is needed") + return nil + + } + lns.SetDesiredStateFromPool(layered, pool) + + } else { + if lns.IsDesiredEqualToBuild(mosc, mosb) { + // If the node's desired annotations match the pool, return directly without updating the node. + klog.Infof("no update is needed") + return nil + } + // ensure this is happening. it might not be. + // we need to ensure the node controller is triggered at all the same times + // when using this new system + // we know the mosc+mosb can trigger one another and cause a build, but if the node controller + // can't set this anno, and subsequently cannot trigger the daemon to update, we need to rework. + lns.SetDesiredStateFromMachineOSConfig(mosc, mosb) } // Set the desired state to match the pool. - lns.SetDesiredStateFromPool(pool) newData, err := json.Marshal(lns.Node()) if err != nil { @@ -1031,8 +1166,8 @@ func (ctrl *Controller) updateCandidateNode(nodeName string, pool *mcfgv1.Machin // getAllCandidateMachines returns all possible nodes which can be updated to the target config, along with a maximum // capacity. It is the reponsibility of the caller to choose a subset of the nodes given the capacity. -func getAllCandidateMachines(pool *mcfgv1.MachineConfigPool, nodesInPool []*corev1.Node, maxUnavailable int) ([]*corev1.Node, uint) { - unavail := getUnavailableMachines(nodesInPool, pool) +func getAllCandidateMachines(layered bool, config *mcfgv1alpha1.MachineOSConfig, build *mcfgv1alpha1.MachineOSBuild, pool *mcfgv1.MachineConfigPool, nodesInPool []*corev1.Node, maxUnavailable int) ([]*corev1.Node, uint) { + unavail := getUnavailableMachines(nodesInPool, pool, layered, build) if len(unavail) >= maxUnavailable { klog.Infof("No nodes available for updates") return nil, 0 @@ -1043,11 +1178,19 @@ func getAllCandidateMachines(pool *mcfgv1.MachineConfigPool, nodesInPool []*core var nodes []*corev1.Node for _, node := range nodesInPool { lns := ctrlcommon.NewLayeredNodeState(node) - if lns.IsDesiredEqualToPool(pool) { - if isNodeMCDFailing(node) { - failingThisConfig++ + if !layered { + if lns.IsDesiredEqualToPool(pool, layered) { + if isNodeMCDFailing(node) { + failingThisConfig++ + } + continue + } + } else { + if lns.IsDesiredEqualToBuild(config, build) { + // If the node's desired annotations match the pool, return directly without updating the node. + klog.Infof("no update is needed") + continue } - continue } nodes = append(nodes, node) } @@ -1062,8 +1205,8 @@ func getAllCandidateMachines(pool *mcfgv1.MachineConfigPool, nodesInPool []*core } // getCandidateMachines returns the maximum subset of nodes which can be updated to the target config given availability constraints. -func getCandidateMachines(pool *mcfgv1.MachineConfigPool, nodesInPool []*corev1.Node, maxUnavailable int) []*corev1.Node { - nodes, capacity := getAllCandidateMachines(pool, nodesInPool, maxUnavailable) +func getCandidateMachines(pool *mcfgv1.MachineConfigPool, config *mcfgv1alpha1.MachineOSConfig, build *mcfgv1alpha1.MachineOSBuild, nodesInPool []*corev1.Node, maxUnavailable int, layered bool) []*corev1.Node { + nodes, capacity := getAllCandidateMachines(layered, config, build, pool, nodesInPool, maxUnavailable) if uint(len(nodes)) < capacity { return nodes } @@ -1110,6 +1253,8 @@ func (ctrl *Controller) filterControlPlaneCandidateNodes(pool *mcfgv1.MachineCon return newCandidates, capacity, nil } +// SetDesiredStateFromPool in old mco explains how this works. Somehow you need to NOT FAIL if the mosb doesn't exist. So +// we still need to base this whole things on pools but IsLayeredPool == does mosb exist // updateCandidateMachines sets the desiredConfig annotation the candidate machines func (ctrl *Controller) updateCandidateMachines(pool *mcfgv1.MachineConfigPool, candidates []*corev1.Node, capacity uint) error { if pool.Name == ctrlcommon.MachineConfigPoolMaster { @@ -1135,25 +1280,26 @@ func (ctrl *Controller) updateCandidateMachines(pool *mcfgv1.MachineConfigPool, func (ctrl *Controller) setDesiredAnnotations(pool *mcfgv1.MachineConfigPool, candidates []*corev1.Node) error { eventName := "SetDesiredConfig" + config, build, _ := ctrl.GetConfigAndBuild(pool) - if ctrlcommon.IsLayeredPool(pool) { + if layered := ctrl.IsLayeredPool(pool, config, build); layered { eventName = "SetDesiredConfigAndOSImage" klog.Infof("Continuing to sync layered MachineConfigPool %s", pool.Name) } for _, node := range candidates { - ctrl.logPool(pool, "Setting node %s target to %s", node.Name, getPoolUpdateLine(pool)) - if err := ctrl.updateCandidateNode(node.Name, pool); err != nil { - return fmt.Errorf("setting desired %s for node %s: %w", getPoolUpdateLine(pool), node.Name, err) + //ctrl.logPool(pool, "Setting node %s target to %s", node.Name, getPoolUpdateLine(pool)) + if err := ctrl.updateCandidateNode(config, build, node.Name, pool); err != nil { + return fmt.Errorf("setting desired %s for node %s: %w", &pool.Spec.Configuration.Name, node.Name, err) } } if len(candidates) == 1 { candidate := candidates[0] - ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, eventName, "Targeted node %s to %s", candidate.Name, getPoolUpdateLine(pool)) + ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, eventName, "Targeted node %s to %s", candidate.Name, &pool.Spec.Configuration.Name) } else { - ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, eventName, "Set target for %d nodes to %s", len(candidates), getPoolUpdateLine(pool)) + ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, eventName, "Set target for %d nodes to %s", len(candidates), &pool.Spec.Configuration.Name) } return nil @@ -1280,3 +1426,11 @@ func getErrorString(err error) string { } return "" } + +func (ctrl *Controller) IsLayeredPool(pool *mcfgv1.MachineConfigPool, mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) bool { + fg, err := ctrl.fgAcessor.CurrentFeatureGates() + if err != nil { + return false + } + return (mosc != nil || mosb != nil) && fg.Enabled(configv1.FeatureGateOnClusterBuild) +} diff --git a/pkg/controller/node/node_controller_test.go b/pkg/controller/node/node_controller_test.go index 142eb69b25..f832086176 100644 --- a/pkg/controller/node/node_controller_test.go +++ b/pkg/controller/node/node_controller_test.go @@ -778,7 +778,7 @@ func TestGetCandidateMachines(t *testing.T) { pool := pb.MachineConfigPool() - got := getCandidateMachines(pool, test.nodes, test.progress) + got := getCandidateMachines(pool, test.nodes, test.progress, true) nodeNames := getNamesFromNodes(got) assert.Equal(t, test.expected, nodeNames) @@ -1108,13 +1108,13 @@ func TestShouldMakeProgress(t *testing.T) { mcp := test.infraPool existingNodeBuilder := helpers.NewNodeBuilder("existingNodeAtDesiredConfig").WithEqualConfigs(machineConfigV1).WithLabels(map[string]string{"node-role/worker": "", "node-role/infra": ""}) - lps := ctrlcommon.NewLayeredPoolState(mcp) + lps := ctrlcommon.NewMachineOSBuildState(mcp) if lps.IsLayered() && lps.HasOSImage() { image := lps.GetOSImage() existingNodeBuilder.WithDesiredImage(image).WithCurrentImage(image) } - lps = ctrlcommon.NewLayeredPoolState(mcpWorker) + lps = ctrlcommon.NewMachineOSBuildState(mcpWorker) if lps.IsLayered() && lps.HasOSImage() { image := lps.GetOSImage() existingNodeBuilder.WithDesiredImage(image).WithCurrentImage(image) @@ -1178,7 +1178,7 @@ func TestShouldMakeProgress(t *testing.T) { t.Fatal(err) } - lps := ctrlcommon.NewLayeredPoolState(mcp) + lps := ctrlcommon.NewMachineOSBuildState(mcp) if lps.IsLayered() && lps.HasOSImage() && lps.IsBuildSuccess() { t.Logf("expecting that the node should get the desired image annotation, desired image is: %s", lps.GetOSImage()) expNode.Annotations[daemonconsts.DesiredImageAnnotationKey] = lps.GetOSImage() diff --git a/pkg/controller/node/status.go b/pkg/controller/node/status.go index 95ca7863c7..5078b2b974 100644 --- a/pkg/controller/node/status.go +++ b/pkg/controller/node/status.go @@ -8,6 +8,7 @@ import ( configv1 "github.com/openshift/api/config/v1" mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" helpers "github.com/openshift/machine-config-operator/pkg/helpers" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" @@ -53,7 +54,10 @@ func (ctrl *Controller) syncStatusOnly(pool *mcfgv1.MachineConfigPool) error { machineConfigStates = append(machineConfigStates, ms) } } - newStatus := calculateStatus(machineConfigStates, cc, pool, nodes) + + mosc, mosb, _ := ctrl.GetConfigAndBuild(pool) + + newStatus := ctrl.calculateStatus(machineConfigStates, cc, pool, nodes, mosc, mosb) if equality.Semantic.DeepEqual(pool.Status, newStatus) { return nil } @@ -62,17 +66,19 @@ func (ctrl *Controller) syncStatusOnly(pool *mcfgv1.MachineConfigPool) error { newPool.Status = newStatus _, err = ctrl.client.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), newPool, metav1.UpdateOptions{}) + l := ctrl.IsLayeredPool(pool, mosc, mosb) + if pool.Spec.Configuration.Name != newPool.Spec.Configuration.Name { - ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, "Updating", "Pool %s now targeting %s", pool.Name, getPoolUpdateLine(newPool)) + ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, "Updating", "Pool %s now targeting %s", pool.Name, getPoolUpdateLine(newPool, mosc, mosb, l)) } if pool.Status.Configuration.Name != newPool.Status.Configuration.Name { - ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, "Completed", "Pool %s has completed update to %s", pool.Name, getPoolUpdateLine(newPool)) + ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, "Completed", "Pool %s has completed update to %s", pool.Name, getPoolUpdateLine(newPool, mosc, mosb, l)) } return err } //nolint:gocyclo -func calculateStatus(mcs []*mcfgalphav1.MachineConfigNode, cconfig *mcfgv1.ControllerConfig, pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node) mcfgv1.MachineConfigPoolStatus { +func (ctrl *Controller) calculateStatus(mcs []*mcfgalphav1.MachineConfigNode, cconfig *mcfgv1.ControllerConfig, pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node, mosc *mcfgalphav1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) mcfgv1.MachineConfigPoolStatus { certExpirys := []mcfgv1.CertExpiry{} if cconfig != nil { for _, cert := range cconfig.Status.ControllerCertificates { @@ -88,6 +94,8 @@ func calculateStatus(mcs []*mcfgalphav1.MachineConfigNode, cconfig *mcfgv1.Contr } machineCount := int32(len(nodes)) + l := ctrl.IsLayeredPool(pool, mosc, mosb) + var degradedMachines, readyMachines, updatedMachines, unavailableMachines, updatingMachines []*corev1.Node // if we represent updating properly here, we will also represent updating properly in the CO // so this solves the cordoning RFE and the upgradeable RFE @@ -153,13 +161,14 @@ func calculateStatus(mcs []*mcfgalphav1.MachineConfigNode, cconfig *mcfgv1.Contr // this is # 1 priority, get the upgrade states actually reporting if degradedMachineCount+readyMachineCount+unavailableMachineCount+updatingMachineCount != int32(len(nodes)) { - updatedMachines = getUpdatedMachines(pool, nodes) + + updatedMachines = getUpdatedMachines(pool, nodes, mosc, mosb, l) updatedMachineCount = int32(len(updatedMachines)) - readyMachines = getReadyMachines(pool, nodes) + readyMachines = getReadyMachines(pool, nodes, mosc, mosb, l) readyMachineCount = int32(len(readyMachines)) - unavailableMachines = getUnavailableMachines(nodes, pool) + unavailableMachines = getUnavailableMachines(nodes, pool, l, mosb) unavailableMachineCount = int32(len(unavailableMachines)) degradedMachines = getDegradedMachines(nodes) @@ -196,7 +205,7 @@ func calculateStatus(mcs []*mcfgalphav1.MachineConfigNode, cconfig *mcfgv1.Contr if allUpdated { //TODO: update api to only have one condition regarding status of update. - updatedMsg := fmt.Sprintf("All nodes are updated with %s", getPoolUpdateLine(pool)) + updatedMsg := fmt.Sprintf("All nodes are updated with %s", getPoolUpdateLine(pool, mosc, mosb, l)) supdated := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolUpdated, corev1.ConditionTrue, "", updatedMsg) apihelpers.SetMachineConfigPoolCondition(&status, *supdated) @@ -210,10 +219,10 @@ func calculateStatus(mcs []*mcfgalphav1.MachineConfigNode, cconfig *mcfgv1.Contr supdated := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolUpdated, corev1.ConditionFalse, "", "") apihelpers.SetMachineConfigPoolCondition(&status, *supdated) if pool.Spec.Paused { - supdating := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolUpdating, corev1.ConditionFalse, "", fmt.Sprintf("Pool is paused; will not update to %s", getPoolUpdateLine(pool))) + supdating := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolUpdating, corev1.ConditionFalse, "", fmt.Sprintf("Pool is paused; will not update to %s", getPoolUpdateLine(pool, mosc, mosb, l))) apihelpers.SetMachineConfigPoolCondition(&status, *supdating) } else { - supdating := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolUpdating, corev1.ConditionTrue, "", fmt.Sprintf("All nodes are updating to %s", getPoolUpdateLine(pool))) + supdating := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolUpdating, corev1.ConditionTrue, "", fmt.Sprintf("All nodes are updating to %s", getPoolUpdateLine(pool, mosc, mosb, l))) apihelpers.SetMachineConfigPoolCondition(&status, *supdating) } } @@ -247,16 +256,16 @@ func calculateStatus(mcs []*mcfgalphav1.MachineConfigNode, cconfig *mcfgv1.Contr return status } -func getPoolUpdateLine(pool *mcfgv1.MachineConfigPool) string { +func getPoolUpdateLine(pool *mcfgv1.MachineConfigPool, mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild, layered bool) string { targetConfig := pool.Spec.Configuration.Name mcLine := fmt.Sprintf("MachineConfig %s", targetConfig) - if !ctrlcommon.IsLayeredPool(pool) { + if !layered { return mcLine } - targetImage, ok := pool.Annotations[ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey] - if !ok { + targetImage := mosc.Status.CurrentImagePullspec + if len(targetImage) == 0 { return mcLine } @@ -323,12 +332,20 @@ func isNodeMCDFailing(node *corev1.Node) bool { // getUpdatedMachines filters the provided nodes to return the nodes whose // current config matches the desired config, which also matches the target config, // and the "done" flag is set. -func getUpdatedMachines(pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node) []*corev1.Node { +func getUpdatedMachines(pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node, mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild, layered bool) []*corev1.Node { var updated []*corev1.Node for _, node := range nodes { lns := ctrlcommon.NewLayeredNodeState(node) - if lns.IsDoneAt(pool) { - updated = append(updated, node) + if mosb != nil && mosc != nil { + mosbState := ctrlcommon.NewMachineOSBuildState(mosb) + if layered && mosbState.IsBuildSuccess() && mosb.Spec.DesiredConfig.Name == pool.Spec.Configuration.Name { + updated = append(updated, node) + } + } else { + if lns.IsDoneAt(pool, layered) { + updated = append(updated, node) + + } } } return updated @@ -336,8 +353,8 @@ func getUpdatedMachines(pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node) [] // getReadyMachines filters the provided nodes to return the nodes // that are updated and marked ready -func getReadyMachines(pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node) []*corev1.Node { - updated := getUpdatedMachines(pool, nodes) +func getReadyMachines(pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node, mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild, layered bool) []*corev1.Node { + updated := getUpdatedMachines(pool, nodes, mosc, mosb, layered) var ready []*corev1.Node for _, node := range updated { if isNodeReady(node) { @@ -400,12 +417,21 @@ func isNodeUnavailable(node *corev1.Node) bool { // node *may* go unschedulable in the future, so we don't want to // potentially start another node update exceeding our maxUnavailable. // Somewhat the opposite of getReadyNodes(). -func getUnavailableMachines(nodes []*corev1.Node, pool *mcfgv1.MachineConfigPool) []*corev1.Node { +func getUnavailableMachines(nodes []*corev1.Node, pool *mcfgv1.MachineConfigPool, layered bool, mosb *mcfgv1alpha1.MachineOSBuild) []*corev1.Node { var unavail []*corev1.Node for _, node := range nodes { - lns := ctrlcommon.NewLayeredNodeState(node) - if lns.IsUnavailable(pool) { - unavail = append(unavail, node) + if mosb != nil { + mosbState := ctrlcommon.NewMachineOSBuildState(mosb) + // if node is unavail, desiredConfigs match, and the build is a success, then we are unavail. + // not sure on this one honestly + if layered && isNodeUnavailable(node) && mosb.Spec.DesiredConfig.Name == pool.Status.Configuration.Name && mosbState.IsBuildSuccess() { + unavail = append(unavail, node) + } + } else { + lns := ctrlcommon.NewLayeredNodeState(node) + if lns.IsUnavailable(pool, layered) { + unavail = append(unavail, node) + } } } diff --git a/pkg/operator/operator.go b/pkg/operator/operator.go index f7f7c3e1f6..1711b9d137 100644 --- a/pkg/operator/operator.go +++ b/pkg/operator/operator.go @@ -248,6 +248,9 @@ func New( i.AddEventHandler(optr.eventHandler()) } + // this is for the FG + // informers = append(informers, moscInformer.Informer()) + optr.syncHandler = optr.sync optr.imgLister = imgInformer.Lister() diff --git a/pkg/operator/sync.go b/pkg/operator/sync.go index 2d4013082c..3a279e6e8f 100644 --- a/pkg/operator/sync.go +++ b/pkg/operator/sync.go @@ -686,7 +686,6 @@ func (optr *Operator) syncMachineConfigPools(config *renderConfig) error { return nil } -// we need to mimic this func (optr *Operator) syncMachineConfigNodes(_ *renderConfig) error { fg, err := optr.fgAccessor.CurrentFeatureGates() if err != nil { @@ -1208,10 +1207,10 @@ func (optr *Operator) reconcileMachineOSBuilder(mob *appsv1.Deployment) error { if len(layeredMCPs) != 0 && (!isRunning || !correctReplicaCount) { if !correctReplicaCount { klog.Infof("Adjusting Machine OS Builder pod replica count because MachineConfigPool(s) opted into layering") - return optr.updateMachineOSBuilderDeployment(mob, 1) + return optr.updateMachineOSBuilderDeployment(mob, 1, layeredMCPs) } klog.Infof("Starting Machine OS Builder pod because MachineConfigPool(s) opted into layering") - return optr.startMachineOSBuilderDeployment(mob) + return optr.startMachineOSBuilderDeployment(mob, layeredMCPs) } // If we do not have opted-in pools and the Machine OS Builder deployment is @@ -1223,7 +1222,7 @@ func (optr *Operator) reconcileMachineOSBuilder(mob *appsv1.Deployment) error { // if we are in ocb, but for some reason we dont need to do an update to the deployment, we still need to validate config if len(layeredMCPs) != 0 { - return build.ValidateOnClusterBuildConfig(optr.kubeClient) + return build.ValidateOnClusterBuildConfig(optr.kubeClient, optr.client, layeredMCPs) } return nil } @@ -1242,8 +1241,8 @@ func (optr *Operator) hasCorrectReplicaCount(mob *appsv1.Deployment) bool { return false } -func (optr *Operator) updateMachineOSBuilderDeployment(mob *appsv1.Deployment, replicas int32) error { - if err := build.ValidateOnClusterBuildConfig(optr.kubeClient); err != nil { +func (optr *Operator) updateMachineOSBuilderDeployment(mob *appsv1.Deployment, replicas int32, layeredMCPs []*mcfgv1.MachineConfigPool) error { + if err := build.ValidateOnClusterBuildConfig(optr.kubeClient, optr.client, layeredMCPs); err != nil { return fmt.Errorf("could not update Machine OS Builder deployment: %w", err) } @@ -1287,8 +1286,8 @@ func (optr *Operator) isMachineOSBuilderRunning(mob *appsv1.Deployment) (bool, e } // Updates the Machine OS Builder Deployment, creating it if it does not exist. -func (optr *Operator) startMachineOSBuilderDeployment(mob *appsv1.Deployment) error { - if err := build.ValidateOnClusterBuildConfig(optr.kubeClient); err != nil { +func (optr *Operator) startMachineOSBuilderDeployment(mob *appsv1.Deployment, layeredMCPs []*mcfgv1.MachineConfigPool) error { + if err := build.ValidateOnClusterBuildConfig(optr.kubeClient, optr.client, layeredMCPs); err != nil { return fmt.Errorf("could not start Machine OS Builder: %w", err) } @@ -1324,6 +1323,22 @@ func (optr *Operator) getLayeredMachineConfigPools() ([]*mcfgv1.MachineConfigPoo return []*mcfgv1.MachineConfigPool{}, err } + if len(pools) == 0 { + moscPools := []*mcfgv1.MachineConfigPool{} + machineosconfigs, err := optr.client.MachineconfigurationV1alpha1().MachineOSConfigs().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return []*mcfgv1.MachineConfigPool{}, err + } + for _, mosc := range machineosconfigs.Items { + mcp, err := optr.mcpLister.Get(mosc.Spec.MachineConfigPool.Name) + if err != nil { + return []*mcfgv1.MachineConfigPool{}, err + } + moscPools = append(moscPools, mcp) + } + return moscPools, nil + } + return pools, nil } diff --git a/test/e2e-techpreview/Containerfile.cowsay b/test/e2e-techpreview/Containerfile.cowsay new file mode 100644 index 0000000000..9bd23fa342 --- /dev/null +++ b/test/e2e-techpreview/Containerfile.cowsay @@ -0,0 +1,9 @@ +FROM quay.io/centos/centos:stream9 AS centos +RUN dnf install -y epel-release + +FROM configs AS final +COPY --from=centos /etc/yum.repos.d /etc/yum.repos.d +COPY --from=centos /etc/pki/rpm-gpg/RPM-GPG-KEY-* /etc/pki/rpm-gpg/ +RUN sed -i 's/\$stream/9-stream/g' /etc/yum.repos.d/centos*.repo && \ + rpm-ostree install cowsay && \ + ostree container commit diff --git a/test/e2e-techpreview/Containerfile.entitled b/test/e2e-techpreview/Containerfile.entitled new file mode 100644 index 0000000000..18e925ecc6 --- /dev/null +++ b/test/e2e-techpreview/Containerfile.entitled @@ -0,0 +1,6 @@ +FROM configs AS final + +RUN rm -rf /etc/rhsm-host && \ + rpm-ostree install buildah && \ + ln -s /run/secrets/rhsm /etc/rhsm-host && \ + ostree container commit diff --git a/test/e2e-techpreview/Containerfile.okd-fcos b/test/e2e-techpreview/Containerfile.okd-fcos new file mode 100644 index 0000000000..34db40295f --- /dev/null +++ b/test/e2e-techpreview/Containerfile.okd-fcos @@ -0,0 +1,3 @@ +FROM configs AS final +RUN rpm-ostree install cowsay && \ + ostree container commit diff --git a/test/e2e-techpreview/Containerfile.yum-repos-d b/test/e2e-techpreview/Containerfile.yum-repos-d new file mode 100644 index 0000000000..f6b9fd4d42 --- /dev/null +++ b/test/e2e-techpreview/Containerfile.yum-repos-d @@ -0,0 +1,3 @@ +FROM configs AS final +RUN rpm-ostree install buildah && \ + ostree container commit diff --git a/test/e2e-techpreview/helpers_test.go b/test/e2e-techpreview/helpers_test.go index 37afe84c64..d691a7b845 100644 --- a/test/e2e-techpreview/helpers_test.go +++ b/test/e2e-techpreview/helpers_test.go @@ -1,24 +1,49 @@ package e2e_techpreview_test import ( + "bytes" "context" + "errors" "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" "strings" "testing" "time" imagev1 "github.com/openshift/api/image/v1" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" "github.com/openshift/machine-config-operator/test/framework" "github.com/openshift/machine-config-operator/test/helpers" "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + aggerrs "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" + "sigs.k8s.io/yaml" ) +func createMachineOSConfig(t *testing.T, cs *framework.ClientSet, mosc *mcfgv1alpha1.MachineOSConfig) func() { + _, err := cs.MachineconfigurationV1alpha1Interface.MachineOSConfigs().Create(context.TODO(), mosc, metav1.CreateOptions{}) + require.NoError(t, err) + + t.Logf("Created MachineOSConfig %q", mosc.Name) + + return makeIdempotentAndRegister(t, func() { + require.NoError(t, cs.MachineconfigurationV1alpha1Interface.MachineOSConfigs().Delete(context.TODO(), mosc.Name, metav1.DeleteOptions{})) + t.Logf("Deleted MachineOSConfig %q", mosc.Name) + }) +} + // Identifies a secret in the MCO namespace that has permissions to push to the ImageStream used for the test. func getBuilderPushSecretName(cs *framework.ClientSet) (string, error) { secrets, err := cs.CoreV1Interface.Secrets(ctrlcommon.MCONamespace).List(context.TODO(), metav1.ListOptions{}) @@ -106,25 +131,22 @@ func createSecret(t *testing.T, cs *framework.ClientSet, secret *corev1.Secret) // Copies the global pull secret from openshift-config/pull-secret into the MCO // namespace so that it can be used by the build processes. func copyGlobalPullSecret(t *testing.T, cs *framework.ClientSet) func() { - globalPullSecret, err := cs.CoreV1Interface.Secrets("openshift-config").Get(context.TODO(), "pull-secret", metav1.GetOptions{}) + return cloneSecret(t, cs, "pull-secret", "openshift-config", globalPullSecretCloneName, ctrlcommon.MCONamespace) +} + +func waitForMachineOSBuildToReachState(t *testing.T, cs *framework.ClientSet, poolName string, condFunc func(*mcfgv1alpha1.MachineOSBuild, error) (bool, error)) { + mcp, err := cs.MachineconfigurationV1Interface.MachineConfigPools().Get(context.TODO(), poolName, metav1.GetOptions{}) require.NoError(t, err) - secretCopy := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: globalPullSecretCloneName, - Namespace: ctrlcommon.MCONamespace, - }, - Data: globalPullSecret.Data, - Type: globalPullSecret.Type, - } + mosbName := fmt.Sprintf("%s-%s-builder", poolName, mcp.Spec.Configuration.Name) - cleanup := createSecret(t, cs, secretCopy) - t.Logf("Cloned global pull secret %q into namespace %q as %q", "pull-secret", ctrlcommon.MCONamespace, secretCopy.Name) + err = wait.PollImmediate(1*time.Second, 10*time.Minute, func() (bool, error) { + mosb, err := cs.MachineconfigurationV1alpha1Interface.MachineOSBuilds().Get(context.TODO(), mosbName, metav1.GetOptions{}) - return makeIdempotentAndRegister(t, func() { - cleanup() - t.Logf("Deleted global pull secret copy %q", secretCopy.Name) + return condFunc(mosb, err) }) + + require.NoError(t, err, "MachineOSBuild %q did not reach desired state", mosbName) } // Waits for the target MachineConfigPool to reach a state defined in a supplied function. @@ -152,3 +174,469 @@ func makeIdempotentAndRegister(t *testing.T, cleanupFunc func()) func() { t.Cleanup(out) return out } + +// TOOD: Refactor into smaller functions. +func cleanupEphemeralBuildObjects(t *testing.T, cs *framework.ClientSet) { + // TODO: Instantiate this by using the label selector library. + labelSelector := "machineconfiguration.openshift.io/desiredConfig,machineconfiguration.openshift.io/buildPod,machineconfiguration.openshift.io/targetMachineConfigPool" + + cmList, err := cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: labelSelector, + }) + + require.NoError(t, err) + + podList, err := cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: labelSelector, + }) + + require.NoError(t, err) + + mosbList, err := cs.MachineconfigurationV1alpha1Interface.MachineOSBuilds().List(context.TODO(), metav1.ListOptions{}) + require.NoError(t, err) + + moscList, err := cs.MachineconfigurationV1alpha1Interface.MachineOSConfigs().List(context.TODO(), metav1.ListOptions{}) + require.NoError(t, err) + + if len(cmList.Items) == 0 { + t.Logf("No ephemeral ConfigMaps to clean up") + } + + if len(podList.Items) == 0 { + t.Logf("No build pods to clean up") + } + + if len(mosbList.Items) == 0 { + t.Logf("No MachineOSBuilds to clean up") + } + + if len(moscList.Items) == 0 { + t.Logf("No MachineOSConfigs to clean up") + } + + for _, item := range cmList.Items { + t.Logf("Cleaning up ephemeral ConfigMap %q", item.Name) + require.NoError(t, cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Delete(context.TODO(), item.Name, metav1.DeleteOptions{})) + } + + for _, item := range podList.Items { + t.Logf("Cleaning up build pod %q", item.Name) + require.NoError(t, cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace).Delete(context.TODO(), item.Name, metav1.DeleteOptions{})) + } + + for _, item := range moscList.Items { + t.Logf("Cleaning up MachineOSConfig %q", item.Name) + require.NoError(t, cs.MachineconfigurationV1alpha1Interface.MachineOSConfigs().Delete(context.TODO(), item.Name, metav1.DeleteOptions{})) + } + + for _, item := range mosbList.Items { + t.Logf("Cleaning up MachineOSBuild %q", item.Name) + require.NoError(t, cs.MachineconfigurationV1alpha1Interface.MachineOSBuilds().Delete(context.TODO(), item.Name, metav1.DeleteOptions{})) + } +} + +// Determines where to write the build logs in the event of a failure. +// ARTIFACT_DIR is a well-known env var provided by the OpenShift CI system. +// Writing to the path in this env var will ensure that any files written to +// that path end up in the OpenShift CI GCP bucket for later viewing. +// +// If this env var is not set, these files will be written to the current +// working directory. +func getBuildArtifactDir(t *testing.T) string { + artifactDir := os.Getenv("ARTIFACT_DIR") + if artifactDir != "" { + return artifactDir + } + + cwd, err := os.Getwd() + require.NoError(t, err) + return cwd +} + +// Writes any ephemeral ConfigMaps that got created as part of the build +// process to a file. Also writes the build pod spec. +func writeBuildArtifactsToFiles(t *testing.T, cs *framework.ClientSet, poolName string) { + pool, err := cs.MachineconfigurationV1Interface.MachineConfigPools().Get(context.TODO(), poolName, metav1.GetOptions{}) + require.NoError(t, err) + + err = aggerrs.NewAggregate([]error{ + writeConfigMapsToFile(t, cs, pool), + writePodSpecToFile(t, cs, pool), + writeMachineOSBuildsToFile(t, cs), + writeMachineOSConfigsToFile(t, cs), + }) + + require.NoError(t, err, "could not write build artifacts to files, got: %s", err) +} + +// Writes all MachineOSBuilds to a file. +func writeMachineOSBuildsToFile(t *testing.T, cs *framework.ClientSet) error { + mosbList, err := cs.MachineconfigurationV1alpha1Interface.MachineOSBuilds().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return err + } + + if len(mosbList.Items) == 0 { + t.Logf("No MachineOSBuilds to write") + return nil + } + + for _, mosb := range mosbList.Items { + mosb := mosb + filename := fmt.Sprintf("%s-%s-MachineOSBuild.yaml", t.Name(), mosb.Name) + t.Logf("Writing MachineOSBuild %s to %s", mosb.Name, filename) + if err := dumpObjectToYAMLFile(t, &mosb, filename); err != nil { + return err + } + } + + return nil +} + +// Writes all MachineOSConfigs to a file. +func writeMachineOSConfigsToFile(t *testing.T, cs *framework.ClientSet) error { + moscList, err := cs.MachineconfigurationV1alpha1Interface.MachineOSConfigs().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return err + } + + if len(moscList.Items) == 0 { + t.Logf("No MachineOSConfigs to write") + return nil + } + + for _, mosc := range moscList.Items { + mosc := mosc + filename := fmt.Sprintf("%s-%s-MachineOSConfig.yaml", t.Name(), mosc.Name) + t.Logf("Writing MachineOSConfig %s to %s", mosc.Name, filename) + if err := dumpObjectToYAMLFile(t, &mosc, filename); err != nil { + return err + } + } + + return nil +} + +// Writes the ephemeral ConfigMaps to a file, if found. +func writeConfigMapsToFile(t *testing.T, cs *framework.ClientSet, pool *mcfgv1.MachineConfigPool) error { + configmaps := []string{ + fmt.Sprintf("dockerfile-%s", pool.Spec.Configuration.Name), + fmt.Sprintf("mc-%s", pool.Spec.Configuration.Name), + fmt.Sprintf("digest-%s", pool.Spec.Configuration.Name), + } + + dirPath := getBuildArtifactDir(t) + + for _, configmap := range configmaps { + if err := writeConfigMapToFile(t, cs, configmap, dirPath); err != nil { + return err + } + } + + return nil +} + +// Writes a given ConfigMap to a file. +func writeConfigMapToFile(t *testing.T, cs *framework.ClientSet, configmapName, dirPath string) error { + cm, err := cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), configmapName, metav1.GetOptions{}) + if k8serrors.IsNotFound(err) { + t.Logf("ConfigMap %q not found, skipping retrieval", configmapName) + return nil + } + + if err != nil && !k8serrors.IsNotFound(err) { + return fmt.Errorf("could not get configmap %s: %w", configmapName, err) + } + + filename := filepath.Join(dirPath, fmt.Sprintf("%s-%s-configmap.yaml", t.Name(), configmapName)) + t.Logf("Writing configmap (%s) contents to %s", configmapName, filename) + return dumpObjectToYAMLFile(t, cm, filename) +} + +// Wrttes a pod spec to a file. +func writePodSpecToFile(t *testing.T, cs *framework.ClientSet, pool *mcfgv1.MachineConfigPool) error { + dirPath := getBuildArtifactDir(t) + + podName := fmt.Sprintf("build-%s", pool.Spec.Configuration.Name) + + pod, err := cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace).Get(context.TODO(), podName, metav1.GetOptions{}) + if err == nil { + podFilename := filepath.Join(dirPath, fmt.Sprintf("%s-%s-pod.yaml", t.Name(), pod.Name)) + t.Logf("Writing spec for pod %s to %s", pod.Name, podFilename) + return dumpObjectToYAMLFile(t, pod, podFilename) + } + + if k8serrors.IsNotFound(err) { + t.Logf("Pod spec for %s not found, skipping", pod.Name) + return nil + } + + return err +} + +// Dumps a struct to the provided filename in YAML format, while ensuring that +// the filename contains both the name of the current-running test as well as +// the destination directory path. +func dumpObjectToYAMLFile(t *testing.T, obj interface{ GetName() string }, filename string) error { + dirPath := getBuildArtifactDir(t) + + // If we don't have the name of the test embedded in the filename, add it. + if !strings.Contains(filename, t.Name()) { + filename = fmt.Sprintf("%s-%s", t.Name(), filename) + } + + // If we don't have the destination directory in the filename, add it. + if !strings.Contains(filename, dirPath) { + filename = filepath.Join(dirPath, filename) + } + + out, err := yaml.Marshal(obj) + if err != nil { + return err + } + + return os.WriteFile(filename, out, 0o755) +} + +// Streams the logs from the Machine OS Builder pod containers to a set of +// files. This can provide a valuable window into how / why the e2e test suite +// failed. +func streamMachineOSBuilderPodLogsToFile(ctx context.Context, t *testing.T, cs *framework.ClientSet) error { + pods, err := cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{ + LabelSelector: "k8s-app=machine-os-builder", + }) + + require.NoError(t, err) + + mobPod := &pods.Items[0] + return streamPodContainerLogsToFile(ctx, t, cs, mobPod) +} + +// Streams the logs for all of the containers running in the build pod. The pod +// logs can provide a valuable window into how / why a given build failed. +func streamBuildPodLogsToFile(ctx context.Context, t *testing.T, cs *framework.ClientSet, pool *mcfgv1.MachineConfigPool) error { + podName := fmt.Sprintf("build-%s", pool.Spec.Configuration.Name) + + pod, err := cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace).Get(ctx, podName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("could not get pod %s: %w", podName, err) + } + + return streamPodContainerLogsToFile(ctx, t, cs, pod) +} + +// Attaches a follower to each of the containers within a given pod in order to +// stream their logs to disk for future debugging. +func streamPodContainerLogsToFile(ctx context.Context, t *testing.T, cs *framework.ClientSet, pod *corev1.Pod) error { + errGroup, egCtx := errgroup.WithContext(ctx) + + for _, container := range pod.Spec.Containers { + container := container + pod := pod.DeepCopy() + + // Because we follow the logs for each container in a build pod, this + // blocks the current Goroutine. So we run each log stream operation in a + // separate Goroutine to avoid blocking the main Goroutine. + errGroup.Go(func() error { + return streamContainerLogToFile(egCtx, t, cs, pod, container) + }) + } + + // Only propagate errors that are not a context cancellation. + if err := errGroup.Wait(); err != nil && !errors.Is(err, context.Canceled) { + return err + } + + return nil +} + +// Streams the logs for a given container to a file. +func streamContainerLogToFile(ctx context.Context, t *testing.T, cs *framework.ClientSet, pod *corev1.Pod, container corev1.Container) error { + dirPath := getBuildArtifactDir(t) + + logger, err := cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace).GetLogs(pod.Name, &corev1.PodLogOptions{ + Container: container.Name, + Follow: true, + }).Stream(ctx) + + defer logger.Close() + + if err != nil { + return fmt.Errorf("could not get logs for container %s in pod %s: %w", container.Name, pod.Name, err) + } + + filename := filepath.Join(dirPath, fmt.Sprintf("%s-%s-%s.log", t.Name(), pod.Name, container.Name)) + file, err := os.Create(filename) + if err != nil { + return err + } + + defer file.Close() + + t.Logf("Streaming pod (%s) container (%s) logs to %s", pod.Name, container.Name, filename) + if _, err := io.Copy(file, logger); err != nil { + return fmt.Errorf("could not write pod logs to %s: %w", filename, err) + } + + return nil +} + +// Skips a given test if it is detected that the cluster is running OKD. We +// skip these tests because they're either irrelevant for OKD or would fail. +func skipOnOKD(t *testing.T) { + cs := framework.NewClientSet("") + + isOKD, err := helpers.IsOKDCluster(cs) + require.NoError(t, err) + + if isOKD { + t.Logf("OKD detected, skipping test %s", t.Name()) + t.Skip() + } +} + +func skipOnOCP(t *testing.T) { + cs := framework.NewClientSet("") + isOKD, err := helpers.IsOKDCluster(cs) + require.NoError(t, err) + + if !isOKD { + t.Logf("OCP detected, skipping test %s", t.Name()) + t.Skip() + } +} + +// Extracts the contents of a directory within a given container to a temporary +// directory. Next, it loads them into a bytes map keyed by filename. It does +// not handle nested directories, so use with caution. +func convertFilesFromContainerImageToBytesMap(t *testing.T, pullspec, containerFilepath string) map[string][]byte { + tempDir := t.TempDir() + + path := fmt.Sprintf("%s:%s", containerFilepath, tempDir) + cmd := exec.Command("oc", "image", "extract", pullspec, "--path", path) + t.Logf("Extracting files under %q from %q to %q; running %s", containerFilepath, pullspec, tempDir, cmd.String()) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + require.NoError(t, cmd.Run()) + + out := map[string][]byte{} + + isCentosImage := strings.Contains(pullspec, "centos") + + err := filepath.Walk(tempDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if info.IsDir() { + return nil + } + + contents, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + if isCentosImage { + contents = bytes.ReplaceAll(contents, []byte("$stream"), []byte("9-stream")) + } + + // Replace $stream with 9-stream in any of the Centos repo content we pulled. + out[filepath.Base(path)] = contents + return nil + }) + + require.NoError(t, err) + + return out +} + +// Copy the entitlement certificates into the MCO namespace. If the secrets +// cannot be found, calls t.Skip() to skip the test. +// +// Registers and returns a cleanup function to remove the certificate(s) after test completion. +func copyEntitlementCerts(t *testing.T, cs *framework.ClientSet) func() { + namespace := "openshift-config-managed" + name := "etc-pki-entitlement" + + _, err := cs.CoreV1Interface.Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err == nil { + return cloneSecret(t, cs, name, namespace, name, ctrlcommon.MCONamespace) + } + + if apierrs.IsNotFound(err) { + t.Logf("Secret %q not found in %q, skipping test", name, namespace) + t.Skip() + return func() {} + } + + t.Fatalf("could not get %q from %q: %s", name, namespace, err) + return func() {} +} + +// Uses the centos stream 9 container and extracts the contents of both the +// /etc/yum.repos.d and /etc/pki/rpm-gpg directories and injects those into a +// ConfigMap and Secret, respectively. This is so that the build process will +// consume those objects as part of the build process, injecting them into the +// build context. +func injectYumRepos(t *testing.T, cs *framework.ClientSet) func() { + tempDir := t.TempDir() + + yumReposPath := filepath.Join(tempDir, "yum-repos-d") + require.NoError(t, os.MkdirAll(yumReposPath, 0o755)) + + centosPullspec := "quay.io/centos/centos:stream9" + yumReposContents := convertFilesFromContainerImageToBytesMap(t, centosPullspec, "/etc/yum.repos.d/") + rpmGpgContents := convertFilesFromContainerImageToBytesMap(t, centosPullspec, "/etc/pki/rpm-gpg/") + + configMapCleanupFunc := createConfigMap(t, cs, &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "etc-yum-repos-d", + Namespace: ctrlcommon.MCONamespace, + }, + // Note: Even though the BuildController retrieves this ConfigMap, it only + // does so to determine whether or not it is present. It does not look at + // its contents. For that reason, we can use the BinaryData field here + // because the Build Pod will use its contents the same regardless of + // whether its string data or binary data. + BinaryData: yumReposContents, + }) + + secretCleanupFunc := createSecret(t, cs, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "etc-pki-rpm-gpg", + Namespace: ctrlcommon.MCONamespace, + }, + Data: rpmGpgContents, + }) + + return makeIdempotentAndRegister(t, func() { + configMapCleanupFunc() + secretCleanupFunc() + }) +} + +// Clones a given secret from a given namespace into the MCO namespace. +// Registers and returns a cleanup function to delete the secret upon test +// completion. +func cloneSecret(t *testing.T, cs *framework.ClientSet, srcName, srcNamespace, dstName, dstNamespace string) func() { + secret, err := cs.CoreV1Interface.Secrets(srcNamespace).Get(context.TODO(), srcName, metav1.GetOptions{}) + require.NoError(t, err) + + secretCopy := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: dstName, + Namespace: dstNamespace, + }, + Data: secret.Data, + Type: secret.Type, + } + + cleanup := createSecret(t, cs, secretCopy) + t.Logf("Cloned \"%s/%s\" to \"%s/%s\"", srcNamespace, srcName, dstNamespace, dstName) + + return makeIdempotentAndRegister(t, func() { + cleanup() + t.Logf("Deleted cloned secret \"%s/%s\"", dstNamespace, dstName) + }) +} diff --git a/test/e2e-techpreview/onclusterbuild_test.go b/test/e2e-techpreview/onclusterbuild_test.go index 3f508dea01..386cd29b20 100644 --- a/test/e2e-techpreview/onclusterbuild_test.go +++ b/test/e2e-techpreview/onclusterbuild_test.go @@ -2,15 +2,16 @@ package e2e_techpreview_test import ( "context" + _ "embed" "flag" "strings" "testing" corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/util/retry" ign3types "github.com/coreos/ignition/v2/config/v3_4/types" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" "github.com/openshift/machine-config-operator/pkg/controller/build" "github.com/openshift/machine-config-operator/test/framework" @@ -18,6 +19,7 @@ import ( "github.com/stretchr/testify/require" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -31,15 +33,28 @@ const ( // The name of the global pull secret copy to use for the tests. globalPullSecretCloneName string = "global-pull-secret-copy" +) - // The custom Dockerfile content to build for the tests. - cowsayDockerfile string = `FROM quay.io/centos/centos:stream9 AS centos -RUN dnf install -y epel-release -FROM configs AS final -COPY --from=centos /etc/yum.repos.d /etc/yum.repos.d -COPY --from=centos /etc/pki/rpm-gpg/RPM-GPG-KEY-* /etc/pki/rpm-gpg/ -RUN sed -i 's/\$stream/9-stream/g' /etc/yum.repos.d/centos*.repo && \ - rpm-ostree install cowsay` +var ( + // Provides a Containerfile that installs cowsayusing the Centos Stream 9 + // EPEL repository to do so without requiring any entitlements. + //go:embed Containerfile.cowsay + cowsayDockerfile string + + // Provides a Containerfile that installs Buildah from the default RHCOS RPM + // repositories. If the installation succeeds, the entitlement certificate is + // working. + //go:embed Containerfile.entitled + entitledDockerfile string + + // Provides a Containerfile that works similarly to the cowsay Dockerfile + // with the exception that the /etc/yum.repos.d and /etc/pki/rpm-gpg key + // content is mounted into the build context by the BuildController. + //go:embed Containerfile.yum-repos-d + yumReposDockerfile string + + //go:embed Containerfile.okd-fcos + okdFcosDockerfile string ) var skipCleanup bool @@ -62,15 +77,21 @@ type onClusterBuildTestOpts struct { // What MachineConfigPool name to use for the test. poolName string + + // Use RHEL entitlements + useEtcPkiEntitlement bool + + // Inject YUM repo information from a Centos 9 stream container + useYumRepos bool } -// Tests that an on-cluster build can be performed with the OpenShift Image Builder. -func TestOnClusterBuildsOpenshiftImageBuilder(t *testing.T) { +func TestOnClusterBuildsOnOKD(t *testing.T) { + skipOnOCP(t) + runOnClusterBuildTest(t, onClusterBuildTestOpts{ - imageBuilderType: build.OpenshiftImageBuilder, - poolName: layeredMCPName, + poolName: layeredMCPName, customDockerfiles: map[string]string{ - layeredMCPName: cowsayDockerfile, + layeredMCPName: okdFcosDockerfile, }, }) } @@ -78,8 +99,7 @@ func TestOnClusterBuildsOpenshiftImageBuilder(t *testing.T) { // Tests tha an on-cluster build can be performed with the Custom Pod Builder. func TestOnClusterBuildsCustomPodBuilder(t *testing.T) { runOnClusterBuildTest(t, onClusterBuildTestOpts{ - imageBuilderType: build.CustomPodImageBuilder, - poolName: layeredMCPName, + poolName: layeredMCPName, customDockerfiles: map[string]string{ layeredMCPName: cowsayDockerfile, }, @@ -90,8 +110,7 @@ func TestOnClusterBuildsCustomPodBuilder(t *testing.T) { // is rolled out to an opted-in node. func TestOnClusterBuildRollsOutImage(t *testing.T) { imagePullspec := runOnClusterBuildTest(t, onClusterBuildTestOpts{ - imageBuilderType: build.OpenshiftImageBuilder, - poolName: layeredMCPName, + poolName: layeredMCPName, customDockerfiles: map[string]string{ layeredMCPName: cowsayDockerfile, }, @@ -108,97 +127,175 @@ func TestOnClusterBuildRollsOutImage(t *testing.T) { t.Log(helpers.ExecCmdOnNode(t, cs, node, "chroot", "/rootfs", "cowsay", "Moo!")) } +// This test extracts the /etc/yum.repos.d and /etc/pki/rpm-gpg content from a +// Centos Stream 9 image and injects them into the MCO namespace. It then +// performs a build with the expectation that these artifacts will be used, +// simulating a build where someone has added this content; usually a Red Hat +// Satellite user. +func TestYumReposBuilds(t *testing.T) { + runOnClusterBuildTest(t, onClusterBuildTestOpts{ + poolName: layeredMCPName, + customDockerfiles: map[string]string{ + layeredMCPName: yumReposDockerfile, + }, + useYumRepos: true, + }) +} + +// Clones the etc-pki-entitlement certificate from the openshift-config-managed +// namespace into the MCO namespace. Then performs an on-cluster layering build +// which should consume the entitlement certificates. +func TestEntitledBuilds(t *testing.T) { + skipOnOKD(t) + + runOnClusterBuildTest(t, onClusterBuildTestOpts{ + poolName: layeredMCPName, + customDockerfiles: map[string]string{ + layeredMCPName: entitledDockerfile, + }, + useEtcPkiEntitlement: true, + }) +} + // Sets up and performs an on-cluster build for a given set of parameters. // Returns the built image pullspec for later consumption. func runOnClusterBuildTest(t *testing.T, testOpts onClusterBuildTestOpts) string { + ctx, cancel := context.WithCancel(context.Background()) + cancel = makeIdempotentAndRegister(t, cancel) + cs := framework.NewClientSet("") - t.Logf("Running with ImageBuilder type: %s", testOpts.imageBuilderType) + imageBuilder := testOpts.imageBuilderType + if testOpts.imageBuilderType == "" { + imageBuilder = build.CustomPodImageBuilder + } + + t.Logf("Running with ImageBuilder type: %s", imageBuilder) - prepareForTest(t, cs, testOpts) + mosc := prepareForTest(t, cs, testOpts) - optPoolIntoLayering(t, cs, testOpts.poolName) + t.Cleanup(createMachineOSConfig(t, cs, mosc)) - t.Logf("Wait for build to start") - waitForPoolToReachState(t, cs, testOpts.poolName, func(mcp *mcfgv1.MachineConfigPool) bool { - return ctrlcommon.NewLayeredPoolState(mcp).IsBuilding() - }) + // Create a child context for the machine-os-builder pod log streamer. We + // create it here because we want the cancellation to run before the + // MachineOSConfig object is removed. + mobPodStreamerCtx, mobPodStreamerCancel := context.WithCancel(ctx) + t.Cleanup(mobPodStreamerCancel) - t.Logf("Build started! Waiting for completion...") - imagePullspec := "" - waitForPoolToReachState(t, cs, testOpts.poolName, func(mcp *mcfgv1.MachineConfigPool) bool { - lps := ctrlcommon.NewLayeredPoolState(mcp) - if lps.HasOSImage() && lps.IsBuildSuccess() { - imagePullspec = lps.GetOSImage() - return true + t.Logf("Wait for build to start") + waitForMachineOSBuildToReachState(t, cs, testOpts.poolName, func(mosb *mcfgv1alpha1.MachineOSBuild, err error) (bool, error) { + // If we had any errors retrieving the build, (other than it not existing yet), stop here. + if err != nil && !k8serrors.IsNotFound(err) { + return false, err } - if lps.IsBuildFailure() { - t.Fatalf("Build unexpectedly failed.") + // The build object has not been created yet, requeue and try again. + if mosb == nil && k8serrors.IsNotFound(err) { + t.Logf("MachineOSBuild does not exist yet, retrying...") + return false, nil } - return false + // At this point, the build object exists and we want to ensure that it is running. + return ctrlcommon.NewMachineOSBuildState(mosb).IsBuilding(), nil }) - t.Logf("MachineConfigPool %q has finished building. Got image: %s", testOpts.poolName, imagePullspec) - - return imagePullspec -} - -// Adds the layeringEnabled label to the target MachineConfigPool and registers -// / returns a function to unlabel it. -func optPoolIntoLayering(t *testing.T, cs *framework.ClientSet, pool string) func() { - err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - mcp, err := cs.MachineconfigurationV1Interface.MachineConfigPools().Get(context.TODO(), pool, metav1.GetOptions{}) - require.NoError(t, err) + t.Logf("Build started! Waiting for completion...") - if mcp.Labels == nil { - mcp.Labels = map[string]string{} - } + // Create a child context for the build pod log streamer. This is so we can + // cancel it independently of the parent context or the context for the + // machine-os-build pod watcher (which has its own separate context). + buildPodStreamerCtx, buildPodStreamerCancel := context.WithCancel(ctx) - mcp.Labels[ctrlcommon.LayeringEnabledPoolLabel] = "" + // We wire this to both t.Cleanup() as well as defer because we want to + // cancel this context either at the end of this function or when the test + // fails, whichever comes first. + buildPodWatcherShutdown := makeIdempotentAndRegister(t, buildPodStreamerCancel) + defer buildPodWatcherShutdown() - _, err = cs.MachineconfigurationV1Interface.MachineConfigPools().Update(context.TODO(), mcp, metav1.UpdateOptions{}) - if err == nil { - t.Logf("Added label %q to MachineConfigPool %s to opt into layering", ctrlcommon.LayeringEnabledPoolLabel, pool) + t.Cleanup(func() { + if t.Failed() { + writeBuildArtifactsToFiles(t, cs, testOpts.poolName) } - return err }) - require.NoError(t, err) + // The pod log collection blocks the main Goroutine since we follow the logs + // for each container in the build pod. So they must run in a separate + // Goroutine so that the rest of the test can continue. + go func() { + pool, err := cs.MachineconfigurationV1Interface.MachineConfigPools().Get(buildPodStreamerCtx, testOpts.poolName, metav1.GetOptions{}) + require.NoError(t, err) + err = streamBuildPodLogsToFile(buildPodStreamerCtx, t, cs, pool) + require.NoError(t, err, "expected no error, got %s", err) + }() + + // We also want to collect logs from the machine-os-builder pod since they + // can provide a valuable window in how / why a test failed. As mentioned + // above, we need to run this in a separate Goroutine so that the test is not + // blocked. + go func() { + err := streamMachineOSBuilderPodLogsToFile(mobPodStreamerCtx, t, cs) + require.NoError(t, err, "expected no error, got: %s", err) + }() + + var build *mcfgv1alpha1.MachineOSBuild + waitForMachineOSBuildToReachState(t, cs, testOpts.poolName, func(mosb *mcfgv1alpha1.MachineOSBuild, err error) (bool, error) { + if err != nil { + return false, err + } - return makeIdempotentAndRegister(t, func() { - err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - mcp, err := cs.MachineconfigurationV1Interface.MachineConfigPools().Get(context.TODO(), pool, metav1.GetOptions{}) - require.NoError(t, err) + build = mosb - delete(mcp.Labels, ctrlcommon.LayeringEnabledPoolLabel) + state := ctrlcommon.NewMachineOSBuildState(mosb) - _, err = cs.MachineconfigurationV1Interface.MachineConfigPools().Update(context.TODO(), mcp, metav1.UpdateOptions{}) - if err == nil { - t.Logf("Removed label %q to MachineConfigPool %s to opt out of layering", ctrlcommon.LayeringEnabledPoolLabel, pool) - } - return err - }) + if state.IsBuildFailure() { + t.Fatalf("MachineOSBuild %q unexpectedly failed", mosb.Name) + } - require.NoError(t, err) + return state.IsBuildSuccess(), nil }) + + t.Logf("MachineOSBuild %q has finished building. Got image: %s", build.Name, build.Status.FinalImagePushspec) + + return build.Status.FinalImagePushspec } // Prepares for an on-cluster build test by performing the following: // - Gets the Docker Builder secret name from the MCO namespace. // - Creates the imagestream to use for the test. // - Clones the global pull secret into the MCO namespace. +// - If requested, clones the RHEL entitlement secret into the MCO namespace. // - Creates the on-cluster-build-config ConfigMap. // - Creates the target MachineConfigPool and waits for it to get a rendered config. // - Creates the on-cluster-build-custom-dockerfile ConfigMap. // // Each of the object creation steps registers an idempotent cleanup function // that will delete the object at the end of the test. -func prepareForTest(t *testing.T, cs *framework.ClientSet, testOpts onClusterBuildTestOpts) { +// +// Returns a MachineOSConfig object for the caller to create to begin the build +// process. +func prepareForTest(t *testing.T, cs *framework.ClientSet, testOpts onClusterBuildTestOpts) *mcfgv1alpha1.MachineOSConfig { + // If the test requires RHEL entitlements, clone them from + // "etc-pki-entitlement" in the "openshift-config-managed" namespace. + if testOpts.useEtcPkiEntitlement { + t.Cleanup(copyEntitlementCerts(t, cs)) + } + + // If the test requires /etc/yum.repos.d and /etc/pki/rpm-gpg, pull a Centos + // Stream 9 container image and populate them from there. This is intended to + // emulate the Red Hat Satellite enablement process, but does not actually + // require any Red Hat Satellite creds to work. + if testOpts.useYumRepos { + t.Cleanup(injectYumRepos(t, cs)) + } + pushSecretName, err := getBuilderPushSecretName(cs) require.NoError(t, err) + // Register ephemeral object cleanup function. + t.Cleanup(func() { + cleanupEphemeralBuildObjects(t, cs) + }) + imagestreamName := "os-image" t.Cleanup(createImagestream(t, cs, imagestreamName)) @@ -207,30 +304,49 @@ func prepareForTest(t *testing.T, cs *framework.ClientSet, testOpts onClusterBui finalPullspec, err := getImagestreamPullspec(cs, imagestreamName) require.NoError(t, err) - cmCleanup := createConfigMap(t, cs, &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: build.OnClusterBuildConfigMapName, - Namespace: ctrlcommon.MCONamespace, - }, - Data: map[string]string{ - build.BaseImagePullSecretNameConfigKey: globalPullSecretCloneName, - build.FinalImagePushSecretNameConfigKey: pushSecretName, - build.FinalImagePullspecConfigKey: finalPullspec, - build.ImageBuilderTypeConfigMapKey: string(testOpts.imageBuilderType), - }, - }) - - t.Cleanup(cmCleanup) - t.Cleanup(makeIdempotentAndRegister(t, helpers.CreateMCP(t, cs, testOpts.poolName))) - t.Cleanup(createCustomDockerfileConfigMap(t, cs, testOpts.customDockerfiles)) - _, err = helpers.WaitForRenderedConfig(t, cs, testOpts.poolName, "00-worker") require.NoError(t, err) + + return &mcfgv1alpha1.MachineOSConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: testOpts.poolName, + }, + Spec: mcfgv1alpha1.MachineOSConfigSpec{ + MachineConfigPool: mcfgv1alpha1.MachineConfigPoolReference{ + Name: testOpts.poolName, + }, + BuildInputs: mcfgv1alpha1.BuildInputs{ + BaseImagePullSecret: mcfgv1alpha1.ImageSecretObjectReference{ + Name: globalPullSecretCloneName, + }, + RenderedImagePushSecret: mcfgv1alpha1.ImageSecretObjectReference{ + Name: pushSecretName, + }, + RenderedImagePushspec: finalPullspec, + ImageBuilder: &mcfgv1alpha1.MachineOSImageBuilder{ + ImageBuilderType: mcfgv1alpha1.PodBuilder, + }, + Containerfile: []mcfgv1alpha1.MachineOSContainerfile{ + { + ContainerfileArch: mcfgv1alpha1.NoArch, + Content: testOpts.customDockerfiles[testOpts.poolName], + }, + }, + }, + BuildOutputs: mcfgv1alpha1.BuildOutputs{ + CurrentImagePullSecret: mcfgv1alpha1.ImageSecretObjectReference{ + Name: pushSecretName, + }, + }, + }, + } } func TestSSHKeyAndPasswordForOSBuilder(t *testing.T) { + t.Skip() + cs := framework.NewClientSet("") // label random node from pool, get the node @@ -239,7 +355,6 @@ func TestSSHKeyAndPasswordForOSBuilder(t *testing.T) { // prepare for on cluster build test prepareForTest(t, cs, onClusterBuildTestOpts{ - imageBuilderType: build.OpenshiftImageBuilder, poolName: layeredMCPName, customDockerfiles: map[string]string{}, }) diff --git a/test/framework/clientset.go b/test/framework/clientset.go index 6c4e2dca78..caa2016c1a 100644 --- a/test/framework/clientset.go +++ b/test/framework/clientset.go @@ -8,6 +8,7 @@ import ( clientconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" clientimagev1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" clientmachineconfigv1 "github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1" + clientmachineconfigv1alpha1 "github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1" clientoperatorsv1alpha1 "github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1" clientapiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1" @@ -26,6 +27,7 @@ type ClientSet struct { clientoperatorsv1alpha1.OperatorV1alpha1Interface clientbuildv1.BuildV1Interface clientimagev1.ImageV1Interface + clientmachineconfigv1alpha1.MachineconfigurationV1alpha1Interface kubeconfig string config *rest.Config } @@ -72,14 +74,15 @@ func NewClientSet(kubeconfig string) *ClientSet { // NewClientSetFromConfig returns a *ClientBuilder with the given rest config. func NewClientSetFromConfig(config *rest.Config) *ClientSet { return &ClientSet{ - CoreV1Interface: corev1client.NewForConfigOrDie(config), - AppsV1Interface: appsv1client.NewForConfigOrDie(config), - ConfigV1Interface: clientconfigv1.NewForConfigOrDie(config), - MachineconfigurationV1Interface: clientmachineconfigv1.NewForConfigOrDie(config), - ApiextensionsV1Interface: clientapiextensionsv1.NewForConfigOrDie(config), - OperatorV1alpha1Interface: clientoperatorsv1alpha1.NewForConfigOrDie(config), - BuildV1Interface: clientbuildv1.NewForConfigOrDie(config), - ImageV1Interface: clientimagev1.NewForConfigOrDie(config), - config: config, + CoreV1Interface: corev1client.NewForConfigOrDie(config), + AppsV1Interface: appsv1client.NewForConfigOrDie(config), + ConfigV1Interface: clientconfigv1.NewForConfigOrDie(config), + MachineconfigurationV1Interface: clientmachineconfigv1.NewForConfigOrDie(config), + ApiextensionsV1Interface: clientapiextensionsv1.NewForConfigOrDie(config), + OperatorV1alpha1Interface: clientoperatorsv1alpha1.NewForConfigOrDie(config), + BuildV1Interface: clientbuildv1.NewForConfigOrDie(config), + ImageV1Interface: clientimagev1.NewForConfigOrDie(config), + MachineconfigurationV1alpha1Interface: clientmachineconfigv1alpha1.NewForConfigOrDie(config), + config: config, } } diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-TechPreviewNoUpgrade.crd.yaml index 26bd68689c..6e3112a961 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-TechPreviewNoUpgrade.crd.yaml @@ -155,4 +155,4 @@ spec: served: true storage: true subresources: - status: {} + status: {} \ No newline at end of file diff --git a/vendor/k8s.io/code-generator/generate-groups.sh b/vendor/k8s.io/code-generator/generate-groups.sh old mode 100755 new mode 100644 diff --git a/vendor/k8s.io/code-generator/generate-internal-groups.sh b/vendor/k8s.io/code-generator/generate-internal-groups.sh old mode 100755 new mode 100644