diff --git a/cmd/machine-config-controller/start.go b/cmd/machine-config-controller/start.go index 432577f80b..26a84a666a 100644 --- a/cmd/machine-config-controller/start.go +++ b/cmd/machine-config-controller/start.go @@ -195,6 +195,7 @@ func createControllers(ctx *ctrlcommon.ControllerContext) []ctrlcommon.Controlle ctx.InformerFactory.Machineconfiguration().V1().MachineConfigPools(), ctx.KubeInformerFactory.Core().V1().Nodes(), ctx.KubeInformerFactory.Core().V1().Pods(), + ctx.InformerFactory.Machineconfiguration().V1alpha1().MachineOSBuilds(), ctx.ConfigInformerFactory.Config().V1().Schedulers(), ctx.ClientBuilder.KubeClientOrDie("node-update-controller"), ctx.ClientBuilder.MachineConfigClientOrDie("node-update-controller"), diff --git a/cmd/machine-os-builder/start.go b/cmd/machine-os-builder/start.go index f9348f8593..fd7ff210b5 100644 --- a/cmd/machine-os-builder/start.go +++ b/cmd/machine-os-builder/start.go @@ -7,6 +7,8 @@ import ( "fmt" "os" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + "github.com/openshift/machine-config-operator/internal/clients" "github.com/openshift/machine-config-operator/pkg/controller/build" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" @@ -37,6 +39,12 @@ func init() { startCmd.PersistentFlags().StringVar(&startOpts.kubeconfig, "kubeconfig", "", "Kubeconfig file to access a remote cluster (testing only)") } +func getMachineOSConfigs(ctx context.Context, cb *clients.Builder) (*mcfgv1alpha1.MachineOSConfigList, error) { + mcfgClient := cb.MachineConfigClientOrDie(componentName) + return mcfgClient.MachineconfigurationV1alpha1().MachineOSConfigs().List(ctx, metav1.ListOptions{}) + +} + // Checks if the on-cluster-build-config ConfigMap exists. If it exists, return the ConfigMap. // If not, return an error. func getBuildControllerConfigMap(ctx context.Context, cb *clients.Builder) (*corev1.ConfigMap, error) { @@ -57,13 +65,8 @@ func getBuildControllerConfigMap(ctx context.Context, cb *clients.Builder) (*cor // Creates a new BuildController configured for a certain image builder based // upon the imageBuilderType key in the on-cluster-build-config ConfigMap. -func getBuildController(ctx context.Context, cb *clients.Builder) (*build.Controller, error) { - onClusterBuildConfigMap, err := getBuildControllerConfigMap(ctx, cb) - if err != nil { - return nil, err - } - - imageBuilderType, err := build.GetImageBuilderType(onClusterBuildConfigMap) +func getBuildController(ctx context.Context, cb *clients.Builder) ([]*build.Controller, error) { + machineOSConfigs, err := getMachineOSConfigs(ctx, cb) if err != nil { return nil, err } @@ -72,11 +75,12 @@ func getBuildController(ctx context.Context, cb *clients.Builder) (*build.Contro buildClients := build.NewClientsFromControllerContext(ctrlCtx) cfg := build.DefaultBuildControllerConfig() - if imageBuilderType == build.OpenshiftImageBuilder { - return build.NewWithImageBuilder(cfg, buildClients), nil - } + controllersToStart := []*build.Controller{} - return build.NewWithCustomPodBuilder(cfg, buildClients), nil + for range machineOSConfigs.Items { + controllersToStart = append(controllersToStart, build.NewWithCustomPodBuilder(cfg, buildClients)) + } + return controllersToStart, nil } func runStartCmd(_ *cobra.Command, _ []string) { @@ -90,13 +94,12 @@ func runStartCmd(_ *cobra.Command, _ []string) { klog.Infof("Version: %+v (%s)", version.Raw, version.Hash) ctx, cancel := context.WithCancel(context.Background()) - cb, err := clients.NewBuilder("") if err != nil { klog.Fatalln(err) } - ctrl, err := getBuildController(ctx, cb) + controllers, err := getBuildController(ctx, cb) if err != nil { klog.Fatalln(err) var invalidImageBuiler *build.ErrInvalidImageBuilder @@ -107,7 +110,14 @@ func runStartCmd(_ *cobra.Command, _ []string) { } } - go ctrl.Run(ctx, 5) - <-ctx.Done() + // is this... allowed? + // since users can specify different settings per pool, we need to run a controller PER pool. Otherwise, settings will be conflated, as will failures and builds. + for _, ctrl := range controllers { + go ctrl.Run(ctx, 3) + <-ctx.Done() + cancel() + } + cancel() + } diff --git a/install/0000_80_machine-config_00_clusterreader_clusterrole.yaml b/install/0000_80_machine-config_00_clusterreader_clusterrole.yaml index fc831ee40f..2712cca95b 100644 --- a/install/0000_80_machine-config_00_clusterreader_clusterrole.yaml +++ b/install/0000_80_machine-config_00_clusterreader_clusterrole.yaml @@ -56,3 +56,23 @@ rules: - get - list - watch + - apiGroups: + - machineconfiguration.openshift.io + resources: + - machineosconfigs + - machineosconfigs/status + verbs: + - create + - update + - patch + - get + - apiGroups: + - machineconfiguration.openshift.io + resources: + - machineosbuilds + - machineosbuilds/status + verbs: + - create + - update + - patch + - get diff --git a/manifests/machineconfigcontroller/clusterrole.yaml b/manifests/machineconfigcontroller/clusterrole.yaml index 9bd6ac4260..98f0820848 100644 --- a/manifests/machineconfigcontroller/clusterrole.yaml +++ b/manifests/machineconfigcontroller/clusterrole.yaml @@ -42,6 +42,12 @@ rules: - apiGroups: ["operator.openshift.io"] resources: ["machineconfigurations"] verbs: ["get","list","watch"] +- apiGroups: ["machineconfiguration.openshift.io"] + resources: ["machineosconfigs", "machineosconfigs/status"] + verbs: ["create", "update", "patch", "get"] +- apiGroups: ["machineconfiguration.openshift.io"] + resources: ["machineosbuilds", "machineosbuilds/status"] + verbs: ["create", "update", "patch", "get"] - apiGroups: - authentication.k8s.io resources: diff --git a/pkg/apihelpers/machineosbuild_apihelpers.go b/pkg/apihelpers/machineosbuild_apihelpers.go new file mode 100644 index 0000000000..31367cb5b4 --- /dev/null +++ b/pkg/apihelpers/machineosbuild_apihelpers.go @@ -0,0 +1,79 @@ +package apihelpers + +import ( + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NewMachineOSBuildCondition creates a new MMachineOSBuild condition. +func NewMachineOSBuildCondition(condType string, status metav1.ConditionStatus, reason, message string) *metav1.Condition { + return &metav1.Condition{ + Type: condType, + Status: status, + LastTransitionTime: metav1.Now(), + Reason: reason, + Message: message, + } +} + +func GetMachineOSBuildCondition(status mcfgv1alpha1.MachineOSBuildStatus, condType mcfgv1alpha1.BuildProgress) *metav1.Condition { + // in case of sync errors, return the last condition that matches, not the first + // this exists for redundancy and potential race conditions. + var LatestState *metav1.Condition + for i := range status.Conditions { + c := status.Conditions[i] + if mcfgv1alpha1.BuildProgress(c.Type) == condType { + LatestState = &c + } + } + return LatestState +} + +// SetMachineConfigPoolCondition updates the MachineConfigPool to include the provided condition. If the condition that +// we are about to add already exists and has the same status and reason then we are not going to update. +func SetMachineOSBuildCondition(status *mcfgv1alpha1.MachineOSBuildStatus, condition metav1.Condition) { + currentCond := GetMachineOSBuildCondition(*status, mcfgv1alpha1.BuildProgress(condition.Type)) + if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason && currentCond.Message == condition.Message { + return + } + // Do not update lastTransitionTime if the status of the condition doesn't change. + if currentCond != nil && currentCond.Status == condition.Status { + condition.LastTransitionTime = currentCond.LastTransitionTime + } + + // this may not be necessary + newConditions := filterOutMachineOSBuildCondition(status.Conditions, mcfgv1alpha1.BuildProgress(condition.Type)) + status.Conditions = append(newConditions, condition) +} + +// RemoveMachineOSBuildCondition removes the MachineOSBuild condition with the provided type. +func RemoveMachineOSBuildCondition(status *mcfgv1alpha1.MachineOSBuildStatus, condType mcfgv1alpha1.BuildProgress) { + status.Conditions = filterOutMachineOSBuildCondition(status.Conditions, condType) +} + +// filterOutMachineOSBuildCondition returns a new slice of MachineOSBuild conditions without conditions with the provided type. +func filterOutMachineOSBuildCondition(conditions []metav1.Condition, condType mcfgv1alpha1.BuildProgress) []metav1.Condition { + var newConditions []metav1.Condition + for _, c := range conditions { + if mcfgv1alpha1.BuildProgress(c.Type) == condType { + continue + } + newConditions = append(newConditions, c) + } + return newConditions +} + +func IsMachineOSBuildConditionTrue(conditions []metav1.Condition, conditionType mcfgv1alpha1.BuildProgress) bool { + return IsMachineOSBuildConditionPresentAndEqual(conditions, conditionType, metav1.ConditionTrue) +} + +// IsMachineOSBuildConditionPresentAndEqual returns true when conditionType is present and equal to status. +func IsMachineOSBuildConditionPresentAndEqual(conditions []metav1.Condition, conditionType mcfgv1alpha1.BuildProgress, status metav1.ConditionStatus) bool { + for _, condition := range conditions { + if mcfgv1alpha1.BuildProgress(condition.Type) == conditionType { + return condition.Status == status + } + } + return false +} diff --git a/pkg/controller/build/assets/Dockerfile.on-cluster-build-template b/pkg/controller/build/assets/Dockerfile.on-cluster-build-template index 26420b4931..49471ab139 100644 --- a/pkg/controller/build/assets/Dockerfile.on-cluster-build-template +++ b/pkg/controller/build/assets/Dockerfile.on-cluster-build-template @@ -5,22 +5,22 @@ # Decode and extract the MachineConfig from the gzipped ConfigMap and move it # into position. We do this in a separate stage so that we don't have the # gzipped MachineConfig laying around. -FROM {{.BaseImage.Pullspec}} AS extract +FROM {{.MachineOSConfig.Spec.BuildInputs.BaseOSImagePullspec}} AS extract COPY ./machineconfig/machineconfig.json.gz /tmp/machineconfig.json.gz RUN mkdir -p /etc/machine-config-daemon && \ cat /tmp/machineconfig.json.gz | base64 -d | gunzip - > /etc/machine-config-daemon/currentconfig -{{if .ExtensionsImage.Pullspec}} +{{if .MachineOSConfig.Spec.BuildInputs.BaseOSExtensionsImagePullspec}} # Pull our extensions image. Not sure yet what / how this should be wired up # though. Ideally, I'd like to use some Buildah tricks to have the extensions # directory mounted into the container at build-time so that I don't have to # copy the RPMs into the container, configure the repo, and do the # installation. Alternatively, I'd have to start a pod with an HTTP server. -FROM {{.ExtensionsImage.Pullspec}} AS extensions +FROM {{.MachineOSConfig.Spec.BuildInputs.BaseOSExtensionsImagePullspec}} AS extensions {{end}} -FROM {{.BaseImage.Pullspec}} AS configs +FROM {{.MachineOSConfig.Spec.BuildInputs.BaseOSImagePullspec}} AS configs # Copy the extracted MachineConfig into the expected place in the image. COPY --from=extract /etc/machine-config-daemon/currentconfig /etc/machine-config-daemon/currentconfig # Do the ignition live-apply, extracting the Ignition config from the MachineConfig. @@ -29,11 +29,11 @@ COPY --from=extract /etc/machine-config-daemon/currentconfig /etc/machine-config RUN container="oci" exec -a ignition-apply /usr/lib/dracut/modules.d/30ignition/ignition --ignore-unsupported <(cat /etc/machine-config-daemon/currentconfig | jq '.spec.config') && \ ostree container commit -LABEL machineconfig={{.Pool.Spec.Configuration.Name}} -LABEL machineconfigpool={{.Pool.Name}} -LABEL releaseversion={{.ReleaseVersion}} -LABEL baseOSContainerImage={{.BaseImage.Pullspec}} +LABEL machineconfig={{.MachineOSBuild.Spec.DesiredConfig.Name}} +LABEL machineconfigpool={{.MachineOSConfig.Spec.MachineConfigPool.Name}} +LABEL releaseversion={{.MachineOSConfig.Spec.BuildInputs.ReleaseVersion}} +LABEL baseOSContainerImage={{.MachineOSConfig.Spec.BuildInputs.BaseOSImagePullspec}} -{{if .CustomDockerfile}} -{{.CustomDockerfile}} +{{if .Containerfile}} +{{.Containerfile}} {{end}} diff --git a/pkg/controller/build/build_controller.go b/pkg/controller/build/build_controller.go index be416aefa6..2e18df590d 100644 --- a/pkg/controller/build/build_controller.go +++ b/pkg/controller/build/build_controller.go @@ -4,13 +4,14 @@ import ( "bytes" "context" "fmt" - "os" "strings" "time" "github.com/containers/image/v5/docker/reference" - buildv1 "github.com/openshift/api/build/v1" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + + mcfginformersv1alpha1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" aggerrors "k8s.io/apimachinery/pkg/util/errors" @@ -38,6 +39,7 @@ import ( mcfginformersv1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1" mcfglistersv1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1" + mcfglistersv1alpha1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1alpha1" corelistersv1 "k8s.io/client-go/listers/core/v1" coreinformers "k8s.io/client-go/informers" @@ -47,6 +49,7 @@ import ( ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/api/equality" k8serrors "k8s.io/apimachinery/pkg/api/errors" "github.com/openshift/machine-config-operator/internal/clients" @@ -109,23 +112,22 @@ func (e *ErrInvalidImageBuilder) Error() string { // Image builder constants. type ImageBuilderType string +type BuildProgress string const ( - // ImageBuilderTypeConfigMapKey is the key in the ConfigMap that determines which type of image builder to use. - ImageBuilderTypeConfigMapKey string = "imageBuilderType" - - // OpenshiftImageBuilder is the constant indicating use of the OpenShift image builder. - OpenshiftImageBuilder ImageBuilderType = "openshift-image-builder" // CustomPodImageBuilder is the constant indicating use of the custom pod image builder. - CustomPodImageBuilder ImageBuilderType = "custom-pod-builder" + CustomPodImageBuilder ImageBuilderType = "CustomPodBuilder" + // Missing build progress constants from the API + MachineOSBuildReady BuildProgress = "Ready" + MachineOSBuildRestarted BuildProgress = "Restarted" ) var ( // controllerKind contains the schema.GroupVersionKind for this controller type. //nolint:varcheck,deadcode // This will be used eventually controllerKind = mcfgv1.SchemeGroupVersion.WithKind("MachineConfigPool") - validImageBuilderTypes = sets.New[ImageBuilderType](OpenshiftImageBuilder, CustomPodImageBuilder) + validImageBuilderTypes = sets.New[ImageBuilderType](CustomPodImageBuilder) ) //nolint:revive // If I name this ControllerConfig, that name will be overloaded :P @@ -147,9 +149,8 @@ type BuildControllerConfig struct { type ImageBuilder interface { Run(context.Context, int) StartBuild(ImageBuildRequest) (*corev1.ObjectReference, error) - IsBuildRunning(*mcfgv1.MachineConfigPool) (bool, error) - DeleteBuildObject(*mcfgv1.MachineConfigPool) error - FinalPullspec(*mcfgv1.MachineConfigPool) (string, error) + IsBuildRunning(*mcfgv1alpha1.MachineOSBuild, *mcfgv1alpha1.MachineOSConfig) (bool, error) + DeleteBuildObject(*mcfgv1alpha1.MachineOSBuild, *mcfgv1alpha1.MachineOSConfig) error } // Controller defines the build controller. @@ -159,18 +160,21 @@ type Controller struct { eventRecorder record.EventRecorder - syncHandler func(mcp string) error - enqueueMachineConfigPool func(*mcfgv1.MachineConfigPool) + syncHandler func(build string) error - cmLister corelistersv1.ConfigMapLister - ccLister mcfglistersv1.ControllerConfigLister - mcpLister mcfglistersv1.MachineConfigPoolLister + cmLister corelistersv1.ConfigMapLister + ccLister mcfglistersv1.ControllerConfigLister + mcpLister mcfglistersv1.MachineConfigPoolLister + machineOSBuildLister mcfglistersv1alpha1.MachineOSBuildLister + machineOSConfigLister mcfglistersv1alpha1.MachineOSConfigLister - ccListerSynced cache.InformerSynced - mcpListerSynced cache.InformerSynced - podListerSynced cache.InformerSynced + machineOSConfigListerSynced cache.InformerSynced + machineOSBuildListerSynced cache.InformerSynced + ccListerSynced cache.InformerSynced + mcpListerSynced cache.InformerSynced + podListerSynced cache.InformerSynced - queue workqueue.RateLimitingInterface + mosQueue workqueue.RateLimitingInterface config BuildControllerConfig imageBuilder ImageBuilder @@ -206,12 +210,14 @@ func NewClients(cb *clients.Builder) *Clients { // Holds and starts each of the infomrers used by the Build Controller and its subcontrollers. type informers struct { - ccInformer mcfginformersv1.ControllerConfigInformer - mcpInformer mcfginformersv1.MachineConfigPoolInformer - buildInformer buildinformersv1.BuildInformer - podInformer coreinformersv1.PodInformer - cmInformer coreinformersv1.ConfigMapInformer - toStart []interface{ Start(<-chan struct{}) } + ccInformer mcfginformersv1.ControllerConfigInformer + mcpInformer mcfginformersv1.MachineConfigPoolInformer + buildInformer buildinformersv1.BuildInformer + podInformer coreinformersv1.PodInformer + cmInformer coreinformersv1.ConfigMapInformer + machineOSBuildInformer mcfginformersv1alpha1.MachineOSBuildInformer + machineOSConfigInformer mcfginformersv1alpha1.MachineOSConfigInformer + toStart []interface{ Start(<-chan struct{}) } } // Starts the informers, wiring them up to the provided context. @@ -228,19 +234,26 @@ func newInformers(bcc *Clients) *informers { cmInformer := coreinformers.NewFilteredSharedInformerFactory(bcc.kubeclient, 0, ctrlcommon.MCONamespace, nil) buildInformer := buildinformers.NewSharedInformerFactoryWithOptions(bcc.buildclient, 0, buildinformers.WithNamespace(ctrlcommon.MCONamespace)) podInformer := coreinformers.NewSharedInformerFactoryWithOptions(bcc.kubeclient, 0, coreinformers.WithNamespace(ctrlcommon.MCONamespace)) + // this may not work, might need a new mcfg client and or a new informer pkg + machineOSBuildInformer := mcfginformers.NewSharedInformerFactory(bcc.mcfgclient, 0) + machineOSConfigInformer := mcfginformers.NewSharedInformerFactory(bcc.mcfgclient, 0) return &informers{ - ccInformer: ccInformer.Machineconfiguration().V1().ControllerConfigs(), - mcpInformer: mcpInformer.Machineconfiguration().V1().MachineConfigPools(), - cmInformer: cmInformer.Core().V1().ConfigMaps(), - buildInformer: buildInformer.Build().V1().Builds(), - podInformer: podInformer.Core().V1().Pods(), + ccInformer: ccInformer.Machineconfiguration().V1().ControllerConfigs(), + mcpInformer: mcpInformer.Machineconfiguration().V1().MachineConfigPools(), + cmInformer: cmInformer.Core().V1().ConfigMaps(), + buildInformer: buildInformer.Build().V1().Builds(), + podInformer: podInformer.Core().V1().Pods(), + machineOSBuildInformer: machineOSBuildInformer.Machineconfiguration().V1alpha1().MachineOSBuilds(), + machineOSConfigInformer: machineOSConfigInformer.Machineconfiguration().V1alpha1().MachineOSConfigs(), toStart: []interface{ Start(<-chan struct{}) }{ ccInformer, mcpInformer, buildInformer, cmInformer, podInformer, + machineOSBuildInformer, + machineOSConfigInformer, }, } } @@ -258,26 +271,35 @@ func newBuildController( informers: newInformers(clients), Clients: clients, eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "machineosbuilder-buildcontroller"}), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineosbuilder-buildcontroller"), + mosQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineosbuilder"), config: ctrlConfig, } - ctrl.mcpInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: ctrl.addMachineConfigPool, - UpdateFunc: ctrl.updateMachineConfigPool, - DeleteFunc: ctrl.deleteMachineConfigPool, - }) + ctrl.syncHandler = ctrl.syncMachineOSBuilder - ctrl.cmInformer.Informer().AddEventHandler(cache.ResourceEventHandlerDetailedFuncs{ - UpdateFunc: ctrl.updateConfigMap, + ctrl.ccLister = ctrl.ccInformer.Lister() + ctrl.mcpLister = ctrl.mcpInformer.Lister() + + ctrl.machineOSConfigLister = ctrl.machineOSConfigInformer.Lister() + ctrl.machineOSBuildLister = ctrl.machineOSBuildInformer.Lister() + + ctrl.machineOSBuildInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + UpdateFunc: ctrl.updateMachineOSBuild, + DeleteFunc: ctrl.deleteMachineOSBuild, }) - ctrl.syncHandler = ctrl.syncMachineConfigPool - ctrl.enqueueMachineConfigPool = ctrl.enqueueDefault + ctrl.machineOSConfigInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + UpdateFunc: ctrl.updateMachineOSConfig, + AddFunc: ctrl.addMachineOSConfig, + DeleteFunc: ctrl.deleteMachineOSConfig, + }) - ctrl.ccLister = ctrl.ccInformer.Lister() - ctrl.mcpLister = ctrl.mcpInformer.Lister() + ctrl.mcpInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + UpdateFunc: ctrl.updateMachineConfigPool, + }) + ctrl.machineOSConfigListerSynced = ctrl.machineOSConfigInformer.Informer().HasSynced + ctrl.machineOSBuildListerSynced = ctrl.machineOSBuildInformer.Informer().HasSynced ctrl.ccListerSynced = ctrl.ccInformer.Informer().HasSynced ctrl.mcpListerSynced = ctrl.mcpInformer.Informer().HasSynced @@ -295,17 +317,6 @@ func NewWithCustomPodBuilder( return ctrl } -// Creates a Build Controller instance with an OpenShift Image Builder -// implementation for the ImageBuilder. -func NewWithImageBuilder( - ctrlConfig BuildControllerConfig, - clients *Clients, -) *Controller { - ctrl := newBuildController(ctrlConfig, clients) - ctrl.imageBuilder = newImageBuildController(ctrlConfig, clients, ctrl.imageBuildUpdater) - return ctrl -} - // Run executes the render controller. // TODO: Make this use a context instead of a stop channel. func (ctrl *Controller) Run(parentCtx context.Context, workers int) { @@ -315,7 +326,7 @@ func (ctrl *Controller) Run(parentCtx context.Context, workers int) { // Not sure if I actually need a child context here or not. ctx, cancel := context.WithCancel(parentCtx) defer utilruntime.HandleCrash() - defer ctrl.queue.ShutDown() + defer ctrl.mosQueue.ShutDown() defer cancel() ctrl.informers.start(ctx) @@ -327,61 +338,44 @@ func (ctrl *Controller) Run(parentCtx context.Context, workers int) { go ctrl.imageBuilder.Run(ctx, workers) for i := 0; i < workers; i++ { - go wait.Until(ctrl.worker, time.Second, ctx.Done()) + go wait.Until(ctrl.mosWorker, time.Second, ctx.Done()) } <-ctx.Done() } -func (ctrl *Controller) enqueue(pool *mcfgv1.MachineConfigPool) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pool) +func (ctrl *Controller) enqueueMachineOSConfig(mosc *mcfgv1alpha1.MachineOSConfig) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(mosc) if err != nil { - utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", pool, err)) + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", mosc, err)) return } - - ctrl.queue.Add(key) + ctrl.mosQueue.Add(key) } -func (ctrl *Controller) enqueueRateLimited(pool *mcfgv1.MachineConfigPool) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pool) +func (ctrl *Controller) enqueueMachineOSBuild(mosb *mcfgv1alpha1.MachineOSBuild) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(mosb) if err != nil { - utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", pool, err)) + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", mosb, err)) return } - ctrl.queue.AddRateLimited(key) -} - -// enqueueAfter will enqueue a pool after the provided amount of time. -func (ctrl *Controller) enqueueAfter(pool *mcfgv1.MachineConfigPool, after time.Duration) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pool) - if err != nil { - utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", pool, err)) - return - } - - ctrl.queue.AddAfter(key, after) -} - -// enqueueDefault calls a default enqueue function -func (ctrl *Controller) enqueueDefault(pool *mcfgv1.MachineConfigPool) { - ctrl.enqueueAfter(pool, ctrl.config.UpdateDelay) + ctrl.mosQueue.Add(key) } // worker runs a worker thread that just dequeues items, processes them, and marks them done. // It enforces that the syncHandler is never invoked concurrently with the same key. -func (ctrl *Controller) worker() { - for ctrl.processNextWorkItem() { +func (ctrl *Controller) mosWorker() { + for ctrl.processNextMosWorkItem() { } } -func (ctrl *Controller) processNextWorkItem() bool { - key, quit := ctrl.queue.Get() +func (ctrl *Controller) processNextMosWorkItem() bool { + key, quit := ctrl.mosQueue.Get() if quit { return false } - defer ctrl.queue.Done(key) + defer ctrl.mosQueue.Done(key) err := ctrl.syncHandler(key.(string)) ctrl.handleErr(err, key) @@ -389,109 +383,60 @@ func (ctrl *Controller) processNextWorkItem() bool { return true } -// Checks for new Data in the on-cluster-build-config configmap -// if the imageBuilderType has changed, we need to restart the controller -func (ctrl *Controller) updateConfigMap(old, new interface{}) { - oldCM := old.(*corev1.ConfigMap).DeepCopy() - newCM := new.(*corev1.ConfigMap).DeepCopy() - if newCM.Name == OnClusterBuildConfigMapName && oldCM.Data[ImageBuilderTypeConfigMapKey] != newCM.Data[ImageBuilderTypeConfigMapKey] { - // restart ctrl and re-init - mcps, _ := ctrl.mcpLister.List(labels.Everything()) - impactedPools := []*mcfgv1.MachineConfigPool{} - for _, mcp := range mcps { - if ctrlcommon.IsLayeredPool(mcp) { - if running, _ := ctrl.imageBuilder.IsBuildRunning(mcp); running { - // building on this pool, we have changed img builder type. Need to stop build - impactedPools = append(impactedPools, mcp) - ps := newPoolState(mcp) - ctrl.imageBuilder.DeleteBuildObject(mcp) - ctrl.markBuildInterrupted(ps) - } - } - } - if ImageBuilderType(newCM.Data[ImageBuilderTypeConfigMapKey]) != OpenshiftImageBuilder && ImageBuilderType(newCM.Data[ImageBuilderTypeConfigMapKey]) != CustomPodImageBuilder { - ctrl.handleConfigMapError(impactedPools, &ErrInvalidImageBuilder{Message: "Invalid Image Builder Type found in configmap", InvalidType: newCM.Data[ImageBuilderTypeConfigMapKey]}, new) - os.Exit(255) - } - os.Exit(0) - } -} - -// Reconciles the MachineConfigPool state with the state of an OpenShift Image -// Builder object. -func (ctrl *Controller) imageBuildUpdater(build *buildv1.Build) error { - pool, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), build.Labels[targetMachineConfigPoolLabel], metav1.GetOptions{}) +// Reconciles the MachineConfigPool state with the state of a custom pod object. +func (ctrl *Controller) customBuildPodUpdater(pod *corev1.Pod) error { + pool, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), pod.Labels[targetMachineConfigPoolLabel], metav1.GetOptions{}) if err != nil { return err } - klog.Infof("Build (%s) is %s", build.Name, build.Status.Phase) - - objRef := toObjectRef(build) - - ps := newPoolState(pool) - - switch build.Status.Phase { - case buildv1.BuildPhaseNew, buildv1.BuildPhasePending: - if !ps.IsBuildPending() { - err = ctrl.markBuildPendingWithObjectRef(ps, *objRef) - } - case buildv1.BuildPhaseRunning: - // If we're running, then there's nothing to do right now. - if !ps.IsBuilding() { - err = ctrl.markBuildInProgress(ps) - } - case buildv1.BuildPhaseComplete: - // If we've succeeded, we need to update the pool to indicate that. - if !ps.IsBuildSuccess() { - err = ctrl.markBuildSucceeded(ps) - } - case buildv1.BuildPhaseFailed, buildv1.BuildPhaseError, buildv1.BuildPhaseCancelled: - // If we've failed, errored, or cancelled, we need to update the pool to indicate that. - if !ps.IsBuildFailure() { - err = ctrl.markBuildFailed(ps) - } - } + klog.V(4).Infof("Build pod (%s) is %s", pod.Name, pod.Status.Phase) + mosbs, err := ctrl.machineOSBuildLister.List(labels.Everything()) if err != nil { return err } - - ctrl.enqueueMachineConfigPool(pool) - return nil -} - -// Reconciles the MachineConfigPool state with the state of a custom pod object. -func (ctrl *Controller) customBuildPodUpdater(pod *corev1.Pod) error { - pool, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), pod.Labels[targetMachineConfigPoolLabel], metav1.GetOptions{}) + moscs, err := ctrl.machineOSConfigLister.List(labels.Everything()) if err != nil { return err } - klog.Infof("Build pod (%s) is %s", pod.Name, pod.Status.Phase) - - ps := newPoolState(pool) + var mosb *mcfgv1alpha1.MachineOSBuild + var mosc *mcfgv1alpha1.MachineOSConfig + for _, config := range moscs { + if config.Spec.MachineConfigPool.Name == pool.Name { + mosc = config + break + } + } + for _, build := range mosbs { + if build.Spec.MachineOSConfig.Name == mosc.Name { + mosb = build + break + } + } + mosbState := ctrlcommon.NewMachineOSBuildState(mosb) switch pod.Status.Phase { case corev1.PodPending: - if !ps.IsBuildPending() { + if !mosbState.IsBuildPending() { objRef := toObjectRef(pod) - err = ctrl.markBuildPendingWithObjectRef(ps, *objRef) + err = ctrl.markBuildPendingWithObjectRef(mosc, mosb, *objRef) } case corev1.PodRunning: // If we're running, then there's nothing to do right now. - if !ps.IsBuilding() { - err = ctrl.markBuildInProgress(ps) + if !mosbState.IsBuilding() { + err = ctrl.markBuildInProgress(mosb) } case corev1.PodSucceeded: // If we've succeeded, we need to update the pool to indicate that. - if !ps.IsBuildSuccess() { - err = ctrl.markBuildSucceeded(ps) + if !mosbState.IsBuildSuccess() { + err = ctrl.markBuildSucceeded(mosc, mosb) } case corev1.PodFailed: // If we've failed, we need to update the pool to indicate that. - if !ps.IsBuildFailure() { - err = ctrl.markBuildFailed(ps) + if !mosbState.IsBuildFailure() { + err = ctrl.markBuildFailed(mosc, mosb) } } @@ -499,248 +444,338 @@ func (ctrl *Controller) customBuildPodUpdater(pod *corev1.Pod) error { return err } - ctrl.enqueueMachineConfigPool(pool) + ctrl.enqueueMachineOSBuild(mosb) return nil } func (ctrl *Controller) handleConfigMapError(pools []*mcfgv1.MachineConfigPool, err error, key interface{}) { klog.V(2).Infof("Error syncing configmap %v: %v", key, err) utilruntime.HandleError(err) + // get mosb assoc. with pool for _, pool := range pools { klog.V(2).Infof("Dropping machineconfigpool %q out of the queue: %v", pool.Name, err) - ctrl.queue.Forget(pool.Name) - ctrl.queue.AddAfter(pool.Name, 1*time.Minute) + ctrl.mosQueue.Forget(pool.Name) + ctrl.mosQueue.AddAfter(pool.Name, 1*time.Minute) } } func (ctrl *Controller) handleErr(err error, key interface{}) { if err == nil { - ctrl.queue.Forget(key) + ctrl.mosQueue.Forget(key) return } - if ctrl.queue.NumRequeues(key) < ctrl.config.MaxRetries { - klog.V(2).Infof("Error syncing machineconfigpool %v: %v", key, err) - ctrl.queue.AddRateLimited(key) + if ctrl.mosQueue.NumRequeues(key) < ctrl.config.MaxRetries { + klog.V(2).Infof("Error syncing machineosbuild %v: %v", key, err) + ctrl.mosQueue.AddRateLimited(key) return } utilruntime.HandleError(err) - klog.V(2).Infof("Dropping machineconfigpool %q out of the queue: %v", key, err) - ctrl.queue.Forget(key) - ctrl.queue.AddAfter(key, 1*time.Minute) + klog.V(2).Infof("Dropping machineosbuild %q out of the queue: %v", key, err) + ctrl.mosQueue.Forget(key) + ctrl.mosQueue.AddAfter(key, 1*time.Minute) } -// syncMachineConfigPool will sync the machineconfig pool with the given key. -// This function is not meant to be invoked concurrently with the same key. -func (ctrl *Controller) syncMachineConfigPool(key string) error { - startTime := time.Now() - klog.V(4).Infof("Started syncing machineconfigpool %q (%v)", key, startTime) - defer func() { - klog.V(4).Infof("Finished syncing machineconfigpool %q (%v)", key, time.Since(startTime)) - }() +func (ctrl *Controller) syncMachineOSBuilder(key string) error { + // startTime := time.Now() + // klog.V(4).Infof("Started syncing build %q (%v)", key, startTime) + // defer func() { + // klog.V(4).Infof("Finished syncing machineOSBuilder %q (%v)", key, time.Since(startTime)) + // }() _, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { return err } - machineconfigpool, err := ctrl.mcpLister.Get(name) + isConfig := false + var machineOSConfig *mcfgv1alpha1.MachineOSConfig + machineOSBuild, err := ctrl.machineOSBuildLister.Get(name) if k8serrors.IsNotFound(err) { - klog.V(2).Infof("MachineConfigPool %v has been deleted", key) - return nil - } - if err != nil { - return err + // if this is not an existing build. This means our machineOsConfig changed + isConfig = true + machineOSConfig, err = ctrl.machineOSConfigLister.Get(name) + if k8serrors.IsNotFound(err) { + return nil + } } + if !isConfig { + for _, cond := range machineOSBuild.Status.Conditions { + if cond.Status == metav1.ConditionTrue { + switch mcfgv1alpha1.BuildProgress(cond.Type) { + case mcfgv1alpha1.MachineOSBuildPrepared: + klog.V(4).Infof("Build %s is build prepared and pending", name) + return nil + case mcfgv1alpha1.MachineOSBuilding: + klog.V(4).Infof("Build %s is building", name) + return nil + case mcfgv1alpha1.MachineOSBuildFailed: + klog.V(4).Infof("Build %s is failed", name) + return nil + case mcfgv1alpha1.MachineOSBuildInterrupted: + klog.V(4).Infof("Build %s is interrupted, requeueing", name) + ctrl.enqueueMachineOSBuild(machineOSBuild) + case mcfgv1alpha1.MachineOSBuildSucceeded: + klog.V(4).Infof("Build %s has successfully built", name) + return nil + default: + machineOSConfig, err := ctrl.machineOSConfigLister.Get(machineOSBuild.Spec.MachineOSConfig.Name) + if err != nil { + return err + } + doABuild, err := shouldWeDoABuild(ctrl.imageBuilder, machineOSConfig, machineOSBuild, machineOSBuild) + if err != nil { + return err + } + if doABuild { + ctrl.startBuildForMachineConfigPool(machineOSConfig, machineOSBuild) + } - // TODO: Doing a deep copy of this pool object from our cache and using it to - // determine our next course of action sometimes causes a race condition. I'm - // not sure if it's better to get a current copy from the API server or what. - // pool := machineconfigpool.DeepCopy() - pool, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), machineconfigpool.Name, metav1.GetOptions{}) - if err != nil { - return err + } + + } + } + } else { + // this is a config change or a config CREATION. We need to possibly make a mosb for this build. The updated config is handlded in the updateMachineOSConfig function + // if ctrl.imageBuilder. + var buildExists bool + var status *mcfgv1alpha1.MachineOSBuildStatus + machineOSBuild, buildExists = ctrl.doesMOSBExist(machineOSConfig) + if !buildExists { + machineOSBuild, status, err = ctrl.CreateBuildFromConfig(machineOSConfig) + if err != nil { + return err + } + machineOSBuild.Status = *status + if err := ctrl.startBuildForMachineConfigPool(machineOSConfig, machineOSBuild); err != nil { + ctrl.syncAvailableStatus(machineOSBuild) + return err + } + return nil + } } + return ctrl.syncAvailableStatus(machineOSBuild) +} - ps := newPoolState(pool) +func (ctrl *Controller) updateMachineConfigPool(old, cur interface{}) { + oldPool := old.(*mcfgv1.MachineConfigPool).DeepCopy() + curPool := cur.(*mcfgv1.MachineConfigPool).DeepCopy() + klog.V(4).Infof("Updating MachineConfigPool %s", oldPool.Name) - // Not a layered pool, so stop here. - if !ps.IsLayered() { - klog.V(4).Infof("MachineConfigPool %s is not opted-in for layering, ignoring", pool.Name) - return nil + moscOld, mosbOld, _ := ctrl.getConfigAndBuildForPool(oldPool) + moscNew, mosbNew, _ := ctrl.getConfigAndBuildForPool(curPool) + + doABuild, err := ctrlcommon.BuildDueToPoolChange(ctrl.imageBuilder, oldPool, curPool, moscNew, mosbNew) + if err != nil { + klog.Errorln(err) + ctrl.handleErr(err, curPool.Name) + return } + //reBuild := shouldWeRebuild(oldPool, curPool) + switch { - case ps.IsInterrupted(): - klog.V(4).Infof("MachineConfigPool %s is build interrupted, requeueing", pool.Name) - ctrl.enqueueMachineConfigPool(pool) - return nil - case ps.IsDegraded(): - klog.V(4).Infof("MachineConfigPool %s is degraded, requeueing", pool.Name) - ctrl.enqueueMachineConfigPool(pool) - return nil - case ps.IsRenderDegraded(): - klog.V(4).Infof("MachineConfigPool %s is render degraded, requeueing", pool.Name) - ctrl.enqueueMachineConfigPool(pool) - return nil - case ps.IsBuildPending(): - klog.V(4).Infof("MachineConfigPool %s is build pending", pool.Name) - return nil - case ps.IsBuilding(): - klog.V(4).Infof("MachineConfigPool %s is building", pool.Name) - return nil - case ps.IsBuildSuccess(): - klog.V(4).Infof("MachineConfigPool %s has successfully built", pool.Name) - return nil - default: - shouldBuild, err := shouldWeDoABuild(ctrl.imageBuilder, pool, pool) + // We've transitioned from a layered pool to a non-layered pool. + case ctrlcommon.IsLayeredPool(moscOld, mosbOld) && !ctrlcommon.IsLayeredPool(moscNew, mosbNew): + klog.V(4).Infof("MachineConfigPool %s has opted out of layering", curPool.Name) + if err := ctrl.finalizeOptOut(moscNew, mosbNew, curPool); err != nil { + klog.Errorln(err) + ctrl.handleErr(err, curPool.Name) + return + } + // We need to do a build. + case doABuild: + klog.V(4).Infof("MachineConfigPool %s has changed, requiring a build", curPool.Name) + var status *mcfgv1alpha1.MachineOSBuildStatus + mosbNew, status, err = ctrl.CreateBuildFromConfig(moscNew) if err != nil { - return fmt.Errorf("could not determine if a build is required for MachineConfigPool %q: %w", pool.Name, err) + klog.Errorln(err) + ctrl.handleErr(err, curPool.Name) + return } + mosbNew.Status = *status - if shouldBuild { - return ctrl.startBuildForMachineConfigPool(ps) + if err := ctrl.startBuildForMachineConfigPool(moscNew, mosbNew); err != nil { + ctrl.syncAvailableStatus(mosbNew) + klog.Errorln(err) + ctrl.handleErr(err, curPool.Name) + return } - - klog.V(4).Infof("Nothing to do for pool %q", pool.Name) + // // We need to restart build due to config change + // case reBuild: + // klog.V(4).Infof("MachineConfigPool %s requires a build restart", curPool.Name) + // if err := ctrl.restartBuildForMachineConfigPool(newPoolState(oldPool), newPoolState(curPool)); err != nil { + // klog.Errorln(err) + // ctrl.handleErr(err, curPool.Name) + // } + // Everything else. + default: + klog.V(4).Infof("MachineConfigPool %s up-to-date", curPool.Name) } - - // For everything else - return ctrl.syncAvailableStatus(pool) } -func (ctrl *Controller) markBuildInterrupted(ps *poolState) error { - klog.Errorf("Build interrupted for pool %s", ps.Name()) +func (ctrl *Controller) markBuildInterrupted(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) error { + klog.Errorf("Build %s interrupted for pool %s", mosb.Name, mosc.Spec.MachineConfigPool.Name) return retry.RetryOnConflict(retry.DefaultRetry, func() error { - mcp, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), ps.Name(), metav1.GetOptions{}) - if err != nil { - return err - } - ps := newPoolState(mcp) - ps.SetBuildConditions([]mcfgv1.MachineConfigPoolCondition{ + bs := ctrlcommon.NewMachineOSBuildState(mosb) + bs.SetBuildConditions([]metav1.Condition{ { - Type: mcfgv1.MachineConfigPoolBuildInterrupted, - Reason: "BuildInterrupted", - Status: corev1.ConditionTrue, + Type: string(mcfgv1alpha1.MachineOSBuildPrepared), + Status: metav1.ConditionFalse, + Reason: "Prepared", + Message: "Build Prepared and Pending", }, { - Type: mcfgv1.MachineConfigPoolBuildSuccess, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuilding), + Status: metav1.ConditionFalse, + Reason: "Running", + Message: "Image Build In Progress", }, { - Type: mcfgv1.MachineConfigPoolBuilding, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildFailed), + Status: metav1.ConditionFalse, + Reason: "Failed", + Message: "Build Failed", }, { - Type: mcfgv1.MachineConfigPoolBuildPending, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildInterrupted), + Status: metav1.ConditionTrue, + Reason: "Interrupted", + Message: "Build Interrupted", }, { - Type: mcfgv1.MachineConfigPoolDegraded, - Status: corev1.ConditionTrue, + Type: string(MachineOSBuildReady), + Status: metav1.ConditionFalse, + Reason: "Ready", + Message: "Build Ready", + }, + { + Type: string(mcfgv1alpha1.MachineOSBuildSucceeded), + Status: metav1.ConditionFalse, + Reason: "Ready", + Message: "Build Ready", }, }) - ps.pool.Spec.Configuration.Source = ps.pool.Spec.Configuration.Source[:len(ps.pool.Spec.Configuration.Source)-1] - - return ctrl.updatePoolAndSyncAvailableStatus(ps.MachineConfigPool()) + // update mosc status + return ctrl.syncAvailableStatus(bs.Build) }) } // Marks a given MachineConfigPool as a failed build. -func (ctrl *Controller) markBuildFailed(ps *poolState) error { - klog.Errorf("Build failed for pool %s", ps.Name()) +func (ctrl *Controller) markBuildFailed(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) error { + klog.Errorf("Build %s failed for pool %s", mosb.Name, mosc.Spec.MachineConfigPool.Name) return retry.RetryOnConflict(retry.DefaultRetry, func() error { - mcp, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), ps.Name(), metav1.GetOptions{}) - if err != nil { - return err - } - ps := newPoolState(mcp) - ps.SetBuildConditions([]mcfgv1.MachineConfigPoolCondition{ + bs := ctrlcommon.NewMachineOSBuildState(mosb) + bs.SetBuildConditions([]metav1.Condition{ { - Type: mcfgv1.MachineConfigPoolBuildInterrupted, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildPrepared), + Status: metav1.ConditionFalse, + Reason: "Prepared", + Message: "Build Prepared and Pending", }, { - Type: mcfgv1.MachineConfigPoolBuildFailed, - Reason: "BuildFailed", - Status: corev1.ConditionTrue, + Type: string(mcfgv1alpha1.MachineOSBuilding), + Status: metav1.ConditionFalse, + Reason: "Building", + Message: "Image Build In Progress", }, { - Type: mcfgv1.MachineConfigPoolBuildSuccess, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildFailed), + Status: metav1.ConditionTrue, + Reason: "Failed", + Message: "Build Failed", }, { - Type: mcfgv1.MachineConfigPoolBuilding, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildInterrupted), + Status: metav1.ConditionFalse, + Reason: "Interrupted", + Message: "Build Interrupted", }, { - Type: mcfgv1.MachineConfigPoolBuildPending, - Status: corev1.ConditionFalse, + Type: string(MachineOSBuildReady), + Status: metav1.ConditionFalse, + Reason: "Ready", + Message: "Build Ready", }, { - Type: mcfgv1.MachineConfigPoolDegraded, - Status: corev1.ConditionTrue, + Type: string(mcfgv1alpha1.MachineOSBuildSucceeded), + Status: metav1.ConditionFalse, + Reason: "Ready", + Message: "Build Ready", }, }) - return ctrl.syncFailingStatus(ps.MachineConfigPool(), fmt.Errorf("build failed")) + return ctrl.syncFailingStatus(mosc, bs.Build, fmt.Errorf("BuildFailed")) }) + } // Marks a given MachineConfigPool as the build is in progress. -func (ctrl *Controller) markBuildInProgress(ps *poolState) error { - klog.Infof("Build in progress for MachineConfigPool %s, config %s", ps.Name(), ps.CurrentMachineConfig()) +func (ctrl *Controller) markBuildInProgress(mosb *mcfgv1alpha1.MachineOSBuild) error { + klog.V(4).Infof("Build %s in progress for config %s", mosb.Name, mosb.Spec.DesiredConfig.Name) return retry.RetryOnConflict(retry.DefaultRetry, func() error { - mcp, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), ps.Name(), metav1.GetOptions{}) - if err != nil { - return err - } - ps := newPoolState(mcp) - ps.SetBuildConditions([]mcfgv1.MachineConfigPoolCondition{ + bs := ctrlcommon.NewMachineOSBuildState(mosb) + + bs.SetBuildConditions([]metav1.Condition{ + { + Type: string(mcfgv1alpha1.MachineOSBuildPrepared), + Status: metav1.ConditionFalse, + Reason: "Prepared", + Message: "Build Prepared and Pending", + }, { - Type: mcfgv1.MachineConfigPoolBuildInterrupted, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuilding), + Status: metav1.ConditionTrue, + Reason: "Building", + Message: "Image Build In Progress", }, { - Type: mcfgv1.MachineConfigPoolBuildFailed, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildFailed), + Status: metav1.ConditionFalse, + Reason: "Failed", + Message: "Build Failed", }, { - Type: mcfgv1.MachineConfigPoolBuildSuccess, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildInterrupted), + Status: metav1.ConditionFalse, + Reason: "Interrupted", + Message: "Build Interrupted", }, { - Type: mcfgv1.MachineConfigPoolBuilding, - Reason: "BuildRunning", - Status: corev1.ConditionTrue, + Type: string(MachineOSBuildReady), + Status: metav1.ConditionFalse, + Reason: "Ready", + Message: "Build Ready", }, { - Type: mcfgv1.MachineConfigPoolBuildPending, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildSucceeded), + Status: metav1.ConditionFalse, + Reason: "Ready", + Message: "Build Ready", }, }) - return ctrl.syncAvailableStatus(ps.MachineConfigPool()) + return ctrl.syncAvailableStatus(mosb) }) } // Deletes the ephemeral objects we created to perform this specific build. -func (ctrl *Controller) postBuildCleanup(pool *mcfgv1.MachineConfigPool, ignoreMissing bool) error { +func (ctrl *Controller) postBuildCleanup(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild, ignoreMissing bool) error { // Delete the actual build object itself. deleteBuildObject := func() error { - err := ctrl.imageBuilder.DeleteBuildObject(pool) + err := ctrl.imageBuilder.DeleteBuildObject(mosb, mosc) if err == nil { - klog.Infof("Deleted build object %s", newImageBuildRequest(pool).getBuildName()) + klog.Infof("Deleted build object %s", newImageBuildRequest(mosc, mosb).getBuildName()) } return err @@ -748,7 +783,7 @@ func (ctrl *Controller) postBuildCleanup(pool *mcfgv1.MachineConfigPool, ignoreM // Delete the ConfigMap containing the MachineConfig. deleteMCConfigMap := func() error { - ibr := newImageBuildRequest(pool) + ibr := newImageBuildRequest(mosc, mosb) err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Delete(context.TODO(), ibr.getMCConfigMapName(), metav1.DeleteOptions{}) @@ -761,7 +796,7 @@ func (ctrl *Controller) postBuildCleanup(pool *mcfgv1.MachineConfigPool, ignoreM // Delete the ConfigMap containing the rendered Dockerfile. deleteDockerfileConfigMap := func() error { - ibr := newImageBuildRequest(pool) + ibr := newImageBuildRequest(mosc, mosb) err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Delete(context.TODO(), ibr.getDockerfileConfigMapName(), metav1.DeleteOptions{}) @@ -791,183 +826,207 @@ func (ctrl *Controller) postBuildCleanup(pool *mcfgv1.MachineConfigPool, ignoreM ) } -// Marks a given MachineConfigPool as build successful and cleans up after itself. -func (ctrl *Controller) markBuildSucceeded(ps *poolState) error { - klog.Infof("Build succeeded for MachineConfigPool %s, config %s", ps.Name(), ps.CurrentMachineConfig()) - - pool := ps.MachineConfigPool() - - // Get the final image pullspec. - imagePullspec, err := ctrl.imageBuilder.FinalPullspec(pool) - if err != nil { - return fmt.Errorf("could not get final image pullspec for pool %s: %w", ps.Name(), err) - } - - if imagePullspec == "" { - return fmt.Errorf("image pullspec empty for pool %s", ps.Name()) +// If one wants to opt out, this removes all of the statuses and object +// references from a given MachineConfigPool. +func (ctrl *Controller) finalizeOptOut(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild, pool *mcfgv1.MachineConfigPool) error { + if err := ctrl.postBuildCleanup(mosc, mosb, true); err != nil { + return err } + return nil +} - // Perform the post-build cleanup. - if err := ctrl.postBuildCleanup(pool, false); err != nil { - return fmt.Errorf("could not do post-build cleanup: %w", err) - } +// Marks a given MachineConfigPool as build successful and cleans up after itself. +func (ctrl *Controller) markBuildSucceeded(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) error { + klog.V(4).Infof("Build %s succeeded for MachineConfigPool %s, config %s", mosb.Name, mosc.Spec.MachineConfigPool.Name, mosb.Spec.DesiredConfig.Name) - // Perform the MachineConfigPool update. return retry.RetryOnConflict(retry.DefaultRetry, func() error { - mcp, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), ps.Name(), metav1.GetOptions{}) + // REPLACE FINAL PULLSPEC WITH SHA HERE USING ctrl.imagebuilder.FinalPullspec + ibr := newImageBuildRequest(mosc, mosb) + digestConfigMap, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), ibr.getDigestConfigMapName(), metav1.GetOptions{}) + if err != nil { + return err + } + + sha, err := ParseImagePullspec(mosb.Spec.RenderedImagePushspec, digestConfigMap.Data["digest"]) if err != nil { return err } - ps := newPoolState(mcp) + // now, all we need is to make sure this is used all around. (node controller, getters, etc) + mosc.Status.CurrentImagePullspec = sha + mosb.Status.FinalImagePushspec = sha - // Set the annotation or field to point to the newly-built container image. - klog.V(4).Infof("Setting new image pullspec for %s to %s", ps.Name(), imagePullspec) - ps.SetImagePullspec(imagePullspec) + if err := ctrl.postBuildCleanup(mosc, mosb, false); err != nil { + return fmt.Errorf("could not do post-build cleanup: %w", err) + } - // Remove the build object reference from the MachineConfigPool since we're - // not using it anymore. - ps.DeleteBuildRefForCurrentMachineConfig() + bs := ctrlcommon.NewMachineOSBuildState(mosb) - // Adjust the MachineConfigPool status to indicate success. - ps.SetBuildConditions([]mcfgv1.MachineConfigPoolCondition{ + bs.SetBuildConditions([]metav1.Condition{ { - Type: mcfgv1.MachineConfigPoolBuildFailed, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildPrepared), + Status: metav1.ConditionFalse, + Reason: "Prepared", + Message: "Build Prepared and Pending", }, { - Type: mcfgv1.MachineConfigPoolBuildSuccess, - Reason: "BuildSucceeded", - Status: corev1.ConditionTrue, + Type: string(mcfgv1alpha1.MachineOSBuilding), + Status: metav1.ConditionFalse, + Reason: "Building", + Message: "Image Build In Progress", }, { - Type: mcfgv1.MachineConfigPoolBuilding, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildFailed), + Status: metav1.ConditionFalse, + Reason: "Failed", + Message: "Build Failed", }, { - Type: mcfgv1.MachineConfigPoolDegraded, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildInterrupted), + Status: metav1.ConditionFalse, + Reason: "Interrupted", + Message: "Build Interrupted", + }, + { + Type: string(MachineOSBuildReady), + Status: metav1.ConditionTrue, + Reason: "Ready", + Message: "Build Ready", + }, + { + Type: string(mcfgv1alpha1.MachineOSBuildSucceeded), + Status: metav1.ConditionTrue, + Reason: "Ready", + Message: "Build Ready", }, }) - return ctrl.updatePoolAndSyncAvailableStatus(ps.MachineConfigPool()) + return ctrl.updateConfigAndBuild(mosc, bs.Build) }) } // Marks a given MachineConfigPool as build pending. -func (ctrl *Controller) markBuildPendingWithObjectRef(ps *poolState, objRef corev1.ObjectReference) error { - return retry.RetryOnConflict(retry.DefaultRetry, func() error { - mcp, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), ps.Name(), metav1.GetOptions{}) - if err != nil { - return err - } +func (ctrl *Controller) markBuildPendingWithObjectRef(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild, objRef corev1.ObjectReference) error { + klog.V(4).Infof("Build %s for pool %s marked pending with object reference %v", mosb.Name, mosc.Spec.MachineConfigPool.Name, objRef) - ps := newPoolState(mcp) - - klog.Infof("Build for %s marked pending with object reference %v", ps.Name(), objRef) + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + bs := ctrlcommon.NewMachineOSBuildState(mosb) - ps.SetBuildConditions([]mcfgv1.MachineConfigPoolCondition{ + bs.SetBuildConditions([]metav1.Condition{ { - Type: mcfgv1.MachineConfigPoolBuildInterrupted, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildPrepared), + Status: metav1.ConditionTrue, + Reason: "Prepared", + Message: "Build Prepared and Pending", }, { - Type: mcfgv1.MachineConfigPoolBuildFailed, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuilding), + Status: metav1.ConditionFalse, + Reason: "Building", + Message: "Image Build In Progress", }, { - Type: mcfgv1.MachineConfigPoolBuildSuccess, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildFailed), + Status: metav1.ConditionFalse, + Reason: "Failed", + Message: "Build Failed", }, { - Type: mcfgv1.MachineConfigPoolBuilding, - Status: corev1.ConditionFalse, + Type: string(mcfgv1alpha1.MachineOSBuildInterrupted), + Status: metav1.ConditionFalse, + Reason: "Interrupted", + Message: "Build Interrupted", }, { - Type: mcfgv1.MachineConfigPoolBuildPending, - Reason: "BuildPending", - Status: corev1.ConditionTrue, + Type: string(MachineOSBuildReady), + Status: metav1.ConditionFalse, + Reason: "Ready", + Message: "Build Ready", + }, + { + Type: string(mcfgv1alpha1.MachineOSBuildSucceeded), + Status: metav1.ConditionFalse, + Reason: "Ready", + Message: "Build Ready", }, }) - // If the MachineConfigPool has the build object reference, we just want to - // update the MachineConfigPool's status. - if ps.HasBuildObjectRef(objRef) { - return ctrl.syncAvailableStatus(ps.MachineConfigPool()) + mcp, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), mosc.Spec.MachineConfigPool.Name, metav1.GetOptions{}) + if err != nil { + return err } - // If we added the build object reference, we need to update both the - // MachineConfigPool itself and its status. - if err := ps.AddBuildObjectRef(objRef); err != nil { - return err + mcp.Spec.Configuration.Source = append(mcp.Spec.Configuration.Source, objRef) + ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Update(context.TODO(), mcp, metav1.UpdateOptions{}) + // add obj ref to mosc + + if bs.Build.Status.BuilderReference == nil { + mosb.Status.BuilderReference = &mcfgv1alpha1.MachineOSBuilderReference{ImageBuilderType: mosc.Spec.BuildInputs.ImageBuilder.ImageBuilderType, PodImageBuilder: &mcfgv1alpha1.ObjectReference{ + Name: objRef.Name, + Group: objRef.GroupVersionKind().Group, + Namespace: objRef.Namespace, + Resource: objRef.ResourceVersion, + }} } + return ctrl.syncAvailableStatus(bs.Build) - return ctrl.updatePoolAndSyncAvailableStatus(ps.MachineConfigPool()) }) } -func (ctrl *Controller) updatePoolAndSyncAvailableStatus(pool *mcfgv1.MachineConfigPool) error { - // We need to do an API server round-trip to ensure all of our mutations get - // propagated. - updatedPool, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Update(context.TODO(), pool, metav1.UpdateOptions{}) +func (ctrl *Controller) updateConfigSpec(mosc *mcfgv1alpha1.MachineOSConfig) error { + _, err := ctrl.mcfgclient.MachineconfigurationV1alpha1().MachineOSConfigs().Update(context.TODO(), mosc, metav1.UpdateOptions{}) if err != nil { - return fmt.Errorf("could not update MachineConfigPool %q: %w", pool.Name, err) + return fmt.Errorf("could not update MachineOSConfig %q: %w", mosc.Name, err) } - - updatedPool.Status = pool.Status - - return ctrl.syncAvailableStatus(updatedPool) + return nil } +func (ctrl *Controller) updateConfigAndBuild(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) error { + _, err := ctrl.mcfgclient.MachineconfigurationV1alpha1().MachineOSConfigs().UpdateStatus(context.TODO(), mosc, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("could not update MachineOSConfig%q: %w", mosc.Name, err) + } + newMosb, err := ctrl.mcfgclient.MachineconfigurationV1alpha1().MachineOSBuilds().Update(context.TODO(), mosb, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("could not update MachineOSBuild %q: %w", mosb.Name, err) + } -// Machine Config Pools + newMosb.Status = mosb.Status -func (ctrl *Controller) addMachineConfigPool(obj interface{}) { - pool := obj.(*mcfgv1.MachineConfigPool).DeepCopy() - klog.V(4).Infof("Adding MachineConfigPool %s", pool.Name) - ctrl.enqueueMachineConfigPool(pool) + return ctrl.syncAvailableStatus(newMosb) } -func (ctrl *Controller) getBuildInputs(ps *poolState) (*buildInputs, error) { +// Prepares all of the objects needed to perform an image build. +func (ctrl *Controller) prepareForBuild(mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig) (ImageBuildRequest, error) { + ibr := newImageBuildRequestFromBuildInputs(mosb, mosc) + + // populate the "optional" fields, if the user did not specify them osImageURL, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), machineConfigOSImageURLConfigMapName, metav1.GetOptions{}) if err != nil { - return nil, fmt.Errorf("could not get OS image URL: %w", err) + return ibr, fmt.Errorf("could not get OS image URL: %w", err) } + moscNew := mosc.DeepCopy() - onClusterBuildConfig, err := ctrl.getOnClusterBuildConfig(ps) - if err != nil { - return nil, fmt.Errorf("could not get configmap %q: %w", OnClusterBuildConfigMapName, err) + url := newExtensionsImageInfo(osImageURL, mosc) + if len(moscNew.Spec.BuildInputs.BaseOSExtensionsImagePullspec) == 0 { + moscNew.Spec.BuildInputs.BaseOSExtensionsImagePullspec = url.Pullspec } - - customDockerfiles, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), customDockerfileConfigMapName, metav1.GetOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - return nil, fmt.Errorf("could not retrieve %s ConfigMap: %w", customDockerfileConfigMapName, err) + url = newBaseImageInfo(osImageURL, mosc) + if len(moscNew.Spec.BuildInputs.BaseOSImagePullspec) == 0 { + moscNew.Spec.BuildInputs.BaseOSImagePullspec = url.Pullspec + moscNew.Spec.BuildInputs.ReleaseVersion = osImageURL.Data[releaseVersionConfigKey] } - currentMC := ps.CurrentMachineConfig() + // make sure to get these new settings + ibr.MachineOSConfig = moscNew - mc, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigs().Get(context.TODO(), currentMC, metav1.GetOptions{}) + mc, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigs().Get(context.TODO(), mosb.Spec.DesiredConfig.Name, metav1.GetOptions{}) if err != nil { - return nil, fmt.Errorf("could not get MachineConfig %s: %w", currentMC, err) - } - - inputs := &buildInputs{ - onClusterBuildConfig: onClusterBuildConfig, - osImageURL: osImageURL, - customDockerfiles: customDockerfiles, - pool: ps.MachineConfigPool(), - machineConfig: mc, + return ibr, err } - return inputs, nil -} - -// Prepares all of the objects needed to perform an image build. -func (ctrl *Controller) prepareForBuild(inputs *buildInputs) (ImageBuildRequest, error) { - ibr := newImageBuildRequestFromBuildInputs(inputs) - - mcConfigMap, err := ibr.toConfigMap(inputs.machineConfig) + mcConfigMap, err := ibr.toConfigMap(mc) // ?????? if err != nil { - return ImageBuildRequest{}, fmt.Errorf("could not convert MachineConfig %s into ConfigMap: %w", inputs.machineConfig.Name, err) + return ImageBuildRequest{}, fmt.Errorf("could not convert MachineConfig %s into ConfigMap: %w", mosb.Spec.DesiredConfig.Name, err) // ???? } _, err = ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Create(context.TODO(), mcConfigMap, metav1.CreateOptions{}) @@ -975,7 +1034,7 @@ func (ctrl *Controller) prepareForBuild(inputs *buildInputs) (ImageBuildRequest, return ImageBuildRequest{}, fmt.Errorf("could not load rendered MachineConfig %s into configmap: %w", mcConfigMap.Name, err) } - klog.Infof("Stored MachineConfig %s in ConfigMap %s for build", inputs.machineConfig.Name, mcConfigMap.Name) + klog.Infof("Stored MachineConfig %s in ConfigMap %s for build", mosb.Spec.DesiredConfig.Name, mcConfigMap.Name) dockerfileConfigMap, err := ibr.dockerfileToConfigMap() if err != nil { @@ -995,118 +1054,80 @@ func (ctrl *Controller) prepareForBuild(inputs *buildInputs) (ImageBuildRequest, // Determines if we should run a build, then starts a build pod to perform the // build, and updates the MachineConfigPool with an object reference for the // build pod. -func (ctrl *Controller) startBuildForMachineConfigPool(ps *poolState) error { - - if ctrlcommon.DoARebuild(ps.pool) { - // delete FAILED build attempts and builds - // delete rendered containerfile, MC, configmaps etc. - // Delete the actual build object itself. - // if we are rebuilding, we cannot ignore DNE. All of these objects should exist. - err := ctrl.postBuildCleanup(ps.pool, false) - if err != nil { - return fmt.Errorf("Could not update pool when triggering a rebuild: %v", err) - } - // remove annotation - delete(ps.pool.Labels, ctrlcommon.RebuildPoolLabel) - err = ctrl.updatePoolAndSyncAvailableStatus(ps.MachineConfigPool()) - if err != nil { - return fmt.Errorf("Could not update pool when triggering a rebuild: %v", err) - } +func (ctrl *Controller) startBuildForMachineConfigPool(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) error { - } - inputs, err := ctrl.getBuildInputs(ps) - if err != nil { - return fmt.Errorf("could not fetch build inputs: %w", err) - } + // we need to add osImageURL to mosbuild, will reduce api calls to configmaps + // ocb config will live in th mosb + // pool will live in the mosb + // mc we can get based off the pool specified in the mosb.... though, given how we could use this in two places - ibr, err := ctrl.prepareForBuild(inputs) + ourConfig, err := ctrl.machineOSConfigLister.Get(mosb.Spec.MachineOSConfig.Name) if err != nil { - return fmt.Errorf("could not start build for MachineConfigPool %s: %w", ps.Name(), err) + return err } - - objRef, err := ctrl.imageBuilder.StartBuild(ibr) - + // Replace the user-supplied tag (if present) with the name of the + // rendered MachineConfig for uniqueness. This will also allow us to + // eventually do a pre-build registry query to determine if we need to + // perform a build. + named, err := reference.ParseNamed(ourConfig.Spec.BuildInputs.RenderedImagePushspec) if err != nil { return err } - return ctrl.markBuildPendingWithObjectRef(ps, *objRef) -} - -// Gets the ConfigMap which specifies the name of the base image pull secret, final image pull secret, and final image pullspec. -func (ctrl *Controller) getOnClusterBuildConfig(ps *poolState) (*corev1.ConfigMap, error) { - onClusterBuildConfigMap, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), OnClusterBuildConfigMapName, metav1.GetOptions{}) + tagged, err := reference.WithTag(named, mosb.Spec.DesiredConfig.Name) if err != nil { - return nil, fmt.Errorf("could not get build controller config %q: %w", OnClusterBuildConfigMapName, err) - } - - requiredKeys := []string{ - BaseImagePullSecretNameConfigKey, - FinalImagePushSecretNameConfigKey, - FinalImagePullspecConfigKey, + return fmt.Errorf("could not add tag %s to image pullspec %s: %w", mosb.Spec.DesiredConfig.Name, ourConfig.Spec.BuildInputs.RenderedImagePushspec, err) } - needToUpdateConfigMap := false - finalImagePullspecWithTag := "" - - currentMC := ps.CurrentMachineConfig() + ourConfig.Status.CurrentImagePullspec = tagged.String() - for _, key := range requiredKeys { - val, ok := onClusterBuildConfigMap.Data[key] - if !ok { - return nil, fmt.Errorf("missing required key %q in configmap %s", key, OnClusterBuildConfigMapName) + secrets := make(map[string]string) + secrets["push"] = ourConfig.Spec.BuildInputs.RenderedImagePushSecret.Name + secrets["pull"] = ourConfig.Spec.BuildInputs.BaseImagePullSecret.Name + updateMOSC := false + for key, s := range secrets { + if s == "" { + continue } - - if key == BaseImagePullSecretNameConfigKey || key == FinalImagePushSecretNameConfigKey { - secret, err := ctrl.validatePullSecret(val) - if err != nil { - return nil, err - } - - if strings.Contains(secret.Name, "canonical") { - klog.Infof("Updating build controller config %s to indicate we have a canonicalized secret %s", OnClusterBuildConfigMapName, secret.Name) - onClusterBuildConfigMap.Data[key] = secret.Name - needToUpdateConfigMap = true - } + newS, err := ctrl.validatePullSecret(s) + if err != nil { + return err } - if key == FinalImagePullspecConfigKey { - // Replace the user-supplied tag (if present) with the name of the - // rendered MachineConfig for uniqueness. This will also allow us to - // eventually do a pre-build registry query to determine if we need to - // perform a build. - named, err := reference.ParseNamed(val) - if err != nil { - return nil, fmt.Errorf("could not parse %s with %q: %w", key, val, err) - } - - tagged, err := reference.WithTag(named, currentMC) - if err != nil { - return nil, fmt.Errorf("could not add tag %s to image pullspec %s: %w", currentMC, val, err) + if strings.Contains(newS.Name, "canonical") { + updateMOSC = true + klog.Infof("Updating build controller config to indicate we have a canonicalized secret %s", newS.Name) + switch key { + case "push": + ourConfig.Spec.BuildInputs.RenderedImagePushSecret.Name = newS.Name + case "pull": + ourConfig.Spec.BuildInputs.BaseImagePullSecret.Name = newS.Name } - - finalImagePullspecWithTag = tagged.String() } } - // If we had to canonicalize a secret, that means the ConfigMap no longer - // points to the expected secret. So let's update the ConfigMap in the API - // server for the sake of consistency. - if needToUpdateConfigMap { - klog.Infof("Updating build controller config") - // TODO: Figure out why this causes failures with resourceVersions. - onClusterBuildConfigMap, err = ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Update(context.TODO(), onClusterBuildConfigMap, metav1.UpdateOptions{}) - if err != nil { - return nil, fmt.Errorf("could not update configmap %q: %w", OnClusterBuildConfigMapName, err) - } + // ok + // we need to 1) replace tag + + ibr, err := ctrl.prepareForBuild(mosb, ourConfig) + if err != nil { + return fmt.Errorf("could not start build for MachineConfigPool %s: %w", ourConfig.Spec.MachineConfigPool.Name, err) } - // We don't want to write this back to the API server since it's only useful - // for this specific build. TODO: Migrate this to the ImageBuildRequest - // object so that it's generated on-demand instead. - onClusterBuildConfigMap.Data[FinalImagePullspecConfigKey] = finalImagePullspecWithTag + objRef, err := ctrl.imageBuilder.StartBuild(ibr) + + if err != nil { + return err + } - return onClusterBuildConfigMap, err + err = ctrl.markBuildPendingWithObjectRef(mosc, mosb, *objRef) + if err != nil { + return err + } + if updateMOSC { + return ctrl.updateConfigSpec(ourConfig) + } + return nil } // Ensure that the supplied pull secret exists, is in the correct format, etc. @@ -1174,206 +1195,245 @@ func (ctrl *Controller) handleCanonicalizedPullSecret(secret *corev1.Secret) (*c return out, nil } -// If one wants to opt out, this removes all of the statuses and object -// references from a given MachineConfigPool. -func (ctrl *Controller) finalizeOptOut(ps *poolState) error { - if err := ctrl.postBuildCleanup(ps.MachineConfigPool(), true); err != nil { - return err - } - - return retry.RetryOnConflict(retry.DefaultRetry, func() error { - mcp, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), ps.MachineConfigPool().Name, metav1.GetOptions{}) - if err != nil { - return err - } - - ps := newPoolState(mcp) - ps.DeleteBuildRefForCurrentMachineConfig() - ps.ClearImagePullspec() - ps.ClearAllBuildConditions() +func (ctrl *Controller) addMachineOSConfig(cur interface{}) { + m := cur.(*mcfgv1alpha1.MachineOSConfig).DeepCopy() + ctrl.enqueueMachineOSConfig(m) + klog.V(4).Infof("Adding MachineOSConfig %s", m.Name) - return ctrl.updatePoolAndSyncAvailableStatus(ps.MachineConfigPool()) - }) } -// Fires whenever a MachineConfigPool is updated. -func (ctrl *Controller) updateMachineConfigPool(old, cur interface{}) { - oldPool := old.(*mcfgv1.MachineConfigPool).DeepCopy() - curPool := cur.(*mcfgv1.MachineConfigPool).DeepCopy() - - klog.V(4).Infof("Updating MachineConfigPool %s", oldPool.Name) +func (ctrl *Controller) updateMachineOSConfig(old, cur interface{}) { + oldMOSC := old.(*mcfgv1alpha1.MachineOSConfig).DeepCopy() + curMOSC := cur.(*mcfgv1alpha1.MachineOSConfig).DeepCopy() - doABuild, err := shouldWeDoABuild(ctrl.imageBuilder, oldPool, curPool) - if err != nil { - klog.Errorln(err) - ctrl.handleErr(err, curPool.Name) + if equality.Semantic.DeepEqual(oldMOSC.Spec.BuildInputs, curMOSC.Spec.BuildInputs) { + // we do not want to trigger an update func just for MOSC status, we dont act on the status return } - switch { - // We've transitioned from a layered pool to a non-layered pool. - case ctrlcommon.IsLayeredPool(oldPool) && !ctrlcommon.IsLayeredPool(curPool): - klog.V(4).Infof("MachineConfigPool %s has opted out of layering", curPool.Name) - if err := ctrl.finalizeOptOut(newPoolState(curPool)); err != nil { - klog.Errorln(err) - ctrl.handleErr(err, curPool.Name) + klog.Infof("Updating MachineOSConfig %s", oldMOSC.Name) + + doABuild := configChangeCauseBuild(oldMOSC, curMOSC) + if doABuild { + build, exists := ctrl.doesMOSBExist(curMOSC) + if exists { + ctrl.startBuildForMachineConfigPool(curMOSC, build) // ? + } + // if the mosb does not exist, lets just enqueue the mosc and let the sync handler take care of the new object creation + } + ctrl.enqueueMachineOSConfig(curMOSC) +} + +func (ctrl *Controller) deleteMachineOSConfig(cur interface{}) { + m, ok := cur.(*mcfgv1alpha1.MachineOSConfig) + // first, we need to stop and delete any existing builds. + mosb, err := ctrl.machineOSBuildLister.Get(fmt.Sprintf("%s-builder", m.Spec.MachineConfigPool.Name)) + if err == nil { + if running, _ := ctrl.imageBuilder.IsBuildRunning(mosb, m); running { + // we need to stop the build. + ctrl.imageBuilder.DeleteBuildObject(mosb, m) + ctrl.markBuildInterrupted(m, mosb) + } + ctrl.mcfgclient.MachineconfigurationV1alpha1().MachineOSBuilds().Delete(context.TODO(), mosb.Name, metav1.DeleteOptions{}) + } + if !ok { + tombstone, ok := cur.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", cur)) return } - // We need to do a build. - case doABuild || (ctrlcommon.IsLayeredPool(curPool) && ctrlcommon.DoARebuild(curPool)): - klog.V(4).Infof("MachineConfigPool %s has changed, requiring a build", curPool.Name) - if err := ctrl.startBuildForMachineConfigPool(newPoolState(curPool)); err != nil { - klog.Errorln(err) - ctrl.handleErr(err, curPool.Name) + m, ok = tombstone.Obj.(*mcfgv1alpha1.MachineOSConfig) + if !ok { + utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a MachineOSConfig %#v", cur)) return } - // Everything else. - default: - klog.V(4).Infof("MachineConfigPool %s up-to-date", curPool.Name) + } + klog.V(4).Infof("Deleting MachineOSBuild %s", m.Name) +} + +func (ctrl *Controller) updateMachineOSBuild(old, cur interface{}) { + oldMOSB := old.(*mcfgv1alpha1.MachineOSBuild).DeepCopy() + curMOSB := cur.(*mcfgv1alpha1.MachineOSBuild).DeepCopy() + + if equality.Semantic.DeepEqual(oldMOSB.Status, oldMOSB.Status) { + // we do not want to trigger an update func just for MOSB spec, we dont act on the spec + return + } + + klog.Infof("Updating MachineOSBuild %s", oldMOSB.Name) + ourConfig, err := ctrl.machineOSConfigLister.Get(curMOSB.Spec.MachineOSConfig.Name) + if err != nil { + return } - ctrl.enqueueMachineConfigPool(curPool) + doABuild, err := shouldWeDoABuild(ctrl.imageBuilder, ourConfig, oldMOSB, curMOSB) + if err != nil { + return + } + if doABuild { + ctrl.startBuildForMachineConfigPool(ourConfig, curMOSB) + } + ctrl.enqueueMachineOSBuild(curMOSB) } -// Fires whenever a MachineConfigPool is deleted. TODO: Wire up checks for -// deleting any in-progress builds. -func (ctrl *Controller) deleteMachineConfigPool(obj interface{}) { - pool, ok := obj.(*mcfgv1.MachineConfigPool) +func (ctrl *Controller) deleteMachineOSBuild(mosb interface{}) { + m, ok := mosb.(*mcfgv1alpha1.MachineOSBuild) if !ok { - tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + tombstone, ok := mosb.(cache.DeletedFinalStateUnknown) if !ok { - utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj)) + utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", mosb)) return } - pool, ok = tombstone.Obj.(*mcfgv1.MachineConfigPool) + m, ok = tombstone.Obj.(*mcfgv1alpha1.MachineOSBuild) if !ok { - utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a MachineConfigPool %#v", obj)) + utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a MachineOSBuild %#v", mosb)) return } } - klog.V(4).Infof("Deleting MachineConfigPool %s", pool.Name) + klog.V(4).Infof("Deleting MachineOSBuild %s", m.Name) } -func (ctrl *Controller) syncAvailableStatus(pool *mcfgv1.MachineConfigPool) error { +func (ctrl *Controller) syncAvailableStatus(mosb *mcfgv1alpha1.MachineOSBuild) error { // I'm not sure what the consequences are of not doing this. //nolint:gocritic // Leaving this here for review purposes. - /* - if apihelpers.IsMachineConfigPoolConditionFalse(pool.Status.Conditions, mcfgv1.MachineConfigPoolRenderDegraded) { - return nil - } - */ - sdegraded := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolRenderDegraded, corev1.ConditionFalse, "", "") - apihelpers.SetMachineConfigPoolCondition(&pool.Status, *sdegraded) - if _, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), pool, metav1.UpdateOptions{}); err != nil { + sdegraded := apihelpers.NewMachineOSBuildCondition(string(mcfgv1alpha1.MachineOSBuildFailed), metav1.ConditionFalse, "MOSCAvailable", "MOSC") + apihelpers.SetMachineOSBuildCondition(&mosb.Status, *sdegraded) + + if _, err := ctrl.mcfgclient.MachineconfigurationV1alpha1().MachineOSBuilds().UpdateStatus(context.TODO(), mosb, metav1.UpdateOptions{}); err != nil { return err } return nil } -func (ctrl *Controller) syncFailingStatus(pool *mcfgv1.MachineConfigPool, err error) error { - sdegraded := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolRenderDegraded, corev1.ConditionTrue, "", fmt.Sprintf("Failed to build configuration for pool %s: %v", pool.Name, err)) - apihelpers.SetMachineConfigPoolCondition(&pool.Status, *sdegraded) - if _, updateErr := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), pool, metav1.UpdateOptions{}); updateErr != nil { - klog.Errorf("Error updating MachineConfigPool %s: %v", pool.Name, updateErr) +func (ctrl *Controller) syncFailingStatus(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild, err error) error { + sdegraded := apihelpers.NewMachineOSBuildCondition(string(mcfgv1alpha1.MachineOSBuildFailed), metav1.ConditionTrue, "BuildFailed", fmt.Sprintf("Failed to build configuration for pool %s: %v", mosc.Spec.MachineConfigPool.Name, err)) + apihelpers.SetMachineOSBuildCondition(&mosb.Status, *sdegraded) + if _, updateErr := ctrl.mcfgclient.MachineconfigurationV1alpha1().MachineOSBuilds().UpdateStatus(context.TODO(), mosb, metav1.UpdateOptions{}); updateErr != nil { + klog.Errorf("Error updating MachineOSBuild %s: %v", mosb.Name, updateErr) } return err } -// Determine if we have a config change. -func isPoolConfigChange(oldPool, curPool *mcfgv1.MachineConfigPool) bool { - return oldPool.Spec.Configuration.Name != curPool.Spec.Configuration.Name +func configChangeCauseBuild(old, cur *mcfgv1alpha1.MachineOSConfig) bool { + return equality.Semantic.DeepEqual(old.Spec.BuildInputs, cur.Spec.BuildInputs) } -// Checks our pool to see if we can do a build. We base this off of a few criteria: -// 1. Is the pool opted into layering? -// 2. Do we have an object reference to an in-progress build? -// 3. Is the pool degraded? -// 4. Is our build in a specific state? -// -// Returns true if we are able to build. -func canPoolBuild(ps *poolState) bool { - // If we don't have a layered pool, we should not build. - if !ps.IsLayered() { - return false - } +// Determines if we should do a build based upon the state of our +// MachineConfigPool, the presence of a build pod, etc. +func shouldWeDoABuild(builder interface { + IsBuildRunning(*mcfgv1alpha1.MachineOSBuild, *mcfgv1alpha1.MachineOSConfig) (bool, error) +}, mosc *mcfgv1alpha1.MachineOSConfig, oldMOSB, curMOSB *mcfgv1alpha1.MachineOSBuild) (bool, error) { + // get desired and current. If desired != current, + // assume we are doing a build. remove the whole layered pool annotation workflow - // If we have a reference to an in-progress build, we should not build. - if ps.HasBuildObjectForCurrentMachineConfig() { - return false - } + if oldMOSB.Spec.DesiredConfig != curMOSB.Spec.DesiredConfig { + // the desiredConfig changed. We need to do an update + // but check that there isn't already a build. + // If a build is found running, we should not do a build. + isRunning, err := builder.IsBuildRunning(curMOSB, mosc) - // If the pool is degraded, we should not build. - if ps.IsAnyDegraded() { - return false - } + return !isRunning, err - // If the pool is in any of these states, we should not build. - if ps.IsBuilding() { - return false + // check for image pull sped changing? } + return false, nil +} - if ps.IsBuildPending() { - return false +// Determines if a pod or build is managed by this controller by examining its labels. +func hasAllRequiredOSBuildLabels(labels map[string]string) bool { + requiredLabels := []string{ + ctrlcommon.OSImageBuildPodLabel, + targetMachineConfigPoolLabel, + desiredConfigLabel, } - if ps.IsBuildFailure() { - return false + for _, label := range requiredLabels { + if _, ok := labels[label]; !ok { + return false + } } return true } -// Determines if we should do a build based upon the state of our -// MachineConfigPool, the presence of a build pod, etc. -func shouldWeDoABuild(builder interface { - IsBuildRunning(*mcfgv1.MachineConfigPool) (bool, error) -}, oldPool, curPool *mcfgv1.MachineConfigPool) (bool, error) { - ps := newPoolState(curPool) +func (ctrl *Controller) doesMOSBExist(config *mcfgv1alpha1.MachineOSConfig) (*mcfgv1alpha1.MachineOSBuild, bool) { + mosb, err := ctrl.machineOSBuildLister.Get(fmt.Sprintf("%s-builder", config.Spec.MachineConfigPool.Name)) + if err != nil && k8serrors.IsNotFound(err) { + return nil, false + } else if mosb != nil { + return mosb, true + } + return nil, false +} - // If we don't have a layered pool, we should not build. - poolStateSuggestsBuild := canPoolBuild(ps) && - // If we have a config change or we're missing an image pullspec label, we - // should do a build. - (isPoolConfigChange(oldPool, curPool) || !ps.HasOSImage()) && - // If we're missing a build pod reference, it likely means we don't need to - // do a build. - !ps.HasBuildObjectRefName(newImageBuildRequest(curPool).getBuildName()) +func (ctrl *Controller) CreateBuildFromConfig(config *mcfgv1alpha1.MachineOSConfig) (*mcfgv1alpha1.MachineOSBuild, *mcfgv1alpha1.MachineOSBuildStatus, error) { + mcp, err := ctrl.mcpLister.Get(config.Spec.MachineConfigPool.Name) + if err != nil { + return nil, nil, err + } + now := metav1.Now() + build := mcfgv1alpha1.MachineOSBuild{ + TypeMeta: metav1.TypeMeta{ + Kind: "MachineOSBuild", + APIVersion: "machineconfiguration.openshift.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-builder", config.Spec.MachineConfigPool.Name, mcp.Spec.Configuration.Name), + }, + Spec: mcfgv1alpha1.MachineOSBuildSpec{ + RenderedImagePushspec: config.Spec.BuildInputs.RenderedImagePushspec, + Version: 1, + ConfigGeneration: 1, + DesiredConfig: mcfgv1alpha1.RenderedMachineConfigReference{ + Name: mcp.Spec.Configuration.Name, + }, + MachineOSConfig: mcfgv1alpha1.MachineOSConfigReference{ + Name: config.Name, + }, + }, + Status: mcfgv1alpha1.MachineOSBuildStatus{ + BuildStart: &now, + }, + } + mosb, err := ctrl.mcfgclient.MachineconfigurationV1alpha1().MachineOSBuilds().Create(context.TODO(), &build, metav1.CreateOptions{}) + return mosb, &build.Status, err +} - if !poolStateSuggestsBuild { - return false, nil +func (ctrl *Controller) getConfigAndBuildForPool(pool *mcfgv1.MachineConfigPool) (*mcfgv1alpha1.MachineOSConfig, *mcfgv1alpha1.MachineOSBuild, error) { + moscs, err := ctrl.machineOSConfigLister.List(labels.Everything()) + if err != nil { + return nil, nil, err } - // If a build is found running, we should not do a build. - isRunning, err := builder.IsBuildRunning(curPool) + mosbs, err := ctrl.machineOSBuildLister.List(labels.Everything()) + if err != nil { + return nil, nil, err + } - return !isRunning, err -} + var mosb *mcfgv1alpha1.MachineOSBuild + var mosc *mcfgv1alpha1.MachineOSConfig -// Enumerates all of the build-related MachineConfigPool condition types. -func getMachineConfigPoolBuildConditions() []mcfgv1.MachineConfigPoolConditionType { - return []mcfgv1.MachineConfigPoolConditionType{ - mcfgv1.MachineConfigPoolBuildFailed, - mcfgv1.MachineConfigPoolBuildPending, - mcfgv1.MachineConfigPoolBuildSuccess, - mcfgv1.MachineConfigPoolBuilding, + for _, config := range moscs { + if config.Spec.MachineConfigPool.Name == pool.Name { + mosc = config + break + } } -} -// Determines if a pod or build is managed by this controller by examining its labels. -func hasAllRequiredOSBuildLabels(labels map[string]string) bool { - requiredLabels := []string{ - ctrlcommon.OSImageBuildPodLabel, - targetMachineConfigPoolLabel, - desiredConfigLabel, + if mosc == nil { + return nil, nil, nil } - for _, label := range requiredLabels { - if _, ok := labels[label]; !ok { - return false + for _, build := range mosbs { + if build.Spec.MachineOSConfig.Name == mosc.Name { + if build.Spec.DesiredConfig.Name == pool.Spec.Configuration.Name { + mosb = build + break + } } } - return true + return mosc, mosb, nil } diff --git a/pkg/controller/build/build_controller_test.go b/pkg/controller/build/build_controller_test.go index 462c7057cd..e372c2e6da 100644 --- a/pkg/controller/build/build_controller_test.go +++ b/pkg/controller/build/build_controller_test.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "os" - "reflect" "strings" ign3types "github.com/coreos/ignition/v2/config/v3_4/types" @@ -346,44 +345,6 @@ func (b *buildControllerTestFixture) startBuildControllerWithCustomPodBuilder() return clients } -// Helper that determines if the build is a success. -func isMCPBuildSuccess(mcp *mcfgv1.MachineConfigPool) bool { - ps := newPoolState(mcp) - - return ps.IsLayered() && - ps.HasOSImage() && - ps.GetOSImage() == expectedImagePullspecWithSHA && - ps.IsBuildSuccess() && - !ps.HasBuildObjectForCurrentMachineConfig() && - machineConfigPoolHasMachineConfigRefs(mcp) && - reflect.DeepEqual(mcp.Spec.Configuration, mcp.Status.Configuration) -} - -func isMCPBuildInProgress(mcp *mcfgv1.MachineConfigPool) bool { - ps := newPoolState(mcp) - - return ps.IsLayered() && - ps.IsBuilding() - -} - -func isMCPBuildSuccessMsg(mcp *mcfgv1.MachineConfigPool) string { - sb := &strings.Builder{} - - ps := newPoolState(mcp) - - fmt.Fprintf(sb, "Is layered? %v\n", ps.IsLayered()) - fmt.Fprintf(sb, "Has OS image? %v\n", ps.HasOSImage()) - fmt.Fprintf(sb, "Matches expected pullspec (%s)? %v\n", expectedImagePullspecWithSHA, ps.GetOSImage() == expectedImagePullspecWithSHA) - fmt.Fprintf(sb, "Is build success? %v\n", ps.IsBuildSuccess()) - fmt.Fprintf(sb, "Is degraded? %v\n", ps.IsDegraded()) - fmt.Fprintf(sb, "Has build object ref for current MachineConfig? %v. Build refs found: %v\n", ps.HasBuildObjectForCurrentMachineConfig(), ps.GetBuildObjectRefs()) - fmt.Fprintf(sb, "Has MachineConfig refs? %v\n", machineConfigPoolHasMachineConfigRefs(mcp)) - fmt.Fprintf(sb, "Spec.Configuration == Status.Configuration? %v\n", reflect.DeepEqual(mcp.Spec.Configuration, mcp.Status.Configuration)) - - return sb.String() -} - func machineConfigPoolHasMachineConfigRefs(pool *mcfgv1.MachineConfigPool) bool { expectedMCP := newMachineConfigPool(pool.Name) ps := newPoolState(pool) @@ -397,33 +358,6 @@ func machineConfigPoolHasMachineConfigRefs(pool *mcfgv1.MachineConfigPool) bool return true } -// Helper that determines if the build was a failure. -func isMCPBuildFailure(mcp *mcfgv1.MachineConfigPool) bool { - ps := newPoolState(mcp) - - return ps.IsLayered() && - ps.IsBuildFailure() && - ps.IsDegraded() && - ps.HasBuildObjectForCurrentMachineConfig() && - machineConfigPoolHasMachineConfigRefs(mcp) && - reflect.DeepEqual(mcp.Spec.Configuration, mcp.Status.Configuration) -} - -func isMCPBuildFailureMsg(mcp *mcfgv1.MachineConfigPool) string { - sb := &strings.Builder{} - - ps := newPoolState(mcp) - - fmt.Fprintf(sb, "Is layered? %v\n", ps.IsLayered()) - fmt.Fprintf(sb, "Is build failure? %v\n", ps.IsBuildFailure()) - fmt.Fprintf(sb, "Is degraded? %v\n", ps.IsDegraded()) - fmt.Fprintf(sb, "Has build object ref for current MachineConfig? %v. Build refs found: %v\n", ps.HasBuildObjectForCurrentMachineConfig(), ps.GetBuildObjectRefs()) - fmt.Fprintf(sb, "Has MachineConfig refs? %v\n", machineConfigPoolHasMachineConfigRefs(mcp)) - fmt.Fprintf(sb, "Spec.Configuration == Status.Configuration? %v\n", reflect.DeepEqual(mcp.Spec.Configuration, mcp.Status.Configuration)) - - return sb.String() -} - // Opts a given MachineConfigPool into layering and asserts that the MachineConfigPool reaches the desired state. func testOptInMCPCustomBuildPod(ctx context.Context, t *testing.T, cs *Clients, poolName string) { mcp := optInMCP(ctx, t, cs, poolName) @@ -554,13 +488,13 @@ func testOptedInMCPOptsOut(ctx context.Context, t *testing.T, cs *Clients, optIn checkFunc := func(mcp *mcfgv1.MachineConfigPool) bool { ps := newPoolState(mcp) - if ps.IsLayered() { - return false - } + //if ps.IsLayered() { + // return false + //} - if ps.HasBuildObjectForCurrentMachineConfig() { - return false - } + //if ps.HasBuildObjectForCurrentMachineConfig() { + // return false + //} if len(ps.GetAllBuildConditions()) != 0 { return false @@ -573,8 +507,8 @@ func testOptedInMCPOptsOut(ctx context.Context, t *testing.T, cs *Clients, optIn sb := &strings.Builder{} ps := newPoolState(mcp) - fmt.Fprintf(sb, "Is layered? %v\n", ps.IsLayered()) - fmt.Fprintf(sb, "Has build object for current MachineConfig? %v\n", ps.HasBuildObjectForCurrentMachineConfig()) + //fmt.Fprintf(sb, "Is layered? %v\n", ps.IsLayered()) + //fmt.Fprintf(sb, "Has build object for current MachineConfig? %v\n", ps.HasBuildObjectForCurrentMachineConfig()) fmt.Fprintf(sb, "Build objects: %v\n", ps.GetBuildObjectRefs()) buildConditions := ps.GetAllBuildConditions() fmt.Fprintf(sb, "Has no build conditions? %v. Build conditions: %v\n", len(buildConditions) == 0, buildConditions) diff --git a/pkg/controller/build/fixtures_test.go b/pkg/controller/build/fixtures_test.go index dbf6f03ffc..afcd137621 100644 --- a/pkg/controller/build/fixtures_test.go +++ b/pkg/controller/build/fixtures_test.go @@ -602,9 +602,9 @@ func getPoolFailureMsg(mcp *mcfgv1.MachineConfigPool, expectedToHaveBuildRef boo fmt.Fprintf(msg, "Is only one build condition true? %v\n", isOnlyOneBuildConditionTrue(mcp)) - hasBuildObject := ps.HasBuildObjectForCurrentMachineConfig() + //hasBuildObject := ps.HasBuildObjectForCurrentMachineConfig() buildObjectRefs := ps.GetBuildObjectRefs() - fmt.Fprintf(msg, "Has ref? %v. Expected: %v. Actual: %v.\n", hasBuildObject, expectedToHaveBuildRef, hasBuildObject == expectedToHaveBuildRef) + //fmt.Fprintf(msg, "Has ref? %v. Expected: %v. Actual: %v.\n", hasBuildObject, expectedToHaveBuildRef, hasBuildObject == expectedToHaveBuildRef) if expectedToHaveBuildRef { fmt.Fprintf(msg, "Has only one build ref? %v. Build refs found: %v\n", len(buildObjectRefs) == 1, buildObjectRefs) @@ -641,11 +641,11 @@ func poolReachesExpectedBuildState(mcp *mcfgv1.MachineConfigPool, expectedToHave return false } - ps := newPoolState(mcp) + //ps := newPoolState(mcp) - if expectedToHaveBuildRef { - return ps.HasBuildObjectForCurrentMachineConfig() && len(ps.GetBuildObjectRefs()) == 1 - } + //if expectedToHaveBuildRef { + // return ps.HasBuildObjectForCurrentMachineConfig() && len(ps.GetBuildObjectRefs()) == 1 + //} - return !ps.HasBuildObjectForCurrentMachineConfig() && len(ps.GetBuildObjectRefs()) == 0 + //return !ps.HasBuildObjectForCurrentMachineConfig() && len(ps.GetBuildObjectRefs()) == 0 } diff --git a/pkg/controller/build/helpers.go b/pkg/controller/build/helpers.go index 2fb0606d98..2571b9dfee 100644 --- a/pkg/controller/build/helpers.go +++ b/pkg/controller/build/helpers.go @@ -12,6 +12,9 @@ import ( "github.com/containers/image/v5/docker" "github.com/containers/image/v5/docker/reference" "github.com/opencontainers/go-digest" + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + "github.com/openshift/client-go/machineconfiguration/clientset/versioned" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -19,7 +22,6 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" k8stypes "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" - "k8s.io/klog/v2" ) const ( @@ -106,7 +108,7 @@ func parseImagePullspecWithDigest(pullspec string, imageDigest digest.Digest) (s // Parses an image pullspec from a string and an image SHA and replaces any // tags on the pullspec with the provided image SHA. -func parseImagePullspec(pullspec, imageSHA string) (string, error) { +func ParseImagePullspec(pullspec, imageSHA string) (string, error) { imageDigest, err := digest.Parse(imageSHA) if err != nil { return "", err @@ -241,56 +243,39 @@ func ignoreIsNotFoundErr(err error) error { return nil } -// ValidateOnClusterBuildConfig validates the existence of the on-cluster-build-config ConfigMap and the presence of the secrets it refers to. -func ValidateOnClusterBuildConfig(kubeclient clientset.Interface) error { - // Validate the presence of the on-cluster-build-config ConfigMap - cm, err := kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), OnClusterBuildConfigMapName, metav1.GetOptions{}) - if err != nil && k8serrors.IsNotFound(err) { - return fmt.Errorf("%s ConfigMap missing, did you create it?", OnClusterBuildConfigMapName) - } - +// ValidateOnClusterBuildConfig validates the existence of the MachineOSConfig and the required build inputs. +func ValidateOnClusterBuildConfig(kubeclient clientset.Interface, mcfgclient versioned.Interface, layeredMCPs []*mcfgv1.MachineConfigPool) error { + // Validate the presence of the MachineOSConfig + machineOSConfigs, err := mcfgclient.MachineconfigurationV1alpha1().MachineOSConfigs().List(context.TODO(), metav1.ListOptions{}) if err != nil { - return fmt.Errorf("could not get ConfigMap %s: %w", OnClusterBuildConfigMapName, err) + return err } - secretNames := []string{BaseImagePullSecretNameConfigKey, FinalImagePushSecretNameConfigKey} - imagePullspecs := []string{FinalImagePullspecConfigKey} - - // Validate the presence of secrets it refers to - for _, key := range secretNames { - val, ok := cm.Data[key] - if !ok { - return fmt.Errorf("missing required key %q in configmap %s", key, OnClusterBuildConfigMapName) + moscForPoolExists := false + var moscForPool *mcfgv1alpha1.MachineOSConfig + for _, pool := range layeredMCPs { + moscForPoolExists = false + for _, mosc := range machineOSConfigs.Items { + if mosc.Spec.MachineConfigPool.Name == pool.Name { + moscForPoolExists = true + moscForPool = &mosc + break + } } + if !moscForPoolExists { + return fmt.Errorf("MachineOSConfig for pool %s missing, did you create it?", pool.Name) - if val == "" { - return fmt.Errorf("key %q in configmap %s has an empty value", key, OnClusterBuildConfigMapName) } - - if err := validateSecret(kubeclient, val); err != nil { + if err := validateSecret(kubeclient, moscForPool.Spec.BuildInputs.BaseImagePullSecret.Name); err != nil { return err } - } - - // Validate the image pullspec(s) it referes to. - for _, key := range imagePullspecs { - val, ok := cm.Data[key] - if !ok { - return fmt.Errorf("missing required key %q in configmap %s", key, OnClusterBuildConfigMapName) + if err := validateSecret(kubeclient, moscForPool.Spec.BuildInputs.RenderedImagePushSecret.Name); err != nil { + return err } - - if val == "" { - return fmt.Errorf("key %q in configmap %s has an empty value", key, OnClusterBuildConfigMapName) + if _, err := reference.ParseNamed(moscForPool.Spec.BuildInputs.RenderedImagePushspec); err != nil { + return err } - if _, err := reference.ParseNamed(val); err != nil { - return fmt.Errorf("could not parse %s with %q: %w", key, val, err) - } - } - - // Validate the image builder type from the ConfigMap - if _, err := GetImageBuilderType(cm); err != nil { - return err } return nil @@ -313,28 +298,3 @@ func validateSecret(kubeclient clientset.Interface, secretName string) error { return nil } - -// Determines which image builder to start based upon the imageBuilderType key -// in the on-cluster-build-config ConfigMap. Defaults to custom-pod-builder. -func GetImageBuilderType(cm *corev1.ConfigMap) (ImageBuilderType, error) { - configMapImageBuilder, ok := cm.Data[ImageBuilderTypeConfigMapKey] - cmBuilder := ImageBuilderType(configMapImageBuilder) - defaultBuilder := OpenshiftImageBuilder - - if !ok { - klog.Infof("%s not set, defaulting to %q", ImageBuilderTypeConfigMapKey, defaultBuilder) - return defaultBuilder, nil - } - - if ok && configMapImageBuilder == "" { - klog.Infof("%s empty, defaulting to %q", ImageBuilderTypeConfigMapKey, defaultBuilder) - return defaultBuilder, nil - } - - if !validImageBuilderTypes.Has(ImageBuilderType(configMapImageBuilder)) { - return "", &ErrInvalidImageBuilder{Message: fmt.Sprintf("invalid image builder type %q, valid types: %v", configMapImageBuilder, validImageBuilderTypes.UnsortedList()), InvalidType: configMapImageBuilder} - } - - klog.Infof("%s set to %q", ImageBuilderTypeConfigMapKey, configMapImageBuilder) - return cmBuilder, nil -} diff --git a/pkg/controller/build/image_build_controller.go b/pkg/controller/build/image_build_controller.go deleted file mode 100644 index dc6b98bd85..0000000000 --- a/pkg/controller/build/image_build_controller.go +++ /dev/null @@ -1,335 +0,0 @@ -package build - -import ( - "context" - "fmt" - "strings" - "time" - - buildv1 "github.com/openshift/api/build/v1" - mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - buildlistersv1 "github.com/openshift/client-go/build/listers/build/v1" - "github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme" - ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" - corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - coreclientsetv1 "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/workqueue" - "k8s.io/klog/v2" -) - -// Controller defines the build controller. -type ImageBuildController struct { - *Clients - *informers - - eventRecorder record.EventRecorder - - // The function to call whenever we've encountered a Build. This function is - // responsible for examining the Build to determine what state its in and map - // that state to the appropriate MachineConfigPool object. - buildHandler func(*buildv1.Build) error - - syncHandler func(pod string) error - enqueueBuild func(*buildv1.Build) - - buildLister buildlistersv1.BuildLister - - buildListerSynced cache.InformerSynced - - queue workqueue.RateLimitingInterface - - config BuildControllerConfig -} - -var _ ImageBuilder = (*ImageBuildController)(nil) - -// Returns a new image build controller. -func newImageBuildController( - ctrlConfig BuildControllerConfig, - clients *Clients, - buildHandler func(*buildv1.Build) error, -) *ImageBuildController { - eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(klog.Infof) - eventBroadcaster.StartRecordingToSink(&coreclientsetv1.EventSinkImpl{Interface: clients.kubeclient.CoreV1().Events("")}) - - ctrl := &ImageBuildController{ - Clients: clients, - informers: newInformers(clients), - eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "machineosbuilder-imagebuildcontroller"}), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineosbuilder-imagebuildcontroller"), - config: ctrlConfig, - buildHandler: buildHandler, - } - - // As an aside, why doesn't the constructor here set up all the informers? - ctrl.buildInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: ctrl.addBuild, - UpdateFunc: ctrl.updateBuild, - DeleteFunc: ctrl.deleteBuild, - }) - - ctrl.buildLister = ctrl.buildInformer.Lister() - ctrl.buildListerSynced = ctrl.buildInformer.Informer().HasSynced - - ctrl.syncHandler = ctrl.syncBuild - ctrl.enqueueBuild = ctrl.enqueueDefault - - return ctrl -} - -func (ctrl *ImageBuildController) enqueue(build *buildv1.Build) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(build) - if err != nil { - utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", build, err)) - return - } - - ctrl.queue.Add(key) -} - -func (ctrl *ImageBuildController) enqueueRateLimited(build *buildv1.Build) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(build) - if err != nil { - utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", build, err)) - return - } - - ctrl.queue.AddRateLimited(key) -} - -// enqueueAfter will enqueue a build after the provided amount of time. -func (ctrl *ImageBuildController) enqueueAfter(build *buildv1.Build, after time.Duration) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(build) - if err != nil { - utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", build, err)) - return - } - - ctrl.queue.AddAfter(key, after) -} - -// enqueueDefault calls a default enqueue function -func (ctrl *ImageBuildController) enqueueDefault(build *buildv1.Build) { - ctrl.enqueueAfter(build, ctrl.config.UpdateDelay) -} - -// Syncs Builds. -func (ctrl *ImageBuildController) syncBuild(key string) error { //nolint:dupl // This does have commonality with the PodBuildController. - start := time.Now() - defer func() { - klog.Infof("Finished syncing pod %s: %s", key, time.Since(start)) - }() - klog.Infof("Started syncing pod %s", key) - - _, name, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - return err - } - // TODO: Why do I need to set the namespace here? - build, err := ctrl.buildLister.Builds(ctrlcommon.MCONamespace).Get(name) - if k8serrors.IsNotFound(err) { - klog.V(2).Infof("Build %v has been deleted", key) - return nil - } - if err != nil { - return err - } - - build, err = ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Get(context.TODO(), build.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - if !hasAllRequiredOSBuildLabels(build.Labels) { - klog.Infof("Ignoring non-OS image build %s", build.Name) - return nil - } - - if err := ctrl.buildHandler(build); err != nil { - return fmt.Errorf("unable to update with build status: %w", err) - } - - klog.Infof("Updated MachineConfigPool with build status. Build %s in %s", build.Name, build.Status.Phase) - - return nil -} - -// Starts the Image Build Controller. -func (ctrl *ImageBuildController) Run(ctx context.Context, workers int) { - defer utilruntime.HandleCrash() - defer ctrl.queue.ShutDown() - - ctrl.informers.start(ctx) - - if !cache.WaitForCacheSync(ctx.Done(), ctrl.buildListerSynced) { - return - } - - klog.Info("Starting MachineOSBuilder-ImageBuildController") - defer klog.Info("Shutting down MachineOSBuilder-ImageBuildController") - - for i := 0; i < workers; i++ { - go wait.Until(ctrl.worker, time.Second, ctx.Done()) - } - - <-ctx.Done() -} - -// Gets the final image pullspec. In this case, we can interrogate the Build -// object for this information. -func (ctrl *ImageBuildController) FinalPullspec(pool *mcfgv1.MachineConfigPool) (string, error) { - buildName := newImageBuildRequest(pool).getBuildName() - - build, err := ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Get(context.TODO(), buildName, metav1.GetOptions{}) - if err != nil { - return "", fmt.Errorf("could not get build %s for pool %s: %w", buildName, pool.Name, err) - } - - // Get the image digest from the completed build and replace the tag with - // the digest. - if build.Status.OutputDockerImageReference == "" { - return "", fmt.Errorf("no image reference outputted") - } - - if build.Status.Output.To.ImageDigest == "" { - return "", fmt.Errorf("no image digest found") - } - - return parseImagePullspec(build.Status.OutputDockerImageReference, build.Status.Output.To.ImageDigest) -} - -// Deletes the underlying Build object. -func (ctrl *ImageBuildController) DeleteBuildObject(pool *mcfgv1.MachineConfigPool) error { - buildName := newImageBuildRequest(pool).getBuildName() - return ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Delete(context.TODO(), buildName, metav1.DeleteOptions{}) -} - -// Determines if a build is currently running by looking for a corresponding Build. -func (ctrl *ImageBuildController) IsBuildRunning(pool *mcfgv1.MachineConfigPool) (bool, error) { - buildName := newImageBuildRequest(pool).getBuildName() - - // First check if we have a build in progress for this MachineConfigPool and rendered config. - _, err := ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Get(context.TODO(), buildName, metav1.GetOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - return false, err - } - - return err == nil, nil -} - -// Starts a new build, assuming one is not found first. In that case, it -// returns an object reference to the preexisting Build object. -func (ctrl *ImageBuildController) StartBuild(ibr ImageBuildRequest) (*corev1.ObjectReference, error) { - targetMC := ibr.Pool.Spec.Configuration.Name - - buildName := ibr.getBuildName() - - // TODO: Find a constant for this: - if !strings.HasPrefix(targetMC, "rendered-") { - return nil, fmt.Errorf("%s is not a rendered MachineConfig", targetMC) - } - - // First check if we have a build in progress for this MachineConfigPool and rendered config. - build, err := ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Get(context.TODO(), buildName, metav1.GetOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - return nil, err - } - - // This means we found a preexisting build build. - if build != nil && err == nil && hasAllRequiredOSBuildLabels(build.Labels) { - klog.Infof("Found preexisting OS image build (%s) for pool %s", build.Name, ibr.Pool.Name) - return toObjectRef(build), nil - } - - klog.Infof("Starting build for pool %s", ibr.Pool.Name) - klog.Infof("Build name: %s", buildName) - klog.Infof("Final image will be pushed to %q, using secret %q", ibr.FinalImage.Pullspec, ibr.FinalImage.PullSecret.Name) - - build, err = ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Create(context.TODO(), ibr.toBuild(), metav1.CreateOptions{}) - if err != nil { - return nil, fmt.Errorf("could not create OS image build: %w", err) - } - - klog.Infof("Build started for pool %s in %s!", ibr.Pool.Name, build.Name) - - return toObjectRef(build), nil -} - -// Fires whenever a Build is added. -func (ctrl *ImageBuildController) addBuild(obj interface{}) { - build := obj.(*buildv1.Build).DeepCopy() - klog.V(4).Infof("Adding Build %s. Is OS image build? %v", build.Name, hasAllRequiredOSBuildLabels(build.Labels)) - if hasAllRequiredOSBuildLabels(build.Labels) { - ctrl.enqueueBuild(build) - } -} - -// Fires whenever a Build is updated. -func (ctrl *ImageBuildController) updateBuild(_, curObj interface{}) { - curBuild := curObj.(*buildv1.Build).DeepCopy() - - isOSImageBuild := hasAllRequiredOSBuildLabels(curBuild.Labels) - - klog.Infof("Updating build %s. Is OS image build? %v", curBuild.Name, isOSImageBuild) - - // Ignore non-OS image builds. - // TODO: Figure out if we can add the filter criteria onto the lister. - if !isOSImageBuild { - return - } - - klog.Infof("Build %s updated", curBuild.Name) - - ctrl.enqueueBuild(curBuild) -} - -func (ctrl *ImageBuildController) handleErr(err error, key interface{}) { - if err == nil { - ctrl.queue.Forget(key) - return - } - - if ctrl.queue.NumRequeues(key) < ctrl.config.MaxRetries { - klog.V(2).Infof("Error syncing build %v: %v", key, err) - ctrl.queue.AddRateLimited(key) - return - } - - utilruntime.HandleError(err) - klog.V(2).Infof("Dropping build %q out of the queue: %v", key, err) - ctrl.queue.Forget(key) - ctrl.queue.AddAfter(key, 1*time.Minute) -} - -func (ctrl *ImageBuildController) deleteBuild(obj interface{}) { - build := obj.(*buildv1.Build).DeepCopy() - klog.V(4).Infof("Deleting Build %s. Is OS image build? %v", build.Name, hasAllRequiredOSBuildLabels(build.Labels)) - ctrl.enqueueBuild(build) -} - -// worker runs a worker thread that just dequeues items, processes them, and marks them done. -// It enforces that the syncHandler is never invoked concurrently with the same key. -func (ctrl *ImageBuildController) worker() { - for ctrl.processNextWorkItem() { - } -} - -func (ctrl *ImageBuildController) processNextWorkItem() bool { - key, quit := ctrl.queue.Get() - if quit { - return false - } - defer ctrl.queue.Done(key) - - err := ctrl.syncHandler(key.(string)) - ctrl.handleErr(err, key) - - return true -} diff --git a/pkg/controller/build/image_build_request.go b/pkg/controller/build/image_build_request.go index 7bd2e00a58..779e11dc83 100644 --- a/pkg/controller/build/image_build_request.go +++ b/pkg/controller/build/image_build_request.go @@ -7,8 +7,8 @@ import ( "strings" "text/template" - buildv1 "github.com/openshift/api/build/v1" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" "github.com/openshift/machine-config-operator/test/helpers" corev1 "k8s.io/api/core/v1" @@ -43,82 +43,70 @@ type ImageInfo struct { // Represents the request to build a layered OS image. type ImageBuildRequest struct { - // The target MachineConfigPool - Pool *mcfgv1.MachineConfigPool - // The base OS image (derived from the machine-config-osimageurl ConfigMap) - BaseImage ImageInfo - // The extensions image (derived from the machine-config-osimageurl ConfigMap) - ExtensionsImage ImageInfo - // The final OS image (desired from the on-cluster-build-config ConfigMap) - FinalImage ImageInfo - // The OpenShift release version (derived from the machine-config-osimageurl ConfigMap) - ReleaseVersion string - // An optional user-supplied Dockerfile that gets injected into the build. - CustomDockerfile string -} - -type buildInputs struct { - onClusterBuildConfig *corev1.ConfigMap - osImageURL *corev1.ConfigMap - customDockerfiles *corev1.ConfigMap - pool *mcfgv1.MachineConfigPool - machineConfig *mcfgv1.MachineConfig + // The target Build object + MachineOSBuild *mcfgv1alpha1.MachineOSBuild + // the cofig the build is based off of + MachineOSConfig *mcfgv1alpha1.MachineOSConfig + // the containerfile designated from the MachineOSConfig + Containerfile string } // Constructs a simple ImageBuildRequest. -func newImageBuildRequest(pool *mcfgv1.MachineConfigPool) ImageBuildRequest { - return ImageBuildRequest{ - Pool: pool.DeepCopy(), +func newImageBuildRequest(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) *ImageBuildRequest { + ibr := &ImageBuildRequest{ + MachineOSConfig: mosc, + MachineOSBuild: mosb, } -} -// Populates the final image info from the on-cluster-build-config ConfigMap. -func newFinalImageInfo(inputs *buildInputs) ImageInfo { - return ImageInfo{ - Pullspec: inputs.onClusterBuildConfig.Data[FinalImagePullspecConfigKey], - PullSecret: corev1.LocalObjectReference{ - Name: inputs.onClusterBuildConfig.Data[FinalImagePushSecretNameConfigKey], - }, + // only support noArch for now + for _, file := range mosc.Spec.BuildInputs.Containerfile { + if file.ContainerfileArch == mcfgv1alpha1.NoArch { + ibr.Containerfile = file.Content + break + } } + + return ibr } // Populates the base image info from both the on-cluster-build-config and // machine-config-osimageurl ConfigMaps. -func newBaseImageInfo(inputs *buildInputs) ImageInfo { +func newBaseImageInfo(osImageURL *corev1.ConfigMap, mosc *mcfgv1alpha1.MachineOSConfig) ImageInfo { return ImageInfo{ - Pullspec: inputs.osImageURL.Data[baseOSContainerImageConfigKey], + Pullspec: osImageURL.Data[baseOSContainerImageConfigKey], PullSecret: corev1.LocalObjectReference{ - Name: inputs.onClusterBuildConfig.Data[BaseImagePullSecretNameConfigKey], + Name: mosc.Spec.BuildInputs.BaseImagePullSecret.Name, }, } } // Populates the extensions image info from both the on-cluster-build-config // and machine-config-osimageurl ConfigMaps. -func newExtensionsImageInfo(inputs *buildInputs) ImageInfo { +func newExtensionsImageInfo(osImageURL *corev1.ConfigMap, mosc *mcfgv1alpha1.MachineOSConfig) ImageInfo { return ImageInfo{ - Pullspec: inputs.osImageURL.Data[baseOSExtensionsContainerImageConfigKey], + Pullspec: osImageURL.Data[baseOSExtensionsContainerImageConfigKey], PullSecret: corev1.LocalObjectReference{ - Name: inputs.onClusterBuildConfig.Data[BaseImagePullSecretNameConfigKey], + Name: mosc.Spec.BuildInputs.BaseImagePullSecret.Name, }, } } // Constructs an ImageBuildRequest with all of the images populated from ConfigMaps -func newImageBuildRequestFromBuildInputs(inputs *buildInputs) ImageBuildRequest { - customDockerfile := "" - if inputs.customDockerfiles != nil { - customDockerfile = inputs.customDockerfiles.Data[inputs.pool.Name] +func newImageBuildRequestFromBuildInputs(mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig) ImageBuildRequest { + ibr := &ImageBuildRequest{ + MachineOSConfig: mosc, + MachineOSBuild: mosb, } - return ImageBuildRequest{ - Pool: inputs.pool.DeepCopy(), - BaseImage: newBaseImageInfo(inputs), - FinalImage: newFinalImageInfo(inputs), - ExtensionsImage: newExtensionsImageInfo(inputs), - ReleaseVersion: inputs.osImageURL.Data[releaseVersionConfigKey], - CustomDockerfile: customDockerfile, + // only support noArch for now + for _, file := range mosc.Spec.BuildInputs.Containerfile { + if file.ContainerfileArch == mcfgv1alpha1.NoArch { + ibr.Containerfile = file.Content + break + } } + + return *ibr } // Renders our Dockerfile and injects it into a ConfigMap for consumption by the image builder. @@ -191,67 +179,6 @@ func (i ImageBuildRequest) renderDockerfile() (string, error) { return out.String(), nil } -// Creates an OpenShift Image Builder build object prewired with all ConfigMaps -// / Secrets / etc. -func (i ImageBuildRequest) toBuild() *buildv1.Build { - skipLayers := buildv1.ImageOptimizationSkipLayers - - // The Build API requires the Dockerfile field to be set, even if you - // override it via a ConfigMap. - dockerfile := "FROM scratch" - - return &buildv1.Build{ - TypeMeta: metav1.TypeMeta{ - Kind: "Build", - }, - ObjectMeta: i.getObjectMeta(i.getBuildName()), - Spec: buildv1.BuildSpec{ - CommonSpec: buildv1.CommonSpec{ - // TODO: We may need to configure a Build Input here so we can wire up - // the pull secrets for the base OS image and the extensions image. - Source: buildv1.BuildSource{ - Type: buildv1.BuildSourceDockerfile, - Dockerfile: &dockerfile, - ConfigMaps: []buildv1.ConfigMapBuildSource{ - { - // Provides the rendered MachineConfig in a gzipped / - // base64-encoded format. - ConfigMap: corev1.LocalObjectReference{ - Name: i.getMCConfigMapName(), - }, - DestinationDir: "machineconfig", - }, - { - // Provides the rendered Dockerfile. - ConfigMap: corev1.LocalObjectReference{ - Name: i.getDockerfileConfigMapName(), - }, - }, - }, - }, - Strategy: buildv1.BuildStrategy{ - DockerStrategy: &buildv1.DockerBuildStrategy{ - // Squashing layers is good as long as it doesn't cause problems with what - // the users want to do. It says "some syntax is not supported" - ImageOptimizationPolicy: &skipLayers, - }, - Type: buildv1.DockerBuildStrategyType, - }, - Output: buildv1.BuildOutput{ - To: &corev1.ObjectReference{ - Name: i.FinalImage.Pullspec, - Kind: "DockerImage", - }, - PushSecret: &i.FinalImage.PullSecret, - ImageLabels: []buildv1.ImageLabel{ - {Name: "io.openshift.machineconfig.pool", Value: i.Pool.Name}, - }, - }, - }, - }, - } -} - // Creates a custom image build pod to build the final OS image with all // ConfigMaps / Secrets / etc. wired into it. func (i ImageBuildRequest) toBuildPod() *corev1.Pod { @@ -274,7 +201,7 @@ func (i ImageBuildRequest) toPodmanPod() *corev1.Pod { }, { Name: "TAG", - Value: i.FinalImage.Pullspec, + Value: i.MachineOSConfig.Status.CurrentImagePullspec, }, { Name: "BASE_IMAGE_PULL_CREDS", @@ -344,7 +271,7 @@ func (i ImageBuildRequest) toPodmanPod() *corev1.Pod { // contains the SHA256 from the container registry, and stores this // in a ConfigMap which is consumed after the pod stops. Name: "image-build", - Image: i.BaseImage.Pullspec, + Image: i.MachineOSConfig.Spec.BuildInputs.BaseOSImagePullspec, Env: env, Command: append(command, podmanBuildScript), ImagePullPolicy: corev1.PullIfNotPresent, @@ -387,7 +314,7 @@ func (i ImageBuildRequest) toPodmanPod() *corev1.Pod { Name: "base-image-pull-creds", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: i.BaseImage.PullSecret.Name, + SecretName: i.MachineOSConfig.Spec.BuildInputs.BaseImagePullSecret.Name, Items: []corev1.KeyToPath{ { Key: corev1.DockerConfigJsonKey, @@ -402,7 +329,7 @@ func (i ImageBuildRequest) toPodmanPod() *corev1.Pod { Name: "final-image-push-creds", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: i.FinalImage.PullSecret.Name, + SecretName: i.MachineOSConfig.Spec.BuildOutputs.CurrentImagePullSecret.Name, Items: []corev1.KeyToPath{ { Key: corev1.DockerConfigJsonKey, @@ -445,7 +372,7 @@ func (i ImageBuildRequest) toBuildahPod() *corev1.Pod { }, { Name: "TAG", - Value: i.FinalImage.Pullspec, + Value: i.MachineOSConfig.Status.CurrentImagePullspec, }, { Name: "BASE_IMAGE_PULL_CREDS", @@ -524,7 +451,7 @@ func (i ImageBuildRequest) toBuildahPod() *corev1.Pod { Name: "wait-for-done", Env: env, Command: append(command, waitScript), - Image: i.BaseImage.Pullspec, + Image: i.MachineOSConfig.Spec.BuildInputs.BaseOSImagePullspec, ImagePullPolicy: corev1.PullAlways, SecurityContext: securityContext, VolumeMounts: volumeMounts, @@ -560,7 +487,7 @@ func (i ImageBuildRequest) toBuildahPod() *corev1.Pod { Name: "base-image-pull-creds", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: i.BaseImage.PullSecret.Name, + SecretName: i.MachineOSConfig.Spec.BuildInputs.BaseImagePullSecret.Name, Items: []corev1.KeyToPath{ { Key: corev1.DockerConfigJsonKey, @@ -575,7 +502,7 @@ func (i ImageBuildRequest) toBuildahPod() *corev1.Pod { Name: "final-image-push-creds", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: i.FinalImage.PullSecret.Name, + SecretName: i.MachineOSConfig.Spec.BuildInputs.RenderedImagePushSecret.Name, Items: []corev1.KeyToPath{ { Key: corev1.DockerConfigJsonKey, @@ -608,8 +535,8 @@ func (i ImageBuildRequest) getObjectMeta(name string) metav1.ObjectMeta { Namespace: ctrlcommon.MCONamespace, Labels: map[string]string{ ctrlcommon.OSImageBuildPodLabel: "", - targetMachineConfigPoolLabel: i.Pool.Name, - desiredConfigLabel: i.Pool.Spec.Configuration.Name, + targetMachineConfigPoolLabel: i.MachineOSConfig.Spec.MachineConfigPool.Name, + desiredConfigLabel: i.MachineOSBuild.Spec.DesiredConfig.Name, }, Annotations: map[string]string{ mcPoolAnnotation: "", @@ -619,19 +546,19 @@ func (i ImageBuildRequest) getObjectMeta(name string) metav1.ObjectMeta { // Computes the Dockerfile ConfigMap name based upon the MachineConfigPool name. func (i ImageBuildRequest) getDockerfileConfigMapName() string { - return fmt.Sprintf("dockerfile-%s", i.Pool.Spec.Configuration.Name) + return fmt.Sprintf("dockerfile-%s", i.MachineOSBuild.Spec.DesiredConfig.Name) } // Computes the MachineConfig ConfigMap name based upon the MachineConfigPool name. func (i ImageBuildRequest) getMCConfigMapName() string { - return fmt.Sprintf("mc-%s", i.Pool.Spec.Configuration.Name) + return fmt.Sprintf("mc-%s", i.MachineOSBuild.Spec.DesiredConfig.Name) } // Computes the build name based upon the MachineConfigPool name. func (i ImageBuildRequest) getBuildName() string { - return fmt.Sprintf("build-%s", i.Pool.Spec.Configuration.Name) + return fmt.Sprintf("build-%s", i.MachineOSBuild.Spec.DesiredConfig.Name) } func (i ImageBuildRequest) getDigestConfigMapName() string { - return fmt.Sprintf("digest-%s", i.Pool.Spec.Configuration.Name) + return fmt.Sprintf("digest-%s", i.MachineOSBuild.Spec.DesiredConfig.Name) } diff --git a/pkg/controller/build/image_build_request_test.go b/pkg/controller/build/image_build_request_test.go index 0e4dc1d938..848ff6f063 100644 --- a/pkg/controller/build/image_build_request_test.go +++ b/pkg/controller/build/image_build_request_test.go @@ -15,7 +15,7 @@ func TestImageBuildRequest(t *testing.T) { osImageURLConfigMap := getOSImageURLConfigMap() onClusterBuildConfigMap := getOnClusterBuildConfigMap() - ibr := newImageBuildRequestFromBuildInputs(&buildInputs{ + ibr := newImageBuildRequestFromBuildInputs(&BuildInputs{ pool: mcp, osImageURL: osImageURLConfigMap, onClusterBuildConfig: onClusterBuildConfigMap, @@ -72,7 +72,7 @@ func TestImageBuildRequestMissingExtensionsImage(t *testing.T) { delete(osImageURLConfigMap.Data, baseOSExtensionsContainerImageConfigKey) - ibr := newImageBuildRequestFromBuildInputs(&buildInputs{ + ibr := newImageBuildRequestFromBuildInputs(&BuildInputs{ pool: mcp, osImageURL: osImageURLConfigMap, onClusterBuildConfig: onClusterBuildConfigMap, @@ -94,7 +94,7 @@ func TestImageBuildRequestWithCustomDockerfile(t *testing.T) { "worker": "FROM configs AS final\nRUN dnf install -y python3", }) - ibr := newImageBuildRequestFromBuildInputs(&buildInputs{ + ibr := newImageBuildRequestFromBuildInputs(&BuildInputs{ pool: mcp, osImageURL: osImageURLConfigMap, onClusterBuildConfig: onClusterBuildConfigMap, diff --git a/pkg/controller/build/pod_build_controller.go b/pkg/controller/build/pod_build_controller.go index 6f3692d2d5..1167066d60 100644 --- a/pkg/controller/build/pod_build_controller.go +++ b/pkg/controller/build/pod_build_controller.go @@ -6,7 +6,7 @@ import ( "strings" "time" - mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" "github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" corev1 "k8s.io/api/core/v1" @@ -186,48 +186,27 @@ func (ctrl *PodBuildController) Run(ctx context.Context, workers int) { <-ctx.Done() } -// Gets the final image pullspec by retrieving the ConfigMap that the build pod -// creates from the Buildah digestfile. -func (ctrl *PodBuildController) FinalPullspec(pool *mcfgv1.MachineConfigPool) (string, error) { - onClusterBuildConfigMap, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), OnClusterBuildConfigMapName, metav1.GetOptions{}) - if err != nil { - return "", err - } - - finalImageInfo := newFinalImageInfo(&buildInputs{ - onClusterBuildConfig: onClusterBuildConfigMap, - }) - ibr := newImageBuildRequest(pool) - - digestConfigMap, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), ibr.getDigestConfigMapName(), metav1.GetOptions{}) - if err != nil { - return "", err - } - - return parseImagePullspec(finalImageInfo.Pullspec, digestConfigMap.Data["digest"]) -} - // Deletes the underlying build pod. -func (ctrl *PodBuildController) DeleteBuildObject(pool *mcfgv1.MachineConfigPool) error { +func (ctrl *PodBuildController) DeleteBuildObject(mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig) error { // We want to ignore when a pod or ConfigMap is deleted if it is not found. // This is because when a pool is opted out of layering *after* a successful // build, no pod nor ConfigMap will remain. So we want to be able to // idempotently call this function in that case. return aggerrors.AggregateGoroutines( func() error { - ibr := newImageBuildRequest(pool) + ibr := newImageBuildRequest(mosc, mosb) return ignoreIsNotFoundErr(ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Delete(context.TODO(), ibr.getBuildName(), metav1.DeleteOptions{})) }, func() error { - ibr := newImageBuildRequest(pool) + ibr := newImageBuildRequest(mosc, mosb) return ignoreIsNotFoundErr(ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Delete(context.TODO(), ibr.getDigestConfigMapName(), metav1.DeleteOptions{})) }, ) } // Determines if a build is currently running by looking for a corresponding pod. -func (ctrl *PodBuildController) IsBuildRunning(pool *mcfgv1.MachineConfigPool) (bool, error) { - ibr := newImageBuildRequest(pool) +func (ctrl *PodBuildController) IsBuildRunning(mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig) (bool, error) { + ibr := newImageBuildRequest(mosc, mosb) // First check if we have a build in progress for this MachineConfigPool and rendered config. _, err := ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Get(context.TODO(), ibr.getBuildName(), metav1.GetOptions{}) @@ -240,7 +219,7 @@ func (ctrl *PodBuildController) IsBuildRunning(pool *mcfgv1.MachineConfigPool) ( // Starts a new build pod, assuming one is not found first. In that case, it returns an object reference to the preexisting build pod. func (ctrl *PodBuildController) StartBuild(ibr ImageBuildRequest) (*corev1.ObjectReference, error) { - targetMC := ibr.Pool.Spec.Configuration.Name + targetMC := ibr.MachineOSBuild.Spec.DesiredConfig.Name // TODO: Find a constant for this: if !strings.HasPrefix(targetMC, "rendered-") { @@ -255,20 +234,20 @@ func (ctrl *PodBuildController) StartBuild(ibr ImageBuildRequest) (*corev1.Objec // This means we found a preexisting build pod. if pod != nil && err == nil && hasAllRequiredOSBuildLabels(pod.Labels) { - klog.Infof("Found preexisting build pod (%s) for pool %s", pod.Name, ibr.Pool.Name) + klog.Infof("Found preexisting build pod (%s) for pool %s", pod.Name, ibr.MachineOSConfig.Spec.MachineConfigPool.Name) return toObjectRef(pod), nil } - klog.Infof("Starting build for pool %s", ibr.Pool.Name) + klog.Infof("Starting build for pool %s", ibr.MachineOSConfig.Spec.MachineConfigPool.Name) klog.Infof("Build pod name: %s", ibr.getBuildName()) - klog.Infof("Final image will be pushed to %q, using secret %q", ibr.FinalImage.Pullspec, ibr.FinalImage.PullSecret.Name) + klog.Infof("Final image will be pushed to %q, using secret %q", ibr.MachineOSConfig.Status.CurrentImagePullspec, ibr.MachineOSConfig.Spec.BuildOutputs.CurrentImagePullSecret.Name) pod, err = ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Create(context.TODO(), ibr.toBuildPod(), metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("could not create build pod: %w", err) } - klog.Infof("Build started for pool %s in %s!", ibr.Pool.Name, pod.Name) + klog.Infof("Build started for pool %s in %s!", ibr.MachineOSConfig.Spec.MachineConfigPool.Name, pod.Name) return toObjectRef(pod), nil } diff --git a/pkg/controller/build/pool_state.go b/pkg/controller/build/pool_state.go deleted file mode 100644 index ae085df2b9..0000000000 --- a/pkg/controller/build/pool_state.go +++ /dev/null @@ -1,243 +0,0 @@ -package build - -import ( - "fmt" - - mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - "github.com/openshift/machine-config-operator/pkg/apihelpers" - ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" - corev1 "k8s.io/api/core/v1" -) - -type poolState struct { - *ctrlcommon.LayeredPoolState - pool *mcfgv1.MachineConfigPool -} - -func newPoolState(pool *mcfgv1.MachineConfigPool) *poolState { - copied := pool.DeepCopy() - return &poolState{ - LayeredPoolState: ctrlcommon.NewLayeredPoolState(copied), - pool: copied, - } -} - -// Returns the name of the MachineConfigPool. -func (p *poolState) Name() string { - return p.pool.GetName() -} - -// Returns the name of the current MachineConfig. -func (p *poolState) CurrentMachineConfig() string { - return p.pool.Spec.Configuration.Name -} - -// Returns a deep-copy of the MachineConfigPool with any changes made, -// propagating spec.Configurtion to status.Configuration in the process. -func (p *poolState) MachineConfigPool() *mcfgv1.MachineConfigPool { - out := p.pool.DeepCopy() - out.Spec.Configuration.DeepCopyInto(&out.Status.Configuration) - return out -} - -// Sets the image pullspec annotation. -func (p *poolState) SetImagePullspec(pullspec string) { - if p.pool.Annotations == nil { - p.pool.Annotations = map[string]string{} - } - - p.pool.Annotations[ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey] = pullspec -} - -// Clears the image pullspec annotation. -func (p *poolState) ClearImagePullspec() { - if p.pool.Annotations == nil { - return - } - - delete(p.pool.Annotations, ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey) -} - -// Deletes a given build object reference by its name. -func (p *poolState) DeleteBuildRefByName(name string) { - p.pool.Spec.Configuration.Source = p.getFilteredObjectRefs(func(objRef corev1.ObjectReference) bool { - return objRef.Name != name && !p.isBuildObjectRef(objRef) - }) -} - -// Determines if a MachineConfigPool contains a reference to a Build or custom -// build pod for its current rendered MachineConfig. -func (p *poolState) HasBuildObjectForCurrentMachineConfig() bool { - return p.HasBuildObjectRefName(newImageBuildRequest(p.pool).getBuildName()) -} - -// Determines if a MachineConfigPool has a build object reference given its -// name. -func (p *poolState) HasBuildObjectRefName(name string) bool { - for _, src := range p.pool.Spec.Configuration.Source { - if src.Name == name && p.isBuildObjectRef(src) { - return true - } - } - - return false -} - -// Searches for any object reference. -func (p *poolState) HasObjectRef(objRef corev1.ObjectReference) bool { - for _, src := range p.pool.Spec.Configuration.Source { - if src == objRef { - return true - } - } - - return false -} - -// Searches only for build object references. -func (p *poolState) HasBuildObjectRef(objRef corev1.ObjectReference) bool { - for _, src := range p.pool.Spec.Configuration.Source { - if src == objRef && p.isBuildObjectRef(src) { - return true - } - } - - return false -} - -// Deletes the build object reference for the build belonging to the current MachineConfig. -func (p *poolState) DeleteBuildRefForCurrentMachineConfig() { - p.DeleteBuildRefByName(newImageBuildRequest(p.pool).getBuildName()) -} - -// Deletes all build object references. -func (p *poolState) DeleteAllBuildRefs() { - p.pool.Spec.Configuration.Source = p.getFilteredObjectRefs(func(objRef corev1.ObjectReference) bool { - return objRef.Kind == "MachineConfig" - }) -} - -// Deletes a given object reference. -func (p *poolState) DeleteObjectRef(toDelete corev1.ObjectReference) { - p.pool.Spec.Configuration.Source = p.getFilteredObjectRefs(func(objRef corev1.ObjectReference) bool { - return objRef != toDelete - }) -} - -// Gets all build object references. -func (p *poolState) GetBuildObjectRefs() []corev1.ObjectReference { - return p.getFilteredObjectRefs(func(objRef corev1.ObjectReference) bool { - return p.isBuildObjectRef(objRef) - }) -} - -// Adds a build object reference. Returns an error if a build object reference -// already exists. -func (p *poolState) AddBuildObjectRef(objRef corev1.ObjectReference) error { - if !p.isBuildObjectRef(objRef) { - return fmt.Errorf("%s not a valid build object kind", objRef.Kind) - } - - buildRefs := p.GetBuildObjectRefs() - if len(buildRefs) != 0 { - return fmt.Errorf("cannot add build ObjectReference, found: %v", buildRefs) - } - - p.pool.Spec.Configuration.Source = append(p.pool.Spec.Configuration.Source, objRef) - return nil -} - -// Clears all build object conditions. -func (p *poolState) ClearAllBuildConditions() { - p.pool.Status.Conditions = clearAllBuildConditions(p.pool.Status.Conditions) -} - -// Idempotently sets the supplied build conditions. -func (p *poolState) SetBuildConditions(conditions []mcfgv1.MachineConfigPoolCondition) { - for _, condition := range conditions { - condition := condition - currentCondition := apihelpers.GetMachineConfigPoolCondition(p.pool.Status, condition.Type) - if currentCondition != nil && isConditionEqual(*currentCondition, condition) { - continue - } - - mcpCondition := apihelpers.NewMachineConfigPoolCondition(condition.Type, condition.Status, condition.Reason, condition.Message) - apihelpers.SetMachineConfigPoolCondition(&p.pool.Status, *mcpCondition) - } -} - -// Gets all build conditions. -func (p *poolState) GetAllBuildConditions() []mcfgv1.MachineConfigPoolCondition { - buildConditions := []mcfgv1.MachineConfigPoolCondition{} - - for _, condition := range p.pool.Status.Conditions { - if p.isBuildCondition(condition) { - buildConditions = append(buildConditions, condition) - } - } - - return buildConditions -} - -func (p *poolState) isBuildCondition(cond mcfgv1.MachineConfigPoolCondition) bool { - for _, condType := range getMachineConfigPoolBuildConditions() { - if cond.Type == condType { - return true - } - } - - return false -} - -func (p *poolState) isBuildObjectRef(objRef corev1.ObjectReference) bool { - if objRef.Kind == "Pod" { - return true - } - - if objRef.Kind == "Build" { - return true - } - - return false -} - -func (p *poolState) getFilteredObjectRefs(filterFunc func(objRef corev1.ObjectReference) bool) []corev1.ObjectReference { - refs := []corev1.ObjectReference{} - - for _, src := range p.pool.Spec.Configuration.Source { - if filterFunc(src) { - refs = append(refs, src) - } - } - - return refs -} - -// Determines if two conditions are equal. Note: I purposely do not include the -// timestamp in the equality test, since we do not directly set it. -func isConditionEqual(cond1, cond2 mcfgv1.MachineConfigPoolCondition) bool { - return cond1.Type == cond2.Type && - cond1.Status == cond2.Status && - cond1.Message == cond2.Message && - cond1.Reason == cond2.Reason -} - -func clearAllBuildConditions(inConditions []mcfgv1.MachineConfigPoolCondition) []mcfgv1.MachineConfigPoolCondition { - conditions := []mcfgv1.MachineConfigPoolCondition{} - - for _, condition := range inConditions { - buildConditionFound := false - for _, buildConditionType := range getMachineConfigPoolBuildConditions() { - if condition.Type == buildConditionType { - buildConditionFound = true - break - } - } - - if !buildConditionFound { - conditions = append(conditions, condition) - } - } - - return conditions -} diff --git a/pkg/controller/build/pool_state_test.go b/pkg/controller/build/pool_state_test.go deleted file mode 100644 index b6eb3e3ac4..0000000000 --- a/pkg/controller/build/pool_state_test.go +++ /dev/null @@ -1,188 +0,0 @@ -package build - -import ( - "testing" - - mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - "github.com/openshift/machine-config-operator/test/helpers" - "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" -) - -func TestPoolState(t *testing.T) { - t.Parallel() - - mcp := helpers.NewMachineConfigPoolBuilder("worker").WithMachineConfig("rendered-worker-1").MachineConfigPool() - mcp.Spec.Configuration.Source = []corev1.ObjectReference{ - { - Name: "mc-1", - Kind: "MachineConfig", - }, - { - Name: "mc-2", - Kind: "MachineConfig", - }, - } - - assert.NotEqual(t, mcp.Spec.Configuration, mcp.Status.Configuration) - - ps := newPoolState(mcp) - - ps.SetImagePullspec("registry.host.com/org/repo:tag") - assert.True(t, ps.HasOSImage()) - assert.Equal(t, "registry.host.com/org/repo:tag", ps.GetOSImage()) -} - -func TestPoolStateBuildRefs(t *testing.T) { - t.Parallel() - - mcRefs := []corev1.ObjectReference{ - { - Name: "mc-1", - Kind: "MachineConfig", - }, - { - Name: "mc-2", - Kind: "MachineConfig", - }, - } - - mcp := helpers.NewMachineConfigPoolBuilder("worker").WithMachineConfig("rendered-worker-1").MachineConfigPool() - mcp.Spec.Configuration.Source = append(mcp.Spec.Configuration.Source, mcRefs...) - - assert.NotEqual(t, mcp.Spec.Configuration, mcp.Status.Configuration) - - buildPodRef := corev1.ObjectReference{ - Kind: "Pod", - Name: "build-pod", - } - - buildRef := corev1.ObjectReference{ - Kind: "Build", - Name: "build", - } - - buildRefTests := []struct { - buildRef corev1.ObjectReference - errExpected bool - }{ - { - buildRef: buildPodRef, - }, - { - buildRef: buildRef, - }, - { - buildRef: corev1.ObjectReference{ - Kind: "MachineConfig", - Name: "mc-1", - }, - errExpected: true, - }, - } - - for _, buildRefTest := range buildRefTests { - t.Run("BuildRefTest", func(t *testing.T) { - ps := newPoolState(mcp) - if buildRefTest.errExpected { - assert.Error(t, ps.AddBuildObjectRef(buildRefTest.buildRef)) - return - } - - assert.NoError(t, ps.AddBuildObjectRef(buildRefTest.buildRef), "initial insertion should not error") - assert.Equal(t, append(mcp.Spec.Configuration.Source, buildRefTest.buildRef), ps.MachineConfigPool().Spec.Configuration.Source) - assert.Equal(t, append(mcp.Spec.Configuration.Source, buildRefTest.buildRef), ps.MachineConfigPool().Status.Configuration.Source) - - assert.NotEqual(t, mcp.Spec.Configuration.Source, ps.MachineConfigPool().Spec.Configuration.Source, "should not mutate the underlying MCP") - assert.NotEqual(t, mcp.Status.Configuration.Source, ps.MachineConfigPool().Status.Configuration.Source, "should not mutate the underlying MCP") - - assert.Equal(t, []corev1.ObjectReference{buildRefTest.buildRef}, ps.GetBuildObjectRefs(), "expected build refs to include the inserted ref") - - assert.True(t, ps.HasBuildObjectRef(buildRefTest.buildRef)) - assert.True(t, ps.HasBuildObjectRefName(buildRefTest.buildRef.Name)) - assert.False(t, ps.HasBuildObjectRef(mcRefs[0]), "MachineConfigs should not match build objects") - assert.False(t, ps.HasBuildObjectRefName(mcRefs[0].Name), "MachineConfigs should not match build objects") - - assert.Error(t, ps.AddBuildObjectRef(buildPodRef), "should not be able to insert more than one build ref") - assert.Error(t, ps.AddBuildObjectRef(buildRef), "should not be able to insert more than one build ref") - ps.DeleteObjectRef(buildRefTest.buildRef) - assert.Equal(t, mcp.Spec.Configuration.Source, ps.pool.Spec.Configuration.Source) - - assert.NoError(t, ps.AddBuildObjectRef(buildRefTest.buildRef)) - ps.DeleteBuildRefByName(buildRefTest.buildRef.Name) - assert.False(t, ps.HasBuildObjectRef(buildRefTest.buildRef)) - assert.False(t, ps.HasBuildObjectRefName(buildRefTest.buildRef.Name)) - assert.Equal(t, mcp.Spec.Configuration.Source, ps.pool.Spec.Configuration.Source) - - assert.NoError(t, ps.AddBuildObjectRef(buildRefTest.buildRef)) - ps.DeleteAllBuildRefs() - assert.False(t, ps.HasBuildObjectRef(buildRefTest.buildRef)) - assert.False(t, ps.HasBuildObjectRefName(buildRefTest.buildRef.Name)) - assert.Equal(t, mcp.Spec.Configuration.Source, ps.pool.Spec.Configuration.Source) - - outMCP := ps.MachineConfigPool() - assert.Equal(t, outMCP.Spec.Configuration, outMCP.Status.Configuration) - }) - } -} - -func TestPoolStateConditions(t *testing.T) { - t.Parallel() - - mcp := helpers.NewMachineConfigPoolBuilder("worker").WithMachineConfig("rendered-worker-1").MachineConfigPool() - ps := newPoolState(mcp) - - conditionTests := []struct { - condType mcfgv1.MachineConfigPoolConditionType - checkFunc func() bool - }{ - { - condType: mcfgv1.MachineConfigPoolBuildFailed, - checkFunc: ps.IsBuildFailure, - }, - { - condType: mcfgv1.MachineConfigPoolBuildPending, - checkFunc: ps.IsBuildPending, - }, - { - condType: mcfgv1.MachineConfigPoolBuildSuccess, - checkFunc: ps.IsBuildSuccess, - }, - { - condType: mcfgv1.MachineConfigPoolBuilding, - checkFunc: ps.IsBuilding, - }, - } - - for _, condTest := range conditionTests { - t.Run(string(condTest.condType), func(t *testing.T) { - ps.SetBuildConditions([]mcfgv1.MachineConfigPoolCondition{ - { - Status: corev1.ConditionTrue, - Type: condTest.condType, - }, - }) - - assert.True(t, condTest.checkFunc()) - - ps.SetBuildConditions([]mcfgv1.MachineConfigPoolCondition{ - { - Status: corev1.ConditionFalse, - Type: condTest.condType, - }, - }) - - assert.False(t, condTest.checkFunc()) - }) - } - - ps.ClearAllBuildConditions() - buildConditionTypes := getMachineConfigPoolBuildConditions() - for _, condition := range mcp.Status.Conditions { - for _, conditionType := range buildConditionTypes { - if conditionType == condition.Type { - t.Fatalf("expected not to find any build conditions, found: %v", conditionType) - } - } - } -} diff --git a/pkg/controller/common/helpers.go b/pkg/controller/common/helpers.go index a094c347ea..fbf090a6ae 100644 --- a/pkg/controller/common/helpers.go +++ b/pkg/controller/common/helpers.go @@ -13,6 +13,7 @@ import ( "net/url" "os" "reflect" + "sort" "strings" "text/template" @@ -1168,13 +1169,6 @@ func (n namespacedEventRecorder) AnnotatedEventf(object runtime.Object, annotati n.delegate.AnnotatedEventf(ensureEventNamespace(object), annotations, eventtype, reason, messageFmt, args...) } -func IsLayeredPool(pool *mcfgv1.MachineConfigPool) bool { - if _, ok := pool.Labels[LayeringEnabledPoolLabel]; ok { - return true - } - return false -} - func DoARebuild(pool *mcfgv1.MachineConfigPool) bool { _, ok := pool.Labels[RebuildPoolLabel] return ok diff --git a/pkg/controller/common/layered_node_state.go b/pkg/controller/common/layered_node_state.go index 79c9e4e116..49d6a0f1e9 100644 --- a/pkg/controller/common/layered_node_state.go +++ b/pkg/controller/common/layered_node_state.go @@ -4,6 +4,7 @@ import ( "fmt" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" daemonconsts "github.com/openshift/machine-config-operator/pkg/daemon/constants" corev1 "k8s.io/api/core/v1" ) @@ -26,8 +27,8 @@ func NewLayeredNodeState(n *corev1.Node) *LayeredNodeState { // Augements the isNodeDoneAt() check with determining if the current / desired // image annotations match the pools' values. -func (l *LayeredNodeState) IsDoneAt(mcp *mcfgv1.MachineConfigPool) bool { - return isNodeDoneAt(l.node, mcp) && l.isDesiredImageEqualToPool(mcp) && l.isCurrentImageEqualToPool(mcp) +func (l *LayeredNodeState) IsDoneAt(mcp *mcfgv1.MachineConfigPool, layered bool) bool { + return isNodeDoneAt(l.node, mcp) && l.isDesiredImageEqualToPool(mcp, layered) && l.isCurrentImageEqualToPool(mcp, layered) } // The original behavior of getUnavailableMachines is: getUnavailableMachines @@ -39,14 +40,27 @@ func (l *LayeredNodeState) IsDoneAt(mcp *mcfgv1.MachineConfigPool) bool { // // This augments this check by determining if the desired iamge annotation is // equal to what the pool expects. -func (l *LayeredNodeState) IsUnavailable(mcp *mcfgv1.MachineConfigPool) bool { - return isNodeUnavailable(l.node) && l.isDesiredImageEqualToPool(mcp) +func (l *LayeredNodeState) IsUnavailable(mcp *mcfgv1.MachineConfigPool, layered bool) bool { + return isNodeUnavailable(l.node) && l.isDesiredImageEqualToPool(mcp, layered) } // Checks that the desired machineconfig and image annotations equal the ones // specified by the pool. -func (l *LayeredNodeState) IsDesiredEqualToPool(mcp *mcfgv1.MachineConfigPool) bool { - return l.isDesiredMachineConfigEqualToPool(mcp) && l.isDesiredImageEqualToPool(mcp) +func (l *LayeredNodeState) IsDesiredEqualToPool(mcp *mcfgv1.MachineConfigPool, layered bool) bool { + return l.isDesiredMachineConfigEqualToPool(mcp) && l.isDesiredImageEqualToPool(mcp, layered) +} + +func (l *LayeredNodeState) IsDesiredEqualToBuild(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) bool { + return l.isDesiredImageEqualToBuild(mosc) && l.isDesiredMachineConfigEqualToBuild(mosb) +} + +func (l *LayeredNodeState) isDesiredImageEqualToBuild(mosc *mcfgv1alpha1.MachineOSConfig) bool { + return l.isImageAnnotationEqualToBuild(daemonconsts.DesiredImageAnnotationKey, mosc) +} + +func (l *LayeredNodeState) isDesiredMachineConfigEqualToBuild(mosb *mcfgv1alpha1.MachineOSBuild) bool { + return l.node.Annotations[daemonconsts.DesiredMachineConfigAnnotationKey] == mosb.Spec.DesiredConfig.Name + } // Compares the MachineConfig specified by the MachineConfigPool to the one @@ -57,26 +71,26 @@ func (l *LayeredNodeState) isDesiredMachineConfigEqualToPool(mcp *mcfgv1.Machine // Determines if the nodes desired image is equal to the expected value from // the MachineConfigPool. -func (l *LayeredNodeState) isDesiredImageEqualToPool(mcp *mcfgv1.MachineConfigPool) bool { - return l.isImageAnnotationEqualToPool(daemonconsts.DesiredImageAnnotationKey, mcp) +func (l *LayeredNodeState) isDesiredImageEqualToPool(mcp *mcfgv1.MachineConfigPool, layered bool) bool { + return l.isImageAnnotationEqualToPool(daemonconsts.DesiredImageAnnotationKey, mcp, layered) } // Determines if the nodes current image is equal to the expected value from // the MachineConfigPool. -func (l *LayeredNodeState) isCurrentImageEqualToPool(mcp *mcfgv1.MachineConfigPool) bool { - return l.isImageAnnotationEqualToPool(daemonconsts.CurrentImageAnnotationKey, mcp) +func (l *LayeredNodeState) isCurrentImageEqualToPool(mcp *mcfgv1.MachineConfigPool, layered bool) bool { + return l.isImageAnnotationEqualToPool(daemonconsts.CurrentImageAnnotationKey, mcp, layered) } // Determines if a nodes' image annotation is equal to the expected value from // the MachineConfigPool. If the pool is layered, this value should equal the // OS image value, if the value is available. If the pool is not layered, then // any image annotations should not be present on the node. -func (l *LayeredNodeState) isImageAnnotationEqualToPool(anno string, mcp *mcfgv1.MachineConfigPool) bool { +func (l *LayeredNodeState) isImageAnnotationEqualToPool(anno string, mcp *mcfgv1.MachineConfigPool, layered bool) bool { lps := NewLayeredPoolState(mcp) val, ok := l.node.Annotations[anno] - if lps.IsLayered() && lps.HasOSImage() { + if lps.HasOSImage() { // If the pool is layered and has an OS image, check that it equals the // node annotations' value. return lps.GetOSImage() == val @@ -86,6 +100,21 @@ func (l *LayeredNodeState) isImageAnnotationEqualToPool(anno string, mcp *mcfgv1 return val == "" || !ok } +func (l *LayeredNodeState) isImageAnnotationEqualToBuild(anno string, mosc *mcfgv1alpha1.MachineOSConfig) bool { + mosbs := NewMachineOSConfigState(mosc) + + val, ok := l.node.Annotations[anno] + + if mosbs.HasOSImage() { + // If the pool is layered and has an OS image, check that it equals the + // node annotations' value. + return mosbs.GetOSImage() == val + } + + // If the pool is not layered, this annotation should not exist. + return val == "" || !ok +} + // Sets the desired annotations from the MachineConfigPool, according to the // following rules: // @@ -98,7 +127,7 @@ func (l *LayeredNodeState) isImageAnnotationEqualToPool(anno string, mcp *mcfgv1 // // Note: This will create a deep copy of the node object first to avoid // mutating any underlying caches. -func (l *LayeredNodeState) SetDesiredStateFromPool(mcp *mcfgv1.MachineConfigPool) { +func (l *LayeredNodeState) SetDesiredStateFromPool(layered bool, mcp *mcfgv1.MachineConfigPool) { node := l.Node() if node.Annotations == nil { node.Annotations = map[string]string{} @@ -108,7 +137,7 @@ func (l *LayeredNodeState) SetDesiredStateFromPool(mcp *mcfgv1.MachineConfigPool lps := NewLayeredPoolState(mcp) - if lps.IsLayered() && lps.HasOSImage() { + if lps.HasOSImage() { node.Annotations[daemonconsts.DesiredImageAnnotationKey] = lps.GetOSImage() } else { delete(node.Annotations, daemonconsts.DesiredImageAnnotationKey) @@ -117,6 +146,24 @@ func (l *LayeredNodeState) SetDesiredStateFromPool(mcp *mcfgv1.MachineConfigPool l.node = node } +func (l *LayeredNodeState) SetDesiredStateFromMachineOSConfig(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) { + node := l.Node() + if node.Annotations == nil { + node.Annotations = map[string]string{} + } + + node.Annotations[daemonconsts.DesiredMachineConfigAnnotationKey] = mosb.Spec.DesiredConfig.Name + moscs := NewMachineOSConfigState(mosc) + + if moscs.HasOSImage() { + node.Annotations[daemonconsts.DesiredImageAnnotationKey] = moscs.GetOSImage() + } else { + delete(node.Annotations, daemonconsts.DesiredImageAnnotationKey) + } + + l.node = node +} + // Returns a deep copy of the underlying node object. func (l *LayeredNodeState) Node() *corev1.Node { return l.node.DeepCopy() diff --git a/pkg/controller/common/layered_pool_state.go b/pkg/controller/common/layered_pool_state.go index ef7925e582..497ae016a8 100644 --- a/pkg/controller/common/layered_pool_state.go +++ b/pkg/controller/common/layered_pool_state.go @@ -18,20 +18,6 @@ func NewLayeredPoolState(pool *mcfgv1.MachineConfigPool) *LayeredPoolState { return &LayeredPoolState{pool: pool} } -// Determines if a MachineConfigPool is layered by looking for the layering -// enabled label. -func (l *LayeredPoolState) IsLayered() bool { - if l.pool == nil { - return false - } - - if l.pool.Labels == nil { - return false - } - - return IsLayeredPool(l.pool) -} - // Returns the OS image, if one is present. func (l *LayeredPoolState) GetOSImage() string { osImage := l.pool.Annotations[ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey] diff --git a/pkg/controller/common/layered_pool_state_test.go b/pkg/controller/common/layered_pool_state_test.go index 3c44f60168..d38ed7ecc7 100644 --- a/pkg/controller/common/layered_pool_state_test.go +++ b/pkg/controller/common/layered_pool_state_test.go @@ -10,7 +10,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -func TestLayeredPoolState(t *testing.T) { +func TestMachineOSBuildState(t *testing.T) { t.Parallel() tests := []struct { @@ -83,7 +83,7 @@ func TestLayeredPoolState(t *testing.T) { apihelpers.SetMachineConfigPoolCondition(&test.pool.Status, *cond) } - lps := NewLayeredPoolState(test.pool) + lps := NewMachineOSBuildState(test.pool) assert.Equal(t, test.isLayered, lps.IsLayered(), "is layered mismatch %s", spew.Sdump(test.pool.Labels)) assert.Equal(t, test.hasOSImage, lps.HasOSImage(), "has OS image mismatch %s", spew.Sdump(test.pool.Annotations)) diff --git a/pkg/controller/common/mos_state.go b/pkg/controller/common/mos_state.go new file mode 100644 index 0000000000..83981cb96f --- /dev/null +++ b/pkg/controller/common/mos_state.go @@ -0,0 +1,221 @@ +package common + +import ( + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + "github.com/openshift/machine-config-operator/pkg/apihelpers" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// This is intended to provide a singular way to interrogate MachineConfigPool +// objects to determine if they're in a specific state or not. The eventual +// goal is to use this to mutate the MachineConfigPool object to provide a +// single and consistent interface for that purpose. In this current state, we +// do not perform any mutations. +type MachineOSBuildState struct { + Build *mcfgv1alpha1.MachineOSBuild +} + +type MachineOSConfigState struct { + Config *mcfgv1alpha1.MachineOSConfig +} + +func NewMachineOSConfigState(mosc *mcfgv1alpha1.MachineOSConfig) *MachineOSConfigState { + return &MachineOSConfigState{ + Config: mosc, + } +} + +func NewMachineOSBuildState(mosb *mcfgv1alpha1.MachineOSBuild) *MachineOSBuildState { + return &MachineOSBuildState{ + Build: mosb, + } +} + +// Returns the OS image, if one is present. +func (l *MachineOSConfigState) GetOSImage() string { + osImage := l.Config.Status.CurrentImagePullspec + return osImage +} + +// Determines if an OS image build is a success. +func (l *MachineOSBuildState) IsBuildSuccess() bool { + return apihelpers.IsMachineOSBuildConditionTrue(l.Build.Status.Conditions, mcfgv1alpha1.MachineOSBuildSucceeded) +} + +// Determines if an OS image build is pending. +func (l *MachineOSBuildState) IsBuildPending() bool { + return apihelpers.IsMachineOSBuildConditionTrue(l.Build.Status.Conditions, mcfgv1alpha1.MachineOSBuilding) +} + +// Determines if an OS image build is in progress. +func (l *MachineOSBuildState) IsBuilding() bool { + return apihelpers.IsMachineOSBuildConditionTrue(l.Build.Status.Conditions, mcfgv1alpha1.MachineOSBuilding) +} + +// Determines if an OS image build has failed. +func (l *MachineOSBuildState) IsBuildFailure() bool { + return apihelpers.IsMachineOSBuildConditionTrue(l.Build.Status.Conditions, mcfgv1alpha1.MachineOSBuildFailed) +} + +// Determines if an OS image build has failed. +func (l *MachineOSBuildState) IsBuildInterrupted() bool { + return apihelpers.IsMachineOSBuildConditionTrue(l.Build.Status.Conditions, mcfgv1alpha1.MachineOSBuildInterrupted) +} + +func (l *MachineOSBuildState) IsAnyDegraded() bool { + condTypes := []mcfgv1alpha1.BuildProgress{ + mcfgv1alpha1.MachineOSBuildFailed, + mcfgv1alpha1.MachineOSBuildInterrupted, + } + + for _, condType := range condTypes { + if apihelpers.IsMachineOSBuildConditionTrue(l.Build.Status.Conditions, condType) { + return true + } + } + + return false +} + +// Idempotently sets the supplied build conditions. +func (b *MachineOSBuildState) SetBuildConditions(conditions []metav1.Condition) { + for _, condition := range conditions { + condition := condition + currentCondition := apihelpers.GetMachineOSBuildCondition(b.Build.Status, mcfgv1alpha1.BuildProgress(condition.Type)) + if currentCondition != nil && isConditionEqual(*currentCondition, condition) { + continue + } + + mosbCondition := apihelpers.NewMachineOSBuildCondition(condition.Type, condition.Status, condition.Reason, condition.Message) + apihelpers.SetMachineOSBuildCondition(&b.Build.Status, *mosbCondition) + } +} + +// Determines if two conditions are equal. Note: I purposely do not include the +// timestamp in the equality test, since we do not directly set it. +func isConditionEqual(cond1, cond2 metav1.Condition) bool { + return cond1.Type == cond2.Type && + cond1.Status == cond2.Status && + cond1.Message == cond2.Message && + cond1.Reason == cond2.Reason +} + +// Determines if a given MachineConfigPool has an available OS image. Returns +// false if the annotation is missing or set to an empty string. +func (m *MachineOSConfigState) HasOSImage() bool { + val := m.Config.Status.CurrentImagePullspec + return val != "" +} + +// Clears the image pullspec annotation. +func (m *MachineOSConfigState) ClearImagePullspec() { + m.Config.Spec.BuildInputs.RenderedImagePushspec = "" + m.Config.Status.CurrentImagePullspec = "" +} + +// Clears all build object conditions. +func (m *MachineOSBuildState) ClearAllBuildConditions() { + m.Build.Status.Conditions = clearAllBuildConditions(m.Build.Status.Conditions) +} + +func clearAllBuildConditions(inConditions []metav1.Condition) []metav1.Condition { + conditions := []metav1.Condition{} + + for _, condition := range inConditions { + buildConditionFound := false + for _, buildConditionType := range getMachineConfigBuildConditions() { + if condition.Type == string(buildConditionType) { + buildConditionFound = true + break + } + } + + if !buildConditionFound { + conditions = append(conditions, condition) + } + } + + return conditions +} + +func getMachineConfigBuildConditions() []mcfgv1alpha1.BuildProgress { + return []mcfgv1alpha1.BuildProgress{ + mcfgv1alpha1.MachineOSBuildFailed, + mcfgv1alpha1.MachineOSBuildInterrupted, + mcfgv1alpha1.MachineOSBuildPrepared, + mcfgv1alpha1.MachineOSBuildSucceeded, + mcfgv1alpha1.MachineOSBuilding, + } +} + +func IsPoolAnyDegraded(pool *mcfgv1.MachineConfigPool) bool { + condTypes := []mcfgv1.MachineConfigPoolConditionType{ + mcfgv1.MachineConfigPoolDegraded, + mcfgv1.MachineConfigPoolNodeDegraded, + mcfgv1.MachineConfigPoolRenderDegraded, + } + + for _, condType := range condTypes { + if apihelpers.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, condType) { + return true + } + } + + return false +} + +// Determine if we have a config change. +func IsPoolConfigChange(oldPool, curPool *mcfgv1.MachineConfigPool) bool { + return oldPool.Spec.Configuration.Name != curPool.Spec.Configuration.Name +} + +func HasBuildObjectForCurrentMachineConfig(pool *mcfgv1.MachineConfigPool, mosb *mcfgv1alpha1.MachineOSBuild) bool { + return pool.Spec.Configuration.Name == mosb.Spec.DesiredConfig.Name +} + +// Determines if we should do a build based upon the state of our +// MachineConfigPool, the presence of a build pod, etc. +func BuildDueToPoolChange(builder interface { + IsBuildRunning(*mcfgv1alpha1.MachineOSBuild, *mcfgv1alpha1.MachineOSConfig) (bool, error) +}, oldPool, curPool *mcfgv1.MachineConfigPool, moscNew *mcfgv1alpha1.MachineOSConfig, mosbNew *mcfgv1alpha1.MachineOSBuild) (bool, error) { + + moscState := NewMachineOSConfigState(moscNew) + mosbState := NewMachineOSBuildState(mosbNew) + + // If we don't have a layered pool, we should not build. + poolStateSuggestsBuild := canPoolBuild(curPool, moscState, mosbState) && + // If we have a config change or we're missing an image pullspec label, we + // should do a build. + (IsPoolConfigChange(oldPool, curPool) || !moscState.HasOSImage()) + + return poolStateSuggestsBuild, nil + +} + +// Checks our pool to see if we can do a build. We base this off of a few criteria: +// 1. Is the pool opted into layering? +// 2. Do we have an object reference to an in-progress build? +// 3. Is the pool degraded? +// 4. Is our build in a specific state? +// +// Returns true if we are able to build. +func canPoolBuild(pool *mcfgv1.MachineConfigPool, moscNewState *MachineOSConfigState, mosbNewState *MachineOSBuildState) bool { + // If we don't have a layered pool, we should not build. + if !IsLayeredPool(moscNewState.Config, mosbNewState.Build) { + return false + } + // If the pool is degraded, we should not build. + if IsPoolAnyDegraded(pool) { + return false + } + // If the new pool has an ongoing build, we should not build + if mosbNewState.Build != nil { + return false + } + return true +} + +func IsLayeredPool(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) bool { + return (mosc != nil || mosb != nil) +} diff --git a/pkg/controller/node/node_controller.go b/pkg/controller/node/node_controller.go index 78d705cb3f..b7d93b6fc9 100644 --- a/pkg/controller/node/node_controller.go +++ b/pkg/controller/node/node_controller.go @@ -13,11 +13,16 @@ import ( configv1 "github.com/openshift/api/config/v1" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + mcfginformersv1alpha1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1" + cligoinformersv1 "github.com/openshift/client-go/config/informers/externalversions/config/v1" cligolistersv1 "github.com/openshift/client-go/config/listers/config/v1" mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" "github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme" mcfginformersv1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1" + mcfglistersv1alpha1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1alpha1" + mcfglistersv1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1" "github.com/openshift/library-go/pkg/operator/v1helpers" "github.com/openshift/machine-config-operator/internal" @@ -85,11 +90,13 @@ type Controller struct { mcpLister mcfglistersv1.MachineConfigPoolLister nodeLister corelisterv1.NodeLister podLister corelisterv1.PodLister + mosbLister mcfglistersv1alpha1.MachineOSBuildLister ccListerSynced cache.InformerSynced mcListerSynced cache.InformerSynced mcpListerSynced cache.InformerSynced nodeListerSynced cache.InformerSynced + mosbListerSynced cache.InformerSynced schedulerList cligolistersv1.SchedulerLister schedulerListerSynced cache.InformerSynced @@ -109,6 +116,7 @@ func New( mcpInformer mcfginformersv1.MachineConfigPoolInformer, nodeInformer coreinformersv1.NodeInformer, podInformer coreinformersv1.PodInformer, + mosbInformer mcfginformersv1alpha1.MachineOSBuildInformer, schedulerInformer cligoinformersv1.SchedulerInformer, kubeClient clientset.Interface, mcfgClient mcfgclientset.Interface, @@ -118,6 +126,7 @@ func New( ccInformer, mcInformer, mcpInformer, + mosbInformer, nodeInformer, podInformer, schedulerInformer, @@ -134,6 +143,7 @@ func NewWithCustomUpdateDelay( mcpInformer mcfginformersv1.MachineConfigPoolInformer, nodeInformer coreinformersv1.NodeInformer, podInformer coreinformersv1.PodInformer, + mosbInformer mcfginformersv1alpha1.MachineOSBuildInformer, schedulerInformer cligoinformersv1.SchedulerInformer, kubeClient clientset.Interface, mcfgClient mcfgclientset.Interface, @@ -144,6 +154,7 @@ func NewWithCustomUpdateDelay( ccInformer, mcInformer, mcpInformer, + mosbInformer, nodeInformer, podInformer, schedulerInformer, @@ -159,6 +170,7 @@ func newController( ccInformer mcfginformersv1.ControllerConfigInformer, mcInformer mcfginformersv1.MachineConfigInformer, mcpInformer mcfginformersv1.MachineConfigPoolInformer, + mosbInformer mcfginformersv1alpha1.MachineOSBuildInformer, nodeInformer coreinformersv1.NodeInformer, podInformer coreinformersv1.PodInformer, schedulerInformer cligoinformersv1.SchedulerInformer, @@ -179,7 +191,11 @@ func newController( updateDelay: updateDelay, fgAcessor: fgAccessor, } - + mosbInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: ctrl.addMachineOSBuild, + UpdateFunc: ctrl.updateMachineOSBuild, + DeleteFunc: ctrl.deleteMachineOSBuild, + }) mcpInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: ctrl.addMachineConfigPool, UpdateFunc: ctrl.updateMachineConfigPool, @@ -208,6 +224,7 @@ func newController( ctrl.mcListerSynced = mcInformer.Informer().HasSynced ctrl.mcpListerSynced = mcpInformer.Informer().HasSynced ctrl.nodeListerSynced = nodeInformer.Informer().HasSynced + ctrl.mosbListerSynced = mosbInformer.Informer().HasSynced ctrl.schedulerList = schedulerInformer.Lister() ctrl.schedulerListerSynced = schedulerInformer.Informer().HasSynced @@ -360,6 +377,42 @@ func (ctrl *Controller) makeMasterNodeSchedulable(node *corev1.Node) error { return nil } +func (ctrl *Controller) addMachineOSBuild(obj interface{}) { + curMOSB := obj.(*mcfgv1alpha1.MachineOSBuild) + + config, _ := ctrl.client.MachineconfigurationV1alpha1().MachineOSConfigs().Get(context.TODO(), curMOSB.Spec.MachineOSConfig.Name, metav1.GetOptions{}) + + mcp, _ := ctrl.mcpLister.Get(config.Spec.MachineConfigPool.Name) + ctrl.enqueueMachineConfigPool(mcp) +} + +func (ctrl *Controller) updateMachineOSBuild(old, cur interface{}) { + curMOSB := cur.(*mcfgv1alpha1.MachineOSBuild) + + config, _ := ctrl.client.MachineconfigurationV1alpha1().MachineOSConfigs().Get(context.TODO(), curMOSB.Spec.MachineOSConfig.Name, metav1.GetOptions{}) + + mcp, _ := ctrl.mcpLister.Get(config.Spec.MachineConfigPool.Name) + ctrl.enqueueMachineConfigPool(mcp) +} + +func (ctrl *Controller) deleteMachineOSBuild(obj interface{}) { + pool, ok := obj.(*mcfgv1alpha1.MachineOSBuild) + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj)) + return + } + pool, ok = tombstone.Obj.(*mcfgv1alpha1.MachineOSBuild) + if !ok { + utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a MOSB %#v", obj)) + return + } + } + klog.V(4).Infof("Deleting MachineConfigPool %s", pool.Name) + // TODO(abhinavdahiya): handle deletes. +} + func (ctrl *Controller) addMachineConfigPool(obj interface{}) { pool := obj.(*mcfgv1.MachineConfigPool) klog.V(4).Infof("Adding MachineConfigPool %s", pool.Name) @@ -764,28 +817,81 @@ func (ctrl *Controller) handleErr(err error, key interface{}) { // 2. If a MachineConfig changes, we should wait for the OS image build to be // ready so we can update both the nodes' desired MachineConfig and desired // image annotations simultaneously. + +func (ctrl *Controller) GetConfigAndBuild(pool *mcfgv1.MachineConfigPool) (*mcfgv1alpha1.MachineOSConfig, *mcfgv1alpha1.MachineOSBuild, error) { + var ourConfig *mcfgv1alpha1.MachineOSConfig + var ourBuild *mcfgv1alpha1.MachineOSBuild + configList, err := ctrl.client.MachineconfigurationV1alpha1().MachineOSConfigs().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, nil, err + } + + for _, config := range configList.Items { + if config.Spec.MachineConfigPool.Name == pool.Name { + ourConfig = &config + break + } + } + + if ourConfig == nil { + return nil, nil, nil + } + + buildList, err := ctrl.client.MachineconfigurationV1alpha1().MachineOSBuilds().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, nil, err + } + + for _, build := range buildList.Items { + if build.Spec.MachineOSConfig.Name == ourConfig.Name { + if build.Spec.DesiredConfig.Name == pool.Spec.Configuration.Name { + ourBuild = &build + break + } + } + } + + return ourConfig, ourBuild, nil + +} + func (ctrl *Controller) canLayeredPoolContinue(pool *mcfgv1.MachineConfigPool) (string, bool, error) { - lps := ctrlcommon.NewLayeredPoolState(pool) - hasImage := lps.HasOSImage() - pullspec := lps.GetOSImage() + mosc, mosb, _ := ctrl.GetConfigAndBuild(pool) + + if mosc == nil || mosb == nil { + return "No MachineOSConfig or Build for this pool", false, nil + } + + cs := ctrlcommon.NewMachineOSConfigState(mosc) + + // annoying that we need to either a) pass aroung mosb lister and mosb lister EVERYWHERE + // or b) need to retrv one or the other depending on which is passed into the func. + // would be nice if we could a) at least centralize this code: ctrl.GetConfigAndBuild(pool) + // if we did this, we could potentially, just pass the pool around (as we used to) and shell out to the listers in the func to get the obj assoc. with the pool + //owner := mosb.OwnerReferences[0] + + bs := ctrlcommon.NewMachineOSBuildState(mosb) + + hasImage := cs.HasOSImage() + pullspec := cs.GetOSImage() if !hasImage { - return fmt.Sprintf("Image annotation %s is not set", ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey), false, nil + return "Desired Image not set in MachineOSBuild", false, nil } switch { // If the build is successful and we have the image pullspec, we can proceed // with rolling out the new OS image. - case lps.IsBuildSuccess() && hasImage: + case bs.IsBuildSuccess() && hasImage: msg := fmt.Sprintf("Image built successfully, pullspec: %s", pullspec) return msg, true, nil - case lps.IsBuildPending(): + case bs.IsBuildPending(): return "Image build pending", false, nil - case lps.IsBuilding(): + case bs.IsBuilding(): return "Image build in progress", false, nil - case lps.IsBuildFailure(): - return "Image build failed", false, fmt.Errorf("image build for MachineConfigPool %s failed", pool.Name) + case bs.IsBuildFailure(): + return "Image build failed", false, fmt.Errorf("image build for MachineConfigPool %s failed", mosb.Name) default: return "Image is not ready yet", false, nil } @@ -845,7 +951,9 @@ func (ctrl *Controller) syncMachineConfigPool(key string) error { return ctrl.syncStatusOnly(pool) } - if ctrlcommon.IsLayeredPool(pool) { + mosc, mosb, _ := ctrl.GetConfigAndBuild(pool) + + if ok := ctrl.IsLayeredPool(pool, mosc, mosb); ok { reason, canApplyUpdates, err := ctrl.canLayeredPoolContinue(pool) if err != nil { klog.Infof("Layered pool %s encountered an error: %s", pool.Name, err) @@ -892,8 +1000,10 @@ func (ctrl *Controller) syncMachineConfigPool(key string) error { hasInProgressTaint := checkIfNodeHasInProgressTaint(node) lns := ctrlcommon.NewLayeredNodeState(node) + config, build, _ := ctrl.GetConfigAndBuild(pool) - if lns.IsDesiredEqualToPool(pool) { + layered := ctrl.IsLayeredPool(pool, config, build) + if lns.IsDesiredEqualToPool(pool, layered) { if hasInProgressTaint { if err := ctrl.removeUpdateInProgressTaint(ctx, node.Name); err != nil { err = fmt.Errorf("failed removing %s taint for node %s: %w", constants.NodeUpdateInProgressTaint.Key, node.Name, err) @@ -909,7 +1019,13 @@ func (ctrl *Controller) syncMachineConfigPool(key string) error { } } } - candidates, capacity := getAllCandidateMachines(pool, nodes, maxunavail) + + // NOTE + // this needs to get triggered, the new os img needs to propogate here and be set on the candidate machines. + config, build, _ := ctrl.GetConfigAndBuild(pool) + + layered := ctrl.IsLayeredPool(pool, config, build) + candidates, capacity := getAllCandidateMachines(layered, config, build, pool, nodes, maxunavail) if len(candidates) > 0 { zones := make(map[string]bool) for _, candidate := range candidates { @@ -994,7 +1110,9 @@ func (ctrl *Controller) setClusterConfigAnnotation(nodes []*corev1.Node) error { return nil } -func (ctrl *Controller) updateCandidateNode(nodeName string, pool *mcfgv1.MachineConfigPool) error { +// updateCandidateNode needs to understand MOSB +// specifically, the LayeredNodeState probably needs to understand mosb +func (ctrl *Controller) updateCandidateNode(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild, nodeName string, pool *mcfgv1.MachineConfigPool) error { return clientretry.RetryOnConflict(constants.NodeUpdateBackoff, func() error { oldNode, err := ctrl.kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { @@ -1006,14 +1124,31 @@ func (ctrl *Controller) updateCandidateNode(nodeName string, pool *mcfgv1.Machin } lns := ctrlcommon.NewLayeredNodeState(oldNode) - if lns.IsDesiredEqualToPool(pool) { - // If the node's desired annotations match the pool, return directly without updating the node. - klog.Infof("no update is needed") - return nil + layered := ctrl.IsLayeredPool(pool, mosc, mosb) + if mosb == nil { + if lns.IsDesiredEqualToPool(pool, layered) { + // If the node's desired annotations match the pool, return directly without updating the node. + klog.Infof("no update is needed") + return nil + + } + lns.SetDesiredStateFromPool(layered, pool) + + } else { + if lns.IsDesiredEqualToBuild(mosc, mosb) { + // If the node's desired annotations match the pool, return directly without updating the node. + klog.Infof("no update is needed") + return nil + } + // ensure this is happening. it might not be. + // we need to ensure the node controller is triggered at all the same times + // when using this new system + // we know the mosc+mosb can trigger one another and cause a build, but if the node controller + // can't set this anno, and subsequently cannot trigger the daemon to update, we need to rework. + lns.SetDesiredStateFromMachineOSConfig(mosc, mosb) } // Set the desired state to match the pool. - lns.SetDesiredStateFromPool(pool) newData, err := json.Marshal(lns.Node()) if err != nil { @@ -1031,8 +1166,8 @@ func (ctrl *Controller) updateCandidateNode(nodeName string, pool *mcfgv1.Machin // getAllCandidateMachines returns all possible nodes which can be updated to the target config, along with a maximum // capacity. It is the reponsibility of the caller to choose a subset of the nodes given the capacity. -func getAllCandidateMachines(pool *mcfgv1.MachineConfigPool, nodesInPool []*corev1.Node, maxUnavailable int) ([]*corev1.Node, uint) { - unavail := getUnavailableMachines(nodesInPool, pool) +func getAllCandidateMachines(layered bool, config *mcfgv1alpha1.MachineOSConfig, build *mcfgv1alpha1.MachineOSBuild, pool *mcfgv1.MachineConfigPool, nodesInPool []*corev1.Node, maxUnavailable int) ([]*corev1.Node, uint) { + unavail := getUnavailableMachines(nodesInPool, pool, layered, build) if len(unavail) >= maxUnavailable { klog.Infof("No nodes available for updates") return nil, 0 @@ -1043,11 +1178,19 @@ func getAllCandidateMachines(pool *mcfgv1.MachineConfigPool, nodesInPool []*core var nodes []*corev1.Node for _, node := range nodesInPool { lns := ctrlcommon.NewLayeredNodeState(node) - if lns.IsDesiredEqualToPool(pool) { - if isNodeMCDFailing(node) { - failingThisConfig++ + if !layered { + if lns.IsDesiredEqualToPool(pool, layered) { + if isNodeMCDFailing(node) { + failingThisConfig++ + } + continue + } + } else { + if lns.IsDesiredEqualToBuild(config, build) { + // If the node's desired annotations match the pool, return directly without updating the node. + klog.Infof("no update is needed") + continue } - continue } nodes = append(nodes, node) } @@ -1062,8 +1205,8 @@ func getAllCandidateMachines(pool *mcfgv1.MachineConfigPool, nodesInPool []*core } // getCandidateMachines returns the maximum subset of nodes which can be updated to the target config given availability constraints. -func getCandidateMachines(pool *mcfgv1.MachineConfigPool, nodesInPool []*corev1.Node, maxUnavailable int) []*corev1.Node { - nodes, capacity := getAllCandidateMachines(pool, nodesInPool, maxUnavailable) +func getCandidateMachines(pool *mcfgv1.MachineConfigPool, config *mcfgv1alpha1.MachineOSConfig, build *mcfgv1alpha1.MachineOSBuild, nodesInPool []*corev1.Node, maxUnavailable int, layered bool) []*corev1.Node { + nodes, capacity := getAllCandidateMachines(layered, config, build, pool, nodesInPool, maxUnavailable) if uint(len(nodes)) < capacity { return nodes } @@ -1110,6 +1253,8 @@ func (ctrl *Controller) filterControlPlaneCandidateNodes(pool *mcfgv1.MachineCon return newCandidates, capacity, nil } +// SetDesiredStateFromPool in old mco explains how this works. Somehow you need to NOT FAIL if the mosb doesn't exist. So +// we still need to base this whole things on pools but IsLayeredPool == does mosb exist // updateCandidateMachines sets the desiredConfig annotation the candidate machines func (ctrl *Controller) updateCandidateMachines(pool *mcfgv1.MachineConfigPool, candidates []*corev1.Node, capacity uint) error { if pool.Name == ctrlcommon.MachineConfigPoolMaster { @@ -1135,25 +1280,26 @@ func (ctrl *Controller) updateCandidateMachines(pool *mcfgv1.MachineConfigPool, func (ctrl *Controller) setDesiredAnnotations(pool *mcfgv1.MachineConfigPool, candidates []*corev1.Node) error { eventName := "SetDesiredConfig" + config, build, _ := ctrl.GetConfigAndBuild(pool) - if ctrlcommon.IsLayeredPool(pool) { + if layered := ctrl.IsLayeredPool(pool, config, build); layered { eventName = "SetDesiredConfigAndOSImage" klog.Infof("Continuing to sync layered MachineConfigPool %s", pool.Name) } for _, node := range candidates { - ctrl.logPool(pool, "Setting node %s target to %s", node.Name, getPoolUpdateLine(pool)) - if err := ctrl.updateCandidateNode(node.Name, pool); err != nil { - return fmt.Errorf("setting desired %s for node %s: %w", getPoolUpdateLine(pool), node.Name, err) + //ctrl.logPool(pool, "Setting node %s target to %s", node.Name, getPoolUpdateLine(pool)) + if err := ctrl.updateCandidateNode(config, build, node.Name, pool); err != nil { + return fmt.Errorf("setting desired %s for node %s: %w", &pool.Spec.Configuration.Name, node.Name, err) } } if len(candidates) == 1 { candidate := candidates[0] - ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, eventName, "Targeted node %s to %s", candidate.Name, getPoolUpdateLine(pool)) + ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, eventName, "Targeted node %s to %s", candidate.Name, &pool.Spec.Configuration.Name) } else { - ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, eventName, "Set target for %d nodes to %s", len(candidates), getPoolUpdateLine(pool)) + ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, eventName, "Set target for %d nodes to %s", len(candidates), &pool.Spec.Configuration.Name) } return nil @@ -1280,3 +1426,11 @@ func getErrorString(err error) string { } return "" } + +func (ctrl *Controller) IsLayeredPool(pool *mcfgv1.MachineConfigPool, mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) bool { + fg, err := ctrl.fgAcessor.CurrentFeatureGates() + if err != nil { + return false + } + return (mosc != nil || mosb != nil) && fg.Enabled(configv1.FeatureGateOnClusterBuild) +} diff --git a/pkg/controller/node/node_controller_test.go b/pkg/controller/node/node_controller_test.go index 142eb69b25..f832086176 100644 --- a/pkg/controller/node/node_controller_test.go +++ b/pkg/controller/node/node_controller_test.go @@ -778,7 +778,7 @@ func TestGetCandidateMachines(t *testing.T) { pool := pb.MachineConfigPool() - got := getCandidateMachines(pool, test.nodes, test.progress) + got := getCandidateMachines(pool, test.nodes, test.progress, true) nodeNames := getNamesFromNodes(got) assert.Equal(t, test.expected, nodeNames) @@ -1108,13 +1108,13 @@ func TestShouldMakeProgress(t *testing.T) { mcp := test.infraPool existingNodeBuilder := helpers.NewNodeBuilder("existingNodeAtDesiredConfig").WithEqualConfigs(machineConfigV1).WithLabels(map[string]string{"node-role/worker": "", "node-role/infra": ""}) - lps := ctrlcommon.NewLayeredPoolState(mcp) + lps := ctrlcommon.NewMachineOSBuildState(mcp) if lps.IsLayered() && lps.HasOSImage() { image := lps.GetOSImage() existingNodeBuilder.WithDesiredImage(image).WithCurrentImage(image) } - lps = ctrlcommon.NewLayeredPoolState(mcpWorker) + lps = ctrlcommon.NewMachineOSBuildState(mcpWorker) if lps.IsLayered() && lps.HasOSImage() { image := lps.GetOSImage() existingNodeBuilder.WithDesiredImage(image).WithCurrentImage(image) @@ -1178,7 +1178,7 @@ func TestShouldMakeProgress(t *testing.T) { t.Fatal(err) } - lps := ctrlcommon.NewLayeredPoolState(mcp) + lps := ctrlcommon.NewMachineOSBuildState(mcp) if lps.IsLayered() && lps.HasOSImage() && lps.IsBuildSuccess() { t.Logf("expecting that the node should get the desired image annotation, desired image is: %s", lps.GetOSImage()) expNode.Annotations[daemonconsts.DesiredImageAnnotationKey] = lps.GetOSImage() diff --git a/pkg/controller/node/status.go b/pkg/controller/node/status.go index 95ca7863c7..5078b2b974 100644 --- a/pkg/controller/node/status.go +++ b/pkg/controller/node/status.go @@ -8,6 +8,7 @@ import ( configv1 "github.com/openshift/api/config/v1" mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" helpers "github.com/openshift/machine-config-operator/pkg/helpers" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" @@ -53,7 +54,10 @@ func (ctrl *Controller) syncStatusOnly(pool *mcfgv1.MachineConfigPool) error { machineConfigStates = append(machineConfigStates, ms) } } - newStatus := calculateStatus(machineConfigStates, cc, pool, nodes) + + mosc, mosb, _ := ctrl.GetConfigAndBuild(pool) + + newStatus := ctrl.calculateStatus(machineConfigStates, cc, pool, nodes, mosc, mosb) if equality.Semantic.DeepEqual(pool.Status, newStatus) { return nil } @@ -62,17 +66,19 @@ func (ctrl *Controller) syncStatusOnly(pool *mcfgv1.MachineConfigPool) error { newPool.Status = newStatus _, err = ctrl.client.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), newPool, metav1.UpdateOptions{}) + l := ctrl.IsLayeredPool(pool, mosc, mosb) + if pool.Spec.Configuration.Name != newPool.Spec.Configuration.Name { - ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, "Updating", "Pool %s now targeting %s", pool.Name, getPoolUpdateLine(newPool)) + ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, "Updating", "Pool %s now targeting %s", pool.Name, getPoolUpdateLine(newPool, mosc, mosb, l)) } if pool.Status.Configuration.Name != newPool.Status.Configuration.Name { - ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, "Completed", "Pool %s has completed update to %s", pool.Name, getPoolUpdateLine(newPool)) + ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, "Completed", "Pool %s has completed update to %s", pool.Name, getPoolUpdateLine(newPool, mosc, mosb, l)) } return err } //nolint:gocyclo -func calculateStatus(mcs []*mcfgalphav1.MachineConfigNode, cconfig *mcfgv1.ControllerConfig, pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node) mcfgv1.MachineConfigPoolStatus { +func (ctrl *Controller) calculateStatus(mcs []*mcfgalphav1.MachineConfigNode, cconfig *mcfgv1.ControllerConfig, pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node, mosc *mcfgalphav1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) mcfgv1.MachineConfigPoolStatus { certExpirys := []mcfgv1.CertExpiry{} if cconfig != nil { for _, cert := range cconfig.Status.ControllerCertificates { @@ -88,6 +94,8 @@ func calculateStatus(mcs []*mcfgalphav1.MachineConfigNode, cconfig *mcfgv1.Contr } machineCount := int32(len(nodes)) + l := ctrl.IsLayeredPool(pool, mosc, mosb) + var degradedMachines, readyMachines, updatedMachines, unavailableMachines, updatingMachines []*corev1.Node // if we represent updating properly here, we will also represent updating properly in the CO // so this solves the cordoning RFE and the upgradeable RFE @@ -153,13 +161,14 @@ func calculateStatus(mcs []*mcfgalphav1.MachineConfigNode, cconfig *mcfgv1.Contr // this is # 1 priority, get the upgrade states actually reporting if degradedMachineCount+readyMachineCount+unavailableMachineCount+updatingMachineCount != int32(len(nodes)) { - updatedMachines = getUpdatedMachines(pool, nodes) + + updatedMachines = getUpdatedMachines(pool, nodes, mosc, mosb, l) updatedMachineCount = int32(len(updatedMachines)) - readyMachines = getReadyMachines(pool, nodes) + readyMachines = getReadyMachines(pool, nodes, mosc, mosb, l) readyMachineCount = int32(len(readyMachines)) - unavailableMachines = getUnavailableMachines(nodes, pool) + unavailableMachines = getUnavailableMachines(nodes, pool, l, mosb) unavailableMachineCount = int32(len(unavailableMachines)) degradedMachines = getDegradedMachines(nodes) @@ -196,7 +205,7 @@ func calculateStatus(mcs []*mcfgalphav1.MachineConfigNode, cconfig *mcfgv1.Contr if allUpdated { //TODO: update api to only have one condition regarding status of update. - updatedMsg := fmt.Sprintf("All nodes are updated with %s", getPoolUpdateLine(pool)) + updatedMsg := fmt.Sprintf("All nodes are updated with %s", getPoolUpdateLine(pool, mosc, mosb, l)) supdated := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolUpdated, corev1.ConditionTrue, "", updatedMsg) apihelpers.SetMachineConfigPoolCondition(&status, *supdated) @@ -210,10 +219,10 @@ func calculateStatus(mcs []*mcfgalphav1.MachineConfigNode, cconfig *mcfgv1.Contr supdated := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolUpdated, corev1.ConditionFalse, "", "") apihelpers.SetMachineConfigPoolCondition(&status, *supdated) if pool.Spec.Paused { - supdating := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolUpdating, corev1.ConditionFalse, "", fmt.Sprintf("Pool is paused; will not update to %s", getPoolUpdateLine(pool))) + supdating := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolUpdating, corev1.ConditionFalse, "", fmt.Sprintf("Pool is paused; will not update to %s", getPoolUpdateLine(pool, mosc, mosb, l))) apihelpers.SetMachineConfigPoolCondition(&status, *supdating) } else { - supdating := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolUpdating, corev1.ConditionTrue, "", fmt.Sprintf("All nodes are updating to %s", getPoolUpdateLine(pool))) + supdating := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolUpdating, corev1.ConditionTrue, "", fmt.Sprintf("All nodes are updating to %s", getPoolUpdateLine(pool, mosc, mosb, l))) apihelpers.SetMachineConfigPoolCondition(&status, *supdating) } } @@ -247,16 +256,16 @@ func calculateStatus(mcs []*mcfgalphav1.MachineConfigNode, cconfig *mcfgv1.Contr return status } -func getPoolUpdateLine(pool *mcfgv1.MachineConfigPool) string { +func getPoolUpdateLine(pool *mcfgv1.MachineConfigPool, mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild, layered bool) string { targetConfig := pool.Spec.Configuration.Name mcLine := fmt.Sprintf("MachineConfig %s", targetConfig) - if !ctrlcommon.IsLayeredPool(pool) { + if !layered { return mcLine } - targetImage, ok := pool.Annotations[ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey] - if !ok { + targetImage := mosc.Status.CurrentImagePullspec + if len(targetImage) == 0 { return mcLine } @@ -323,12 +332,20 @@ func isNodeMCDFailing(node *corev1.Node) bool { // getUpdatedMachines filters the provided nodes to return the nodes whose // current config matches the desired config, which also matches the target config, // and the "done" flag is set. -func getUpdatedMachines(pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node) []*corev1.Node { +func getUpdatedMachines(pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node, mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild, layered bool) []*corev1.Node { var updated []*corev1.Node for _, node := range nodes { lns := ctrlcommon.NewLayeredNodeState(node) - if lns.IsDoneAt(pool) { - updated = append(updated, node) + if mosb != nil && mosc != nil { + mosbState := ctrlcommon.NewMachineOSBuildState(mosb) + if layered && mosbState.IsBuildSuccess() && mosb.Spec.DesiredConfig.Name == pool.Spec.Configuration.Name { + updated = append(updated, node) + } + } else { + if lns.IsDoneAt(pool, layered) { + updated = append(updated, node) + + } } } return updated @@ -336,8 +353,8 @@ func getUpdatedMachines(pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node) [] // getReadyMachines filters the provided nodes to return the nodes // that are updated and marked ready -func getReadyMachines(pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node) []*corev1.Node { - updated := getUpdatedMachines(pool, nodes) +func getReadyMachines(pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node, mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild, layered bool) []*corev1.Node { + updated := getUpdatedMachines(pool, nodes, mosc, mosb, layered) var ready []*corev1.Node for _, node := range updated { if isNodeReady(node) { @@ -400,12 +417,21 @@ func isNodeUnavailable(node *corev1.Node) bool { // node *may* go unschedulable in the future, so we don't want to // potentially start another node update exceeding our maxUnavailable. // Somewhat the opposite of getReadyNodes(). -func getUnavailableMachines(nodes []*corev1.Node, pool *mcfgv1.MachineConfigPool) []*corev1.Node { +func getUnavailableMachines(nodes []*corev1.Node, pool *mcfgv1.MachineConfigPool, layered bool, mosb *mcfgv1alpha1.MachineOSBuild) []*corev1.Node { var unavail []*corev1.Node for _, node := range nodes { - lns := ctrlcommon.NewLayeredNodeState(node) - if lns.IsUnavailable(pool) { - unavail = append(unavail, node) + if mosb != nil { + mosbState := ctrlcommon.NewMachineOSBuildState(mosb) + // if node is unavail, desiredConfigs match, and the build is a success, then we are unavail. + // not sure on this one honestly + if layered && isNodeUnavailable(node) && mosb.Spec.DesiredConfig.Name == pool.Status.Configuration.Name && mosbState.IsBuildSuccess() { + unavail = append(unavail, node) + } + } else { + lns := ctrlcommon.NewLayeredNodeState(node) + if lns.IsUnavailable(pool, layered) { + unavail = append(unavail, node) + } } } diff --git a/pkg/operator/operator.go b/pkg/operator/operator.go index 8a0b2928d0..a0192058a0 100644 --- a/pkg/operator/operator.go +++ b/pkg/operator/operator.go @@ -207,7 +207,7 @@ func New( klog.Errorf("Could not modify scheme: %v", err) } - for _, i := range []cache.SharedIndexInformer{ + informers := []cache.SharedIndexInformer{ controllerConfigInformer.Informer(), serviceAccountInfomer.Informer(), crdInformer.Informer(), @@ -230,8 +230,12 @@ func New( mcoSecretInformer.Informer(), ocSecretInformer.Informer(), mcoCOInformer.Informer(), - mcopInformer.Informer(), - } { + } + + // this is for the FG + // informers = append(informers, moscInformer.Informer()) + + for _, i := range informers { i.AddEventHandler(optr.eventHandler()) } diff --git a/pkg/operator/sync.go b/pkg/operator/sync.go index 3ce9b30963..3a97d3db18 100644 --- a/pkg/operator/sync.go +++ b/pkg/operator/sync.go @@ -682,7 +682,6 @@ func (optr *Operator) syncMachineConfigPools(config *renderConfig) error { return nil } -// we need to mimic this func (optr *Operator) syncMachineConfigNodes(_ *renderConfig) error { fg, err := optr.fgAccessor.CurrentFeatureGates() if err != nil { @@ -1206,10 +1205,10 @@ func (optr *Operator) reconcileMachineOSBuilder(mob *appsv1.Deployment) error { if len(layeredMCPs) != 0 && (!isRunning || !correctReplicaCount) { if !correctReplicaCount { klog.Infof("Adjusting Machine OS Builder pod replica count because MachineConfigPool(s) opted into layering") - return optr.updateMachineOSBuilderDeployment(mob, 1) + return optr.updateMachineOSBuilderDeployment(mob, 1, layeredMCPs) } klog.Infof("Starting Machine OS Builder pod because MachineConfigPool(s) opted into layering") - return optr.startMachineOSBuilderDeployment(mob) + return optr.startMachineOSBuilderDeployment(mob, layeredMCPs) } // If we do not have opted-in pools and the Machine OS Builder deployment is @@ -1221,7 +1220,7 @@ func (optr *Operator) reconcileMachineOSBuilder(mob *appsv1.Deployment) error { // if we are in ocb, but for some reason we dont need to do an update to the deployment, we still need to validate config if len(layeredMCPs) != 0 { - return build.ValidateOnClusterBuildConfig(optr.kubeClient) + return build.ValidateOnClusterBuildConfig(optr.kubeClient, optr.client, layeredMCPs) } return nil } @@ -1240,8 +1239,8 @@ func (optr *Operator) hasCorrectReplicaCount(mob *appsv1.Deployment) bool { return false } -func (optr *Operator) updateMachineOSBuilderDeployment(mob *appsv1.Deployment, replicas int32) error { - if err := build.ValidateOnClusterBuildConfig(optr.kubeClient); err != nil { +func (optr *Operator) updateMachineOSBuilderDeployment(mob *appsv1.Deployment, replicas int32, layeredMCPs []*mcfgv1.MachineConfigPool) error { + if err := build.ValidateOnClusterBuildConfig(optr.kubeClient, optr.client, layeredMCPs); err != nil { return fmt.Errorf("could not update Machine OS Builder deployment: %w", err) } @@ -1285,8 +1284,8 @@ func (optr *Operator) isMachineOSBuilderRunning(mob *appsv1.Deployment) (bool, e } // Updates the Machine OS Builder Deployment, creating it if it does not exist. -func (optr *Operator) startMachineOSBuilderDeployment(mob *appsv1.Deployment) error { - if err := build.ValidateOnClusterBuildConfig(optr.kubeClient); err != nil { +func (optr *Operator) startMachineOSBuilderDeployment(mob *appsv1.Deployment, layeredMCPs []*mcfgv1.MachineConfigPool) error { + if err := build.ValidateOnClusterBuildConfig(optr.kubeClient, optr.client, layeredMCPs); err != nil { return fmt.Errorf("could not start Machine OS Builder: %w", err) } @@ -1322,6 +1321,22 @@ func (optr *Operator) getLayeredMachineConfigPools() ([]*mcfgv1.MachineConfigPoo return []*mcfgv1.MachineConfigPool{}, err } + if len(pools) == 0 { + moscPools := []*mcfgv1.MachineConfigPool{} + machineosconfigs, err := optr.client.MachineconfigurationV1alpha1().MachineOSConfigs().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return []*mcfgv1.MachineConfigPool{}, err + } + for _, mosc := range machineosconfigs.Items { + mcp, err := optr.mcpLister.Get(mosc.Spec.MachineConfigPool.Name) + if err != nil { + return []*mcfgv1.MachineConfigPool{}, err + } + moscPools = append(moscPools, mcp) + } + return moscPools, nil + } + return pools, nil } diff --git a/test/e2e-techpreview/helpers_test.go b/test/e2e-techpreview/helpers_test.go index 37afe84c64..8a2488898a 100644 --- a/test/e2e-techpreview/helpers_test.go +++ b/test/e2e-techpreview/helpers_test.go @@ -9,6 +9,7 @@ import ( imagev1 "github.com/openshift/api/image/v1" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" "github.com/openshift/machine-config-operator/test/framework" "github.com/openshift/machine-config-operator/test/helpers" @@ -19,6 +20,18 @@ import ( "k8s.io/klog/v2" ) +func createMachineOSConfig(t *testing.T, cs *framework.ClientSet, mosc *mcfgv1alpha1.MachineOSConfig) func() { + _, err := cs.MachineconfigurationV1alpha1Interface.MachineOSConfigs().Create(context.TODO(), mosc, metav1.CreateOptions{}) + require.NoError(t, err) + + t.Logf("Created MachineOSConfig %q", mosc.Name) + + return makeIdempotentAndRegister(t, func() { + require.NoError(t, cs.MachineconfigurationV1alpha1Interface.MachineOSConfigs().Delete(context.TODO(), mosc.Name, metav1.DeleteOptions{})) + t.Logf("Deleted MachineOSConfig %q", mosc.Name) + }) +} + // Identifies a secret in the MCO namespace that has permissions to push to the ImageStream used for the test. func getBuilderPushSecretName(cs *framework.ClientSet) (string, error) { secrets, err := cs.CoreV1Interface.Secrets(ctrlcommon.MCONamespace).List(context.TODO(), metav1.ListOptions{}) @@ -127,6 +140,21 @@ func copyGlobalPullSecret(t *testing.T, cs *framework.ClientSet) func() { }) } +func waitForMachineOSBuildToReachState(t *testing.T, cs *framework.ClientSet, poolName string, condFunc func(*mcfgv1alpha1.MachineOSBuild, error) (bool, error)) { + mcp, err := cs.MachineconfigurationV1Interface.MachineConfigPools().Get(context.TODO(), poolName, metav1.GetOptions{}) + require.NoError(t, err) + + mosbName := fmt.Sprintf("%s-%s-builder", poolName, mcp.Spec.Configuration.Name) + + err = wait.PollImmediate(1*time.Second, 10*time.Minute, func() (bool, error) { + mosb, err := cs.MachineconfigurationV1alpha1Interface.MachineOSBuilds().Get(context.TODO(), mosbName, metav1.GetOptions{}) + + return condFunc(mosb, err) + }) + + require.NoError(t, err, "MachineOSBuild %q did not reach desired state", mosbName) +} + // Waits for the target MachineConfigPool to reach a state defined in a supplied function. func waitForPoolToReachState(t *testing.T, cs *framework.ClientSet, poolName string, condFunc func(*mcfgv1.MachineConfigPool) bool) { err := wait.PollImmediate(1*time.Second, 10*time.Minute, func() (bool, error) { @@ -152,3 +180,63 @@ func makeIdempotentAndRegister(t *testing.T, cleanupFunc func()) func() { t.Cleanup(out) return out } + +// TOOD: Refactor into smaller functions. +func cleanupEphemeralBuildObjects(t *testing.T, cs *framework.ClientSet) { + // TODO: Instantiate this by using the label selector library. + labelSelector := "machineconfiguration.openshift.io/desiredConfig,machineconfiguration.openshift.io/buildPod,machineconfiguration.openshift.io/targetMachineConfigPool" + + cmList, err := cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: labelSelector, + }) + + require.NoError(t, err) + + podList, err := cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: labelSelector, + }) + + require.NoError(t, err) + + mosbList, err := cs.MachineconfigurationV1alpha1Interface.MachineOSBuilds().List(context.TODO(), metav1.ListOptions{}) + require.NoError(t, err) + + moscList, err := cs.MachineconfigurationV1alpha1Interface.MachineOSConfigs().List(context.TODO(), metav1.ListOptions{}) + require.NoError(t, err) + + if len(cmList.Items) == 0 { + t.Logf("No ephemeral ConfigMaps to clean up") + } + + if len(podList.Items) == 0 { + t.Logf("No build pods to clean up") + } + + if len(mosbList.Items) == 0 { + t.Logf("No MachineOSBuilds to clean up") + } + + if len(moscList.Items) == 0 { + t.Logf("No MachineOSConfigs to clean up") + } + + for _, item := range cmList.Items { + t.Logf("Cleaning up ephemeral ConfigMap %q", item.Name) + require.NoError(t, cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Delete(context.TODO(), item.Name, metav1.DeleteOptions{})) + } + + for _, item := range podList.Items { + t.Logf("Cleaning up build pod %q", item.Name) + require.NoError(t, cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace).Delete(context.TODO(), item.Name, metav1.DeleteOptions{})) + } + + for _, item := range moscList.Items { + t.Logf("Cleaning up MachineOSConfig %q", item.Name) + require.NoError(t, cs.MachineconfigurationV1alpha1Interface.MachineOSConfigs().Delete(context.TODO(), item.Name, metav1.DeleteOptions{})) + } + + for _, item := range mosbList.Items { + t.Logf("Cleaning up MachineOSBuild %q", item.Name) + require.NoError(t, cs.MachineconfigurationV1alpha1Interface.MachineOSBuilds().Delete(context.TODO(), item.Name, metav1.DeleteOptions{})) + } +} diff --git a/test/e2e-techpreview/onclusterbuild_test.go b/test/e2e-techpreview/onclusterbuild_test.go index 3f508dea01..d6841a965e 100644 --- a/test/e2e-techpreview/onclusterbuild_test.go +++ b/test/e2e-techpreview/onclusterbuild_test.go @@ -7,10 +7,10 @@ import ( "testing" corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/util/retry" ign3types "github.com/coreos/ignition/v2/config/v3_4/types" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" "github.com/openshift/machine-config-operator/pkg/controller/build" "github.com/openshift/machine-config-operator/test/framework" @@ -18,6 +18,7 @@ import ( "github.com/stretchr/testify/require" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -64,17 +65,6 @@ type onClusterBuildTestOpts struct { poolName string } -// Tests that an on-cluster build can be performed with the OpenShift Image Builder. -func TestOnClusterBuildsOpenshiftImageBuilder(t *testing.T) { - runOnClusterBuildTest(t, onClusterBuildTestOpts{ - imageBuilderType: build.OpenshiftImageBuilder, - poolName: layeredMCPName, - customDockerfiles: map[string]string{ - layeredMCPName: cowsayDockerfile, - }, - }) -} - // Tests tha an on-cluster build can be performed with the Custom Pod Builder. func TestOnClusterBuildsCustomPodBuilder(t *testing.T) { runOnClusterBuildTest(t, onClusterBuildTestOpts{ @@ -90,7 +80,7 @@ func TestOnClusterBuildsCustomPodBuilder(t *testing.T) { // is rolled out to an opted-in node. func TestOnClusterBuildRollsOutImage(t *testing.T) { imagePullspec := runOnClusterBuildTest(t, onClusterBuildTestOpts{ - imageBuilderType: build.OpenshiftImageBuilder, + imageBuilderType: build.CustomPodImageBuilder, poolName: layeredMCPName, customDockerfiles: map[string]string{ layeredMCPName: cowsayDockerfile, @@ -115,74 +105,50 @@ func runOnClusterBuildTest(t *testing.T, testOpts onClusterBuildTestOpts) string t.Logf("Running with ImageBuilder type: %s", testOpts.imageBuilderType) - prepareForTest(t, cs, testOpts) + mosc := prepareForTest(t, cs, testOpts) - optPoolIntoLayering(t, cs, testOpts.poolName) + t.Cleanup(createMachineOSConfig(t, cs, mosc)) t.Logf("Wait for build to start") - waitForPoolToReachState(t, cs, testOpts.poolName, func(mcp *mcfgv1.MachineConfigPool) bool { - return ctrlcommon.NewLayeredPoolState(mcp).IsBuilding() - }) - - t.Logf("Build started! Waiting for completion...") - imagePullspec := "" - waitForPoolToReachState(t, cs, testOpts.poolName, func(mcp *mcfgv1.MachineConfigPool) bool { - lps := ctrlcommon.NewLayeredPoolState(mcp) - if lps.HasOSImage() && lps.IsBuildSuccess() { - imagePullspec = lps.GetOSImage() - return true + waitForMachineOSBuildToReachState(t, cs, testOpts.poolName, func(mosb *mcfgv1alpha1.MachineOSBuild, err error) (bool, error) { + // If we had any errors retrieving the build, (other than it not existing yet), stop here. + if err != nil && !k8serrors.IsNotFound(err) { + return false, err } - if lps.IsBuildFailure() { - t.Fatalf("Build unexpectedly failed.") + // The build object has not been created yet, requeue and try again. + if mosb == nil && k8serrors.IsNotFound(err) { + t.Logf("MachineOSBuild does not exist yet, retrying...") + return false, nil } - return false + // At this point, the build object exists and we want to ensure that it is running. + return ctrlcommon.NewMachineOSBuildState(mosb).IsBuilding(), nil }) - t.Logf("MachineConfigPool %q has finished building. Got image: %s", testOpts.poolName, imagePullspec) - - return imagePullspec -} - -// Adds the layeringEnabled label to the target MachineConfigPool and registers -// / returns a function to unlabel it. -func optPoolIntoLayering(t *testing.T, cs *framework.ClientSet, pool string) func() { - err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - mcp, err := cs.MachineconfigurationV1Interface.MachineConfigPools().Get(context.TODO(), pool, metav1.GetOptions{}) - require.NoError(t, err) + t.Logf("Build started! Waiting for completion...") - if mcp.Labels == nil { - mcp.Labels = map[string]string{} + var build *mcfgv1alpha1.MachineOSBuild + waitForMachineOSBuildToReachState(t, cs, testOpts.poolName, func(mosb *mcfgv1alpha1.MachineOSBuild, err error) (bool, error) { + if err != nil { + return false, err } - mcp.Labels[ctrlcommon.LayeringEnabledPoolLabel] = "" + build = mosb - _, err = cs.MachineconfigurationV1Interface.MachineConfigPools().Update(context.TODO(), mcp, metav1.UpdateOptions{}) - if err == nil { - t.Logf("Added label %q to MachineConfigPool %s to opt into layering", ctrlcommon.LayeringEnabledPoolLabel, pool) - } - return err - }) - - require.NoError(t, err) + state := ctrlcommon.NewMachineOSBuildState(mosb) - return makeIdempotentAndRegister(t, func() { - err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - mcp, err := cs.MachineconfigurationV1Interface.MachineConfigPools().Get(context.TODO(), pool, metav1.GetOptions{}) - require.NoError(t, err) + if state.IsBuildFailure() { + t.Logf("MachineOSBuild %q unexpectedly failed", mosb.Name) + t.Fail() + } - delete(mcp.Labels, ctrlcommon.LayeringEnabledPoolLabel) + return state.IsBuildSuccess(), nil + }) - _, err = cs.MachineconfigurationV1Interface.MachineConfigPools().Update(context.TODO(), mcp, metav1.UpdateOptions{}) - if err == nil { - t.Logf("Removed label %q to MachineConfigPool %s to opt out of layering", ctrlcommon.LayeringEnabledPoolLabel, pool) - } - return err - }) + t.Logf("MachineOSBuild %q has finished building. Got image: %s", build.Name, build.Status.FinalImagePushspec) - require.NoError(t, err) - }) + return build.Status.FinalImagePushspec } // Prepares for an on-cluster build test by performing the following: @@ -195,10 +161,18 @@ func optPoolIntoLayering(t *testing.T, cs *framework.ClientSet, pool string) fun // // Each of the object creation steps registers an idempotent cleanup function // that will delete the object at the end of the test. -func prepareForTest(t *testing.T, cs *framework.ClientSet, testOpts onClusterBuildTestOpts) { +// +// Returns a MachineOSConfig object for the caller to create to begin the build +// process. +func prepareForTest(t *testing.T, cs *framework.ClientSet, testOpts onClusterBuildTestOpts) *mcfgv1alpha1.MachineOSConfig { pushSecretName, err := getBuilderPushSecretName(cs) require.NoError(t, err) + // Register ephemeral object cleanup function. + t.Cleanup(func() { + cleanupEphemeralBuildObjects(t, cs) + }) + imagestreamName := "os-image" t.Cleanup(createImagestream(t, cs, imagestreamName)) @@ -207,30 +181,51 @@ func prepareForTest(t *testing.T, cs *framework.ClientSet, testOpts onClusterBui finalPullspec, err := getImagestreamPullspec(cs, imagestreamName) require.NoError(t, err) - cmCleanup := createConfigMap(t, cs, &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: build.OnClusterBuildConfigMapName, - Namespace: ctrlcommon.MCONamespace, - }, - Data: map[string]string{ - build.BaseImagePullSecretNameConfigKey: globalPullSecretCloneName, - build.FinalImagePushSecretNameConfigKey: pushSecretName, - build.FinalImagePullspecConfigKey: finalPullspec, - build.ImageBuilderTypeConfigMapKey: string(testOpts.imageBuilderType), - }, - }) - - t.Cleanup(cmCleanup) - t.Cleanup(makeIdempotentAndRegister(t, helpers.CreateMCP(t, cs, testOpts.poolName))) - t.Cleanup(createCustomDockerfileConfigMap(t, cs, testOpts.customDockerfiles)) - _, err = helpers.WaitForRenderedConfig(t, cs, testOpts.poolName, "00-worker") require.NoError(t, err) + + return &mcfgv1alpha1.MachineOSConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: testOpts.poolName, + }, + Spec: mcfgv1alpha1.MachineOSConfigSpec{ + MachineConfigPool: mcfgv1alpha1.MachineConfigPoolReference{ + Name: testOpts.poolName, + }, + BuildInputs: mcfgv1alpha1.BuildInputs{ + BaseImagePullSecret: mcfgv1alpha1.ImageSecretObjectReference{ + Name: globalPullSecretCloneName, + }, + RenderedImagePushSecret: mcfgv1alpha1.ImageSecretObjectReference{ + Name: pushSecretName, + }, + RenderedImagePushspec: finalPullspec, + ImageBuilder: &mcfgv1alpha1.MachineOSImageBuilder{ + ImageBuilderType: mcfgv1alpha1.PodBuilder, + }, + Containerfile: []mcfgv1alpha1.MachineOSContainerfile{ + { + ContainerfileArch: mcfgv1alpha1.NoArch, + Content: testOpts.customDockerfiles[testOpts.poolName], + }, + }, + }, + BuildOutputs: mcfgv1alpha1.BuildOutputs{ + CurrentImagePullSecret: mcfgv1alpha1.ImageSecretObjectReference{ + // TODO: Can this use the global pull secret? Or should this use the + // push secret name for now? + Name: pushSecretName, + }, + }, + }, + } } func TestSSHKeyAndPasswordForOSBuilder(t *testing.T) { + t.Skip() + cs := framework.NewClientSet("") // label random node from pool, get the node @@ -239,7 +234,6 @@ func TestSSHKeyAndPasswordForOSBuilder(t *testing.T) { // prepare for on cluster build test prepareForTest(t, cs, onClusterBuildTestOpts{ - imageBuilderType: build.OpenshiftImageBuilder, poolName: layeredMCPName, customDockerfiles: map[string]string{}, }) diff --git a/test/framework/clientset.go b/test/framework/clientset.go index 6c4e2dca78..caa2016c1a 100644 --- a/test/framework/clientset.go +++ b/test/framework/clientset.go @@ -8,6 +8,7 @@ import ( clientconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" clientimagev1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" clientmachineconfigv1 "github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1" + clientmachineconfigv1alpha1 "github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1" clientoperatorsv1alpha1 "github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1" clientapiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1" @@ -26,6 +27,7 @@ type ClientSet struct { clientoperatorsv1alpha1.OperatorV1alpha1Interface clientbuildv1.BuildV1Interface clientimagev1.ImageV1Interface + clientmachineconfigv1alpha1.MachineconfigurationV1alpha1Interface kubeconfig string config *rest.Config } @@ -72,14 +74,15 @@ func NewClientSet(kubeconfig string) *ClientSet { // NewClientSetFromConfig returns a *ClientBuilder with the given rest config. func NewClientSetFromConfig(config *rest.Config) *ClientSet { return &ClientSet{ - CoreV1Interface: corev1client.NewForConfigOrDie(config), - AppsV1Interface: appsv1client.NewForConfigOrDie(config), - ConfigV1Interface: clientconfigv1.NewForConfigOrDie(config), - MachineconfigurationV1Interface: clientmachineconfigv1.NewForConfigOrDie(config), - ApiextensionsV1Interface: clientapiextensionsv1.NewForConfigOrDie(config), - OperatorV1alpha1Interface: clientoperatorsv1alpha1.NewForConfigOrDie(config), - BuildV1Interface: clientbuildv1.NewForConfigOrDie(config), - ImageV1Interface: clientimagev1.NewForConfigOrDie(config), - config: config, + CoreV1Interface: corev1client.NewForConfigOrDie(config), + AppsV1Interface: appsv1client.NewForConfigOrDie(config), + ConfigV1Interface: clientconfigv1.NewForConfigOrDie(config), + MachineconfigurationV1Interface: clientmachineconfigv1.NewForConfigOrDie(config), + ApiextensionsV1Interface: clientapiextensionsv1.NewForConfigOrDie(config), + OperatorV1alpha1Interface: clientoperatorsv1alpha1.NewForConfigOrDie(config), + BuildV1Interface: clientbuildv1.NewForConfigOrDie(config), + ImageV1Interface: clientimagev1.NewForConfigOrDie(config), + MachineconfigurationV1alpha1Interface: clientmachineconfigv1alpha1.NewForConfigOrDie(config), + config: config, } } diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-TechPreviewNoUpgrade.crd.yaml index 26bd68689c..6e3112a961 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-TechPreviewNoUpgrade.crd.yaml @@ -155,4 +155,4 @@ spec: served: true storage: true subresources: - status: {} + status: {} \ No newline at end of file diff --git a/vendor/k8s.io/code-generator/generate-groups.sh b/vendor/k8s.io/code-generator/generate-groups.sh old mode 100755 new mode 100644 diff --git a/vendor/k8s.io/code-generator/generate-internal-groups.sh b/vendor/k8s.io/code-generator/generate-internal-groups.sh old mode 100755 new mode 100644