diff --git a/cmd/machine-config-controller/start.go b/cmd/machine-config-controller/start.go index 6d9c0c232c..774707d400 100644 --- a/cmd/machine-config-controller/start.go +++ b/cmd/machine-config-controller/start.go @@ -124,6 +124,7 @@ func createControllers(ctx *ctrlcommon.ControllerContext) []ctrlcommon.Controlle ctx.InformerFactory.Machineconfiguration().V1().MachineConfigs(), ctx.OpenShiftConfigKubeNamespacedInformerFactory.Core().V1().Secrets(), ctx.ConfigInformerFactory.Config().V1().FeatureGates(), + ctx.OperatorInformerFactory.Operator().V1().Storages(), ctx.ClientBuilder.KubeClientOrDie("template-controller"), ctx.ClientBuilder.MachineConfigClientOrDie("template-controller"), ), diff --git a/go.mod b/go.mod index bbdfd081d6..5af67fe976 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/google/renameio v0.1.0 github.com/imdario/mergo v0.3.13 github.com/opencontainers/go-digest v1.0.0 - github.com/openshift/api v0.0.0-20230221095031-69130006bb23 + github.com/openshift/api v0.0.0-20230330150608-05635858d40f github.com/openshift/client-go v0.0.0-20220831193253-4950ae70c8ea github.com/openshift/library-go v0.0.0-20230112164258-24668b1349e6 github.com/openshift/runtime-utils v0.0.0-20220926190846-5c488b20a19f diff --git a/go.sum b/go.sum index dce1180866..2abda238ab 100644 --- a/go.sum +++ b/go.sum @@ -809,8 +809,8 @@ github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJ github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/openshift/api v0.0.0-20230221095031-69130006bb23 h1:6hkSewbomhxN9+WQhT1ABANfZOJCjAzvBPSQe1OMbRs= -github.com/openshift/api v0.0.0-20230221095031-69130006bb23/go.mod 
h1:ctXNyWanKEjGj8sss1KjjHQ3ENKFm33FFnS5BKaIPh4= +github.com/openshift/api v0.0.0-20230330150608-05635858d40f h1:mGpCtfoehMcvmg/sSYLiv6nCbTl04cmtkUfYzP7H1AQ= +github.com/openshift/api v0.0.0-20230330150608-05635858d40f/go.mod h1:ctXNyWanKEjGj8sss1KjjHQ3ENKFm33FFnS5BKaIPh4= github.com/openshift/client-go v0.0.0-20220831193253-4950ae70c8ea h1:7JbjIzWt3Q75ErY1PAZ+gCA+bErI6HSlpffHFmMMzqM= github.com/openshift/client-go v0.0.0-20220831193253-4950ae70c8ea/go.mod h1:+J8DqZC60acCdpYkwVy/KH4cudgWiFZRNOBeghCzdGA= github.com/openshift/library-go v0.0.0-20230112164258-24668b1349e6 h1:c0NBJDDuW1bob6E7o9L99JGUBt89iY59QJfUtwU5lXE= diff --git a/manifests/machineconfigcontroller/clusterrole.yaml b/manifests/machineconfigcontroller/clusterrole.yaml index 7e24c4f8df..e87d5b458b 100644 --- a/manifests/machineconfigcontroller/clusterrole.yaml +++ b/manifests/machineconfigcontroller/clusterrole.yaml @@ -55,3 +55,11 @@ rules: - leases verbs: - "*" +- apiGroups: + - operator.openshift.io + resources: + - storages + verbs: + - get + - list + - watch diff --git a/pkg/controller/bootstrap/bootstrap.go b/pkg/controller/bootstrap/bootstrap.go index 1132825a46..5260f07fe5 100644 --- a/pkg/controller/bootstrap/bootstrap.go +++ b/pkg/controller/bootstrap/bootstrap.go @@ -19,6 +19,7 @@ import ( kscheme "k8s.io/client-go/kubernetes/scheme" apicfgv1 "github.com/openshift/api/config/v1" + apioperatorsv1 "github.com/openshift/api/operator/v1" apioperatorsv1alpha1 "github.com/openshift/api/operator/v1alpha1" mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" @@ -68,14 +69,16 @@ func (b *Bootstrap) Run(destDir string) error { scheme := runtime.NewScheme() mcfgv1.Install(scheme) + apioperatorsv1.Install(scheme) apioperatorsv1alpha1.Install(scheme) apicfgv1.Install(scheme) codecFactory := serializer.NewCodecFactory(scheme) - decoder := 
codecFactory.UniversalDecoder(mcfgv1.GroupVersion, apioperatorsv1alpha1.GroupVersion, apicfgv1.GroupVersion) + decoder := codecFactory.UniversalDecoder(mcfgv1.GroupVersion, apioperatorsv1.GroupVersion, apioperatorsv1alpha1.GroupVersion, apicfgv1.GroupVersion) var cconfig *mcfgv1.ControllerConfig var featureGate *apicfgv1.FeatureGate var nodeConfig *apicfgv1.Node + var storageConfig *apioperatorsv1.Storage var kconfigs []*mcfgv1.KubeletConfig var pools []*mcfgv1.MachineConfigPool var configs []*mcfgv1.MachineConfig @@ -138,6 +141,10 @@ func (b *Bootstrap) Run(destDir string) error { if obj.GetName() == ctrlcommon.ClusterNodeInstanceName { nodeConfig = obj } + case *apioperatorsv1.Storage: + if obj.GetName() == ctrlcommon.ClusterStorageInstanceName { + storageConfig = obj + } default: glog.Infof("skipping %q [%d] manifest because of unhandled %T", file.Name(), idx+1, obji) } @@ -147,7 +154,7 @@ func (b *Bootstrap) Run(destDir string) error { if cconfig == nil { return fmt.Errorf("error: no controllerconfig found in dir: %q", destDir) } - iconfigs, err := template.RunBootstrap(b.templatesDir, cconfig, psraw, featureGate) + iconfigs, err := template.RunBootstrap(b.templatesDir, cconfig, psraw, featureGate, storageConfig) if err != nil { return err } diff --git a/pkg/controller/bootstrap/testdata/bootstrap/machineconfigcontroller-controllerconfig.yaml b/pkg/controller/bootstrap/testdata/bootstrap/machineconfigcontroller-controllerconfig.yaml index 9ec0e21567..61e7ddb849 100644 --- a/pkg/controller/bootstrap/testdata/bootstrap/machineconfigcontroller-controllerconfig.yaml +++ b/pkg/controller/bootstrap/testdata/bootstrap/machineconfigcontroller-controllerconfig.yaml @@ -32,6 +32,9 @@ spec: controlPlaneTopology: HighlyAvailable platformStatus: type: None + dns: + spec: + baseDomain: domain.example.com kubeAPIServerServingCAData: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCktVQkUgQVBJIFNFUlZFUiBTRVJWSU5HIENBIERBVEEKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= osImageURL: 
registry.product.example.org/ocp/4.2-DATE-VERSION@sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee releaseImage: release-registry.product.example.org/ocp/4.2-date-version@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff diff --git a/pkg/controller/common/constants.go b/pkg/controller/common/constants.go index 12177c3618..a7afbfe4a7 100644 --- a/pkg/controller/common/constants.go +++ b/pkg/controller/common/constants.go @@ -37,6 +37,9 @@ const ( // ClusterNodeInstanceName is a singleton name for node configuration ClusterNodeInstanceName = "cluster" + // ClusterStorageInstanceName is a singleton name for storage configuration + ClusterStorageInstanceName = "cluster" + // MachineConfigPoolMaster is the MachineConfigPool name given to the master MachineConfigPoolMaster = "master" // MachineConfigPoolWorker is the MachineConfigPool name given to the worker diff --git a/pkg/controller/template/render.go b/pkg/controller/template/render.go index 0f8c44eb37..15ccde5db4 100644 --- a/pkg/controller/template/render.go +++ b/pkg/controller/template/render.go @@ -13,6 +13,7 @@ import ( "github.com/golang/glog" configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" "github.com/openshift/library-go/pkg/cloudprovider" mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" @@ -27,6 +28,10 @@ type RenderConfig struct { PullSecret string FeatureGate *configv1.FeatureGate + // for making decision about vSphere cloud provider kubelet flag + // for more info see: https://issues.redhat.com/browse/STOR-1265 + StorageConfig *operatorv1.Storage + // no need to set this, will be automatically configured Constants map[string]string } @@ -362,6 +367,7 @@ func renderTemplate(config RenderConfig, path string, b []byte) ([]byte, error) funcs["onPremPlatformShortName"] = onPremPlatformShortName funcs["urlHost"] = urlHost funcs["urlPort"] = urlPort + 
funcs["vSphereCSIMigration"] = isVSphereCSIMigrationEnabled funcs["isOpenShiftManagedDefaultLB"] = isOpenShiftManagedDefaultLB tmpl, err := template.New(path).Funcs(funcs).Parse(string(b)) if err != nil { @@ -391,9 +397,38 @@ func skipMissing(key string) (interface{}, error) { return fmt.Sprintf("{{.%s}}", key), nil } +// isCloudProviderExternal is a wrapper for the library-go IsCloudProviderExternal function, it is needed +// to address an issue related to the vSphere in-tree storage driver and the CSI migration feature gate. +// On vSphere, this function will check the storage operator configuration to determine if the in-tree +// driver is in use, if it is this function will return false. Adding this function in the MCO allows +// us to have a configuration whereby the kubelet can use the in-tree cloud controller logic to configure +// itself, while allowing the kube-apiserver and kube-controller-manager to use the external cloud controllers. +// For more information about the root issue, please see the following links: +// https://github.com/kubernetes/kubernetes/pull/116342 +// https://issues.redhat.com/browse/STOR-1265 +func isCloudProviderExternal(platformStatus *configv1.PlatformStatus, featureGate *configv1.FeatureGate, storageConfig *operatorv1.Storage) (bool, error) { + if platformStatus == nil { + return false, fmt.Errorf("platformStatus is required") + } + switch platformStatus.Type { + case configv1.VSpherePlatformType: + // Platforms that are external based on feature gate presence + external, err := cloudprovider.IsCloudProviderExternal(platformStatus, featureGate) + if err != nil { + return external, err + } + isexternal := external && (storageConfig == nil || storageConfig.Spec.VSphereStorageDriver == operatorv1.CSIWithMigrationDriver) + return isexternal, nil + default: + return cloudprovider.IsCloudProviderExternal(platformStatus, featureGate) + } +} + func cloudProvider(cfg RenderConfig) (interface{}, error) { if cfg.Infra.Status.PlatformStatus
!= nil { - external, err := cloudprovider.IsCloudProviderExternal(cfg.Infra.Status.PlatformStatus, cfg.FeatureGate) + // check to see if the external cloud controller manager should be specified on the command line --cloud-provider flag, + // see the comments for isCloudProviderExternal for more information about the reasons for this wrapper function. + external, err := isCloudProviderExternal(cfg.Infra.Status.PlatformStatus, cfg.FeatureGate, cfg.StorageConfig) if err != nil { glog.Error(err) } else if external { @@ -438,7 +473,7 @@ func cloudConfigFlag(cfg RenderConfig) interface{} { } } - external, err := cloudprovider.IsCloudProviderExternal(cfg.Infra.Status.PlatformStatus, cfg.FeatureGate) + external, err := isCloudProviderExternal(cfg.Infra.Status.PlatformStatus, cfg.FeatureGate, cfg.StorageConfig) if err != nil { glog.Error(err) } else if external { @@ -708,3 +743,23 @@ func isOpenShiftManagedDefaultLB(cfg RenderConfig) bool { } return false } + +func isVSphereCSIMigrationEnabled(cfg RenderConfig) interface{} { + const enabled = "enabled" + const disabled = "disabled" + + // The only time we expect this to be nil is during bootstrap when the + // Storage CR doesn't exist yet, and it should enabled for new installs. + if cfg.StorageConfig == nil { + return enabled + } + + // If the Storage CR exists and migration is enabled, set that in the template. + if cfg.StorageConfig.Spec.VSphereStorageDriver == operatorv1.CSIWithMigrationDriver { + return enabled + } + + // Upgraded clusters will default to disabled, until the Storage CR is modified + // to explicitly opt-in to the migration on vSphere. 
+ return disabled +} diff --git a/pkg/controller/template/render_test.go b/pkg/controller/template/render_test.go index d26e3d8471..0d829aef2a 100644 --- a/pkg/controller/template/render_test.go +++ b/pkg/controller/template/render_test.go @@ -12,6 +12,7 @@ import ( ign3types "github.com/coreos/ignition/v2/config/v3_2/types" configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" "github.com/openshift/library-go/pkg/cloudprovider" "k8s.io/client-go/kubernetes/scheme" @@ -30,6 +31,7 @@ func TestCloudProvider(t *testing.T) { cases := []struct { platform configv1.PlatformType featureGate *configv1.FeatureGate + storageConf *operatorv1.Storage res string }{{ platform: configv1.AWSPlatformType, @@ -50,7 +52,13 @@ func TestCloudProvider(t *testing.T) { }, { platform: configv1.VSpherePlatformType, featureGate: newFeatures("cluster", "CustomNoUpgrade", []string{cloudprovider.ExternalCloudProviderFeature}, nil), + storageConf: &operatorv1.Storage{Spec: operatorv1.StorageSpec{VSphereStorageDriver: operatorv1.CSIWithMigrationDriver}}, res: "external", + }, { + platform: configv1.VSpherePlatformType, + featureGate: newFeatures("cluster", "CustomNoUpgrade", []string{cloudprovider.ExternalCloudProviderFeature}, nil), + storageConf: &operatorv1.Storage{Spec: operatorv1.StorageSpec{VSphereStorageDriver: operatorv1.LegacyDeprecatedInTreeDriver}}, + res: "vsphere", }, { platform: configv1.OpenStackPlatformType, featureGate: newFeatures("cluster", "CustomNoUpgrade", []string{cloudprovider.ExternalCloudProviderFeature}, nil), @@ -89,8 +97,13 @@ func TestCloudProvider(t *testing.T) { platform: configv1.NonePlatformType, res: "", }, { - platform: configv1.VSpherePlatformType, - res: "external", + platform: configv1.VSpherePlatformType, + storageConf: &operatorv1.Storage{Spec: operatorv1.StorageSpec{VSphereStorageDriver: operatorv1.CSIWithMigrationDriver}}, + res: "external", + }, { + platform: configv1.VSpherePlatformType, + storageConf: 
&operatorv1.Storage{Spec: operatorv1.StorageSpec{VSphereStorageDriver: operatorv1.LegacyDeprecatedInTreeDriver}}, + res: "vsphere", }, { platform: configv1.AlibabaCloudPlatformType, res: "external", @@ -113,7 +126,7 @@ func TestCloudProvider(t *testing.T) { }, }, } - got, err := renderTemplate(RenderConfig{&config.Spec, `{"dummy":"dummy"}`, c.featureGate, nil}, name, dummyTemplate) + got, err := renderTemplate(RenderConfig{&config.Spec, `{"dummy":"dummy"}`, c.featureGate, c.storageConf, nil}, name, dummyTemplate) if err != nil { t.Fatalf("expected nil error %v", err) } @@ -132,6 +145,7 @@ func TestCloudConfigFlag(t *testing.T) { platform configv1.PlatformType content string featureGate *configv1.FeatureGate + storageConf *operatorv1.Storage res string }{{ platform: configv1.AWSPlatformType, @@ -188,7 +202,17 @@ func TestCloudConfigFlag(t *testing.T) { option = a `, featureGate: newFeatures("cluster", "CustomNoUpgrade", []string{cloudprovider.ExternalCloudProviderFeature}, nil), + storageConf: &operatorv1.Storage{Spec: operatorv1.StorageSpec{VSphereStorageDriver: operatorv1.CSIWithMigrationDriver}}, res: "", + }, { + platform: configv1.VSpherePlatformType, + content: ` +[dummy-config] + option = a +`, + featureGate: newFeatures("cluster", "CustomNoUpgrade", []string{cloudprovider.ExternalCloudProviderFeature}, nil), + storageConf: &operatorv1.Storage{Spec: operatorv1.StorageSpec{VSphereStorageDriver: operatorv1.LegacyDeprecatedInTreeDriver}}, + res: "--cloud-config=/etc/kubernetes/cloud.conf", }, { platform: configv1.AzurePlatformType, content: ` @@ -255,7 +279,7 @@ func TestCloudConfigFlag(t *testing.T) { CloudProviderConfig: c.content, }, } - got, err := renderTemplate(RenderConfig{&config.Spec, `{"dummy":"dummy"}`, c.featureGate, nil}, name, dummyTemplate) + got, err := renderTemplate(RenderConfig{&config.Spec, `{"dummy":"dummy"}`, c.featureGate, c.storageConf, nil}, name, dummyTemplate) if err != nil { t.Fatalf("expected nil error %v", err) } @@ -346,14 +370,14 
@@ func TestInvalidPlatform(t *testing.T) { // we must treat unrecognized constants as "none" controllerConfig.Spec.Infra.Status.PlatformStatus.Type = "_bad_" - _, err = generateTemplateMachineConfigs(&RenderConfig{&controllerConfig.Spec, `{"dummy":"dummy"}`, nil, nil}, templateDir) + _, err = generateTemplateMachineConfigs(&RenderConfig{&controllerConfig.Spec, `{"dummy":"dummy"}`, nil, nil, nil}, templateDir) if err != nil { t.Errorf("expect nil error, got: %v", err) } // explicitly blocked controllerConfig.Spec.Infra.Status.PlatformStatus.Type = "_base" - _, err = generateTemplateMachineConfigs(&RenderConfig{&controllerConfig.Spec, `{"dummy":"dummy"}`, nil, nil}, templateDir) + _, err = generateTemplateMachineConfigs(&RenderConfig{&controllerConfig.Spec, `{"dummy":"dummy"}`, nil, nil, nil}, templateDir) expectErr(err, "failed to create MachineConfig for role master: platform _base unsupported") } @@ -364,7 +388,7 @@ func TestGenerateMachineConfigs(t *testing.T) { t.Fatalf("failed to get controllerconfig config: %v", err) } - cfgs, err := generateTemplateMachineConfigs(&RenderConfig{&controllerConfig.Spec, `{"dummy":"dummy"}`, nil, nil}, templateDir) + cfgs, err := generateTemplateMachineConfigs(&RenderConfig{&controllerConfig.Spec, `{"dummy":"dummy"}`, nil, nil, nil}, templateDir) if err != nil { t.Fatalf("failed to generate machine configs: %v", err) } @@ -483,7 +507,7 @@ func TestGetPaths(t *testing.T) { } c.res = append(c.res, platformBase) - got := getPaths(&RenderConfig{&config.Spec, `{"dummy":"dummy"}`, nil, nil}, config.Spec.Platform) + got := getPaths(&RenderConfig{&config.Spec, `{"dummy":"dummy"}`, nil, nil, nil}, config.Spec.Platform) if reflect.DeepEqual(got, c.res) { t.Fatalf("mismatch got: %s want: %s", got, c.res) } diff --git a/pkg/controller/template/template_controller.go b/pkg/controller/template/template_controller.go index a474778ceb..e037191cc4 100644 --- a/pkg/controller/template/template_controller.go +++ 
b/pkg/controller/template/template_controller.go @@ -12,8 +12,11 @@ import ( "github.com/golang/glog" configv1 "github.com/openshift/api/config/v1" osev1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" oseinformersv1 "github.com/openshift/client-go/config/informers/externalversions/config/v1" oselistersv1 "github.com/openshift/client-go/config/listers/config/v1" + osoperatorinformersv1 "github.com/openshift/client-go/operator/informers/externalversions/operator/v1" + osoperatorlistersv1 "github.com/openshift/client-go/operator/listers/operator/v1" mcoResourceApply "github.com/openshift/machine-config-operator/lib/resourceapply" mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" @@ -58,14 +61,16 @@ type Controller struct { syncHandler func(ccKey string) error enqueueControllerConfig func(*mcfgv1.ControllerConfig) - ccLister mcfglistersv1.ControllerConfigLister - mcLister mcfglistersv1.MachineConfigLister - featLister oselistersv1.FeatureGateLister + ccLister mcfglistersv1.ControllerConfigLister + mcLister mcfglistersv1.MachineConfigLister + featLister oselistersv1.FeatureGateLister + storageConfLister osoperatorlistersv1.StorageLister ccListerSynced cache.InformerSynced mcListerSynced cache.InformerSynced secretsInformerSynced cache.InformerSynced featListerSynced cache.InformerSynced + storageConfSynced cache.InformerSynced queue workqueue.RateLimitingInterface } @@ -77,6 +82,7 @@ func New( mcInformer mcfginformersv1.MachineConfigInformer, secretsInformer coreinformersv1.SecretInformer, featureInformer oseinformersv1.FeatureGateInformer, + storageInformer osoperatorinformersv1.StorageInformer, kubeClient clientset.Interface, mcfgClient mcfgclientset.Interface, ) *Controller { @@ -117,16 +123,24 @@ func New( DeleteFunc: ctrl.deleteFeature, }) + 
storageInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: ctrl.addStorage, + UpdateFunc: ctrl.updateStorage, + DeleteFunc: ctrl.deleteStorage, + }) + ctrl.syncHandler = ctrl.syncControllerConfig ctrl.enqueueControllerConfig = ctrl.enqueue ctrl.ccLister = ccInformer.Lister() ctrl.mcLister = mcInformer.Lister() ctrl.featLister = featureInformer.Lister() + ctrl.storageConfLister = storageInformer.Lister() ctrl.ccListerSynced = ccInformer.Informer().HasSynced ctrl.mcListerSynced = mcInformer.Informer().HasSynced ctrl.secretsInformerSynced = secretsInformer.Informer().HasSynced ctrl.featListerSynced = featureInformer.Informer().HasSynced + ctrl.storageConfSynced = storageInformer.Informer().HasSynced return ctrl } @@ -231,7 +245,7 @@ func (ctrl *Controller) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer ctrl.queue.ShutDown() - if !cache.WaitForCacheSync(stopCh, ctrl.ccListerSynced, ctrl.mcListerSynced, ctrl.secretsInformerSynced, ctrl.featListerSynced) { + if !cache.WaitForCacheSync(stopCh, ctrl.ccListerSynced, ctrl.mcListerSynced, ctrl.secretsInformerSynced, ctrl.featListerSynced, ctrl.storageConfSynced) { return } @@ -341,6 +355,36 @@ func (ctrl *Controller) deleteMachineConfig(obj interface{}) { ctrl.enqueueControllerConfig(cfg) } +func (ctrl *Controller) addStorage(obj interface{}) { + storage := obj.(*operatorv1.Storage) + glog.V(4).Infof("Adding Storage %s", storage.Name) + ctrl.enqueueController() +} + +func (ctrl *Controller) updateStorage(old, cur interface{}) { + oldStorage := old.(*operatorv1.Storage) + glog.V(4).Infof("Updating Storage %s", oldStorage.Name) + ctrl.enqueueController() +} + +func (ctrl *Controller) deleteStorage(obj interface{}) { + storage, ok := obj.(*operatorv1.Storage) + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj)) + return + } + storage, ok = 
tombstone.Obj.(*operatorv1.Storage) + if !ok { + utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a Storage %#v", obj)) + return + } + } + glog.V(4).Infof("Deleting Storage %s", storage.Name) + ctrl.enqueueController() +} + func (ctrl *Controller) resolveControllerRef(controllerRef *metav1.OwnerReference) *mcfgv1.ControllerConfig { // We can't look up by UID, so look up by Name and then verify UID. // Don't even try to look up by Name if it's the wrong Kind. @@ -480,7 +524,13 @@ func (ctrl *Controller) syncControllerConfig(key string) error { glog.V(2).Infof("%v", err) return ctrl.syncFailingStatus(cfg, err) } - mcs, err := getMachineConfigsForControllerConfig(ctrl.templatesDir, cfg, pullSecretRaw, fg) + storageConfig, err := ctrl.storageConfLister.Get(ctrlcommon.ClusterStorageInstanceName) + if err != nil && !errors.IsNotFound(err) { + err := fmt.Errorf("could not fetch Storage config CR: %w", err) + glog.V(2).Infof("%v", err) + return ctrl.syncFailingStatus(cfg, err) + } + mcs, err := getMachineConfigsForControllerConfig(ctrl.templatesDir, cfg, pullSecretRaw, fg, storageConfig) if err != nil { return ctrl.syncFailingStatus(cfg, err) } @@ -498,7 +548,7 @@ func (ctrl *Controller) syncControllerConfig(key string) error { return ctrl.syncCompletedStatus(cfg) } -func getMachineConfigsForControllerConfig(templatesDir string, config *mcfgv1.ControllerConfig, pullSecretRaw []byte, featureGate *configv1.FeatureGate) ([]*mcfgv1.MachineConfig, error) { +func getMachineConfigsForControllerConfig(templatesDir string, config *mcfgv1.ControllerConfig, pullSecretRaw []byte, featureGate *configv1.FeatureGate, storageConfig *operatorv1.Storage) ([]*mcfgv1.MachineConfig, error) { buf := &bytes.Buffer{} if err := json.Compact(buf, pullSecretRaw); err != nil { return nil, fmt.Errorf("couldn't compact pullsecret %q: %w", string(pullSecretRaw), err) @@ -507,6 +557,7 @@ func getMachineConfigsForControllerConfig(templatesDir string, config *mcfgv1.Co
ControllerConfigSpec: &config.Spec, PullSecret: string(buf.Bytes()), FeatureGate: featureGate, + StorageConfig: storageConfig, } mcs, err := generateTemplateMachineConfigs(rc, templatesDir) if err != nil { @@ -523,6 +574,6 @@ func getMachineConfigsForControllerConfig(templatesDir string, config *mcfgv1.Co } // RunBootstrap runs the tempate controller in boostrap mode. -func RunBootstrap(templatesDir string, config *mcfgv1.ControllerConfig, pullSecretRaw []byte, featureGate *configv1.FeatureGate) ([]*mcfgv1.MachineConfig, error) { - return getMachineConfigsForControllerConfig(templatesDir, config, pullSecretRaw, featureGate) +func RunBootstrap(templatesDir string, config *mcfgv1.ControllerConfig, pullSecretRaw []byte, featureGate *configv1.FeatureGate, storageConfig *operatorv1.Storage) ([]*mcfgv1.MachineConfig, error) { + return getMachineConfigsForControllerConfig(templatesDir, config, pullSecretRaw, featureGate, storageConfig) } diff --git a/pkg/controller/template/template_controller_test.go b/pkg/controller/template/template_controller_test.go index 88e9ad1cf2..3c2ea1b099 100644 --- a/pkg/controller/template/template_controller_test.go +++ b/pkg/controller/template/template_controller_test.go @@ -11,6 +11,8 @@ import ( osev1 "github.com/openshift/api/config/v1" oseconfigfake "github.com/openshift/client-go/config/clientset/versioned/fake" oseinformersv1 "github.com/openshift/client-go/config/informers/externalversions" + oseoperatorfake "github.com/openshift/client-go/operator/clientset/versioned/fake" + operatorinformers "github.com/openshift/client-go/operator/informers/externalversions" "github.com/openshift/library-go/pkg/cloudprovider" corev1 "k8s.io/api/core/v1" @@ -41,9 +43,10 @@ var ( type fixture struct { t *testing.T - client *fake.Clientset - kubeclient *k8sfake.Clientset - oseclient *oseconfigfake.Clientset + client *fake.Clientset + kubeclient *k8sfake.Clientset + oseclient *oseconfigfake.Clientset + osoperatorclient *oseoperatorfake.Clientset 
ccLister []*mcfgv1.ControllerConfig mcLister []*mcfgv1.MachineConfig @@ -103,13 +106,22 @@ func (f *fixture) newController() *Controller { f.client = fake.NewSimpleClientset(f.objects...) f.kubeclient = k8sfake.NewSimpleClientset(f.kubeobjects...) f.oseclient = oseconfigfake.NewSimpleClientset(f.oseobjects...) + f.osoperatorclient = oseoperatorfake.NewSimpleClientset([]runtime.Object{}...) featinformer := oseinformersv1.NewSharedInformerFactory(f.oseclient, 0) cinformer := coreinformersv1.NewSharedInformerFactory(f.kubeclient, noResyncPeriodFunc()) + opinfromer := operatorinformers.NewSharedInformerFactory(f.osoperatorclient, noResyncPeriodFunc()) i := informers.NewSharedInformerFactory(f.client, noResyncPeriodFunc()) - c := New(templateDir, - i.Machineconfiguration().V1().ControllerConfigs(), i.Machineconfiguration().V1().MachineConfigs(), cinformer.Core().V1().Secrets(), featinformer.Config().V1().FeatureGates(), - f.kubeclient, f.client) + c := New( + templateDir, + i.Machineconfiguration().V1().ControllerConfigs(), + i.Machineconfiguration().V1().MachineConfigs(), + cinformer.Core().V1().Secrets(), + featinformer.Config().V1().FeatureGates(), + opinfromer.Operator().V1().Storages(), + f.kubeclient, + f.client, + ) c.ccListerSynced = alwaysReady c.mcListerSynced = alwaysReady @@ -304,7 +316,7 @@ func TestCreatesMachineConfigs(t *testing.T) { f.objects = append(f.objects, cc) f.kubeobjects = append(f.kubeobjects, ps) - expMCs, err := getMachineConfigsForControllerConfig(templateDir, cc, []byte(`{"dummy": "dummy"}`), nil) + expMCs, err := getMachineConfigsForControllerConfig(templateDir, cc, []byte(`{"dummy": "dummy"}`), nil, nil) if err != nil { t.Fatal(err) } @@ -341,7 +353,7 @@ func TestCreatesMachineConfigsWithFeatureGate(t *testing.T) { f.objects = append(f.objects, cc) f.kubeobjects = append(f.kubeobjects, ps) - expMCs, err := getMachineConfigsForControllerConfig(templateDir, cc, []byte(`{"dummy": "dummy"}`), feat) + expMCs, err := 
getMachineConfigsForControllerConfig(templateDir, cc, []byte(`{"dummy": "dummy"}`), feat, nil) if err != nil { t.Fatal(err) } @@ -374,7 +386,7 @@ func TestDoNothing(t *testing.T) { feat := newFeatures("cluster", "CustomNoUpgrade", []string{cloudprovider.ExternalCloudProviderFeature}, nil) f.featLister = append(f.featLister, feat) - mcs, err := getMachineConfigsForControllerConfig(templateDir, cc, []byte(`{"dummy": "dummy"}`), feat) + mcs, err := getMachineConfigsForControllerConfig(templateDir, cc, []byte(`{"dummy": "dummy"}`), feat, nil) if err != nil { t.Fatal(err) } @@ -414,7 +426,7 @@ func TestRecreateMachineConfig(t *testing.T) { feat := newFeatures("cluster", "CustomNoUpgrade", []string{cloudprovider.ExternalCloudProviderFeature}, nil) f.featLister = append(f.featLister, feat) - mcs, err := getMachineConfigsForControllerConfig(templateDir, cc, []byte(`{"dummy": "dummy"}`), feat) + mcs, err := getMachineConfigsForControllerConfig(templateDir, cc, []byte(`{"dummy": "dummy"}`), feat, nil) if err != nil { t.Fatal(err) } @@ -455,7 +467,7 @@ func TestUpdateMachineConfig(t *testing.T) { feat := newFeatures("cluster", "CustomNoUpgrade", []string{cloudprovider.ExternalCloudProviderFeature}, nil) f.featLister = append(f.featLister, feat) - mcs, err := getMachineConfigsForControllerConfig(templateDir, cc, []byte(`{"dummy": "dummy"}`), feat) + mcs, err := getMachineConfigsForControllerConfig(templateDir, cc, []byte(`{"dummy": "dummy"}`), feat, nil) if err != nil { t.Fatal(err) } @@ -475,7 +487,7 @@ func TestUpdateMachineConfig(t *testing.T) { f.objects = append(f.objects, mcs[idx]) } - expmcs, err := getMachineConfigsForControllerConfig(templateDir, cc, []byte(`{"dummy": "dummy"}`), feat) + expmcs, err := getMachineConfigsForControllerConfig(templateDir, cc, []byte(`{"dummy": "dummy"}`), feat, nil) if err != nil { t.Fatal(err) } diff --git a/templates/master/01-master-kubelet/_base/files/kubelet.yaml b/templates/master/01-master-kubelet/_base/files/kubelet.yaml index 
cb1a416968..cf44bafa43 100644 --- a/templates/master/01-master-kubelet/_base/files/kubelet.yaml +++ b/templates/master/01-master-kubelet/_base/files/kubelet.yaml @@ -28,7 +28,6 @@ contents: APIPriorityAndFairness: true RotateKubeletServerCertificate: true DownwardAPIHugePages: true - OpenShiftPodSecurityAdmission: true RetroactiveDefaultStorageClass: false RotateKubeletServerCertificate: true serverTLSBootstrap: true diff --git a/templates/master/01-master-kubelet/vsphere/units/kubelet.service.yaml b/templates/master/01-master-kubelet/vsphere/units/kubelet.service.yaml index 2e68c91e0c..d13b03d7d2 100644 --- a/templates/master/01-master-kubelet/vsphere/units/kubelet.service.yaml +++ b/templates/master/01-master-kubelet/vsphere/units/kubelet.service.yaml @@ -15,6 +15,9 @@ contents: | ExecStartPre=/bin/rm -f /var/lib/kubelet/memory_manager_state {{- if eq .IPFamilies "IPv6"}} Environment="KUBELET_NODE_IP=::" +{{- end}} +{{- if eq (vSphereCSIMigration .) "enabled"}} + Environment="OPENSHIFT_DO_VSPHERE_MIGRATION=true" {{- end}} EnvironmentFile=/etc/os-release EnvironmentFile=-/etc/kubernetes/kubelet-workaround diff --git a/templates/worker/01-worker-kubelet/_base/files/kubelet.yaml b/templates/worker/01-worker-kubelet/_base/files/kubelet.yaml index cb1a416968..cf44bafa43 100644 --- a/templates/worker/01-worker-kubelet/_base/files/kubelet.yaml +++ b/templates/worker/01-worker-kubelet/_base/files/kubelet.yaml @@ -28,7 +28,6 @@ contents: APIPriorityAndFairness: true RotateKubeletServerCertificate: true DownwardAPIHugePages: true - OpenShiftPodSecurityAdmission: true RetroactiveDefaultStorageClass: false RotateKubeletServerCertificate: true serverTLSBootstrap: true diff --git a/templates/worker/01-worker-kubelet/vsphere/units/kubelet.service.yaml b/templates/worker/01-worker-kubelet/vsphere/units/kubelet.service.yaml index 7c477aafbd..9f7b52499d 100644 --- a/templates/worker/01-worker-kubelet/vsphere/units/kubelet.service.yaml +++ 
b/templates/worker/01-worker-kubelet/vsphere/units/kubelet.service.yaml @@ -15,6 +15,9 @@ contents: | ExecStartPre=/bin/rm -f /var/lib/kubelet/memory_manager_state {{- if eq .IPFamilies "IPv6"}} Environment="KUBELET_NODE_IP=::" +{{- end}} +{{- if eq (vSphereCSIMigration .) "enabled"}} + Environment="OPENSHIFT_DO_VSPHERE_MIGRATION=true" {{- end}} EnvironmentFile=/etc/os-release EnvironmentFile=-/etc/kubernetes/kubelet-workaround diff --git a/test/e2e-bootstrap/bootstrap_test.go b/test/e2e-bootstrap/bootstrap_test.go index 05ae6455a3..20fd39441f 100644 --- a/test/e2e-bootstrap/bootstrap_test.go +++ b/test/e2e-bootstrap/bootstrap_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/require" configv1 "github.com/openshift/api/config/v1" + apioperatorsv1 "github.com/openshift/api/operator/v1" apioperatorsv1alpha1 "github.com/openshift/api/operator/v1alpha1" "github.com/openshift/machine-config-operator/internal/clients" mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" @@ -61,6 +62,7 @@ func TestE2EBootstrap(t *testing.T) { configv1.Install(scheme.Scheme) mcfgv1.Install(scheme.Scheme) apioperatorsv1alpha1.Install(scheme.Scheme) + apioperatorsv1.Install(scheme.Scheme) baseTestManifests := loadBaseTestManifests(t) @@ -82,8 +84,10 @@ func TestE2EBootstrap(t *testing.T) { testCases := []struct { name string manifests [][]byte + nodeConfig runtime.Object waitForMasterMCs []string waitForWorkerMCs []string + platform configv1.PlatformType }{ { name: "With no additional manifests", @@ -116,15 +120,18 @@ metadata: }, { name: "With a node config manifest empty \"cgroupMode\"", - manifests: [][]byte{ - []byte(`apiVersion: config.openshift.io/v1 -kind: Node -metadata: - name: cluster -spec: - workerLatencyProfile: MediumUpdateAverageReaction`), + nodeConfig: &configv1.Node{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "config.openshift.io/v1", + Kind: "Node", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + 
Spec: configv1.NodeSpec{ + WorkerLatencyProfile: configv1.MediumUpdateAverageReaction, + }, }, - waitForMasterMCs: []string{"99-master-ssh", "99-master-generated-registries"}, waitForWorkerMCs: []string{"99-worker-ssh", "99-worker-generated-registries", "97-worker-generated-kubelet"}, }, @@ -161,6 +168,18 @@ spec: }, { name: "With a featuregate manifest and a config node manifest", + nodeConfig: &configv1.Node{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "config.openshift.io/v1", + Kind: "Node", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: configv1.NodeSpec{ + CgroupMode: configv1.CgroupModeV2, + }, + }, manifests: [][]byte{ []byte(`apiVersion: config.openshift.io/v1 kind: FeatureGate @@ -168,25 +187,23 @@ metadata: name: cluster spec: featureSet: TechPreviewNoUpgrade`), - []byte(`apiVersion: config.openshift.io/v1 -kind: Node -metadata: - name: cluster -spec: - cgroupMode: "v2"`), }, waitForMasterMCs: []string{"99-master-ssh", "99-master-generated-registries", "98-master-generated-kubelet", "97-master-generated-kubelet"}, waitForWorkerMCs: []string{"99-worker-ssh", "99-worker-generated-registries", "98-worker-generated-kubelet", "97-worker-generated-kubelet"}, }, { name: "With a config node manifest and without a featuregate manifest", - manifests: [][]byte{ - []byte(`apiVersion: config.openshift.io/v1 -kind: Node -metadata: - name: cluster -spec: - cgroupMode: "v2"`), + nodeConfig: &configv1.Node{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "config.openshift.io/v1", + Kind: "Node", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: configv1.NodeSpec{ + CgroupMode: configv1.CgroupModeV2, + }, }, // As the CGroupsV2 feature is GA, 97-{master/worker}-generated-kubelet mcs are expected even without a Techpreview featuregate waitForMasterMCs: []string{"99-master-ssh", "99-master-generated-registries", "97-master-generated-kubelet"}, @@ -194,14 +211,20 @@ spec: }, { name: "With a node config manifest and a master kubelet config 
manifest", + nodeConfig: &configv1.Node{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "config.openshift.io/v1", + Kind: "Node", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: configv1.NodeSpec{ + WorkerLatencyProfile: configv1.MediumUpdateAverageReaction, + CgroupMode: configv1.CgroupModeV1, + }, + }, manifests: [][]byte{ - []byte(`apiVersion: config.openshift.io/v1 -kind: Node -metadata: - name: cluster -spec: - workerLatencyProfile: MediumUpdateAverageReaction - cgroupMode: "v1"`), []byte(`apiVersion: machineconfiguration.openshift.io/v1 kind: KubeletConfig metadata: @@ -226,13 +249,19 @@ spec: }, { name: "With a node config manifest and a worker kubelet config manifest", + nodeConfig: &configv1.Node{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "config.openshift.io/v1", + Kind: "Node", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: configv1.NodeSpec{ + WorkerLatencyProfile: configv1.MediumUpdateAverageReaction, + }, + }, manifests: [][]byte{ - []byte(`apiVersion: config.openshift.io/v1 -kind: Node -metadata: - name: cluster -spec: - workerLatencyProfile: MediumUpdateAverageReaction`), []byte(`apiVersion: machineconfiguration.openshift.io/v1 kind: KubeletConfig metadata: @@ -280,6 +309,60 @@ spec: waitForMasterMCs: []string{"99-master-ssh", "99-master-generated-registries"}, waitForWorkerMCs: []string{"99-worker-ssh", "99-worker-generated-registries", "99-worker-generated-kubelet"}, }, + { + name: "With a storage manifest with vSphere CSI driver", + manifests: [][]byte{ + []byte(`apiVersion: operator.openshift.io/v1 +kind: Storage +metadata: + name: cluster +spec: + managementState: Managed + logLevel: Normal + operatorLogLevel: Normal + vsphereStorageDriver: CSIWithMigrationDriver +`), + }, + waitForMasterMCs: []string{"99-master-ssh", "99-master-generated-registries"}, + waitForWorkerMCs: []string{"99-worker-ssh", "99-worker-generated-registries"}, + platform: configv1.VSpherePlatformType, + }, + { + name: "With a 
storage manifest with vSphere legacy driver", + manifests: [][]byte{ + []byte(`apiVersion: operator.openshift.io/v1 +kind: Storage +metadata: + name: cluster +spec: + managementState: Managed + logLevel: Normal + operatorLogLevel: Normal + vsphereStorageDriver: LegacyDeprecatedInTreeDriver +`), + }, + waitForMasterMCs: []string{"99-master-ssh", "99-master-generated-registries"}, + waitForWorkerMCs: []string{"99-worker-ssh", "99-worker-generated-registries"}, + platform: configv1.VSpherePlatformType, + }, + { + name: "With a storage manifest with no specified vSphere driver", + manifests: [][]byte{ + []byte(`apiVersion: operator.openshift.io/v1 +kind: Storage +metadata: + name: cluster +spec: + managementState: Managed + logLevel: Normal + operatorLogLevel: Normal + vsphereStorageDriver: "" +`), + }, + waitForMasterMCs: []string{"99-master-ssh", "99-master-generated-registries"}, + waitForWorkerMCs: []string{"99-worker-ssh", "99-worker-generated-registries"}, + platform: configv1.VSpherePlatformType, + }, { name: "With a container runtime config", manifests: [][]byte{ @@ -304,13 +387,34 @@ spec: t.Run(tc.name, func(t *testing.T) { objs := append([]runtime.Object{}, baseTestManifests...) objs = append(objs, loadRawManifests(t, tc.manifests)...) - nodeConfigManifest := [][]byte{ - []byte(`apiVersion: config.openshift.io/v1 -kind: Node -metadata: - name: cluster`), + + // If node configurations are specified use them, otherwise fill in a default node object. + // A node object is needed for the renders to complete properly. + if tc.nodeConfig != nil { + objs = append(objs, tc.nodeConfig) + } else { + objs = append(objs, &configv1.Node{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "config.openshift.io/v1", + Kind: "Node", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + }) + } + + // If the platform is specified, make sure we set it in the infrastructure config object. 
+ // The platform type is important for tests like vSphere where different options are expressed depending on the platform. + if tc.platform != "" { + for i, obj := range objs { + if obj.GetObjectKind().GroupVersionKind().Kind == "ControllerConfig" { + obj.(*mcfgv1.ControllerConfig).Spec.Infra.Status.PlatformStatus.Type = tc.platform + objs[i] = obj + break + } + } } - objs = append(objs, loadRawManifests(t, nodeConfigManifest)...) fixture := newTestFixture(t, cfg, objs) // Defer stop after cleanup so that the cleanup happens after the stop (defer unwrapping order) @@ -335,14 +439,14 @@ metadata: require.NoError(t, err) defer os.RemoveAll(srcDir) - // Ensure all the manifests are in the input directory - err = copyDir(bootstrapTestDataDir, srcDir) - require.NoError(t, err) + // Marshal the object manifests and write them to the input directory + for id, obj := range objs { + manifest, err := yaml.Marshal(obj) + require.NoError(t, err) - for id, manifest := range tc.manifests { name := fmt.Sprintf("manifest-%d.yaml", id) path := filepath.Join(srcDir, name) - err := os.WriteFile(path, manifest, 0644) + err = os.WriteFile(path, manifest, 0644) require.NoError(t, err) } @@ -427,6 +531,7 @@ func createControllers(ctx *ctrlcommon.ControllerContext) []ctrlcommon.Controlle ctx.InformerFactory.Machineconfiguration().V1().MachineConfigs(), ctx.OpenShiftConfigKubeNamespacedInformerFactory.Core().V1().Secrets(), ctx.ConfigInformerFactory.Config().V1().FeatureGates(), + ctx.OperatorInformerFactory.Operator().V1().Storages(), ctx.ClientBuilder.KubeClientOrDie("template-controller"), ctx.ClientBuilder.MachineConfigClientOrDie("template-controller"), ), @@ -529,7 +634,7 @@ func loadBaseTestManifests(t *testing.T) []runtime.Object { func loadRawManifests(t *testing.T, rawObjs [][]byte) []runtime.Object { codecFactory := serializer.NewCodecFactory(scheme.Scheme) - decoder := codecFactory.UniversalDecoder(corev1GroupVersion, mcfgv1.GroupVersion, apioperatorsv1alpha1.GroupVersion, 
configv1.GroupVersion) + decoder := codecFactory.UniversalDecoder(corev1GroupVersion, mcfgv1.GroupVersion, apioperatorsv1.GroupVersion, apioperatorsv1alpha1.GroupVersion, configv1.GroupVersion) objs := []runtime.Object{} for _, raw := range rawObjs { diff --git a/test/framework/clientset.go b/test/framework/clientset.go index 761a0e1d2a..fd985e013e 100644 --- a/test/framework/clientset.go +++ b/test/framework/clientset.go @@ -8,6 +8,7 @@ import ( clientbuildv1 "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1" clientconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" clientimagev1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + clientoperatorsv1 "github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1" clientoperatorsv1alpha1 "github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1" clientmachineconfigv1 "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/typed/machineconfiguration.openshift.io/v1" clientapiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" @@ -23,6 +24,7 @@ type ClientSet struct { clientconfigv1.ConfigV1Interface clientmachineconfigv1.MachineconfigurationV1Interface clientapiextensionsv1.ApiextensionsV1Interface + clientoperatorsv1.OperatorV1Interface clientoperatorsv1alpha1.OperatorV1alpha1Interface clientbuildv1.BuildV1Interface clientimagev1.ImageV1Interface @@ -71,6 +73,7 @@ func NewClientSetFromConfig(config *rest.Config) *ClientSet { MachineconfigurationV1Interface: clientmachineconfigv1.NewForConfigOrDie(config), ApiextensionsV1Interface: clientapiextensionsv1.NewForConfigOrDie(config), OperatorV1alpha1Interface: clientoperatorsv1alpha1.NewForConfigOrDie(config), + OperatorV1Interface: clientoperatorsv1.NewForConfigOrDie(config), BuildV1Interface: clientbuildv1.NewForConfigOrDie(config), ImageV1Interface: 
clientimagev1.NewForConfigOrDie(config), } diff --git a/test/framework/envtest.go b/test/framework/envtest.go index c23974208c..2d4a3d32d3 100644 --- a/test/framework/envtest.go +++ b/test/framework/envtest.go @@ -16,6 +16,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/envtest" configv1 "github.com/openshift/api/config/v1" + apioperatorsv1 "github.com/openshift/api/operator/v1" apioperatorsv1alpha1 "github.com/openshift/api/operator/v1alpha1" mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" corev1 "k8s.io/api/core/v1" @@ -29,7 +30,7 @@ const ( // TODO: Figure out how to obtain this value programmatically so we don't // have to remember to increment it. - k8sVersion string = "1.22.1" + k8sVersion string = "1.26.1" ) // This is needed because both setup-envtest and the kubebuilder tools assume @@ -111,6 +112,7 @@ func NewTestEnv(t *testing.T) *envtest.Environment { filepath.Join("..", "..", "manifests", "controllerconfig.crd.yaml"), filepath.Join("..", "..", "vendor", "github.com", "openshift", "api", "config", "v1"), filepath.Join("..", "..", "vendor", "github.com", "openshift", "api", "operator", "v1alpha1"), + filepath.Join("..", "..", "vendor", "github.com", "openshift", "api", "operator", "v1"), }, CleanUpAfterUse: true, }, @@ -189,6 +191,16 @@ func CheckCleanEnvironment(t *testing.T, clientSet *ClientSet) { // END: operator.openshift.io/v1alpha1 // ##################################### + // ##################################### + // BEGIN: operator.openshift.io/v1 + // ##################################### + storagesList, err := clientSet.Storages().List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + require.Len(t, storagesList.Items, 0) + // ##################################### + // END: operator.openshift.io/v1 + // ##################################### + // ############################# // BEGIN: config.openshift.io/v1 // ############################# @@ -285,6 +297,15 @@ func CleanEnvironment(t 
*testing.T, clientSet *ClientSet) { // END: operator.openshift.io/v1alpha1 // ##################################### + // ##################################### + // BEGIN: operator.openshift.io/v1 + // ##################################### + err = clientSet.Storages().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{}) + require.NoError(t, err) + // ##################################### + // END: operator.openshift.io/v1 + // ##################################### + // ############################# // BEGIN: config.openshift.io/v1 // ############################# @@ -345,6 +366,9 @@ func CreateObjects(t *testing.T, clientSet *ClientSet, objs ...runtime.Object) { case *apioperatorsv1alpha1.ImageContentSourcePolicy: _, err := clientSet.ImageContentSourcePolicies().Create(ctx, tObj, metav1.CreateOptions{}) require.NoError(t, err) + case *apioperatorsv1.Storage: + _, err := clientSet.Storages().Create(ctx, tObj, metav1.CreateOptions{}) + require.NoError(t, err) case *configv1.Image: _, err := clientSet.ConfigV1Interface.Images().Create(ctx, tObj, metav1.CreateOptions{}) require.NoError(t, err) diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-Default.crd.yaml similarity index 99% rename from vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml rename to vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-Default.crd.yaml index 3e53b28b9e..7edc7f23a7 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-Default.crd.yaml @@ -6,6 +6,7 @@ metadata: include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + 
release.openshift.io/feature-set: Default name: apiservers.config.openshift.io spec: group: config.openshift.io @@ -101,6 +102,7 @@ spec: - "" - identity - aescbc + - aesgcm servingCerts: description: servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic. type: object diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..8ce5214c1d --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,179 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: apiservers.config.openshift.io +spec: + group: config.openshift.io + names: + kind: APIServer + listKind: APIServerList + plural: apiservers + singular: apiserver + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "APIServer holds configuration (like serving certificates, client CA and CORS domains) shared by all API servers in the system, among them especially kube-apiserver and openshift-apiserver. The canonical name of an instance is 'cluster'. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + additionalCORSAllowedOrigins: + description: additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language. + type: array + items: + type: string + audit: + description: audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster. + type: object + default: + profile: Default + properties: + customRules: + description: customRules specify profiles per group. These profile take precedence over the top-level profile field if they apply. They are evaluation from top to bottom and the first one that matches, applies. + type: array + items: + description: AuditCustomRule describes a custom rule for an audit profile that takes precedence over the top-level profile. + type: object + required: + - group + - profile + properties: + group: + description: group is a name of group a request user must be member of in order to this profile to apply. 
+ type: string + minLength: 1 + profile: + description: "profile specifies the name of the desired audit policy configuration to be deployed to all OpenShift-provided API servers in the cluster. \n The following profiles are provided: - Default: the existing default policy. - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. \n If unset, the 'Default' profile is used as the default." + type: string + enum: + - Default + - WriteRequestBodies + - AllRequestBodies + - None + x-kubernetes-list-map-keys: + - group + x-kubernetes-list-type: map + profile: + description: "profile specifies the name of the desired top-level audit profile to be applied to all requests sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, openshift-apiserver and oauth-apiserver), with the exception of those requests that match one or more of the customRules. \n The following profiles are provided: - Default: default policy which means MetaData level logging with the exception of events (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody level). - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. \n Warning: It is not recommended to disable audit logging by using the `None` profile unless you are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. 
If you disable audit logging and a support situation arises, you might need to enable audit logging and reproduce the issue in order to troubleshoot properly. \n If unset, the 'Default' profile is used as the default." + type: string + default: Default + enum: + - Default + - WriteRequestBodies + - AllRequestBodies + - None + clientCA: + description: 'clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"] - CA bundle.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + encryption: + description: encryption allows the configuration of encryption of resources at the datastore layer. + type: object + properties: + type: + description: "type defines what encryption type should be used to encrypt resources at the datastore layer. When this field is unset (i.e. when it is set to the empty string), identity is implied. The behavior of unset can and will change over time. Even if encryption is enabled by default, the meaning of unset may change to a different encryption type based on changes in best practices. \n When encryption is enabled, all sensitive resources shipped with the platform are encrypted. This list of sensitive resources can and will change over time. The current authoritative list is: \n 1. secrets 2. configmaps 3. routes.route.openshift.io 4. oauthaccesstokens.oauth.openshift.io 5. 
oauthauthorizetokens.oauth.openshift.io" + type: string + enum: + - "" + - identity + - aescbc + - aesgcm + servingCerts: + description: servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic. + type: object + properties: + namedCertificates: + description: namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. If no named certificates are provided, or no named certificates match the server name as understood by a client, the defaultServingCertificate will be used. + type: array + items: + description: APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate. + type: object + properties: + names: + description: names is a optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. Exact names trump over wildcard names. Explicit names defined here trump over extracted implicit names. + type: array + items: + type: string + servingCertificate: + description: 'servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. The secret must exist in the openshift-config namespace and contain the following required fields: - Secret.Data["tls.key"] - TLS private key. - Secret.Data["tls.crt"] - TLS certificate.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + tlsSecurityProfile: + description: "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. \n If unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12." 
+ type: object + properties: + custom: + description: "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this: \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: TLSv1.1" + type: object + properties: + ciphers: + description: "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" + type: array + items: + type: string + minTLSVersion: + description: "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion: TLSv1.1 \n NOTE: currently the highest minTLSVersion allowed is VersionTLS12" + type: string + enum: + - VersionTLS10 + - VersionTLS11 + - VersionTLS12 + - VersionTLS13 + nullable: true + intermediate: + description: "intermediate is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 minTLSVersion: TLSv1.2" + type: object + nullable: true + modern: + description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 
minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported." + type: object + nullable: true + old: + description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: TLSv1.0" + type: object + nullable: true + type: + description: "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. Old, Intermediate and Modern are TLS security profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations \n The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced. \n Note that the Modern profile is currently not supported because it is not yet well adopted by common software libraries." + type: string + enum: + - Old + - Intermediate + - Modern + - Custom + status: + description: status holds observed values from the cluster. They may not be overridden. 
+ type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/stable.apiserver.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.apiserver.testsuite.yaml index 5c28143d54..75f846a3db 100644 --- a/vendor/github.com/openshift/api/config/v1/stable.apiserver.testsuite.yaml +++ b/vendor/github.com/openshift/api/config/v1/stable.apiserver.testsuite.yaml @@ -1,16 +1,36 @@ apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this name: "[Stable] APIServer" -crd: 0000_10_config-operator_01_apiserver.crd.yaml +crd: 0000_10_config-operator_01_apiserver-Default.crd.yaml tests: onCreate: - - name: Should be able to create a minimal ClusterOperator + - name: Should be able to create encrypt with aescbc initial: | apiVersion: config.openshift.io/v1 kind: APIServer - spec: {} # No spec is required for a APIServer + spec: + encryption: + type: aescbc + expected: | + apiVersion: config.openshift.io/v1 + kind: APIServer + spec: + audit: + profile: Default + encryption: + type: aescbc + - name: Should be able to create encrypt with aesgcm + initial: | + apiVersion: config.openshift.io/v1 + kind: APIServer + spec: + encryption: + type: aesgcm expected: | apiVersion: config.openshift.io/v1 kind: APIServer spec: audit: profile: Default + encryption: + type: aesgcm + diff --git a/vendor/github.com/openshift/api/config/v1/techpreview.apiserver.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.apiserver.testsuite.yaml new file mode 100644 index 0000000000..74aa92b470 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/techpreview.apiserver.testsuite.yaml @@ -0,0 +1,35 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[TechPreviewNoUpgrade] APIServer" +crd: 0000_10_config-operator_01_apiserver-TechPreviewNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should be able to create 
encrypt with aescbc + initial: | + apiVersion: config.openshift.io/v1 + kind: APIServer + spec: + encryption: + type: aescbc + expected: | + apiVersion: config.openshift.io/v1 + kind: APIServer + spec: + audit: + profile: Default + encryption: + type: aescbc + - name: Should be able to create encrypt with aesgcm + initial: | + apiVersion: config.openshift.io/v1 + kind: APIServer + spec: + encryption: + type: aesgcm + expected: | + apiVersion: config.openshift.io/v1 + kind: APIServer + spec: + audit: + profile: Default + encryption: + type: aesgcm diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go index 31801aacf0..f4b52a2277 100644 --- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -184,7 +184,7 @@ type APIServerEncryption struct { Type EncryptionType `json:"type,omitempty"` } -// +kubebuilder:validation:Enum="";identity;aescbc +// +kubebuilder:validation:Enum="";identity;aescbc;aesgcm type EncryptionType string const ( @@ -195,6 +195,10 @@ const ( // aescbc refers to a type where AES-CBC with PKCS#7 padding and a 32-byte key // is used to perform encryption at the datastore layer. EncryptionTypeAESCBC EncryptionType = "aescbc" + + // aesgcm refers to a type where AES-GCM with random nonce and a 32-byte key + // is used to perform encryption at the datastore layer. + EncryptionTypeAESGCM EncryptionType = "aesgcm" ) type APIServerStatus struct { diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go index ce7f27ca14..a254cac4fe 100644 --- a/vendor/github.com/openshift/api/config/v1/types_feature.go +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -115,11 +115,11 @@ var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{ with("NodeSwap"). 
// sig-node, ehashman, Kubernetes feature gate with("MachineAPIProviderOpenStack"). // openstack, egarcia (#forum-openstack), OCP specific with("InsightsConfigAPI"). // insights, tremes (#ccx), OCP specific - with("CSIInlineVolumeAdmission"). // sig-storage, jdobson, OCP specific with("MatchLabelKeysInPodTopologySpread"). // sig-scheduling, ingvagabund (#forum-workloads), Kubernetes feature gate with("RetroactiveDefaultStorageClass"). // sig-storage, RomanBednar, Kubernetes feature gate with("PDBUnhealthyPodEvictionPolicy"). // sig-apps, atiratree (#forum-workloads), Kubernetes feature gate with("DynamicResourceAllocation"). // sig-scheduling, jchaloup (#forum-workloads), Kubernetes feature gate + with("OpenShiftPodSecurityAdmission"). // bz-auth, stlaz, OCP specific toFeatures(), LatencySensitive: newDefaultFeatures(). with( @@ -133,7 +133,6 @@ var defaultFeatures = &FeatureGateEnabledDisabled{ "APIPriorityAndFairness", // sig-apimachinery, deads2k "RotateKubeletServerCertificate", // sig-pod, sjenning "DownwardAPIHugePages", // sig-node, rphillips - "OpenShiftPodSecurityAdmission", // bz-auth, stlaz, OCP specific }, Disabled: []string{ "RetroactiveDefaultStorageClass", // sig-storage, RomanBednar, Kubernetes feature gate diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml index 2bf1818626..484576c1aa 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml @@ -69,6 +69,19 @@ spec: type: object nullable: true x-kubernetes-preserve-unknown-fields: true + vsphereStorageDriver: + description: 'VSphereStorageDriver indicates the storage driver to use on VSphere clusters. Once this field is set to CSIWithMigrationDriver, it can not be changed. 
If this is empty, the platform will choose a good default, which may change over time without notice. DEPRECATED: This field will be removed in a future release.' + type: string + enum: + - "" + - LegacyDeprecatedInTreeDriver + - CSIWithMigrationDriver + x-kubernetes-validations: + - rule: self == oldSelf || oldSelf == "" || self == "CSIWithMigrationDriver" + message: VSphereStorageDriver can not be changed once it is set to CSIWithMigrationDriver + x-kubernetes-validations: + - rule: '!has(oldSelf.vsphereStorageDriver) || has(self.vsphereStorageDriver)' + message: VSphereStorageDriver is required once set status: description: status holds observed values from the cluster. They may not be overridden. type: object diff --git a/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml index 5c7496bca3..54776a1b75 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml @@ -53,12 +53,84 @@ spec: driverConfig: description: driverConfig can be used to specify platform specific driver configuration. When omitted, this means no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. properties: + aws: + description: aws is used to configure the AWS CSI driver. + properties: + kmsKeyARN: + description: kmsKeyARN sets the cluster default storage class to encrypt volumes with a user-defined KMS key, rather than the default KMS key used by AWS. The value may be either the ARN or Alias ARN of a KMS key. + pattern: ^arn:(aws|aws-cn|aws-us-gov):kms:[a-z0-9-]+:[0-9]{12}:(key|alias)\/.*$ + type: string + type: object + azure: + description: azure is used to configure the Azure CSI driver. 
+ properties: + diskEncryptionSet: + description: diskEncryptionSet sets the cluster default storage class to encrypt volumes with a customer-managed encryption set, rather than the default platform-managed keys. + properties: + name: + description: name is the name of the disk encryption set that will be set on the default storage class. The value should consist of only alphanumberic characters, underscores (_), hyphens, and be at most 80 characters in length. + maxLength: 80 + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + resourceGroup: + description: resourceGroup defines the Azure resource group that contains the disk encryption set. The value should consist of only alphanumberic characters, underscores (_), parentheses, hyphens and periods. The value should not end in a period and be at most 90 characters in length. + maxLength: 90 + pattern: ^[\w\.\-\(\)]*[\w\-\(\)]$ + type: string + subscriptionID: + description: 'subscriptionID defines the Azure subscription that contains the disk encryption set. The value should meet the following conditions: 1. It should be a 128-bit number. 2. It should be 36 characters (32 hexadecimal characters and 4 hyphens) long. 3. It should be displayed in five groups separated by hyphens (-). 4. The first group should be 8 characters long. 5. The second, third, and fourth groups should be 4 characters long. 6. The fifth group should be 12 characters long. An Example SubscrionID: f2007bbf-f802-4a47-9336-cf7c6b89b378' + maxLength: 36 + pattern: ^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$ + type: string + required: + - name + - resourceGroup + - subscriptionID + type: object + type: object driverType: - description: "driverType indicates type of CSI driver for which the driverConfig is being applied to. \n Valid values are: \n * vSphere \n Allows configuration of vsphere CSI driver topology. \n --- Consumers should treat unknown values as a NO-OP." 
+ description: 'driverType indicates type of CSI driver for which the driverConfig is being applied to. Valid values are: AWS, Azure, GCP, vSphere and omitted. Consumers should treat unknown values as a NO-OP.' enum: - "" + - AWS + - Azure + - GCP - vSphere type: string + gcp: + description: gcp is used to configure the GCP CSI driver. + properties: + kmsKey: + description: kmsKey sets the cluster default storage class to encrypt volumes with customer-supplied encryption keys, rather than the default keys managed by GCP. + properties: + keyRing: + description: keyRing is the name of the KMS Key Ring which the KMS Key belongs to. The value should correspond to an existing KMS key ring and should consist of only alphanumeric characters, hyphens (-) and underscores (_), and be at most 63 characters in length. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + location: + description: location is the GCP location in which the Key Ring exists. The value must match an existing GCP location, or "global". Defaults to global, if not set. + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + name: + description: name is the name of the customer-managed encryption key to be used for disk encryption. The value should correspond to an existing KMS key and should consist of only alphanumeric characters, hyphens (-) and underscores (_), and be at most 63 characters in length. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + projectID: + description: projectID is the ID of the Project in which the KMS Key Ring exists. It must be 6 to 30 lowercase letters, digits, or hyphens. It must start with a letter. Trailing hyphens are prohibited. + maxLength: 30 + minLength: 6 + pattern: ^[a-z][a-z0-9-]+[a-z0-9]$ + type: string + required: + - keyRing + - name + - projectID + type: object + type: object vSphere: description: vsphere is used to configure the vsphere CSI driver. 
properties: diff --git a/vendor/github.com/openshift/api/operator/v1/stable.storage.testsuite.yaml b/vendor/github.com/openshift/api/operator/v1/stable.storage.testsuite.yaml index 42903f22de..63a7ca4543 100644 --- a/vendor/github.com/openshift/api/operator/v1/stable.storage.testsuite.yaml +++ b/vendor/github.com/openshift/api/operator/v1/stable.storage.testsuite.yaml @@ -14,3 +14,91 @@ tests: spec: logLevel: Normal operatorLogLevel: Normal + onUpdate: + - name: Should allow enabling CSI migration for vSphere + initial: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: {} # No spec is required + updated: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: CSIWithMigrationDriver + expected: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: CSIWithMigrationDriver + logLevel: Normal + operatorLogLevel: Normal + - name: Should allow disabling CSI migration for vSphere + initial: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: {} # No spec is required + updated: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: LegacyDeprecatedInTreeDriver + expected: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: LegacyDeprecatedInTreeDriver + logLevel: Normal + operatorLogLevel: Normal + - name: Should allow changing LegacyDeprecatedInTreeDriver to CSIWithMigrationDriver + initial: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: LegacyDeprecatedInTreeDriver + updated: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: CSIWithMigrationDriver + expected: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: CSIWithMigrationDriver + logLevel: Normal + operatorLogLevel: Normal + - name: Should not allow changing CSIWithMigrationDriver to LegacyDeprecatedInTreeDriver + initial: | + 
apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: CSIWithMigrationDriver + updated: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: LegacyDeprecatedInTreeDriver + expectedError: "VSphereStorageDriver can not be changed once it is set to CSIWithMigrationDriver" + - name: Should not allow changing CSIWithMigrationDriver to empty string + initial: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: CSIWithMigrationDriver + updated: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: "" + expectedError: "VSphereStorageDriver can not be changed once it is set to CSIWithMigrationDriver" + - name: Should not allow unsetting VSphereStorageDriver once it is set + initial: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: CSIWithMigrationDriver + updated: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: {} + expectedError: "VSphereStorageDriver is required once set" diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go index b295340152..d6d91c95ad 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go @@ -105,10 +105,13 @@ type ClusterCSIDriverSpec struct { } // CSIDriverType indicates type of CSI driver being configured. 
-// +kubebuilder:validation:Enum="";vSphere +// +kubebuilder:validation:Enum="";AWS;Azure;GCP;vSphere type CSIDriverType string const ( + AWSDriverType CSIDriverType = "AWS" + AzureDriverType CSIDriverType = "Azure" + GCPDriverType CSIDriverType = "GCP" VSphereDriverType CSIDriverType = "vSphere" ) @@ -118,25 +121,129 @@ const ( type CSIDriverConfigSpec struct { // driverType indicates type of CSI driver for which the // driverConfig is being applied to. - // - // Valid values are: - // - // * vSphere - // - // Allows configuration of vsphere CSI driver topology. - // - // --- + // Valid values are: AWS, Azure, GCP, vSphere and omitted. // Consumers should treat unknown values as a NO-OP. - // // +kubebuilder:validation:Required // +unionDiscriminator DriverType CSIDriverType `json:"driverType"` + // aws is used to configure the AWS CSI driver. + // +optional + AWS *AWSCSIDriverConfigSpec `json:"aws,omitempty"` + + // azure is used to configure the Azure CSI driver. + // +optional + Azure *AzureCSIDriverConfigSpec `json:"azure,omitempty"` + + // gcp is used to configure the GCP CSI driver. + // +optional + GCP *GCPCSIDriverConfigSpec `json:"gcp,omitempty"` + // vsphere is used to configure the vsphere CSI driver. // +optional VSphere *VSphereCSIDriverConfigSpec `json:"vSphere,omitempty"` } +// AWSCSIDriverConfigSpec defines properties that can be configured for the AWS CSI driver. +type AWSCSIDriverConfigSpec struct { + // kmsKeyARN sets the cluster default storage class to encrypt volumes with a user-defined KMS key, + // rather than the default KMS key used by AWS. + // The value may be either the ARN or Alias ARN of a KMS key. + // +kubebuilder:validation:Pattern:=`^arn:(aws|aws-cn|aws-us-gov):kms:[a-z0-9-]+:[0-9]{12}:(key|alias)\/.*$` + // +optional + KMSKeyARN string `json:"kmsKeyARN,omitempty"` +} + +// AzureDiskEncryptionSet defines the configuration for a disk encryption set. 
+type AzureDiskEncryptionSet struct { + // subscriptionID defines the Azure subscription that contains the disk encryption set. + // The value should meet the following conditions: + // 1. It should be a 128-bit number. + // 2. It should be 36 characters (32 hexadecimal characters and 4 hyphens) long. + // 3. It should be displayed in five groups separated by hyphens (-). + // 4. The first group should be 8 characters long. + // 5. The second, third, and fourth groups should be 4 characters long. + // 6. The fifth group should be 12 characters long. + // An Example SubscrionID: f2007bbf-f802-4a47-9336-cf7c6b89b378 + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength:=36 + // +kubebuilder:validation:Pattern:=`^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$` + SubscriptionID string `json:"subscriptionID"` + + // resourceGroup defines the Azure resource group that contains the disk encryption set. + // The value should consist of only alphanumberic characters, + // underscores (_), parentheses, hyphens and periods. + // The value should not end in a period and be at most 90 characters in + // length. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength:=90 + // +kubebuilder:validation:Pattern:=`^[\w\.\-\(\)]*[\w\-\(\)]$` + ResourceGroup string `json:"resourceGroup"` + + // name is the name of the disk encryption set that will be set on the default storage class. + // The value should consist of only alphanumberic characters, + // underscores (_), hyphens, and be at most 80 characters in length. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength:=80 + // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$` + Name string `json:"name"` +} + +// AzureCSIDriverConfigSpec defines properties that can be configured for the Azure CSI driver. 
+type AzureCSIDriverConfigSpec struct { + // diskEncryptionSet sets the cluster default storage class to encrypt volumes with a + // customer-managed encryption set, rather than the default platform-managed keys. + // +optional + DiskEncryptionSet *AzureDiskEncryptionSet `json:"diskEncryptionSet,omitempty"` +} + +// GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key +type GCPKMSKeyReference struct { + // name is the name of the customer-managed encryption key to be used for disk encryption. + // The value should correspond to an existing KMS key and should + // consist of only alphanumeric characters, hyphens (-) and underscores (_), + // and be at most 63 characters in length. + // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$` + // +kubebuilder:validation:MinLength:=1 + // +kubebuilder:validation:MaxLength:=63 + // +kubebuilder:validation:Required + Name string `json:"name"` + + // keyRing is the name of the KMS Key Ring which the KMS Key belongs to. + // The value should correspond to an existing KMS key ring and should + // consist of only alphanumeric characters, hyphens (-) and underscores (_), + // and be at most 63 characters in length. + // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$` + // +kubebuilder:validation:MinLength:=1 + // +kubebuilder:validation:MaxLength:=63 + // +kubebuilder:validation:Required + KeyRing string `json:"keyRing"` + + // projectID is the ID of the Project in which the KMS Key Ring exists. + // It must be 6 to 30 lowercase letters, digits, or hyphens. + // It must start with a letter. Trailing hyphens are prohibited. + // +kubebuilder:validation:Pattern:=`^[a-z][a-z0-9-]+[a-z0-9]$` + // +kubebuilder:validation:MinLength:=6 + // +kubebuilder:validation:MaxLength:=30 + // +kubebuilder:validation:Required + ProjectID string `json:"projectID"` + + // location is the GCP location in which the Key Ring exists. + // The value must match an existing GCP location, or "global". 
+ // Defaults to global, if not set. + // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$` + // +optional + Location string `json:"location,omitempty"` +} + +// GCPCSIDriverConfigSpec defines properties that can be configured for the GCP CSI driver. +type GCPCSIDriverConfigSpec struct { + // kmsKey sets the cluster default storage class to encrypt volumes with customer-supplied + // encryption keys, rather than the default keys managed by GCP. + // +optional + KMSKey *GCPKMSKeyReference `json:"kmsKey,omitempty"` +} + // VSphereCSIDriverConfigSpec defines properties that // can be configured for vsphere CSI driver. type VSphereCSIDriverConfigSpec struct { diff --git a/vendor/github.com/openshift/api/operator/v1/types_storage.go b/vendor/github.com/openshift/api/operator/v1/types_storage.go index 38ffe26d52..044c9c32ae 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_storage.go +++ b/vendor/github.com/openshift/api/operator/v1/types_storage.go @@ -26,9 +26,28 @@ type Storage struct { Status StorageStatus `json:"status"` } +// StorageDriverType indicates whether CSI migration should be enabled for drivers where it is optional. +// +kubebuilder:validation:Enum="";LegacyDeprecatedInTreeDriver;CSIWithMigrationDriver +type StorageDriverType string + +const ( + LegacyDeprecatedInTreeDriver StorageDriverType = "LegacyDeprecatedInTreeDriver" + CSIWithMigrationDriver StorageDriverType = "CSIWithMigrationDriver" +) + // StorageSpec is the specification of the desired behavior of the cluster storage operator. +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.vsphereStorageDriver) || has(self.vsphereStorageDriver)", message="VSphereStorageDriver is required once set" type StorageSpec struct { OperatorSpec `json:",inline"` + + // VSphereStorageDriver indicates the storage driver to use on VSphere clusters. + // Once this field is set to CSIWithMigrationDriver, it can not be changed. 
+ // If this is empty, the platform will choose a good default, + // which may change over time without notice. + // DEPRECATED: This field will be removed in a future release. + // +kubebuilder:validation:XValidation:rule="self == oldSelf || oldSelf == \"\" || self == \"CSIWithMigrationDriver\"",message="VSphereStorageDriver can not be changed once it is set to CSIWithMigrationDriver" + // +optional + VSphereStorageDriver StorageDriverType `json:"vsphereStorageDriver"` } // StorageStatus defines the observed status of the cluster storage operator. diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go index 2f59a3a451..609219c065 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go @@ -13,6 +13,22 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSCSIDriverConfigSpec) DeepCopyInto(out *AWSCSIDriverConfigSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSCSIDriverConfigSpec. +func (in *AWSCSIDriverConfigSpec) DeepCopy() *AWSCSIDriverConfigSpec { + if in == nil { + return nil + } + out := new(AWSCSIDriverConfigSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AWSClassicLoadBalancerParameters) DeepCopyInto(out *AWSClassicLoadBalancerParameters) { *out = *in @@ -233,9 +249,61 @@ func (in *AuthenticationStatus) DeepCopy() *AuthenticationStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureCSIDriverConfigSpec) DeepCopyInto(out *AzureCSIDriverConfigSpec) { + *out = *in + if in.DiskEncryptionSet != nil { + in, out := &in.DiskEncryptionSet, &out.DiskEncryptionSet + *out = new(AzureDiskEncryptionSet) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureCSIDriverConfigSpec. +func (in *AzureCSIDriverConfigSpec) DeepCopy() *AzureCSIDriverConfigSpec { + if in == nil { + return nil + } + out := new(AzureCSIDriverConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureDiskEncryptionSet) DeepCopyInto(out *AzureDiskEncryptionSet) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiskEncryptionSet. +func (in *AzureDiskEncryptionSet) DeepCopy() *AzureDiskEncryptionSet { + if in == nil { + return nil + } + out := new(AzureDiskEncryptionSet) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CSIDriverConfigSpec) DeepCopyInto(out *CSIDriverConfigSpec) { *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSCSIDriverConfigSpec) + **out = **in + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(AzureCSIDriverConfigSpec) + (*in).DeepCopyInto(*out) + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(GCPCSIDriverConfigSpec) + (*in).DeepCopyInto(*out) + } if in.VSphere != nil { in, out := &in.VSphere, &out.VSphere *out = new(VSphereCSIDriverConfigSpec) @@ -1409,6 +1477,43 @@ func (in *ForwardPlugin) DeepCopy() *ForwardPlugin { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GCPCSIDriverConfigSpec) DeepCopyInto(out *GCPCSIDriverConfigSpec) { + *out = *in + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(GCPKMSKeyReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPCSIDriverConfigSpec. +func (in *GCPCSIDriverConfigSpec) DeepCopy() *GCPCSIDriverConfigSpec { + if in == nil { + return nil + } + out := new(GCPCSIDriverConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPKMSKeyReference) DeepCopyInto(out *GCPKMSKeyReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPKMSKeyReference. +func (in *GCPKMSKeyReference) DeepCopy() *GCPKMSKeyReference { + if in == nil { + return nil + } + out := new(GCPKMSKeyReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *GCPLoadBalancerParameters) DeepCopyInto(out *GCPLoadBalancerParameters) { *out = *in diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go index 51b1b0543c..1248ffb1b0 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -380,9 +380,41 @@ func (StatuspageProvider) SwaggerDoc() map[string]string { return map_StatuspageProvider } +var map_AWSCSIDriverConfigSpec = map[string]string{ + "": "AWSCSIDriverConfigSpec defines properties that can be configured for the AWS CSI driver.", + "kmsKeyARN": "kmsKeyARN sets the cluster default storage class to encrypt volumes with a user-defined KMS key, rather than the default KMS key used by AWS. The value may be either the ARN or Alias ARN of a KMS key.", +} + +func (AWSCSIDriverConfigSpec) SwaggerDoc() map[string]string { + return map_AWSCSIDriverConfigSpec +} + +var map_AzureCSIDriverConfigSpec = map[string]string{ + "": "AzureCSIDriverConfigSpec defines properties that can be configured for the Azure CSI driver.", + "diskEncryptionSet": "diskEncryptionSet sets the cluster default storage class to encrypt volumes with a customer-managed encryption set, rather than the default platform-managed keys.", +} + +func (AzureCSIDriverConfigSpec) SwaggerDoc() map[string]string { + return map_AzureCSIDriverConfigSpec +} + +var map_AzureDiskEncryptionSet = map[string]string{ + "": "AzureDiskEncryptionSet defines the configuration for a disk encryption set.", + "subscriptionID": "subscriptionID defines the Azure subscription that contains the disk encryption set. The value should meet the following conditions: 1. It should be a 128-bit number. 2. It should be 36 characters (32 hexadecimal characters and 4 hyphens) long. 3. 
It should be displayed in five groups separated by hyphens (-). 4. The first group should be 8 characters long. 5. The second, third, and fourth groups should be 4 characters long. 6. The fifth group should be 12 characters long. An Example SubscrionID: f2007bbf-f802-4a47-9336-cf7c6b89b378", + "resourceGroup": "resourceGroup defines the Azure resource group that contains the disk encryption set. The value should consist of only alphanumberic characters, underscores (_), parentheses, hyphens and periods. The value should not end in a period and be at most 90 characters in length.", + "name": "name is the name of the disk encryption set that will be set on the default storage class. The value should consist of only alphanumberic characters, underscores (_), hyphens, and be at most 80 characters in length.", +} + +func (AzureDiskEncryptionSet) SwaggerDoc() map[string]string { + return map_AzureDiskEncryptionSet +} + var map_CSIDriverConfigSpec = map[string]string{ "": "CSIDriverConfigSpec defines configuration spec that can be used to optionally configure a specific CSI Driver.", - "driverType": "driverType indicates type of CSI driver for which the driverConfig is being applied to.\n\nValid values are:\n\n* vSphere\n\nAllows configuration of vsphere CSI driver topology.", + "driverType": "driverType indicates type of CSI driver for which the driverConfig is being applied to. Valid values are: AWS, Azure, GCP, vSphere and omitted. 
Consumers should treat unknown values as a NO-OP.", + "aws": "aws is used to configure the AWS CSI driver.", + "azure": "azure is used to configure the Azure CSI driver.", + "gcp": "gcp is used to configure the GCP CSI driver.", "vSphere": "vsphere is used to configure the vsphere CSI driver.", } @@ -426,6 +458,27 @@ func (ClusterCSIDriverStatus) SwaggerDoc() map[string]string { return map_ClusterCSIDriverStatus } +var map_GCPCSIDriverConfigSpec = map[string]string{ + "": "GCPCSIDriverConfigSpec defines properties that can be configured for the GCP CSI driver.", + "kmsKey": "kmsKey sets the cluster default storage class to encrypt volumes with customer-supplied encryption keys, rather than the default keys managed by GCP.", +} + +func (GCPCSIDriverConfigSpec) SwaggerDoc() map[string]string { + return map_GCPCSIDriverConfigSpec +} + +var map_GCPKMSKeyReference = map[string]string{ + "": "GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key", + "name": "name is the name of the customer-managed encryption key to be used for disk encryption. The value should correspond to an existing KMS key and should consist of only alphanumeric characters, hyphens (-) and underscores (_), and be at most 63 characters in length.", + "keyRing": "keyRing is the name of the KMS Key Ring which the KMS Key belongs to. The value should correspond to an existing KMS key ring and should consist of only alphanumeric characters, hyphens (-) and underscores (_), and be at most 63 characters in length.", + "projectID": "projectID is the ID of the Project in which the KMS Key Ring exists. It must be 6 to 30 lowercase letters, digits, or hyphens. It must start with a letter. Trailing hyphens are prohibited.", + "location": "location is the GCP location in which the Key Ring exists. The value must match an existing GCP location, or \"global\". 
Defaults to global, if not set.", +} + +func (GCPKMSKeyReference) SwaggerDoc() map[string]string { + return map_GCPKMSKeyReference +} + var map_VSphereCSIDriverConfigSpec = map[string]string{ "": "VSphereCSIDriverConfigSpec defines properties that can be configured for vsphere CSI driver.", "topologyCategories": "topologyCategories indicates tag categories with which vcenter resources such as hostcluster or datacenter were tagged with. If cluster Infrastructure object has a topology, values specified in Infrastructure object will be used and modifications to topologyCategories will be rejected.", @@ -1557,7 +1610,8 @@ func (StorageList) SwaggerDoc() map[string]string { } var map_StorageSpec = map[string]string{ - "": "StorageSpec is the specification of the desired behavior of the cluster storage operator.", + "": "StorageSpec is the specification of the desired behavior of the cluster storage operator.", + "vsphereStorageDriver": "VSphereStorageDriver indicates the storage driver to use on VSphere clusters. Once this field is set to CSIWithMigrationDriver, it can not be changed. If this is empty, the platform will choose a good default, which may change over time without notice. DEPRECATED: This field will be removed in a future release.", } func (StorageSpec) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go deleted file mode 100644 index 8d2f05500b..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package testutil - -import ( - "fmt" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/testutil/promlint" -) - -// CollectAndLint registers the provided Collector with a newly created pedantic -// Registry. It then calls GatherAndLint with that Registry and with the -// provided metricNames. -func CollectAndLint(c prometheus.Collector, metricNames ...string) ([]promlint.Problem, error) { - reg := prometheus.NewPedanticRegistry() - if err := reg.Register(c); err != nil { - return nil, fmt.Errorf("registering collector failed: %w", err) - } - return GatherAndLint(reg, metricNames...) -} - -// GatherAndLint gathers all metrics from the provided Gatherer and checks them -// with the linter in the promlint package. If any metricNames are provided, -// only metrics with those names are checked. 
-func GatherAndLint(g prometheus.Gatherer, metricNames ...string) ([]promlint.Problem, error) { - got, err := g.Gather() - if err != nil { - return nil, fmt.Errorf("gathering metrics failed: %w", err) - } - if metricNames != nil { - got = filterMetrics(got, metricNames) - } - return promlint.NewWithMetricFamilies(got).Lint() -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go deleted file mode 100644 index 91b83b5285..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go +++ /dev/null @@ -1,342 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package testutil provides helpers to test code using the prometheus package -// of client_golang. -// -// While writing unit tests to verify correct instrumentation of your code, it's -// a common mistake to mostly test the instrumentation library instead of your -// own code. Rather than verifying that a prometheus.Counter's value has changed -// as expected or that it shows up in the exposition after registration, it is -// in general more robust and more faithful to the concept of unit tests to use -// mock implementations of the prometheus.Counter and prometheus.Registerer -// interfaces that simply assert that the Add or Register methods have been -// called with the expected arguments. 
However, this might be overkill in simple -// scenarios. The ToFloat64 function is provided for simple inspection of a -// single-value metric, but it has to be used with caution. -// -// End-to-end tests to verify all or larger parts of the metrics exposition can -// be implemented with the CollectAndCompare or GatherAndCompare functions. The -// most appropriate use is not so much testing instrumentation of your code, but -// testing custom prometheus.Collector implementations and in particular whole -// exporters, i.e. programs that retrieve telemetry data from a 3rd party source -// and convert it into Prometheus metrics. -// -// In a similar pattern, CollectAndLint and GatherAndLint can be used to detect -// metrics that have issues with their name, type, or metadata without being -// necessarily invalid, e.g. a counter with a name missing the “_total” suffix. -package testutil - -import ( - "bytes" - "fmt" - "io" - "net/http" - "reflect" - - "github.com/davecgh/go-spew/spew" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/expfmt" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/internal" -) - -// ToFloat64 collects all Metrics from the provided Collector. It expects that -// this results in exactly one Metric being collected, which must be a Gauge, -// Counter, or Untyped. In all other cases, ToFloat64 panics. ToFloat64 returns -// the value of the collected Metric. -// -// The Collector provided is typically a simple instance of Gauge or Counter, or -// – less commonly – a GaugeVec or CounterVec with exactly one element. But any -// Collector fulfilling the prerequisites described above will do. -// -// Use this function with caution. It is computationally very expensive and thus -// not suited at all to read values from Metrics in regular code. 
This is really -// only for testing purposes, and even for testing, other approaches are often -// more appropriate (see this package's documentation). -// -// A clear anti-pattern would be to use a metric type from the prometheus -// package to track values that are also needed for something else than the -// exposition of Prometheus metrics. For example, you would like to track the -// number of items in a queue because your code should reject queuing further -// items if a certain limit is reached. It is tempting to track the number of -// items in a prometheus.Gauge, as it is then easily available as a metric for -// exposition, too. However, then you would need to call ToFloat64 in your -// regular code, potentially quite often. The recommended way is to track the -// number of items conventionally (in the way you would have done it without -// considering Prometheus metrics) and then expose the number with a -// prometheus.GaugeFunc. -func ToFloat64(c prometheus.Collector) float64 { - var ( - m prometheus.Metric - mCount int - mChan = make(chan prometheus.Metric) - done = make(chan struct{}) - ) - - go func() { - for m = range mChan { - mCount++ - } - close(done) - }() - - c.Collect(mChan) - close(mChan) - <-done - - if mCount != 1 { - panic(fmt.Errorf("collected %d metrics instead of exactly 1", mCount)) - } - - pb := &dto.Metric{} - if err := m.Write(pb); err != nil { - panic(fmt.Errorf("error happened while collecting metrics: %w", err)) - } - if pb.Gauge != nil { - return pb.Gauge.GetValue() - } - if pb.Counter != nil { - return pb.Counter.GetValue() - } - if pb.Untyped != nil { - return pb.Untyped.GetValue() - } - panic(fmt.Errorf("collected a non-gauge/counter/untyped metric: %s", pb)) -} - -// CollectAndCount registers the provided Collector with a newly created -// pedantic Registry. It then calls GatherAndCount with that Registry and with -// the provided metricNames. 
In the unlikely case that the registration or the -// gathering fails, this function panics. (This is inconsistent with the other -// CollectAnd… functions in this package and has historical reasons. Changing -// the function signature would be a breaking change and will therefore only -// happen with the next major version bump.) -func CollectAndCount(c prometheus.Collector, metricNames ...string) int { - reg := prometheus.NewPedanticRegistry() - if err := reg.Register(c); err != nil { - panic(fmt.Errorf("registering collector failed: %w", err)) - } - result, err := GatherAndCount(reg, metricNames...) - if err != nil { - panic(err) - } - return result -} - -// GatherAndCount gathers all metrics from the provided Gatherer and counts -// them. It returns the number of metric children in all gathered metric -// families together. If any metricNames are provided, only metrics with those -// names are counted. -func GatherAndCount(g prometheus.Gatherer, metricNames ...string) (int, error) { - got, err := g.Gather() - if err != nil { - return 0, fmt.Errorf("gathering metrics failed: %w", err) - } - if metricNames != nil { - got = filterMetrics(got, metricNames) - } - - result := 0 - for _, mf := range got { - result += len(mf.GetMetric()) - } - return result, nil -} - -// ScrapeAndCompare calls a remote exporter's endpoint which is expected to return some metrics in -// plain text format. Then it compares it with the results that the `expected` would return. -// If the `metricNames` is not empty it would filter the comparison only to the given metric names. 
-func ScrapeAndCompare(url string, expected io.Reader, metricNames ...string) error { - resp, err := http.Get(url) - if err != nil { - return fmt.Errorf("scraping metrics failed: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("the scraping target returned a status code other than 200: %d", - resp.StatusCode) - } - - scraped, err := convertReaderToMetricFamily(resp.Body) - if err != nil { - return err - } - - wanted, err := convertReaderToMetricFamily(expected) - if err != nil { - return err - } - - return compareMetricFamilies(scraped, wanted, metricNames...) -} - -// CollectAndCompare registers the provided Collector with a newly created -// pedantic Registry. It then calls GatherAndCompare with that Registry and with -// the provided metricNames. -func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error { - reg := prometheus.NewPedanticRegistry() - if err := reg.Register(c); err != nil { - return fmt.Errorf("registering collector failed: %w", err) - } - return GatherAndCompare(reg, expected, metricNames...) -} - -// GatherAndCompare gathers all metrics from the provided Gatherer and compares -// it to an expected output read from the provided Reader in the Prometheus text -// exposition format. If any metricNames are provided, only metrics with those -// names are compared. -func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ...string) error { - return TransactionalGatherAndCompare(prometheus.ToTransactionalGatherer(g), expected, metricNames...) -} - -// TransactionalGatherAndCompare gathers all metrics from the provided Gatherer and compares -// it to an expected output read from the provided Reader in the Prometheus text -// exposition format. If any metricNames are provided, only metrics with those -// names are compared. 
-func TransactionalGatherAndCompare(g prometheus.TransactionalGatherer, expected io.Reader, metricNames ...string) error { - got, done, err := g.Gather() - defer done() - if err != nil { - return fmt.Errorf("gathering metrics failed: %w", err) - } - - wanted, err := convertReaderToMetricFamily(expected) - if err != nil { - return err - } - - return compareMetricFamilies(got, wanted, metricNames...) -} - -// convertReaderToMetricFamily would read from a io.Reader object and convert it to a slice of -// dto.MetricFamily. -func convertReaderToMetricFamily(reader io.Reader) ([]*dto.MetricFamily, error) { - var tp expfmt.TextParser - notNormalized, err := tp.TextToMetricFamilies(reader) - if err != nil { - return nil, fmt.Errorf("converting reader to metric families failed: %w", err) - } - - return internal.NormalizeMetricFamilies(notNormalized), nil -} - -// compareMetricFamilies would compare 2 slices of metric families, and optionally filters both of -// them to the `metricNames` provided. -func compareMetricFamilies(got, expected []*dto.MetricFamily, metricNames ...string) error { - if metricNames != nil { - got = filterMetrics(got, metricNames) - } - - return compare(got, expected) -} - -// compare encodes both provided slices of metric families into the text format, -// compares their string message, and returns an error if they do not match. -// The error contains the encoded text of both the desired and the actual -// result. 
-func compare(got, want []*dto.MetricFamily) error { - var gotBuf, wantBuf bytes.Buffer - enc := expfmt.NewEncoder(&gotBuf, expfmt.FmtText) - for _, mf := range got { - if err := enc.Encode(mf); err != nil { - return fmt.Errorf("encoding gathered metrics failed: %w", err) - } - } - enc = expfmt.NewEncoder(&wantBuf, expfmt.FmtText) - for _, mf := range want { - if err := enc.Encode(mf); err != nil { - return fmt.Errorf("encoding expected metrics failed: %w", err) - } - } - if diffErr := diff(wantBuf, gotBuf); diffErr != "" { - return fmt.Errorf(diffErr) - } - return nil -} - -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice, array or string. Otherwise it returns an empty string. -func diff(expected, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { - return "" - } - - var e, a string - c := spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, - } - if et != reflect.TypeOf("") { - e = c.Sdump(expected) - a = c.Sdump(actual) - } else { - e = reflect.ValueOf(expected).String() - a = reflect.ValueOf(actual).String() - } - - diff, _ := internal.GetUnifiedDiffString(internal.UnifiedDiff{ - A: internal.SplitLines(e), - B: internal.SplitLines(a), - FromFile: "metric output does not match expectation; want", - FromDate: "", - ToFile: "got:", - ToDate: "", - Context: 1, - }) - - if diff == "" { - return "" - } - - return "\n\nDiff:\n" + diff -} - -// typeAndKind returns the type and kind of the given interface{} -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - -func 
filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFamily { - var filtered []*dto.MetricFamily - for _, m := range metrics { - for _, name := range names { - if m.GetName() == name { - filtered = append(filtered, m) - break - } - } - } - return filtered -} diff --git a/vendor/k8s.io/code-generator/generate-groups.sh b/vendor/k8s.io/code-generator/generate-groups.sh old mode 100644 new mode 100755 diff --git a/vendor/k8s.io/code-generator/generate-internal-groups.sh b/vendor/k8s.io/code-generator/generate-internal-groups.sh old mode 100644 new mode 100755 diff --git a/vendor/modules.txt b/vendor/modules.txt index 39d19b558a..9fa331d3af 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -766,7 +766,7 @@ github.com/opencontainers/runc/libcontainer/user # github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 ## explicit github.com/opencontainers/runtime-spec/specs-go -# github.com/openshift/api v0.0.0-20230221095031-69130006bb23 +# github.com/openshift/api v0.0.0-20230330150608-05635858d40f ## explicit; go 1.19 github.com/openshift/api github.com/openshift/api/apiserver @@ -923,7 +923,6 @@ github.com/proglottis/gpgme github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp -github.com/prometheus/client_golang/prometheus/testutil github.com/prometheus/client_golang/prometheus/testutil/promlint # github.com/prometheus/client_model v0.3.0 ## explicit; go 1.9