diff --git a/Makefile b/Makefile
index 271288e160e..7288554bb30 100644
--- a/Makefile
+++ b/Makefile
@@ -143,7 +143,7 @@ app-sre-saas-template: hypershift
# Run tests
.PHONY: test
test: build
- $(GO) test -race -count=25 ./... -coverprofile cover.out
+ $(GO) test -race -count=25 -timeout=30m ./... -coverprofile cover.out
.PHONY: e2e
e2e:
@@ -216,13 +216,14 @@ ci-install-hypershift-private:
--oidc-storage-provider-s3-credentials=/etc/hypershift-pool-aws-credentials/credentials \
--oidc-storage-provider-s3-bucket-name=hypershift-ci-oidc \
--oidc-storage-provider-s3-region=us-east-1 \
- --enable-webhook \
+ --enable-validating-webhook \
--private-platform=AWS \
--aws-private-creds=/etc/hypershift-pool-aws-credentials/credentials \
--aws-private-region=us-east-1 \
--external-dns-provider=aws \
--external-dns-credentials=/etc/hypershift-pool-aws-credentials/credentials \
- --external-dns-domain-filter=service.ci.hypershift.devcluster.openshift.com
+ --external-dns-domain-filter=service.ci.hypershift.devcluster.openshift.com \
+ --wait-until-available
.PHONY: ci-test-e2e
ci-test-e2e:
diff --git a/api/fixtures/example.go b/api/fixtures/example.go
index 895b5d03499..37fd2b292fb 100644
--- a/api/fixtures/example.go
+++ b/api/fixtures/example.go
@@ -8,12 +8,11 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/pointer"
"github.com/openshift/hypershift/api/util/ipnet"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
configv1 "github.com/openshift/api/config/v1"
crclient "sigs.k8s.io/controller-runtime/pkg/client"
@@ -133,7 +132,7 @@ func (o ExampleOptions) Resources() *ExampleResources {
var resources []crclient.Object
var services []hyperv1.ServicePublishingStrategyMapping
var secretEncryption *hyperv1.SecretEncryptionSpec
- var globalOpts []runtime.RawExtension
+ var proxyConfig *configv1.ProxySpec
switch {
case o.AWS != nil:
@@ -183,16 +182,10 @@ web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token
}
if o.AWS.ProxyAddress != "" {
- globalOpts = append(globalOpts, runtime.RawExtension{Object: &configv1.Proxy{
- TypeMeta: metav1.TypeMeta{
- APIVersion: configv1.GroupVersion.String(),
- Kind: "Proxy",
- },
- Spec: configv1.ProxySpec{
- HTTPProxy: o.AWS.ProxyAddress,
- HTTPSProxy: o.AWS.ProxyAddress,
- },
- }})
+ proxyConfig = &configv1.ProxySpec{
+ HTTPProxy: o.AWS.ProxyAddress,
+ HTTPSProxy: o.AWS.ProxyAddress,
+ }
}
if kmsCredsSecret != nil {
@@ -390,7 +383,6 @@ web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token
},
KubeCloudControllerCreds: corev1.LocalObjectReference{Name: powerVSResources.KubeCloudControllerCreds.Name},
NodePoolManagementCreds: corev1.LocalObjectReference{Name: powerVSResources.NodePoolManagementCreds.Name},
- ControlPlaneOperatorCreds: corev1.LocalObjectReference{Name: powerVSResources.ControlPlaneOperatorCreds.Name},
IngressOperatorCloudCreds: corev1.LocalObjectReference{Name: powerVSResources.IngressOperatorCloudCreds.Name},
},
}
@@ -474,8 +466,8 @@ web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token
cluster.Spec.Networking.MachineNetwork = []hyperv1.MachineNetworkEntry{{CIDR: *ipnet.MustParseCIDR(o.MachineCIDR)}}
}
- if len(globalOpts) > 0 {
- cluster.Spec.Configuration = &hyperv1.ClusterConfiguration{Items: globalOpts}
+ if proxyConfig != nil {
+ cluster.Spec.Configuration = &hyperv1.ClusterConfiguration{Proxy: proxyConfig}
}
var userCABundleCM *corev1.ConfigMap
diff --git a/api/fixtures/example_aws.go b/api/fixtures/example_aws.go
index 2b2b903dbea..3f7e10c093f 100644
--- a/api/fixtures/example_aws.go
+++ b/api/fixtures/example_aws.go
@@ -1,7 +1,7 @@
package fixtures
import (
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
corev1 "k8s.io/api/core/v1"
crclient "sigs.k8s.io/controller-runtime/pkg/client"
)
diff --git a/api/fixtures/example_ibmcloud_powervs.go b/api/fixtures/example_ibmcloud_powervs.go
index 9f8aa491805..873923edfd8 100644
--- a/api/fixtures/example_ibmcloud_powervs.go
+++ b/api/fixtures/example_ibmcloud_powervs.go
@@ -1,6 +1,7 @@
package fixtures
import (
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
corev1 "k8s.io/api/core/v1"
crclient "sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -22,7 +23,7 @@ type ExamplePowerVSOptions struct {
// nodepool related options
SysType string
- ProcType string
+ ProcType hyperv1.PowerVSNodePoolProcType
Processors string
Memory int32
}
diff --git a/api/fixtures/example_kubevirt.go b/api/fixtures/example_kubevirt.go
index 3f8bbe12776..d8a2c576f84 100644
--- a/api/fixtures/example_kubevirt.go
+++ b/api/fixtures/example_kubevirt.go
@@ -4,7 +4,7 @@ import (
"fmt"
"strings"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
apiresource "k8s.io/apimachinery/pkg/api/resource"
)
diff --git a/api/scheme.go b/api/scheme.go
index ed5803310d0..99cdfcea886 100644
--- a/api/scheme.go
+++ b/api/scheme.go
@@ -6,7 +6,8 @@ import (
routev1 "github.com/openshift/api/route/v1"
securityv1 "github.com/openshift/api/security/v1"
agentv1 "github.com/openshift/cluster-api-provider-agent/api/v1alpha1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1alpha1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1beta1 "github.com/openshift/hypershift/api/v1beta1"
prometheusoperatorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
@@ -44,7 +45,8 @@ func init() {
capiaws.AddToScheme(Scheme)
capiibm.AddToScheme(Scheme)
clientgoscheme.AddToScheme(Scheme)
- hyperv1.AddToScheme(Scheme)
+ hyperv1alpha1.AddToScheme(Scheme)
+ hyperv1beta1.AddToScheme(Scheme)
capiv1.AddToScheme(Scheme)
configv1.AddToScheme(Scheme)
operatorv1.AddToScheme(Scheme)
diff --git a/support/globalconfig/globalconfig.go b/api/util/configrefs/refs.go
similarity index 61%
rename from support/globalconfig/globalconfig.go
rename to api/util/configrefs/refs.go
index e8c16c63d5e..356b5fe033b 100644
--- a/support/globalconfig/globalconfig.go
+++ b/api/util/configrefs/refs.go
@@ -1,102 +1,42 @@
-package globalconfig
+package configrefs
import (
- "context"
- "fmt"
-
configv1 "github.com/openshift/api/config/v1"
"k8s.io/apimachinery/pkg/util/sets"
- ctrl "sigs.k8s.io/controller-runtime"
-
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
- "github.com/openshift/hypershift/support/api"
)
-type GlobalConfig struct {
- APIServer *configv1.APIServer
- Authentication *configv1.Authentication
- FeatureGate *configv1.FeatureGate
- Image *configv1.Image
- Ingress *configv1.Ingress
- Network *configv1.Network
- OAuth *configv1.OAuth
- Scheduler *configv1.Scheduler
- Proxy *configv1.Proxy
- Build *configv1.Build
- Project *configv1.Project
-}
-
-type ObservedConfig struct {
- Image *configv1.Image
- Build *configv1.Build
- Project *configv1.Project
-}
-
-func ParseGlobalConfig(ctx context.Context, cfg *hyperv1.ClusterConfiguration) (GlobalConfig, error) {
- globalConfig := GlobalConfig{}
- if cfg == nil {
- return globalConfig, nil
- }
- kinds := sets.NewString() // keeps track of which kinds have been found
- for i, cfg := range cfg.Items {
- cfgObject, gvk, err := api.TolerantYAMLSerializer.Decode(cfg.Raw, nil, nil)
- if err != nil {
- return globalConfig, fmt.Errorf("cannot parse configuration at index %d: %w", i, err)
- }
- if gvk.GroupVersion().String() != configv1.GroupVersion.String() {
- return globalConfig, fmt.Errorf("invalid resource type found in configuration: kind: %s, apiVersion: %s", gvk.Kind, gvk.GroupVersion().String())
- }
- if kinds.Has(gvk.Kind) {
- return globalConfig, fmt.Errorf("duplicate config type found: %s", gvk.Kind)
- }
- kinds.Insert(gvk.Kind)
- switch obj := cfgObject.(type) {
- case *configv1.APIServer:
- if obj.Spec.Audit.Profile == "" {
- // Populate kubebuilder default for comparison
- // https://github.com/openshift/api/blob/f120778bee805ad1a7a4f05a6430332cf5811813/config/v1/types_apiserver.go#L57
- obj.Spec.Audit.Profile = configv1.DefaultAuditProfileType
- }
- globalConfig.APIServer = obj
- case *configv1.Authentication:
- globalConfig.Authentication = obj
- case *configv1.FeatureGate:
- globalConfig.FeatureGate = obj
- case *configv1.Ingress:
- globalConfig.Ingress = obj
- case *configv1.Network:
- globalConfig.Network = obj
- case *configv1.OAuth:
- globalConfig.OAuth = obj
- case *configv1.Scheduler:
- globalConfig.Scheduler = obj
- case *configv1.Proxy:
- globalConfig.Proxy = obj
- default:
- log := ctrl.LoggerFrom(ctx)
- log.Info("WARNING: unrecognized config found", "kind", gvk.Kind)
- }
- }
- return globalConfig, nil
+// ClusterConfiguration is an interface for the ClusterConfiguration type in the API.
+// It is needed to avoid a circular import reference, given that this package is
+// used by the conversion code in the API package.
+type ClusterConfiguration interface {
+ GetAPIServer() *configv1.APIServerSpec
+ GetAuthentication() *configv1.AuthenticationSpec
+ GetFeatureGate() *configv1.FeatureGateSpec
+ GetImage() *configv1.ImageSpec
+ GetIngress() *configv1.IngressSpec
+ GetNetwork() *configv1.NetworkSpec
+ GetOAuth() *configv1.OAuthSpec
+ GetScheduler() *configv1.SchedulerSpec
+ GetProxy() *configv1.ProxySpec
}
-func SecretRefs(cfg *hyperv1.ClusterConfiguration) []string {
+func SecretRefs(cfg ClusterConfiguration) []string {
result := sets.NewString()
- result = result.Union(apiServerSecretRefs(cfg.APIServer))
- result = result.Union(authenticationSecretRefs(cfg.Authentication))
- result = result.Union(ingressSecretRefs(cfg.Ingress))
- result = result.Union(oauthSecretRefs(cfg.OAuth))
+ result = result.Union(apiServerSecretRefs(cfg.GetAPIServer()))
+ result = result.Union(authenticationSecretRefs(cfg.GetAuthentication()))
+ result = result.Union(ingressSecretRefs(cfg.GetIngress()))
+ result = result.Union(oauthSecretRefs(cfg.GetOAuth()))
return result.List()
}
-func ConfigMapRefs(cfg *hyperv1.ClusterConfiguration) []string {
+func ConfigMapRefs(cfg ClusterConfiguration) []string {
result := sets.NewString()
- result = result.Union(apiServerConfigMapRefs(cfg.APIServer))
- result = result.Union(authenticationConfigMapRefs(cfg.Authentication))
- result = result.Union(imageConfigMapRefs(cfg.Image))
- result = result.Union(oauthConfigMapRefs(cfg.OAuth))
- result = result.Union(proxyConfigMapRefs(cfg.Proxy))
- result = result.Union(schedulerConfigMapRefs(cfg.Scheduler))
+ result = result.Union(apiServerConfigMapRefs(cfg.GetAPIServer()))
+ result = result.Union(authenticationConfigMapRefs(cfg.GetAuthentication()))
+ result = result.Union(imageConfigMapRefs(cfg.GetImage()))
+ result = result.Union(oauthConfigMapRefs(cfg.GetOAuth()))
+ result = result.Union(proxyConfigMapRefs(cfg.GetProxy()))
+ result = result.Union(schedulerConfigMapRefs(cfg.GetScheduler()))
return result.List()
}
diff --git a/support/globalconfig/globalconfig_test.go b/api/util/configrefs/refs_test.go
similarity index 95%
rename from support/globalconfig/globalconfig_test.go
rename to api/util/configrefs/refs_test.go
index 8e9d5e25c89..ed7f4654d0a 100644
--- a/support/globalconfig/globalconfig_test.go
+++ b/api/util/configrefs/refs_test.go
@@ -1,48 +1,15 @@
-package globalconfig
+package configrefs
import (
- "context"
"reflect"
"testing"
- "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
configv1 "github.com/openshift/api/config/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
)
-var featureGateBytes = `
-apiVersion: config.openshift.io/v1
-kind: FeatureGate
-metadata:
- name: cluster
-spec:
- featureSet: LatencySensitive
- unknownField: example
-`
-
-func TestParseGlobalConfig(t *testing.T) {
- config := &hyperv1.ClusterConfiguration{
- Items: []runtime.RawExtension{
- {
- Raw: []byte(featureGateBytes),
- },
- },
- }
-
- globalConfig, err := ParseGlobalConfig(context.Background(), config)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if globalConfig.FeatureGate == nil {
- t.Fatalf("feature gate config not found")
- }
- if globalConfig.FeatureGate.Spec.FeatureSet != configv1.LatencySensitive {
- t.Errorf("unexpected featureset: %q", globalConfig.FeatureGate.Spec.FeatureSet)
- }
-}
-
func TestKnownConfigMapRefs(t *testing.T) {
actual := findRefs(reflect.TypeOf(hyperv1.ClusterConfiguration{}), "", "ConfigMapNameReference")
expected := sets.NewString(
diff --git a/api/util/ipnet/ipnet.go b/api/util/ipnet/ipnet.go
index 714f53f3fc8..4a305a153c4 100644
--- a/api/util/ipnet/ipnet.go
+++ b/api/util/ipnet/ipnet.go
@@ -10,6 +10,7 @@ import (
)
var nullString = "null"
+var nilString = ""
var nullBytes = []byte(nullString)
// IPNet wraps net.IPNet to get CIDR serialization.
@@ -42,7 +43,7 @@ func (ipnet *IPNet) String() string {
// MarshalJSON interface for an IPNet
func (ipnet *IPNet) MarshalJSON() (data []byte, err error) {
- if len(ipnet.IP) == 0 {
+ if ipnet == nil || len(ipnet.IP) == 0 {
return nullBytes, nil
}
@@ -63,6 +64,12 @@ func (ipnet *IPNet) UnmarshalJSON(b []byte) (err error) {
return fmt.Errorf("could not unmarshal string: %w", err)
}
+ if cidr == nilString {
+ ipnet.IP = net.IP{}
+ ipnet.Mask = net.IPMask{}
+ return nil
+ }
+
parsedIPNet, err := ParseCIDR(cidr)
if err != nil {
return fmt.Errorf("could not parse cidr %s: %w", cidr, err)
diff --git a/api/v1alpha1/clusterconfig.go b/api/v1alpha1/clusterconfig.go
new file mode 100644
index 00000000000..e8a9139662a
--- /dev/null
+++ b/api/v1alpha1/clusterconfig.go
@@ -0,0 +1,15 @@
+package v1alpha1
+
+import configv1 "github.com/openshift/api/config/v1"
+
+func (c *ClusterConfiguration) GetAPIServer() *configv1.APIServerSpec { return c.APIServer }
+func (c *ClusterConfiguration) GetAuthentication() *configv1.AuthenticationSpec {
+ return c.Authentication
+}
+func (c *ClusterConfiguration) GetFeatureGate() *configv1.FeatureGateSpec { return c.FeatureGate }
+func (c *ClusterConfiguration) GetImage() *configv1.ImageSpec { return c.Image }
+func (c *ClusterConfiguration) GetIngress() *configv1.IngressSpec { return c.Ingress }
+func (c *ClusterConfiguration) GetNetwork() *configv1.NetworkSpec { return c.Network }
+func (c *ClusterConfiguration) GetOAuth() *configv1.OAuthSpec { return c.OAuth }
+func (c *ClusterConfiguration) GetScheduler() *configv1.SchedulerSpec { return c.Scheduler }
+func (c *ClusterConfiguration) GetProxy() *configv1.ProxySpec { return c.Proxy }
diff --git a/api/v1alpha1/endpointservice_types.go b/api/v1alpha1/endpointservice_types.go
index f9ee2044a56..626dae0fff6 100644
--- a/api/v1alpha1/endpointservice_types.go
+++ b/api/v1alpha1/endpointservice_types.go
@@ -61,12 +61,12 @@ type AWSEndpointServiceStatus struct {
// and the error reported in the message.
//
// Current condition types are: "Available"
- Conditions []metav1.Condition `json:"conditions"`
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=awsendpointservices,scope=Namespaced
-// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// AWSEndpointService specifies a request for an Endpoint Service in AWS
type AWSEndpointService struct {
diff --git a/api/v1alpha1/hosted_controlplane.go b/api/v1alpha1/hosted_controlplane.go
index 87e0f1a0fae..51c6a9196d9 100644
--- a/api/v1alpha1/hosted_controlplane.go
+++ b/api/v1alpha1/hosted_controlplane.go
@@ -12,7 +12,6 @@ func init() {
// HostedControlPlane defines the desired state of HostedControlPlane
// +kubebuilder:resource:path=hostedcontrolplanes,shortName=hcp;hcps,scope=Namespaced,categories=cluster-api
-// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:object:root=true
type HostedControlPlane struct {
@@ -25,9 +24,23 @@ type HostedControlPlane struct {
// HostedControlPlaneSpec defines the desired state of HostedControlPlane
type HostedControlPlaneSpec struct {
- ReleaseImage string `json:"releaseImage"`
- PullSecret corev1.LocalObjectReference `json:"pullSecret"`
- IssuerURL string `json:"issuerURL"`
+ // ReleaseImage is the release image applied to the hosted control plane.
+ ReleaseImage string `json:"releaseImage"`
+
+ // channel is an identifier for explicitly requesting that a non-default
+ // set of updates be applied to this cluster. The default channel will
+ // contain stable updates that are appropriate for production clusters.
+ //
+ // +optional
+ Channel string `json:"channel,omitempty"`
+
+ PullSecret corev1.LocalObjectReference `json:"pullSecret"`
+
+ // IssuerURL is an OIDC issuer URL which is used as the issuer in all
+ // ServiceAccount tokens generated by the control plane API server. The
+ // default value is kubernetes.default.svc, which only works for in-cluster
+ // validation.
+ IssuerURL string `json:"issuerURL"`
// Networking specifies network configuration for the cluster.
// Temporarily optional for backward compatibility, required in future releases.
@@ -256,16 +269,28 @@ type HostedControlPlaneStatus struct {
// +kubebuilder:validation:Optional
OAuthCallbackURLTemplate string `json:"oauthCallbackURLTemplate,omitempty"`
+ // versionStatus is the status of the release version applied by the
+ // hosted control plane operator.
+ // +optional
+ VersionStatus *ClusterVersionStatus `json:"versionStatus,omitempty"`
+
// Version is the semantic version of the release applied by
// the hosted control plane operator
+ //
+ // Deprecated: Use versionStatus.desired.version instead.
// +kubebuilder:validation:Optional
Version string `json:"version,omitempty"`
// ReleaseImage is the release image applied to the hosted control plane.
+ //
+ // Deprecated: Use versionStatus.desired.image instead.
+ // +optional
ReleaseImage string `json:"releaseImage,omitempty"`
// lastReleaseImageTransitionTime is the time of the last update to the current
// releaseImage property.
+ //
+ // Deprecated: Use versionStatus.history[0].startedTime instead.
// +kubebuilder:validation:Optional
LastReleaseImageTransitionTime *metav1.Time `json:"lastReleaseImageTransitionTime,omitempty"`
@@ -280,8 +305,8 @@ type HostedControlPlaneStatus struct {
// Condition contains details for one aspect of the current state of the HostedControlPlane.
// Current condition types are: "Available"
- // +kubebuilder:validation:Required
- Conditions []metav1.Condition `json:"conditions"`
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
}
type APIEndpoint struct {
diff --git a/api/v1alpha1/hostedcluster_types.go b/api/v1alpha1/hostedcluster_types.go
index c7cdfeb3c0c..217496d711a 100644
--- a/api/v1alpha1/hostedcluster_types.go
+++ b/api/v1alpha1/hostedcluster_types.go
@@ -133,6 +133,13 @@ type HostedClusterSpec struct {
// +optional
ClusterID string `json:"clusterID,omitempty"`
+ // channel is an identifier for explicitly requesting that a non-default
+ // set of updates be applied to this cluster. The default channel will
+ // contain stable updates that are appropriate for production clusters.
+ //
+ // +optional
+ Channel string `json:"channel,omitempty"`
+
// InfraID is a globally unique identifier for the cluster. This identifier
// will be used to associate various cloud resources with the HostedCluster
// and its associated NodePools.
@@ -1762,7 +1769,8 @@ type HostedClusterStatus struct {
// Conditions represents the latest available observations of a control
// plane's current state.
- Conditions []metav1.Condition `json:"conditions"`
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
}
// ClusterVersionStatus reports the status of the cluster versioning,
@@ -1775,7 +1783,7 @@ type ClusterVersionStatus struct {
// desired is the version that the cluster is reconciling towards.
// If the cluster is not yet fully initialized desired will be set
// with the information available, which may be an image or a tag.
- Desired Release `json:"desired"`
+ Desired configv1.Release `json:"desired"`
// history contains a list of the most recent versions applied to the cluster.
// This value may be empty during cluster startup, and then will be updated
@@ -1792,6 +1800,27 @@ type ClusterVersionStatus struct {
// If this value is not equal to metadata.generation, then the desired
// and conditions fields may represent a previous version.
ObservedGeneration int64 `json:"observedGeneration"`
+
+ // availableUpdates contains updates recommended for this
+ // cluster. Updates which appear in conditionalUpdates but not in
+ // availableUpdates may expose this cluster to known issues. This list
+ // may be empty if no updates are recommended, if the update service
+ // is unavailable, or if an invalid channel has been specified.
+ // +nullable
+ // +kubebuilder:validation:Required
+ // +required
+ AvailableUpdates []configv1.Release `json:"availableUpdates"`
+
+ // conditionalUpdates contains the list of updates that may be
+ // recommended for this cluster if it meets specific required
+ // conditions. Consumers interested in the set of updates that are
+ // actually recommended for this cluster should use
+ // availableUpdates. This list may be empty if no updates are
+ // recommended, if the update service is unavailable, or if an empty
+ // or invalid channel has been specified.
+ // +listType=atomic
+ // +optional
+ ConditionalUpdates []configv1.ConditionalUpdate `json:"conditionalUpdates,omitempty"`
}
// ClusterConfiguration specifies configuration for individual OCP components in the
@@ -1893,8 +1922,8 @@ type ClusterConfiguration struct {
//
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=hostedclusters,shortName=hc;hcs,scope=Namespaced
-// +kubebuilder:storageversion
// +kubebuilder:subresource:status
+// +kubebuilder:deprecatedversion:warning="v1alpha1 is a deprecated version for HostedCluster"
// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".status.version.history[?(@.state==\"Completed\")].version",description="Version"
// +kubebuilder:printcolumn:name="KubeConfig",type="string",JSONPath=".status.kubeconfig.name",description="KubeConfig Secret"
// +kubebuilder:printcolumn:name="Progress",type="string",JSONPath=".status.version.history[?(@.state!=\"\")].state",description="Progress"
diff --git a/api/v1alpha1/nodepool_types.go b/api/v1alpha1/nodepool_types.go
index bb0e7c3340d..71b428ab0b6 100644
--- a/api/v1alpha1/nodepool_types.go
+++ b/api/v1alpha1/nodepool_types.go
@@ -50,7 +50,7 @@ func init() {
// independent of the control plane’s underlying machine architecture.
//
// +kubebuilder:resource:path=nodepools,shortName=np;nps,scope=Namespaced
-// +kubebuilder:storageversion
+// +kubebuilder:deprecatedversion:warning="v1alpha1 is a deprecated version for NodePool"
// +kubebuilder:subresource:status
// +kubebuilder:object:root=true
// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas
@@ -151,7 +151,8 @@ type NodePoolStatus struct {
// Conditions represents the latest available observations of the node pool's
// current state.
- Conditions []NodePoolCondition `json:"conditions"`
+ // +optional
+ Conditions []NodePoolCondition `json:"conditions,omitempty"`
}
// NodePoolList contains a list of NodePools.
diff --git a/api/v1alpha1/zz_conversion.go b/api/v1alpha1/zz_conversion.go
new file mode 100644
index 00000000000..78a4f06e245
--- /dev/null
+++ b/api/v1alpha1/zz_conversion.go
@@ -0,0 +1,584 @@
+package v1alpha1
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+
+ "sigs.k8s.io/controller-runtime/pkg/conversion"
+
+ configv1 "github.com/openshift/api/config/v1"
+ "github.com/openshift/hypershift/api/util/configrefs"
+ "github.com/openshift/hypershift/api/util/ipnet"
+ "github.com/openshift/hypershift/api/v1beta1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/runtime/serializer/json"
+ "k8s.io/apimachinery/pkg/util/sets"
+ clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+)
+
+var (
+ localScheme = runtime.NewScheme()
+ serializer = json.NewSerializerWithOptions(
+ json.DefaultMetaFactory, localScheme, localScheme,
+ json.SerializerOptions{Strict: false},
+ )
+ v1beta1GroupVersion = schema.GroupVersion{Group: GroupVersion.Group, Version: "v1beta1"}
+)
+
+func init() {
+ v1beta1.AddToScheme(localScheme)
+ configv1.AddToScheme(localScheme)
+ clientgoscheme.AddToScheme(localScheme)
+ AddToScheme(localScheme)
+}
+
+// HostedCluster conversion
+func (h *HostedCluster) ConvertTo(rawDst conversion.Hub) error {
+ temp := h.DeepCopy()
+ if err := fixupHostedClusterBeforeConversion(temp); err != nil {
+ return err
+ }
+ return serializationConvert(temp, rawDst)
+}
+
+func (h *HostedCluster) ConvertFrom(rawSrc conversion.Hub) error {
+ err := serializationConvert(rawSrc, h)
+ if err != nil {
+ return err
+ }
+ return fixupHostedClusterAfterConversion(h)
+}
+
+func (n *NodePool) ConvertTo(rawDst conversion.Hub) error {
+ if n.Spec.NodeCount != nil && n.Spec.Replicas == nil {
+ n.Spec.Replicas = n.Spec.NodeCount
+ }
+ return serializationConvert(n, rawDst)
+}
+
+func (n *NodePool) ConvertFrom(rawSrc conversion.Hub) error {
+ return serializationConvert(rawSrc, n)
+}
+
+func (e *AWSEndpointService) ConvertTo(rawDst conversion.Hub) error {
+ return serializationConvert(e, rawDst)
+}
+func (e *AWSEndpointService) ConvertFrom(rawSrc conversion.Hub) error {
+ return serializationConvert(rawSrc, e)
+}
+
+func (h *HostedControlPlane) ConvertTo(rawDst conversion.Hub) error {
+ temp := h.DeepCopy()
+ if err := fixupHostedControlPlaneBeforeConversion(temp); err != nil {
+ return err
+ }
+ return serializationConvert(temp, rawDst)
+}
+
+func (h *HostedControlPlane) ConvertFrom(rawSrc conversion.Hub) error {
+ err := serializationConvert(rawSrc, h)
+ if err != nil {
+ return err
+ }
+ return fixupHostedControlPlaneAfterConversion(h)
+}
+
+func serializationConvert(from runtime.Object, to runtime.Object) error {
+ b := &bytes.Buffer{}
+ from.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{})
+ if err := serializer.Encode(from, b); err != nil {
+ return fmt.Errorf("cannot serialize %T: %w", from, err)
+ }
+ if _, _, err := serializer.Decode(b.Bytes(), nil, to); err != nil {
+ return fmt.Errorf("cannot decode %T: %w", to, err)
+ }
+ gvks, _, err := localScheme.ObjectKinds(to)
+ if err != nil || len(gvks) == 0 {
+ return fmt.Errorf("cannot get gvk for %T: %w", to, err)
+ }
+ to.GetObjectKind().SetGroupVersionKind(gvks[0])
+ return nil
+}
+
+func fixupHostedClusterBeforeConversion(hc *HostedCluster) error {
+ if hc.Spec.Platform.AWS != nil {
+ reconcileDeprecatedAWSRoles(hc.Spec.Platform.AWS)
+ }
+ if err := reconcileDeprecatedGlobalConfig(hc.Spec.Configuration); err != nil {
+ return err
+ }
+ if err := reconcileDeprecatedNetworkSettings(&hc.Spec.Networking); err != nil {
+ return err
+ }
+ return nil
+}
+
+func fixupHostedClusterAfterConversion(hc *HostedCluster) error {
+ if hc.Spec.SecretEncryption != nil && hc.Spec.SecretEncryption.KMS != nil &&
+ hc.Spec.SecretEncryption.KMS.AWS != nil {
+ hc.Spec.SecretEncryption.KMS.AWS.Auth.Credentials.Name = convertARNToSecretName(hc.Spec.SecretEncryption.KMS.AWS.Auth.Credentials.Name)
+ }
+
+ return nil
+}
+
+func fixupHostedControlPlaneBeforeConversion(hcp *HostedControlPlane) error {
+ if hcp.Spec.Platform.AWS != nil {
+ reconcileDeprecatedAWSRoles(hcp.Spec.Platform.AWS)
+ }
+ if err := reconcileDeprecatedGlobalConfig(hcp.Spec.Configuration); err != nil {
+ return err
+ }
+ reconcileDeprecatedHCPNetworkSettings(hcp)
+ if err := reconcileDeprecatedNetworkSettings(&hcp.Spec.Networking); err != nil {
+ return err
+ }
+ return nil
+}
+
+func fixupHostedControlPlaneAfterConversion(hcp *HostedControlPlane) error {
+ populateDeprecatedNetworkingFields(&hcp.Spec.Networking)
+ populateDeprecatedHCPNetworkingFields(hcp)
+ if hcp.Spec.Platform.AWS != nil {
+ populateDeprecatedAWSRoles(hcp.Spec.Platform.AWS)
+ }
+ return populateDeprecatedGlobalConfig(hcp.Spec.Configuration)
+}
+
+func populateDeprecatedAWSRoles(aws *AWSPlatformSpec) {
+ aws.KubeCloudControllerCreds.Name = convertARNToSecretName(aws.RolesRef.KubeCloudControllerARN)
+ aws.NodePoolManagementCreds.Name = convertARNToSecretName(aws.RolesRef.NodePoolManagementARN)
+ aws.ControlPlaneOperatorCreds.Name = convertARNToSecretName(aws.RolesRef.ControlPlaneOperatorARN)
+ aws.Roles = []AWSRoleCredentials{
+ {
+ ARN: aws.RolesRef.NetworkARN,
+ Namespace: "openshift-cloud-network-config-controller",
+ Name: "cloud-credentials",
+ },
+ {
+ ARN: aws.RolesRef.StorageARN,
+ Namespace: "openshift-cluster-csi-drivers",
+ Name: "ebs-cloud-credentials",
+ },
+ {
+ ARN: aws.RolesRef.ImageRegistryARN,
+ Namespace: "openshift-image-registry",
+ Name: "installer-cloud-credentials",
+ },
+ {
+ ARN: aws.RolesRef.IngressARN,
+ Namespace: "openshift-ingress-operator",
+ Name: "cloud-credentials",
+ },
+ }
+}
+
+func reconcileDeprecatedAWSRoles(aws *AWSPlatformSpec) {
+ // Migrate ARNs from slice into typed fields.
+ for _, v := range aws.Roles {
+ switch v.Namespace {
+ case "openshift-image-registry":
+ aws.RolesRef.ImageRegistryARN = v.ARN
+ case "openshift-ingress-operator":
+ aws.RolesRef.IngressARN = v.ARN
+ case "openshift-cloud-network-config-controller":
+ aws.RolesRef.NetworkARN = v.ARN
+ case "openshift-cluster-csi-drivers":
+ aws.RolesRef.StorageARN = v.ARN
+ }
+ }
+
+ // For arns stored in secrets, delay the retrieval of the secret to the HostedCluster controller by setting a
+ // placeholder ARN here that tells the controller to do the lookup before reconciling the HostedCluster.
+
+ // Migrate ARNs from secrets into typed fields.
+ if aws.NodePoolManagementCreds.Name != "" && aws.RolesRef.NodePoolManagementARN == "" {
+ aws.RolesRef.NodePoolManagementARN = convertSecretNameToARN(aws.NodePoolManagementCreds.Name)
+ }
+
+ if aws.ControlPlaneOperatorCreds.Name != "" && aws.RolesRef.ControlPlaneOperatorARN == "" {
+ aws.RolesRef.ControlPlaneOperatorARN = convertSecretNameToARN(aws.ControlPlaneOperatorCreds.Name)
+ }
+
+ if aws.KubeCloudControllerCreds.Name != "" && aws.RolesRef.KubeCloudControllerARN == "" {
+ aws.RolesRef.KubeCloudControllerARN = convertSecretNameToARN(aws.KubeCloudControllerCreds.Name)
+ }
+}
+
+func convertSecretNameToARN(name string) string {
+ if name == "" {
+ return ""
+ }
+ if strings.HasPrefix(name, "arn::") {
+ return strings.TrimPrefix(name, "arn::")
+ }
+ return fmt.Sprintf("arn-from-secret::%s", name)
+}
+
+func convertARNToSecretName(arn string) string {
+ if strings.HasPrefix(arn, "arn-from-secret::") {
+ return strings.TrimPrefix(arn, "arn-from-secret::")
+ }
+ return ""
+}
+
+// reconcileDeprecatedGlobalConfig converts previously specified configuration in RawExtension format to the corresponding typed spec fields.
+func reconcileDeprecatedGlobalConfig(config *ClusterConfiguration) error {
+
+ // Skip if no deprecated configuration is set
+ if config == nil || len(config.Items) == 0 {
+ return nil
+ }
+
+ gconfig, err := ParseGlobalConfig(config)
+ if err != nil {
+ // This should never happen because at this point, the global configuration
+ // should be valid
+ return err
+ }
+
+ // Copy over config from the raw extension
+ if gconfig.APIServer != nil {
+ config.APIServer = &gconfig.APIServer.Spec
+ }
+ if gconfig.Authentication != nil {
+ config.Authentication = &gconfig.Authentication.Spec
+ }
+ if gconfig.FeatureGate != nil {
+ config.FeatureGate = &gconfig.FeatureGate.Spec
+ }
+ if gconfig.Image != nil {
+ config.Image = &gconfig.Image.Spec
+ }
+ if gconfig.Ingress != nil {
+ config.Ingress = &gconfig.Ingress.Spec
+ }
+ if gconfig.Network != nil {
+ config.Network = &gconfig.Network.Spec
+ }
+ if gconfig.OAuth != nil {
+ config.OAuth = &gconfig.OAuth.Spec
+ }
+ if gconfig.Scheduler != nil {
+ config.Scheduler = &gconfig.Scheduler.Spec
+ }
+ if gconfig.Proxy != nil {
+ config.Proxy = &gconfig.Proxy.Spec
+ }
+
+ return nil
+}
+
+// globalConfig holds the typed config.openshift.io objects decoded from the
+// deprecated RawExtension configuration items by ParseGlobalConfig.
+// NOTE(review): Build and Project are declared but never populated by
+// ParseGlobalConfig's type switch in this chunk — confirm whether other
+// callers set them.
+type globalConfig struct {
+	APIServer      *configv1.APIServer
+	Authentication *configv1.Authentication
+	FeatureGate    *configv1.FeatureGate
+	Image          *configv1.Image
+	Ingress        *configv1.Ingress
+	Network        *configv1.Network
+	OAuth          *configv1.OAuth
+	Scheduler      *configv1.Scheduler
+	Proxy          *configv1.Proxy
+	Build          *configv1.Build
+	Project        *configv1.Project
+}
+
+// ParseGlobalConfig decodes the deprecated RawExtension items in cfg into
+// typed config.openshift.io objects. It rejects resources from any other API
+// group and duplicate kinds. A nil cfg yields an empty result.
+func ParseGlobalConfig(cfg *ClusterConfiguration) (globalConfig, error) {
+	result := globalConfig{}
+	if cfg == nil {
+		return result, nil
+	}
+	kinds := sets.NewString() // keeps track of which kinds have been found
+	for i, cfg := range cfg.Items {
+		cfgObject, gvk, err := serializer.Decode(cfg.Raw, nil, nil)
+		if err != nil {
+			return result, fmt.Errorf("cannot parse configuration at index %d: %w", i, err)
+		}
+		if gvk.GroupVersion().String() != configv1.GroupVersion.String() {
+			return result, fmt.Errorf("invalid resource type found in configuration: kind: %s, apiVersion: %s", gvk.Kind, gvk.GroupVersion().String())
+		}
+		if kinds.Has(gvk.Kind) {
+			return result, fmt.Errorf("duplicate config type found: %s", gvk.Kind)
+		}
+		kinds.Insert(gvk.Kind)
+		switch obj := cfgObject.(type) {
+		case *configv1.APIServer:
+			if obj.Spec.Audit.Profile == "" {
+				// Populate kubebuilder default for comparison
+				// https://github.com/openshift/api/blob/f120778bee805ad1a7a4f05a6430332cf5811813/config/v1/types_apiserver.go#L57
+				obj.Spec.Audit.Profile = configv1.DefaultAuditProfileType
+			}
+			result.APIServer = obj
+		case *configv1.Authentication:
+			result.Authentication = obj
+		case *configv1.FeatureGate:
+			result.FeatureGate = obj
+		case *configv1.Image:
+			// Fix: this case was missing, so an Image item was decoded and
+			// counted but silently dropped, even though callers copy
+			// result.Image (see reconcileDeprecatedGlobalConfig) and
+			// configurationFieldsToRawExtensions serializes config.Image.
+			result.Image = obj
+		case *configv1.Ingress:
+			result.Ingress = obj
+		case *configv1.Network:
+			result.Network = obj
+		case *configv1.OAuth:
+			result.OAuth = obj
+		case *configv1.Scheduler:
+			result.Scheduler = obj
+		case *configv1.Proxy:
+			result.Proxy = obj
+		}
+	}
+	return result, nil
+}
+
+// reconcileDeprecatedNetworkSettings copies the deprecated single-CIDR string
+// fields (MachineCIDR/PodCIDR/ServiceCIDR) into the corresponding new
+// single-entry network lists, parsing each CIDR. Empty deprecated fields are
+// left alone. Returns an error if any set CIDR fails to parse.
+func reconcileDeprecatedNetworkSettings(networking *ClusterNetworking) error {
+	if networking.MachineCIDR != "" {
+		cidr, err := ipnet.ParseCIDR(networking.MachineCIDR)
+		if err != nil {
+			return fmt.Errorf("failed to parse machine CIDR %q: %w", networking.MachineCIDR, err)
+		}
+		networking.MachineNetwork = []MachineNetworkEntry{
+			{
+				CIDR: *cidr,
+			},
+		}
+	}
+	if networking.PodCIDR != "" {
+		cidr, err := ipnet.ParseCIDR(networking.PodCIDR)
+		if err != nil {
+			return fmt.Errorf("failed to parse pod CIDR %q: %w", networking.PodCIDR, err)
+		}
+		networking.ClusterNetwork = []ClusterNetworkEntry{
+			{
+				CIDR: *cidr,
+			},
+		}
+	}
+	if networking.ServiceCIDR != "" {
+		cidr, err := ipnet.ParseCIDR(networking.ServiceCIDR)
+		if err != nil {
+			// Fix: include the offending value, consistent with the machine
+			// and pod CIDR errors above.
+			return fmt.Errorf("failed to parse service CIDR %q: %w", networking.ServiceCIDR, err)
+		}
+		networking.ServiceNetwork = []ServiceNetworkEntry{
+			{
+				CIDR: *cidr,
+			},
+		}
+	}
+	return nil
+}
+
+// populateDeprecatedNetworkingFields mirrors the first entry of each new
+// networking list into the corresponding deprecated single-CIDR string field,
+// clearing the field when the list is empty.
+func populateDeprecatedNetworkingFields(networking *ClusterNetworking) {
+	networking.ServiceCIDR = ""
+	if len(networking.ServiceNetwork) > 0 {
+		networking.ServiceCIDR = cidrToString(networking.ServiceNetwork[0].CIDR)
+	}
+	networking.PodCIDR = ""
+	if len(networking.ClusterNetwork) > 0 {
+		networking.PodCIDR = cidrToString(networking.ClusterNetwork[0].CIDR)
+	}
+	networking.MachineCIDR = ""
+	if len(networking.MachineNetwork) > 0 {
+		networking.MachineCIDR = cidrToString(networking.MachineNetwork[0].CIDR)
+	}
+}
+
+// cidrToString renders cidr as a string, returning "" for the zero value
+// (no IP set) instead of an invalid representation.
+func cidrToString(cidr ipnet.IPNet) string {
+	if len(cidr.IP) > 0 {
+		return cidr.String()
+	}
+	return ""
+}
+
+// populateDeprecatedHCPNetworkingFields mirrors the nested Networking struct
+// into the deprecated flat fields on the HostedControlPlane spec, clearing
+// each flat CIDR when the corresponding list is empty.
+func populateDeprecatedHCPNetworkingFields(hcp *HostedControlPlane) {
+	spec := &hcp.Spec
+
+	spec.ServiceCIDR = ""
+	if len(spec.Networking.ServiceNetwork) > 0 {
+		spec.ServiceCIDR = spec.Networking.ServiceNetwork[0].CIDR.String()
+	}
+	spec.PodCIDR = ""
+	if len(spec.Networking.ClusterNetwork) > 0 {
+		spec.PodCIDR = spec.Networking.ClusterNetwork[0].CIDR.String()
+	}
+	spec.MachineCIDR = ""
+	if len(spec.Networking.MachineNetwork) > 0 {
+		spec.MachineCIDR = spec.Networking.MachineNetwork[0].CIDR.String()
+	}
+	spec.NetworkType = spec.Networking.NetworkType
+
+	// The flat APIServer fields are only populated when the nested struct is
+	// present, matching the original behavior (left untouched otherwise).
+	if apiServer := spec.Networking.APIServer; apiServer != nil {
+		spec.APIPort = apiServer.Port
+		spec.APIAdvertiseAddress = apiServer.AdvertiseAddress
+		spec.APIAllowedCIDRBlocks = apiServer.AllowedCIDRBlocks
+	}
+}
+
+// reconcileDeprecatedHCPNetworkSettings copies the deprecated flat networking
+// fields on the HostedControlPlane spec into the nested Networking struct,
+// but only where the nested value is still unset (the nested value wins).
+func reconcileDeprecatedHCPNetworkSettings(hcp *HostedControlPlane) {
+	// ensureAPIServer lazily allocates the nested APIServer struct so the
+	// deprecated flat API fields below have somewhere to be copied. This
+	// replaces three copies of the same nil-check.
+	ensureAPIServer := func() {
+		if hcp.Spec.Networking.APIServer == nil {
+			hcp.Spec.Networking.APIServer = &APIServerNetworking{}
+		}
+	}
+
+	if hcp.Spec.ServiceCIDR != "" && hcp.Spec.Networking.ServiceCIDR == "" {
+		hcp.Spec.Networking.ServiceCIDR = hcp.Spec.ServiceCIDR
+	}
+	if hcp.Spec.PodCIDR != "" && hcp.Spec.Networking.PodCIDR == "" {
+		hcp.Spec.Networking.PodCIDR = hcp.Spec.PodCIDR
+	}
+	if hcp.Spec.MachineCIDR != "" && hcp.Spec.Networking.MachineCIDR == "" {
+		hcp.Spec.Networking.MachineCIDR = hcp.Spec.MachineCIDR
+	}
+	if hcp.Spec.NetworkType != "" && hcp.Spec.Networking.NetworkType == "" {
+		hcp.Spec.Networking.NetworkType = hcp.Spec.NetworkType
+	}
+	if hcp.Spec.APIPort != nil && (hcp.Spec.Networking.APIServer == nil || hcp.Spec.Networking.APIServer.Port == nil) {
+		ensureAPIServer()
+		hcp.Spec.Networking.APIServer.Port = hcp.Spec.APIPort
+	}
+	if hcp.Spec.APIAdvertiseAddress != nil && (hcp.Spec.Networking.APIServer == nil || hcp.Spec.Networking.APIServer.AdvertiseAddress == nil) {
+		ensureAPIServer()
+		hcp.Spec.Networking.APIServer.AdvertiseAddress = hcp.Spec.APIAdvertiseAddress
+	}
+	if len(hcp.Spec.APIAllowedCIDRBlocks) != 0 && (hcp.Spec.Networking.APIServer == nil || len(hcp.Spec.Networking.APIServer.AllowedCIDRBlocks) == 0) {
+		ensureAPIServer()
+		hcp.Spec.Networking.APIServer.AllowedCIDRBlocks = hcp.Spec.APIAllowedCIDRBlocks
+	}
+}
+
+// populateDeprecatedGlobalConfig regenerates the deprecated Items,
+// SecretRefs, and ConfigMapRefs fields from the typed configuration fields.
+// A nil config is a no-op.
+func populateDeprecatedGlobalConfig(config *ClusterConfiguration) error {
+	if config == nil {
+		return nil
+	}
+	items, err := configurationFieldsToRawExtensions(config)
+	if err != nil {
+		return fmt.Errorf("failed to convert configuration fields to raw extension: %w", err)
+	}
+	config.Items = items
+
+	// Rebuild the reference lists; both start non-nil to match serialization
+	// of an empty list rather than null.
+	secretRefs := []corev1.LocalObjectReference{}
+	for _, name := range configrefs.SecretRefs(config) {
+		secretRefs = append(secretRefs, corev1.LocalObjectReference{Name: name})
+	}
+	configMapRefs := []corev1.LocalObjectReference{}
+	for _, name := range configrefs.ConfigMapRefs(config) {
+		configMapRefs = append(configMapRefs, corev1.LocalObjectReference{Name: name})
+	}
+	config.SecretRefs = secretRefs
+	config.ConfigMapRefs = configMapRefs
+	return nil
+}
+
+// configurationFieldsToRawExtensions serializes each set typed configuration
+// field into a RawExtension holding the corresponding config.openshift.io
+// object (spec only, status stripped), in a fixed order. The returned
+// extensions carry Raw bytes with Object unset, matching what the API server
+// returns, so DeepEqual-based upserts compare correctly.
+func configurationFieldsToRawExtensions(config *ClusterConfiguration) ([]runtime.RawExtension, error) {
+	var result []runtime.RawExtension
+	if config == nil {
+		return result, nil
+	}
+
+	// Collect the set sections as full objects, in the same stable order the
+	// previous hand-expanded version used.
+	var objects []runtime.Object
+	if config.APIServer != nil {
+		objects = append(objects, &configv1.APIServer{Spec: *config.APIServer})
+	}
+	if config.Authentication != nil {
+		objects = append(objects, &configv1.Authentication{Spec: *config.Authentication})
+	}
+	if config.FeatureGate != nil {
+		objects = append(objects, &configv1.FeatureGate{Spec: *config.FeatureGate})
+	}
+	if config.Image != nil {
+		objects = append(objects, &configv1.Image{Spec: *config.Image})
+	}
+	if config.Ingress != nil {
+		objects = append(objects, &configv1.Ingress{Spec: *config.Ingress})
+	}
+	if config.Network != nil {
+		objects = append(objects, &configv1.Network{Spec: *config.Network})
+	}
+	if config.OAuth != nil {
+		objects = append(objects, &configv1.OAuth{Spec: *config.OAuth})
+	}
+	if config.Scheduler != nil {
+		objects = append(objects, &configv1.Scheduler{Spec: *config.Scheduler})
+	}
+	if config.Proxy != nil {
+		objects = append(objects, &configv1.Proxy{Spec: *config.Proxy})
+	}
+	for _, obj := range objects {
+		result = append(result, runtime.RawExtension{Object: obj})
+	}
+
+	for idx := range result {
+		gvks, _, err := localScheme.ObjectKinds(result[idx].Object)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get gvk for %T: %w", result[idx].Object, err)
+		}
+		if len(gvks) == 0 {
+			return nil, fmt.Errorf("failed to determine gvk for %T", result[idx].Object)
+		}
+		result[idx].Object.GetObjectKind().SetGroupVersionKind(gvks[0])
+
+		// We do a DeepEqual in the upsert func, so we must match the deserialized version from
+		// the server which has Raw set and Object unset.
+		b := &bytes.Buffer{}
+		if err := serializer.Encode(result[idx].Object, b); err != nil {
+			return nil, fmt.Errorf("failed to marshal %+v: %w", result[idx].Object, err)
+		}
+
+		// Remove the status part of the serialized resource. We only have
+		// spec to begin with and status causes incompatibilities with previous
+		// versions of the CPO
+		unstructuredObject := &unstructured.Unstructured{}
+		if _, _, err := unstructured.UnstructuredJSONScheme.Decode(b.Bytes(), nil, unstructuredObject); err != nil {
+			return nil, fmt.Errorf("failed to decode resource into unstructured: %w", err)
+		}
+		unstructured.RemoveNestedField(unstructuredObject.Object, "status")
+		b = &bytes.Buffer{}
+		if err := unstructured.UnstructuredJSONScheme.Encode(unstructuredObject, b); err != nil {
+			return nil, fmt.Errorf("failed to serialize unstructured resource: %w", err)
+		}
+
+		result[idx].Raw = bytes.TrimSuffix(b.Bytes(), []byte("\n"))
+		result[idx].Object = nil
+	}
+
+	return result, nil
+}
diff --git a/api/v1alpha1/zz_conversion_test.go b/api/v1alpha1/zz_conversion_test.go
new file mode 100644
index 00000000000..321177fc01c
--- /dev/null
+++ b/api/v1alpha1/zz_conversion_test.go
@@ -0,0 +1,442 @@
+package v1alpha1
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "math/rand"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ fuzz "github.com/google/gofuzz"
+ configv1 "github.com/openshift/api/config/v1"
+ "github.com/openshift/hypershift/api/util/configrefs"
+ "github.com/openshift/hypershift/api/util/ipnet"
+ "github.com/openshift/hypershift/api/v1beta1"
+ "github.com/openshift/hypershift/support/conversiontest"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/apitesting/fuzzer"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ "sigs.k8s.io/controller-runtime/pkg/conversion"
+)
+
+// networkingFuzzer pins the CIDR fields to fixed, parseable values after
+// fuzzing, keeping the deprecated single-CIDR strings and the new list
+// fields mutually consistent so the round-trip comparison can succeed.
+func networkingFuzzer(in *ClusterNetworking, c fuzz.Continue) {
+	c.FuzzNoCustom(in)
+	in.MachineCIDR, in.PodCIDR, in.ServiceCIDR = "10.10.100.0/24", "10.10.101.0/24", "10.10.102.0/24"
+	in.MachineNetwork = []MachineNetworkEntry{{CIDR: mustParseCIDR(in.MachineCIDR)}}
+	in.ClusterNetwork = []ClusterNetworkEntry{{CIDR: mustParseCIDR(in.PodCIDR)}}
+	in.ServiceNetwork = []ServiceNetworkEntry{{CIDR: mustParseCIDR(in.ServiceCIDR)}}
+}
+
+// v1beta1NetworkingFuzzer pins the v1beta1 network lists to fixed, parseable
+// CIDRs so fuzzed values survive conversion round-trips.
+// NOTE(review): c is unused but required by the gofuzz custom-fuzzer
+// signature; the lists are fully overwritten rather than fuzzed.
+func v1beta1NetworkingFuzzer(in *v1beta1.ClusterNetworking, c fuzz.Continue) {
+	in.ClusterNetwork = []v1beta1.ClusterNetworkEntry{{CIDR: mustParseCIDR("10.11.100.0/24")}}
+	in.MachineNetwork = []v1beta1.MachineNetworkEntry{{CIDR: mustParseCIDR("10.11.101.0/24")}}
+	in.ServiceNetwork = []v1beta1.ServiceNetworkEntry{{CIDR: mustParseCIDR("10.11.102.0/24")}}
+}
+
+// mustParseCIDR parses str into an ipnet.IPNet, panicking on failure; it is
+// only used for fixed test fixtures that are known to be valid.
+func mustParseCIDR(str string) ipnet.IPNet {
+	parsed, err := ipnet.ParseCIDR(str)
+	if err != nil {
+		panic(err.Error())
+	}
+	return *parsed
+}
+
+// v1beta1ConfigFuzzer applies the kubebuilder default audit profile after
+// fuzzing, matching the default that conversion populates, so the
+// comparison does not fail on an empty-vs-default profile.
+func v1beta1ConfigFuzzer(in *v1beta1.ClusterConfiguration, c fuzz.Continue) {
+	c.FuzzNoCustom(in)
+	if apiServer := in.APIServer; apiServer != nil && apiServer.Audit.Profile == "" {
+		apiServer.Audit.Profile = configv1.DefaultAuditProfileType
+	}
+}
+
+// configFuzzer populates a random subset of the typed configuration sections
+// and mirrors each one into the deprecated Items raw-extension list, then
+// rebuilds the SecretRefs/ConfigMapRefs lists to match.
+//
+// Fix: section selection now uses c.RandBool() from the fuzzer instead of a
+// package-level time-seeded randomBool(), so fuzz runs are reproducible from
+// the fuzzer's seed.
+func configFuzzer(in *ClusterConfiguration, c fuzz.Continue) {
+	in.Items = nil
+	if c.RandBool() {
+		in.APIServer = &configv1.APIServerSpec{}
+		c.Fuzz(in.APIServer)
+		if in.APIServer.Audit.Profile == "" {
+			// Match the kubebuilder default that conversion populates.
+			in.APIServer.Audit.Profile = configv1.DefaultAuditProfileType
+		}
+		in.Items = append(in.Items, runtime.RawExtension{
+			Raw: serializeResource(&configv1.APIServer{Spec: *in.APIServer}),
+		})
+	}
+	if c.RandBool() {
+		in.Authentication = &configv1.AuthenticationSpec{}
+		c.Fuzz(in.Authentication)
+		in.Items = append(in.Items, runtime.RawExtension{
+			Raw: serializeResource(&configv1.Authentication{Spec: *in.Authentication}),
+		})
+	}
+	if c.RandBool() {
+		in.FeatureGate = &configv1.FeatureGateSpec{}
+		c.Fuzz(in.FeatureGate)
+		in.Items = append(in.Items, runtime.RawExtension{
+			Raw: serializeResource(&configv1.FeatureGate{Spec: *in.FeatureGate}),
+		})
+	}
+	if c.RandBool() {
+		in.Image = &configv1.ImageSpec{}
+		c.Fuzz(in.Image)
+		in.Items = append(in.Items, runtime.RawExtension{
+			Raw: serializeResource(&configv1.Image{Spec: *in.Image}),
+		})
+	}
+	if c.RandBool() {
+		in.Ingress = &configv1.IngressSpec{}
+		c.Fuzz(in.Ingress)
+		in.Items = append(in.Items, runtime.RawExtension{
+			Raw: serializeResource(&configv1.Ingress{Spec: *in.Ingress}),
+		})
+	}
+	if c.RandBool() {
+		in.Network = &configv1.NetworkSpec{}
+		c.Fuzz(in.Network)
+		in.Items = append(in.Items, runtime.RawExtension{
+			Raw: serializeResource(&configv1.Network{Spec: *in.Network}),
+		})
+	}
+	if c.RandBool() {
+		in.OAuth = &configv1.OAuthSpec{}
+		c.Fuzz(in.OAuth)
+		in.Items = append(in.Items, runtime.RawExtension{
+			Raw: serializeResource(&configv1.OAuth{Spec: *in.OAuth}),
+		})
+	}
+	if c.RandBool() {
+		in.Scheduler = &configv1.SchedulerSpec{}
+		c.Fuzz(in.Scheduler)
+		in.Items = append(in.Items, runtime.RawExtension{
+			Raw: serializeResource(&configv1.Scheduler{Spec: *in.Scheduler}),
+		})
+	}
+	if c.RandBool() {
+		in.Proxy = &configv1.ProxySpec{}
+		c.Fuzz(in.Proxy)
+		in.Items = append(in.Items, runtime.RawExtension{
+			Raw: serializeResource(&configv1.Proxy{Spec: *in.Proxy}),
+		})
+	}
+	// Rebuild the reference lists so they agree with the populated sections.
+	configMapRefs := []corev1.LocalObjectReference{}
+	for _, ref := range configrefs.ConfigMapRefs(in) {
+		configMapRefs = append(configMapRefs, corev1.LocalObjectReference{
+			Name: ref,
+		})
+	}
+	in.ConfigMapRefs = configMapRefs
+	secretRefs := []corev1.LocalObjectReference{}
+	for _, ref := range configrefs.SecretRefs(in) {
+		secretRefs = append(secretRefs, corev1.LocalObjectReference{
+			Name: ref,
+		})
+	}
+	in.SecretRefs = secretRefs
+}
+
+// serializeResource renders obj as canonical JSON with its GVK set and the
+// status section stripped (only spec exists to begin with, and status breaks
+// compatibility with older CPO versions). It is a test helper: any failure
+// panics rather than returning a sentinel.
+func serializeResource(obj runtime.Object) []byte {
+	b := &bytes.Buffer{}
+	gvks, _, err := localScheme.ObjectKinds(obj)
+	if err != nil {
+		panic(err.Error())
+	}
+	if len(gvks) == 0 {
+		panic(fmt.Sprintf("did not find gvk for %T", obj))
+	}
+	obj.GetObjectKind().SetGroupVersionKind(gvks[0])
+	err = serializer.Encode(obj, b)
+	if err != nil {
+		panic(err.Error())
+	}
+
+	// Remove the status part of the serialized resource. We only have
+	// spec to begin with and status causes incompatibilities with previous
+	// versions of the CPO
+	unstructuredObject := &unstructured.Unstructured{}
+	if _, _, err := unstructured.UnstructuredJSONScheme.Decode(b.Bytes(), nil, unstructuredObject); err != nil {
+		// Fix: previously returned nil, silently producing an empty Raw and
+		// confusing downstream fuzz failures; panic like the errors above.
+		panic(err.Error())
+	}
+	unstructured.RemoveNestedField(unstructuredObject.Object, "status")
+	b = &bytes.Buffer{}
+	if err := unstructured.UnstructuredJSONScheme.Encode(unstructuredObject, b); err != nil {
+		// Fix: same rationale — fail loudly in a test helper.
+		panic(err.Error())
+	}
+
+	return bytes.TrimSuffix(b.Bytes(), []byte("\n"))
+}
+
+// randomBool returns a pseudo-random boolean used to vary which configuration
+// sections the fuzzer populates.
+// NOTE(review): re-seeding the global source on every call is deprecated as
+// of Go 1.20 and makes fuzz runs non-reproducible; consider using
+// c.RandBool() from the gofuzz Continue instead — TODO confirm.
+func randomBool() bool {
+	rand.Seed(time.Now().UnixNano())
+	return rand.Intn(2) == 1
+}
+
+// awsRolesRefFuzzer populates the deprecated per-component AWS role and
+// credential fields with the fixed namespace/name pairs the conversion
+// expects, and clears RolesRef so only the deprecated representation is set.
+func awsRolesRefFuzzer(in *AWSPlatformSpec, c fuzz.Continue) {
+	c.FuzzNoCustom(in)
+	componentCreds := []struct {
+		namespace string
+		name      string
+	}{
+		{"openshift-image-registry", "installer-cloud-credentials"},
+		{"openshift-ingress-operator", "cloud-credentials"},
+		{"openshift-cloud-network-config-controller", "cloud-credentials"},
+		{"openshift-cluster-csi-drivers", "ebs-cloud-credentials"},
+	}
+	roles := make([]AWSRoleCredentials, 0, len(componentCreds))
+	for _, cc := range componentCreds {
+		roles = append(roles, AWSRoleCredentials{
+			ARN:       c.RandString(),
+			Namespace: cc.namespace,
+			Name:      cc.name,
+		})
+	}
+	sort.SliceStable(roles, func(i, j int) bool {
+		return roles[i].Namespace < roles[j].Namespace
+	})
+	in.Roles = roles
+	in.KubeCloudControllerCreds = corev1.LocalObjectReference{Name: c.RandString()}
+	in.ControlPlaneOperatorCreds = corev1.LocalObjectReference{Name: c.RandString()}
+	in.NodePoolManagementCreds = corev1.LocalObjectReference{Name: c.RandString()}
+	in.RolesRef = AWSRolesRef{}
+}
+
+// hcpFuzzer keeps the deprecated flat networking fields on the fuzzed
+// HostedControlPlane spec in sync with the nested Networking struct, so the
+// round-trip comparison sees a self-consistent object.
+func hcpFuzzer(in *HostedControlPlane, c fuzz.Continue) {
+	c.FuzzNoCustom(in)
+	spec := &in.Spec
+	spec.ServiceCIDR = spec.Networking.ServiceCIDR
+	spec.PodCIDR = spec.Networking.PodCIDR
+	spec.MachineCIDR = spec.Networking.MachineCIDR
+	spec.NetworkType = spec.Networking.NetworkType
+	spec.APIPort, spec.APIAdvertiseAddress, spec.APIAllowedCIDRBlocks = nil, nil, nil
+	if apiServer := spec.Networking.APIServer; apiServer != nil {
+		spec.APIPort = apiServer.Port
+		spec.APIAdvertiseAddress = apiServer.AdvertiseAddress
+		spec.APIAllowedCIDRBlocks = apiServer.AllowedCIDRBlocks
+	}
+}
+
+// awsEndpointServiceFuzzer clears Status.DNSName after fuzzing, since that
+// field is not round-tripped by the conversion.
+func awsEndpointServiceFuzzer(in *AWSEndpointService, c fuzz.Continue) {
+	c.FuzzNoCustom(in)
+
+	in.Status.DNSName = ""
+}
+
+// nodePoolFuzzer clears the deprecated Spec.NodeCount after fuzzing, since
+// that field is not round-tripped by the conversion.
+func nodePoolFuzzer(in *NodePool, c fuzz.Continue) {
+	c.FuzzNoCustom(in)
+
+	in.Spec.NodeCount = nil
+}
+
+// fixupHostedCluster normalizes a converted HostedCluster before comparison
+// with the fuzzed original: it repopulates deprecated fields and clears data
+// the conversion intentionally drops.
+func fixupHostedCluster(in conversion.Convertible) {
+	removeTypeMeta(in)
+	hc, ok := in.(*HostedCluster)
+	if !ok {
+		panic(fmt.Sprintf("unexpected convertible type: %T", in))
+	}
+	if cfg := hc.Spec.Configuration; cfg != nil {
+		if err := populateDeprecatedGlobalConfig(cfg); err != nil {
+			panic(err.Error())
+		}
+	}
+	if aws := hc.Spec.Platform.AWS; aws != nil {
+		populateDeprecatedAWSRoles(aws)
+		aws.RolesRef = AWSRolesRef{}
+	}
+	if kms := hc.Spec.SecretEncryption.KMS; kms != nil && kms.AWS != nil {
+		kms.AWS.Auth.Credentials.Name = ""
+	}
+	populateDeprecatedNetworkingFields(&hc.Spec.Networking)
+}
+
+// fixupHostedControlPlane normalizes a converted HostedControlPlane before
+// comparison: configuration items are re-serialized into canonical form and
+// the deprecated AWS roles are sorted by namespace.
+func fixupHostedControlPlane(in conversion.Convertible) {
+	removeTypeMeta(in)
+	hcp, ok := in.(*HostedControlPlane)
+	if !ok {
+		panic(fmt.Sprintf("unexpected convertible type: %T", in))
+	}
+	if cfg := hcp.Spec.Configuration; cfg != nil {
+		for i := range cfg.Items {
+			resource, _, err := serializer.Decode(cfg.Items[i].Raw, nil, nil)
+			if err != nil {
+				panic(err.Error())
+			}
+			cfg.Items[i].Raw = serializeResource(resource)
+		}
+	}
+	if aws := hcp.Spec.Platform.AWS; aws != nil {
+		aws.RolesRef = AWSRolesRef{}
+		sort.SliceStable(aws.Roles, func(i, j int) bool {
+			return aws.Roles[i].Namespace < aws.Roles[j].Namespace
+		})
+	}
+}
+
+// hostedClusterFuzzerFuncs returns the custom fuzzers for the HostedCluster
+// round-trip conversion test.
+func hostedClusterFuzzerFuncs(_ runtimeserializer.CodecFactory) []interface{} {
+	return []interface{}{awsRolesRefFuzzer, configFuzzer, networkingFuzzer, v1beta1ConfigFuzzer, v1beta1NetworkingFuzzer}
+}
+
+// hostedControlPlaneFuzzerFuncs returns the custom fuzzers for the
+// HostedControlPlane round-trip conversion test.
+func hostedControlPlaneFuzzerFuncs(_ runtimeserializer.CodecFactory) []interface{} {
+	return []interface{}{hcpFuzzer, awsRolesRefFuzzer, configFuzzer, networkingFuzzer, v1beta1NetworkingFuzzer, v1beta1ConfigFuzzer}
+}
+
+// awsEndpointServiceFuzzerFuncs returns the custom fuzzers for the
+// AWSEndpointService round-trip conversion test.
+func awsEndpointServiceFuzzerFuncs(_ runtimeserializer.CodecFactory) []interface{} {
+	return []interface{}{awsEndpointServiceFuzzer}
+}
+
+// NodePoolFuzzerFuncs returns the custom fuzzers for the NodePool round-trip
+// conversion test.
+// NOTE(review): exported while the sibling *FuzzerFuncs helpers are
+// unexported; kept as-is since renaming would change the symbol other
+// packages may reference — confirm before unexporting.
+func NodePoolFuzzerFuncs(_ runtimeserializer.CodecFactory) []interface{} {
+	return []interface{}{
+		nodePoolFuzzer,
+	}
+}
+
+// removeTypeMeta clears the GVK on a spoke object so TypeMeta differences do
+// not fail the round-trip comparison.
+func removeTypeMeta(in conversion.Convertible) {
+	in.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{})
+}
+
+// removeHubTypeMeta clears the GVK on a hub object so TypeMeta differences do
+// not fail the round-trip comparison.
+func removeHubTypeMeta(in conversion.Hub) {
+	in.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{})
+}
+
+// TestFuzzyConversion fuzz-tests the spoke (v1alpha1) <-> hub (v1beta1)
+// conversion round-trip for each convertible type, normalizing fields the
+// conversion intentionally does not preserve via the *AfterMutation hooks.
+// NOTE(review): only the HostedCluster subtest sets Scheme — confirm whether
+// the other subtests rely on conversiontest's default scheme.
+func TestFuzzyConversion(t *testing.T) {
+	t.Run("for HostedCluster", conversiontest.FuzzTestFunc(conversiontest.FuzzTestFuncInput{
+		Hub:                &v1beta1.HostedCluster{},
+		HubAfterMutation:   removeHubTypeMeta,
+		Spoke:              &HostedCluster{},
+		SpokeAfterMutation: fixupHostedCluster,
+		FuzzerFuncs:        []fuzzer.FuzzerFuncs{hostedClusterFuzzerFuncs},
+		Scheme:             localScheme,
+	}))
+	t.Run("for NodePool", conversiontest.FuzzTestFunc(conversiontest.FuzzTestFuncInput{
+		Hub:                &v1beta1.NodePool{},
+		HubAfterMutation:   removeHubTypeMeta,
+		Spoke:              &NodePool{},
+		SpokeAfterMutation: removeTypeMeta,
+		FuzzerFuncs:        []fuzzer.FuzzerFuncs{NodePoolFuzzerFuncs},
+	}))
+	t.Run("for HostedControlPlane", conversiontest.FuzzTestFunc(conversiontest.FuzzTestFuncInput{
+		Hub:                &v1beta1.HostedControlPlane{},
+		HubAfterMutation:   removeHubTypeMeta,
+		Spoke:              &HostedControlPlane{},
+		SpokeAfterMutation: fixupHostedControlPlane,
+		FuzzerFuncs:        []fuzzer.FuzzerFuncs{hostedControlPlaneFuzzerFuncs},
+	}))
+	t.Run("for AWSEndpointService", conversiontest.FuzzTestFunc(conversiontest.FuzzTestFuncInput{
+		Hub:                &v1beta1.AWSEndpointService{},
+		HubAfterMutation:   removeHubTypeMeta,
+		Spoke:              &AWSEndpointService{},
+		SpokeAfterMutation: removeTypeMeta,
+		FuzzerFuncs:        []fuzzer.FuzzerFuncs{awsEndpointServiceFuzzerFuncs},
+	}))
+}
+
+// TestConfigurationFieldsToRawExtensions verifies that typed configuration
+// fields are serialized into RawExtensions with GVK set, status stripped,
+// and a representation stable under a JSON round-trip.
+func TestConfigurationFieldsToRawExtensions(t *testing.T) {
+	config := &ClusterConfiguration{
+		Ingress: &configv1.IngressSpec{Domain: "example.com"},
+		Proxy:   &configv1.ProxySpec{HTTPProxy: "http://10.0.136.57:3128", HTTPSProxy: "http://10.0.136.57:3128"},
+	}
+	result, err := configurationFieldsToRawExtensions(config)
+	if err != nil {
+		t.Fatalf("configurationFieldsToRawExtensions: %v", err)
+	}
+	// Fix: guard the element count before indexing result[0]/result[1] below,
+	// so a regression fails cleanly instead of panicking the test binary.
+	if len(result) != 2 {
+		t.Fatalf("expected 2 raw extensions (Ingress, Proxy), got %d", len(result))
+	}
+
+	// Check that serialized resources do not contain a status section
+	for i, rawExt := range result {
+		unstructuredObj := &unstructured.Unstructured{}
+		_, _, err := unstructured.UnstructuredJSONScheme.Decode(rawExt.Raw, nil, unstructuredObj)
+		if err != nil {
+			t.Fatalf("unexpected decode error: %v", err)
+		}
+		_, exists, err := unstructured.NestedFieldNoCopy(unstructuredObj.Object, "status")
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+		if exists {
+			t.Errorf("status field exists for resource %d", i)
+		}
+	}
+
+	serialized, err := json.Marshal(result)
+	if err != nil {
+		t.Fatalf("json.Marshal: %v", err)
+	}
+
+	var roundtripped []runtime.RawExtension
+	if err := json.Unmarshal(serialized, &roundtripped); err != nil {
+		t.Fatalf("json.Unmarshal: %v", err)
+	}
+
+	// CreateOrUpdate does a naive DeepEqual which can not deal with custom unmarshallers, so make
+	// sure the output matches a roundtripped result.
+	if diff := cmp.Diff(result, roundtripped); diff != "" {
+		t.Errorf("output does not match a json-roundtripped version: %s", diff)
+	}
+
+	var ingress configv1.Ingress
+	if err := json.Unmarshal(result[0].Raw, &ingress); err != nil {
+		t.Fatalf("failed to unmarshal raw data: %v", err)
+	}
+	if ingress.APIVersion == "" || ingress.Kind == "" {
+		t.Errorf("rawObject has no apiVersion or kind set: %+v", ingress.ObjectMeta)
+	}
+	if ingress.Spec.Domain != "example.com" {
+		t.Errorf("ingress does not have expected domain: %q", ingress.Spec.Domain)
+	}
+
+	var proxy configv1.Proxy
+	if err := json.Unmarshal(result[1].Raw, &proxy); err != nil {
+		t.Fatalf("failed to unmarshal raw data: %v", err)
+	}
+	if proxy.APIVersion == "" || proxy.Kind == "" {
+		t.Errorf("rawObject has no apiVersion or kind set: %+v", proxy.ObjectMeta)
+	}
+	if proxy.Spec.HTTPProxy != "http://10.0.136.57:3128" {
+		t.Errorf("proxy does not have expected HTTPProxy: %q", proxy.Spec.HTTPProxy)
+	}
+}
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index 37ac7fbfbf7..3e3154580b3 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -675,7 +675,7 @@ func (in *ClusterNetworking) DeepCopy() *ClusterNetworking {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterVersionStatus) DeepCopyInto(out *ClusterVersionStatus) {
*out = *in
- out.Desired = in.Desired
+ in.Desired.DeepCopyInto(&out.Desired)
if in.History != nil {
in, out := &in.History, &out.History
*out = make([]configv1.UpdateHistory, len(*in))
@@ -683,6 +683,20 @@ func (in *ClusterVersionStatus) DeepCopyInto(out *ClusterVersionStatus) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.AvailableUpdates != nil {
+ in, out := &in.AvailableUpdates, &out.AvailableUpdates
+ *out = make([]configv1.Release, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ConditionalUpdates != nil {
+ in, out := &in.ConditionalUpdates, &out.ConditionalUpdates
+ *out = make([]configv1.ConditionalUpdate, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionStatus.
@@ -1102,6 +1116,11 @@ func (in *HostedControlPlaneStatus) DeepCopyInto(out *HostedControlPlaneStatus)
**out = **in
}
out.ControlPlaneEndpoint = in.ControlPlaneEndpoint
+ if in.VersionStatus != nil {
+ in, out := &in.VersionStatus, &out.VersionStatus
+ *out = new(ClusterVersionStatus)
+ (*in).DeepCopyInto(*out)
+ }
if in.LastReleaseImageTransitionTime != nil {
in, out := &in.LastReleaseImageTransitionTime, &out.LastReleaseImageTransitionTime
*out = (*in).DeepCopy()
diff --git a/api/v1beta1/capi_types.go b/api/v1beta1/capi_types.go
new file mode 100644
index 00000000000..06b4c80080d
--- /dev/null
+++ b/api/v1beta1/capi_types.go
@@ -0,0 +1,11 @@
+package v1beta1
+
+// These imports are used to explicitly declare external API dependencies
+import (
+ _ "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
+ _ "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
+ _ "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta1"
+ _ "sigs.k8s.io/cluster-api/api/v1beta1"
+ _ "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1"
+ _ "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+)
diff --git a/api/v1beta1/clusterconfig.go b/api/v1beta1/clusterconfig.go
new file mode 100644
index 00000000000..77f45afcbba
--- /dev/null
+++ b/api/v1beta1/clusterconfig.go
@@ -0,0 +1,15 @@
+package v1beta1
+
+import configv1 "github.com/openshift/api/config/v1"
+
+// Accessors exposing the typed configuration sections, presumably to satisfy
+// the interface consumed by configrefs (see api/util/configrefs).
+// NOTE(review): Get-prefixed getters are unidiomatic Go, but the names are
+// likely dictated by that consumer interface — confirm before renaming.
+func (c *ClusterConfiguration) GetAPIServer() *configv1.APIServerSpec { return c.APIServer }
+func (c *ClusterConfiguration) GetAuthentication() *configv1.AuthenticationSpec {
+	return c.Authentication
+}
+func (c *ClusterConfiguration) GetFeatureGate() *configv1.FeatureGateSpec { return c.FeatureGate }
+func (c *ClusterConfiguration) GetImage() *configv1.ImageSpec             { return c.Image }
+func (c *ClusterConfiguration) GetIngress() *configv1.IngressSpec         { return c.Ingress }
+func (c *ClusterConfiguration) GetNetwork() *configv1.NetworkSpec         { return c.Network }
+func (c *ClusterConfiguration) GetOAuth() *configv1.OAuthSpec             { return c.OAuth }
+func (c *ClusterConfiguration) GetScheduler() *configv1.SchedulerSpec     { return c.Scheduler }
+func (c *ClusterConfiguration) GetProxy() *configv1.ProxySpec             { return c.Proxy }
diff --git a/api/v1beta1/conditions.go b/api/v1beta1/conditions.go
new file mode 100644
index 00000000000..38d3780810c
--- /dev/null
+++ b/api/v1beta1/conditions.go
@@ -0,0 +1,118 @@
+package v1beta1
+
+// HostedCluster conditions.
+const (
+ // HostedClusterAvailable indicates whether the HostedCluster has a healthy
+ // control plane.
+ HostedClusterAvailable ConditionType = "Available"
+ // HostedClusterProgressing indicates whether the HostedCluster is attempting
+ // an initial deployment or upgrade.
+ HostedClusterProgressing ConditionType = "Progressing"
+ // HostedClusterDegraded indicates whether the HostedCluster is encountering
+ // an error that may require user intervention to resolve.
+ HostedClusterDegraded ConditionType = "Degraded"
+
+ // Bubble up from HCP.
+
+ // InfrastructureReady bubbles up the same condition from HCP.
+ InfrastructureReady ConditionType = "InfrastructureReady"
+ // KubeAPIServerAvailable bubbles up the same condition from HCP.
+ KubeAPIServerAvailable ConditionType = "KubeAPIServerAvailable"
+ // EtcdAvailable bubbles up the same condition from HCP.
+ EtcdAvailable ConditionType = "EtcdAvailable"
+ // ValidHostedControlPlaneConfiguration bubbles up the same condition from HCP.
+ ValidHostedControlPlaneConfiguration ConditionType = "ValidHostedControlPlaneConfiguration"
+
+ // Bubble up from HCP which bubbles up from CVO.
+
+ // ClusterVersionSucceeding indicates the current status of the desired release
+ // version of the HostedCluster as indicated by the Failing condition in the
+ // underlying cluster's ClusterVersion.
+ ClusterVersionSucceeding ConditionType = "ClusterVersionSucceeding"
+ // ClusterVersionUpgradeable indicates the Upgradeable condition in the
+ // underlying cluster's ClusterVersion.
+ ClusterVersionUpgradeable ConditionType = "ClusterVersionUpgradeable"
+ // ClusterVersionFailing bubbles up Failing from the CVO.
+ ClusterVersionFailing ConditionType = "ClusterVersionFailing"
+ // ClusterVersionProgressing bubbles up configv1.OperatorProgressing from the CVO.
+ ClusterVersionProgressing ConditionType = "ClusterVersionProgressing"
+	// ClusterVersionAvailable bubbles up configv1.OperatorAvailable from the CVO.
+	ClusterVersionAvailable ConditionType = "ClusterVersionAvailable"
+	// ClusterVersionReleaseAccepted bubbles up ReleaseAccepted from the CVO.
+	ClusterVersionReleaseAccepted ConditionType = "ClusterVersionReleaseAccepted"
+
+ // UnmanagedEtcdAvailable indicates whether a user-managed etcd cluster is
+ // healthy.
+ UnmanagedEtcdAvailable ConditionType = "UnmanagedEtcdAvailable"
+
+ // IgnitionEndpointAvailable indicates whether the ignition server for the
+ // HostedCluster is available to handle ignition requests.
+ IgnitionEndpointAvailable ConditionType = "IgnitionEndpointAvailable"
+
+ // ValidHostedClusterConfiguration indicates (if status is true) that the
+ // ClusterConfiguration specified for the HostedCluster is valid.
+ ValidHostedClusterConfiguration ConditionType = "ValidConfiguration"
+
+ // SupportedHostedCluster indicates whether a HostedCluster is supported by
+ // the current configuration of the hypershift-operator.
+ // e.g. If HostedCluster requests endpointAcess Private but the hypershift-operator
+ // is running on a management cluster outside AWS or is not configured with AWS
+ // credentials, the HostedCluster is not supported.
+ SupportedHostedCluster ConditionType = "SupportedHostedCluster"
+
+ // ValidOIDCConfiguration indicates if an AWS cluster's OIDC condition is
+ // detected as invalid.
+ ValidOIDCConfiguration ConditionType = "ValidOIDCConfiguration"
+
+ // ValidReleaseImage indicates if the release image set in the spec is valid
+ // for the HostedCluster. For example, this can be set false if the
+ // HostedCluster itself attempts an unsupported version before 4.9 or an
+ // unsupported upgrade e.g y-stream upgrade before 4.11.
+ ValidReleaseImage ConditionType = "ValidReleaseImage"
+
+ // PlatformCredentialsFound indicates that credentials required for the
+ // desired platform are valid.
+ PlatformCredentialsFound ConditionType = "PlatformCredentialsFound"
+
+ // ReconciliationActive indicates if reconciliation of the HostedCluster is
+ // active or paused.
+ ReconciliationActive ConditionType = "ReconciliationActive"
+ // ReconciliationSucceeded indicates if the HostedCluster reconciliation
+ // succeeded.
+ ReconciliationSucceeded ConditionType = "ReconciliationSucceeded"
+)
+
+// Reasons.
+const (
+ StatusUnknownReason = "StatusUnknown"
+ AsExpectedReason = "AsExpected"
+ NotFoundReason = "NotFound"
+ WaitingForAvailableReason = "waitingForAvailable"
+ SecretNotFoundReason = "SecretNotFound"
+
+ InfraStatusFailureReason = "InfraStatusFailure"
+ WaitingOnInfrastructureReadyReason = "WaitingOnInfrastructureReady"
+
+ EtcdQuorumAvailableReason = "QuorumAvailable"
+ EtcdWaitingForQuorumReason = "EtcdWaitingForQuorum"
+ EtcdStatefulSetNotFoundReason = "StatefulSetNotFound"
+
+ UnmanagedEtcdMisconfiguredReason = "UnmanagedEtcdMisconfigured"
+ UnmanagedEtcdAsExpected = "UnmanagedEtcdAsExpected"
+
+ FromClusterVersionReason = "FromClusterVersion"
+
+ InvalidConfigurationReason = "InvalidConfiguration"
+ KubeconfigWaitingForCreateReason = "KubeconfigWaitingForCreate"
+ UnsupportedHostedClusterReason = "UnsupportedHostedCluster"
+ InsufficientClusterCapabilitiesReason = "InsufficientClusterCapabilities"
+ OIDCConfigurationInvalidReason = "OIDCConfigurationInvalid"
+ PlatformCredentialsNotFoundReason = "PlatformCredentialsNotFound"
+ InvalidImageReason = "InvalidImage"
+)
+
+// Messages.
+const (
+ // AllIsWellMessage is standard message.
+ AllIsWellMessage = "All is well"
+)
diff --git a/api/v1beta1/conversion.go b/api/v1beta1/conversion.go
new file mode 100644
index 00000000000..01f3a38f989
--- /dev/null
+++ b/api/v1beta1/conversion.go
@@ -0,0 +1,7 @@
+package v1beta1
+
+// Declare the types in this version as the Hub
+func (*HostedCluster) Hub() {}
+func (*NodePool) Hub() {}
+func (*AWSEndpointService) Hub() {}
+func (*HostedControlPlane) Hub() {}
diff --git a/api/v1beta1/doc.go b/api/v1beta1/doc.go
new file mode 100644
index 00000000000..01fb7848880
--- /dev/null
+++ b/api/v1beta1/doc.go
@@ -0,0 +1,14 @@
+/*
+Package v1beta1 contains the HyperShift API.
+
+The HyperShift API enables creating and managing lightweight, flexible, heterogeneous
+OpenShift clusters at scale.
+
+HyperShift clusters are deployed in a topology which isolates the "control plane"
+(e.g. etcd, the API server, controller manager, etc.) from the "data plane" (e.g.
+worker nodes and their kubelets, and the infrastructure on which they run). This
+enables "hosted control plane as a service" use cases.
+*/
+// +kubebuilder:object:generate=true
+// +groupName=hypershift.openshift.io
+package v1beta1
diff --git a/api/v1beta1/endpointservice_types.go b/api/v1beta1/endpointservice_types.go
new file mode 100644
index 00000000000..f6654b77b1a
--- /dev/null
+++ b/api/v1beta1/endpointservice_types.go
@@ -0,0 +1,87 @@
+package v1beta1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func init() {
+ SchemeBuilder.Register(&AWSEndpointService{}, &AWSEndpointServiceList{})
+}
+
+// The following are condition types and reasons for the AWSEndpointService resource.
+const (
+ // AWSEndpointServiceAvailable indicates whether the AWS Endpoint Service
+ // has been created for the specified NLB in the management VPC
+ AWSEndpointServiceAvailable ConditionType = "EndpointServiceAvailable"
+
+ // AWSEndpointAvailable indicates whether the AWS Endpoint has been
+ // created in the guest VPC
+ AWSEndpointAvailable ConditionType = "EndpointAvailable"
+
+ AWSSuccessReason string = "AWSSuccess"
+ AWSErrorReason string = "AWSError"
+)
+
+// AWSEndpointServiceSpec defines the desired state of AWSEndpointService
+type AWSEndpointServiceSpec struct {
+ // The name of the NLB for which an Endpoint Service should be configured
+ NetworkLoadBalancerName string `json:"networkLoadBalancerName"`
+
+ // SubnetIDs is the list of subnet IDs to which guest nodes can attach
+ // +optional
+ SubnetIDs []string `json:"subnetIDs,omitempty"`
+
+ // Tags to apply to the EndpointService
+ // +optional
+ ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"`
+}
+
+// AWSEndpointServiceStatus defines the observed state of AWSEndpointService
+type AWSEndpointServiceStatus struct {
+ // EndpointServiceName is the name of the Endpoint Service created in the
+ // management VPC
+ // +optional
+ EndpointServiceName string `json:"endpointServiceName,omitempty"`
+
+ // EndpointID is the ID of the Endpoint created in the guest VPC
+ // +optional
+ EndpointID string `json:"endpointID,omitempty"`
+
+ // DNSNames are the names of the records created in the hypershift private zone
+ // +optional
+ DNSNames []string `json:"dnsNames,omitempty"`
+
+ // DNSZoneID is ID for the hypershift private zone
+ // +optional
+ DNSZoneID string `json:"dnsZoneID,omitempty"`
+
+ // Conditions contains details for the current state of the Endpoint Service
+ // request. If there is an error processing the request, e.g. the NLB doesn't
+ // exist, then the Available condition will be false, reason AWSErrorReason,
+ // and the error reported in the message.
+ //
+ // Current condition types are: "Available"
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=awsendpointservices,scope=Namespaced
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+// AWSEndpointService specifies a request for an Endpoint Service in AWS
+type AWSEndpointService struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec AWSEndpointServiceSpec `json:"spec,omitempty"`
+ Status AWSEndpointServiceStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// AWSEndpointServiceList contains a list of AWSEndpointService
+type AWSEndpointServiceList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []AWSEndpointService `json:"items"`
+}
diff --git a/api/v1beta1/groupversion_info.go b/api/v1beta1/groupversion_info.go
new file mode 100644
index 00000000000..742696a185e
--- /dev/null
+++ b/api/v1beta1/groupversion_info.go
@@ -0,0 +1,20 @@
+// Package v1beta1 contains API Schema definitions for the hypershift.openshift.io v1beta1 API group
+// +kubebuilder:object:generate=true
+// +groupName=hypershift.openshift.io
+package v1beta1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+ // GroupVersion is group version used to register these objects
+ GroupVersion = schema.GroupVersion{Group: "hypershift.openshift.io", Version: "v1beta1"}
+
+ // SchemeBuilder is used to add go types to the GroupVersionKind scheme
+ SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+ // AddToScheme adds the types in this group-version to the given scheme.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/api/v1beta1/hosted_controlplane.go b/api/v1beta1/hosted_controlplane.go
new file mode 100644
index 00000000000..d0cecd3dc7b
--- /dev/null
+++ b/api/v1beta1/hosted_controlplane.go
@@ -0,0 +1,282 @@
+package v1beta1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func init() {
+ SchemeBuilder.Register(&HostedControlPlane{})
+ SchemeBuilder.Register(&HostedControlPlaneList{})
+}
+
+// HostedControlPlane defines the desired state of HostedControlPlane
+// +kubebuilder:resource:path=hostedcontrolplanes,shortName=hcp;hcps,scope=Namespaced,categories=cluster-api
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+// +kubebuilder:object:root=true
+type HostedControlPlane struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec HostedControlPlaneSpec `json:"spec,omitempty"`
+ Status HostedControlPlaneStatus `json:"status,omitempty"`
+}
+
+// HostedControlPlaneSpec defines the desired state of HostedControlPlane
+type HostedControlPlaneSpec struct {
+ // ReleaseImage is the release image applied to the hosted control plane.
+ ReleaseImage string `json:"releaseImage"`
+
+ // channel is an identifier for explicitly requesting that a non-default
+ // set of updates be applied to this cluster. The default channel will
+ // contain stable updates that are appropriate for production clusters.
+ //
+ // +optional
+ Channel string `json:"channel,omitempty"`
+
+ PullSecret corev1.LocalObjectReference `json:"pullSecret"`
+
+ // IssuerURL is an OIDC issuer URL which is used as the issuer in all
+ // ServiceAccount tokens generated by the control plane API server. The
+ // default value is kubernetes.default.svc, which only works for in-cluster
+ // validation.
+ IssuerURL string `json:"issuerURL"`
+
+ // Networking specifies network configuration for the cluster.
+ // Temporarily optional for backward compatibility, required in future releases.
+ // +optional
+ Networking ClusterNetworking `json:"networking,omitempty"`
+
+ SSHKey corev1.LocalObjectReference `json:"sshKey"`
+
+ // ClusterID is the unique id that identifies the cluster externally.
+ // Making it optional here allows us to keep compatibility with previous
+ // versions of the control-plane-operator that have no knowledge of this
+ // field.
+ // +optional
+ ClusterID string `json:"clusterID,omitempty"`
+
+ InfraID string `json:"infraID"`
+ Platform PlatformSpec `json:"platform"`
+ DNS DNSSpec `json:"dns"`
+
+ // ServiceAccountSigningKey is a reference to a secret containing the private key
+ // used by the service account token issuer. The secret is expected to contain
+ // a single key named "key". If not specified, a service account signing key will
+ // be generated automatically for the cluster.
+ //
+ // +optional
+ ServiceAccountSigningKey *corev1.LocalObjectReference `json:"serviceAccountSigningKey,omitempty"`
+
+ // ControllerAvailabilityPolicy specifies the availability policy applied to
+ // critical control plane components. The default value is SingleReplica.
+ //
+ // +optional
+ // +kubebuilder:default:="SingleReplica"
+ ControllerAvailabilityPolicy AvailabilityPolicy `json:"controllerAvailabilityPolicy,omitempty"`
+
+ // InfrastructureAvailabilityPolicy specifies the availability policy applied
+ // to infrastructure services which run on cluster nodes. The default value is
+ // SingleReplica.
+ //
+ // +optional
+ // +kubebuilder:default:="SingleReplica"
+ InfrastructureAvailabilityPolicy AvailabilityPolicy `json:"infrastructureAvailabilityPolicy,omitempty"`
+
+ // FIPS specifies if the nodes for the cluster will be running in FIPS mode
+ // +optional
+ FIPS bool `json:"fips"`
+
+ // KubeConfig specifies the name and key for the kubeconfig secret
+ // +optional
+ KubeConfig *KubeconfigSecretRef `json:"kubeconfig,omitempty"`
+
+ // Services defines metadata about how control plane services are published
+ // in the management cluster.
+ Services []ServicePublishingStrategyMapping `json:"services"`
+
+ // AuditWebhook contains metadata for configuring an audit webhook
+ // endpoint for a cluster to process cluster audit events. It references
+ // a secret that contains the webhook information for the audit webhook endpoint.
+ // It is a secret because if the endpoint has MTLS the kubeconfig will contain client
+ // keys. This is currently only supported in IBM Cloud. The kubeconfig needs to be stored
+ // in the secret with a secret key name that corresponds to the constant AuditWebhookKubeconfigKey.
+ // +optional
+ AuditWebhook *corev1.LocalObjectReference `json:"auditWebhook,omitempty"`
+
+ // Etcd contains metadata about the etcd cluster the hypershift managed Openshift control plane components
+ // use to store data.
+ Etcd EtcdSpec `json:"etcd"`
+
+ // Configuration embeds resources that correspond to the openshift configuration API:
+ // https://docs.openshift.com/container-platform/4.7/rest_api/config_apis/config-apis-index.html
+ // +kubebuilder:validation:Optional
+ Configuration *ClusterConfiguration `json:"configuration,omitempty"`
+
+ // ImageContentSources lists sources/repositories for the release-image content.
+ // +optional
+ ImageContentSources []ImageContentSource `json:"imageContentSources,omitempty"`
+
+ // AdditionalTrustBundle references a ConfigMap containing a PEM-encoded X.509 certificate bundle
+ // +optional
+ AdditionalTrustBundle *corev1.LocalObjectReference `json:"additionalTrustBundle,omitempty"`
+
+ // SecretEncryption contains metadata about the kubernetes secret encryption strategy being used for the
+ // cluster when applicable.
+ // +optional
+ SecretEncryption *SecretEncryptionSpec `json:"secretEncryption,omitempty"`
+
+ // PausedUntil is a field that can be used to pause reconciliation on a resource.
+ // Either a date can be provided in RFC3339 format or a boolean. If a date is
+ // provided: reconciliation is paused on the resource until that date. If the boolean true is
+ // provided: reconciliation is paused on the resource until the field is removed.
+ // +optional
+ PausedUntil *string `json:"pausedUntil,omitempty"`
+
+ // OLMCatalogPlacement specifies the placement of OLM catalog components. By default,
+ // this is set to management and OLM catalog components are deployed onto the management
+ // cluster. If set to guest, the OLM catalog components will be deployed onto the guest
+ // cluster.
+ //
+ // +kubebuilder:default=management
+ // +optional
+ // +immutable
+ OLMCatalogPlacement OLMCatalogPlacement `json:"olmCatalogPlacement,omitempty"`
+
+ // Autoscaling specifies auto-scaling behavior that applies to all NodePools
+ // associated with the control plane.
+ //
+ // +optional
+ Autoscaling ClusterAutoscaling `json:"autoscaling,omitempty"`
+
+ // NodeSelector when specified, must be true for the pods managed by the HostedCluster to be scheduled.
+ //
+ // +optional
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+}
+
+// AvailabilityPolicy specifies a high level availability policy for components.
+type AvailabilityPolicy string
+
+const (
+ // HighlyAvailable means components should be resilient to problems across
+ // fault boundaries as defined by the component to which the policy is
+ // attached. This usually means running critical workloads with 3 replicas and
+ // with little or no toleration of disruption of the component.
+ HighlyAvailable AvailabilityPolicy = "HighlyAvailable"
+
+ // SingleReplica means components are not expected to be resilient to problems
+ // across most fault boundaries associated with high availability. This
+ // usually means running critical workloads with just 1 replica and with
+ // toleration of full disruption of the component.
+ SingleReplica AvailabilityPolicy = "SingleReplica"
+)
+
+type KubeconfigSecretRef struct {
+ Name string `json:"name"`
+ Key string `json:"key"`
+}
+
+type ConditionType string
+
+const (
+ HostedControlPlaneAvailable ConditionType = "Available"
+ HostedControlPlaneDegraded ConditionType = "Degraded"
+ EtcdSnapshotRestored ConditionType = "EtcdSnapshotRestored"
+ CVOScaledDown ConditionType = "CVOScaledDown"
+ CloudResourcesDestroyed ConditionType = "CloudResourcesDestroyed"
+)
+
+// HostedControlPlaneStatus defines the observed state of HostedControlPlane
+type HostedControlPlaneStatus struct {
+ // Ready denotes that the HostedControlPlane API Server is ready to
+ // receive requests
+ // This satisfies CAPI contract https://github.com/kubernetes-sigs/cluster-api/blob/cd3a694deac89d5ebeb888307deaa61487207aa0/controllers/cluster_controller_phases.go#L226-L230
+ // +kubebuilder:validation:Required
+ // +kubebuilder:default=false
+ Ready bool `json:"ready"`
+
+ // Initialized denotes whether or not the control plane has
+ // provided a kubeadm-config.
+ // Once this condition is marked true, its value is never changed. See the Ready condition for an indication of
+ // the current readiness of the cluster's control plane.
+ // This satisfies CAPI contract https://github.com/kubernetes-sigs/cluster-api/blob/cd3a694deac89d5ebeb888307deaa61487207aa0/controllers/cluster_controller_phases.go#L238-L252
+ // +kubebuilder:validation:Required
+ // +kubebuilder:default=false
+ Initialized bool `json:"initialized"`
+
+ // ExternalManagedControlPlane indicates to cluster-api that the control plane
+ // is managed by an external service.
+ // https://github.com/kubernetes-sigs/cluster-api/blob/65e5385bffd71bf4aad3cf34a537f11b217c7fab/controllers/machine_controller.go#L468
+ // +kubebuilder:default=true
+ ExternalManagedControlPlane *bool `json:"externalManagedControlPlane,omitempty"`
+
+ // ControlPlaneEndpoint contains the endpoint information by which
+ // external clients can access the control plane. This is populated
+ // after the infrastructure is ready.
+ // +kubebuilder:validation:Optional
+ ControlPlaneEndpoint APIEndpoint `json:"controlPlaneEndpoint,omitempty"`
+
+ // OAuthCallbackURLTemplate contains a template for the URL to use as a callback
+ // for identity providers. The [identity-provider-name] placeholder must be replaced
+ // with the name of an identity provider defined on the HostedCluster.
+ // This is populated after the infrastructure is ready.
+ // +kubebuilder:validation:Optional
+ OAuthCallbackURLTemplate string `json:"oauthCallbackURLTemplate,omitempty"`
+
+ // versionStatus is the status of the release version applied by the
+ // hosted control plane operator.
+ // +optional
+ VersionStatus *ClusterVersionStatus `json:"versionStatus,omitempty"`
+
+ // Version is the semantic version of the release applied by
+ // the hosted control plane operator
+ //
+ // Deprecated: Use versionStatus.desired.version instead.
+ // +kubebuilder:validation:Optional
+ Version string `json:"version,omitempty"`
+
+ // ReleaseImage is the release image applied to the hosted control plane.
+ //
+ // Deprecated: Use versionStatus.desired.image instead.
+ // +optional
+ ReleaseImage string `json:"releaseImage,omitempty"`
+
+ // lastReleaseImageTransitionTime is the time of the last update to the current
+ // releaseImage property.
+ //
+ // Deprecated: Use versionStatus.history[0].startedTime instead.
+ // +kubebuilder:validation:Optional
+ LastReleaseImageTransitionTime *metav1.Time `json:"lastReleaseImageTransitionTime,omitempty"`
+
+ // KubeConfig is a reference to the secret containing the default kubeconfig
+ // for this control plane.
+ KubeConfig *KubeconfigSecretRef `json:"kubeConfig,omitempty"`
+
+ // KubeadminPassword is a reference to the secret containing the initial kubeadmin password
+ // for the guest cluster.
+ // +optional
+ KubeadminPassword *corev1.LocalObjectReference `json:"kubeadminPassword,omitempty"`
+
+ // Conditions contains details for one aspect of the current state of the HostedControlPlane.
+ // Current condition types are: "Available"
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+type APIEndpoint struct {
+ // Host is the hostname on which the API server is serving.
+ Host string `json:"host"`
+
+ // Port is the port on which the API server is serving.
+ Port int32 `json:"port"`
+}
+
+// +kubebuilder:object:root=true
+// HostedControlPlaneList contains a list of HostedControlPlanes.
+type HostedControlPlaneList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []HostedControlPlane `json:"items"`
+}
diff --git a/api/v1beta1/hostedcluster_types.go b/api/v1beta1/hostedcluster_types.go
new file mode 100644
index 00000000000..d09473a6164
--- /dev/null
+++ b/api/v1beta1/hostedcluster_types.go
@@ -0,0 +1,1778 @@
+package v1beta1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ configv1 "github.com/openshift/api/config/v1"
+
+ "github.com/openshift/hypershift/api/util/ipnet"
+)
+
+func init() {
+ SchemeBuilder.Register(&HostedCluster{}, &HostedClusterList{})
+}
+
+const (
+ // AuditWebhookKubeconfigKey is the key name in the AuditWebhook secret that stores audit webhook kubeconfig
+ AuditWebhookKubeconfigKey = "webhook-kubeconfig"
+ DisablePKIReconciliationAnnotation = "hypershift.openshift.io/disable-pki-reconciliation"
+ IdentityProviderOverridesAnnotationPrefix = "idpoverrides.hypershift.openshift.io/"
+ OauthLoginURLOverrideAnnotation = "oauth.hypershift.openshift.io/login-url-override"
+ // KonnectivityServerImageAnnotation is a temporary annotation that allows the specification of the konnectivity server image.
+ // This will be removed when Konnectivity is added to the Openshift release payload
+ KonnectivityServerImageAnnotation = "hypershift.openshift.io/konnectivity-server-image"
+ // KonnectivityAgentImageAnnotation is a temporary annotation that allows the specification of the konnectivity agent image.
+ // This will be removed when Konnectivity is added to the Openshift release payload
+ KonnectivityAgentImageAnnotation = "hypershift.openshift.io/konnectivity-agent-image"
+ // ControlPlaneOperatorImageAnnotation is an annotation that allows the specification of the control plane operator image.
+ // This is used for development and e2e workflows
+ ControlPlaneOperatorImageAnnotation = "hypershift.openshift.io/control-plane-operator-image"
+ // RestartDateAnnotation is an annotation that can be used to trigger a rolling restart of all components managed by hypershift.
+ // it is important in some situations like CA rotation where components need to be fully restarted to pick up new CAs. It's also
+ // important in some recovery situations where a fresh start of the component helps fix symptoms a user might be experiencing.
+ RestartDateAnnotation = "hypershift.openshift.io/restart-date"
+ // ReleaseImageAnnotation is an annotation that can be used to see what release image a given deployment is tied to
+ ReleaseImageAnnotation = "hypershift.openshift.io/release-image"
+ // ClusterAPIManagerImage is an annotation that allows the specification of the cluster api manager image.
+ // This is a temporary workaround necessary for compliance reasons on the IBM Cloud side:
+ // no images can be pulled from registries outside of IBM Cloud's official regional registries
+ ClusterAPIManagerImage = "hypershift.openshift.io/capi-manager-image"
+ // ClusterAutoscalerImage is an annotation that allows the specification of the cluster autoscaler image.
+ // This is a temporary workaround necessary for compliance reasons on the IBM Cloud side:
+ // no images can be pulled from registries outside of IBM Cloud's official regional registries
+ ClusterAutoscalerImage = "hypershift.openshift.io/cluster-autoscaler-image"
+ // AWSKMSProviderImage is an annotation that allows the specification of the AWS kms provider image.
+ // Upstream code located at: https://github.com/kubernetes-sigs/aws-encryption-provider
+ AWSKMSProviderImage = "hypershift.openshift.io/aws-kms-provider-image"
+ // IBMCloudKMSProviderImage is an annotation that allows the specification of the IBM Cloud kms provider image.
+ IBMCloudKMSProviderImage = "hypershift.openshift.io/ibmcloud-kms-provider-image"
+ // PortierisImageAnnotation is an annotation that allows the specification of the portieries component
+ // (performs container image verification).
+ PortierisImageAnnotation = "hypershift.openshift.io/portieris-image"
+ // Configure ingress controller with endpoint publishing strategy as Private.
+ // This overrides any opinionated strategy set by platform in ReconcileDefaultIngressController.
+ // It's used by IBM cloud to support ingress endpoint publishing strategy scope
+ // NOTE: We'll expose this in the API if the use case gets generalised.
+ PrivateIngressControllerAnnotation = "hypershift.openshift.io/private-ingress-controller"
+
+ // ClusterAPIProviderAWSImage overrides the CAPI AWS provider image to use for
+ // a HostedControlPlane.
+ ClusterAPIProviderAWSImage = "hypershift.openshift.io/capi-provider-aws-image"
+
+ // ClusterAPIKubeVirtProviderImage overrides the CAPI KubeVirt provider image to use for
+ // a HostedControlPlane.
+ ClusterAPIKubeVirtProviderImage = "hypershift.openshift.io/capi-provider-kubevirt-image"
+
+ // ClusterAPIAgentProviderImage overrides the CAPI Agent provider image to use for
+ // a HostedControlPlane.
+ ClusterAPIAgentProviderImage = "hypershift.openshift.io/capi-provider-agent-image"
+
+ // ClusterAPIAzureProviderImage overrides the CAPI Azure provider image to use for
+ // a HostedControlPlane.
+ ClusterAPIAzureProviderImage = "hypershift.openshift.io/capi-provider-azure-image"
+
+ // ClusterAPIPowerVSProviderImage overrides the CAPI PowerVS provider image to use for
+ // a HostedControlPlane.
+ ClusterAPIPowerVSProviderImage = "hypershift.openshift.io/capi-provider-powervs-image"
+
+ // AESCBCKeySecretKey defines the Kubernetes secret key name that contains the aescbc encryption key
+ // in the AESCBC secret encryption strategy
+ AESCBCKeySecretKey = "key"
+ // IBMCloudIAMAPIKeySecretKey defines the Kubernetes secret key name that contains
+ // the customer IBMCloud apikey in the unmanaged authentication strategy for IBMCloud KMS secret encryption
+ IBMCloudIAMAPIKeySecretKey = "iam_apikey"
+ // AWSCredentialsFileSecretKey defines the Kubernetes secret key name that contains
+ // the customer AWS credentials in the unmanaged authentication strategy for AWS KMS secret encryption
+ AWSCredentialsFileSecretKey = "credentials"
+
+ // ControlPlaneComponent identifies a resource as belonging to a hosted control plane.
+ ControlPlaneComponent = "hypershift.openshift.io/control-plane-component"
+
+ // OperatorComponent identifies a component as belonging to the operator.
+ OperatorComponent = "hypershift.openshift.io/operator-component"
+ // MachineApproverImage is an annotation that allows the specification of the machine approver image.
+ // This is a temporary workaround necessary for compliance reasons on the IBM Cloud side:
+ // no images can be pulled from registries outside of IBM Cloud's official regional registries
+ MachineApproverImage = "hypershift.openshift.io/machine-approver-image"
+
+ // ExternalDNSHostnameAnnotation is the annotation external-dns uses to register DNS name for different HCP services.
+ ExternalDNSHostnameAnnotation = "external-dns.alpha.kubernetes.io/hostname"
+
+ // ForceUpgradeToAnnotation is the annotation that forces HostedCluster upgrade even if the underlying ClusterVersion
+ // is reporting it is not Upgradeable. The annotation value must be set to the release image being forced.
+ ForceUpgradeToAnnotation = "hypershift.openshift.io/force-upgrade-to"
+
+ // ServiceAccountSigningKeySecretKey is the name of the secret key that should contain the service account signing
+ // key if specified.
+ ServiceAccountSigningKeySecretKey = "key"
+
+ // DisableProfilingAnnotation is the annotation that allows disabling profiling for control plane components.
+ // Any components specified in this list will have profiling disabled. Profiling is disabled by default for etcd and konnectivity.
+ // Components this annotation can apply to: kube-scheduler, kube-controller-manager, kube-apiserver.
+ DisableProfilingAnnotation = "hypershift.openshift.io/disable-profiling"
+
+ // CleanupCloudResourcesAnnotation is an annotation that indicates whether a guest cluster's resources should be
+ // removed when deleting the corresponding HostedCluster. If set to "true", resources created on the cloud provider during the life
+ // of the cluster will be removed, including image registry storage, ingress dns records, load balancers, and persistent storage.
+ CleanupCloudResourcesAnnotation = "hypershift.openshift.io/cleanup-cloud-resources"
+
+ // ResourceRequestOverrideAnnotationPrefix is a prefix for an annotation to override resource requests for a particular deployment/container
+ // in a hosted control plane. The format of the annotation is:
+ // resource-request-override.hypershift.openshift.io/[deployment-name].[container-name]: [resource-type-1]=[value1],[resource-type-2]=[value2],...
+ // For example, to override the memory and cpu request for the Kubernetes APIServer:
+ // resource-request-override.hypershift.openshift.io/kube-apiserver.kube-apiserver: memory=3Gi,cpu=2000m
+ ResourceRequestOverrideAnnotationPrefix = "resource-request-override.hypershift.openshift.io"
+)
+
+// HostedClusterSpec is the desired behavior of a HostedCluster.
+type HostedClusterSpec struct {
+ // Release specifies the desired OCP release payload for the hosted cluster.
+ //
+ // Updating this field will trigger a rollout of the control plane. The
+ // behavior of the rollout will be driven by the ControllerAvailabilityPolicy
+ // and InfrastructureAvailabilityPolicy.
+ Release Release `json:"release"`
+
+ // ClusterID uniquely identifies this cluster. This is expected to be
+ // an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in
+ // hexadecimal values).
+ // As with a Kubernetes metadata.uid, this ID uniquely identifies this
+ // cluster in space and time.
+ // This value identifies the cluster in metrics pushed to telemetry and
+ // metrics produced by the control plane operators. If a value is not
+ // specified, an ID is generated. After initial creation, the value is
+ // immutable.
+ // +kubebuilder:validation:Pattern:="[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}"
+ // +optional
+ ClusterID string `json:"clusterID,omitempty"`
+
+ // channel is an identifier for explicitly requesting that a non-default
+ // set of updates be applied to this cluster. The default channel will
+ // contain stable updates that are appropriate for production clusters.
+ //
+ // +optional
+ Channel string `json:"channel,omitempty"`
+
+ // InfraID is a globally unique identifier for the cluster. This identifier
+ // will be used to associate various cloud resources with the HostedCluster
+ // and its associated NodePools.
+ //
+ // +optional
+ // +immutable
+ InfraID string `json:"infraID,omitempty"`
+
+ // Platform specifies the underlying infrastructure provider for the cluster
+ // and is used to configure platform specific behavior.
+ //
+ // +immutable
+ Platform PlatformSpec `json:"platform"`
+
+ // ControllerAvailabilityPolicy specifies the availability policy applied to
+ // critical control plane components. The default value is SingleReplica.
+ //
+ // +optional
+ // +kubebuilder:default:="SingleReplica"
+ // +immutable
+ ControllerAvailabilityPolicy AvailabilityPolicy `json:"controllerAvailabilityPolicy,omitempty"`
+
+ // InfrastructureAvailabilityPolicy specifies the availability policy applied
+ // to infrastructure services which run on cluster nodes. The default value is
+ // SingleReplica.
+ //
+ // +optional
+ // +kubebuilder:default:="SingleReplica"
+ // +immutable
+ InfrastructureAvailabilityPolicy AvailabilityPolicy `json:"infrastructureAvailabilityPolicy,omitempty"`
+
+ // DNS specifies DNS configuration for the cluster.
+ //
+ // +immutable
+ DNS DNSSpec `json:"dns,omitempty"`
+
+ // Networking specifies network configuration for the cluster.
+ //
+ // +immutable
+ Networking ClusterNetworking `json:"networking"`
+
+ // Autoscaling specifies auto-scaling behavior that applies to all NodePools
+ // associated with the control plane.
+ //
+ // +optional
+ Autoscaling ClusterAutoscaling `json:"autoscaling,omitempty"`
+
+ // Etcd specifies configuration for the control plane etcd cluster. The
+ // default ManagementType is Managed. Once set, the ManagementType cannot be
+ // changed.
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default={managementType: "Managed", managed: {storage: {type: "PersistentVolume", persistentVolume: {size: "4Gi"}}}}
+ // +immutable
+ Etcd EtcdSpec `json:"etcd"`
+
+ // Services specifies how individual control plane services are published from
+ // the hosting cluster of the control plane.
+ //
+ // If a given service is not present in this list, it will be exposed publicly
+ // by default.
+ Services []ServicePublishingStrategyMapping `json:"services"`
+
+ // PullSecret references a pull secret to be injected into the container
+ // runtime of all cluster nodes. The secret must have a key named
+ // ".dockerconfigjson" whose value is the pull secret JSON.
+ //
+ // +immutable
+ PullSecret corev1.LocalObjectReference `json:"pullSecret"`
+
+ // SSHKey references an SSH key to be injected into all cluster node sshd
+ // servers. The secret must have a single key "id_rsa.pub" whose value is the
+ // public part of an SSH key.
+ //
+ // +immutable
+ SSHKey corev1.LocalObjectReference `json:"sshKey"`
+
+ // IssuerURL is an OIDC issuer URL which is used as the issuer in all
+ // ServiceAccount tokens generated by the control plane API server. The
+ // default value is kubernetes.default.svc, which only works for in-cluster
+ // validation.
+ //
+ // +kubebuilder:default:="https://kubernetes.default.svc"
+ // +immutable
+ // +optional
+ // +kubebuilder:validation:Format=uri
+ IssuerURL string `json:"issuerURL,omitempty"`
+
+ // ServiceAccountSigningKey is a reference to a secret containing the private key
+ // used by the service account token issuer. The secret is expected to contain
+ // a single key named "key". If not specified, a service account signing key will
+ // be generated automatically for the cluster. When specifying a service account
+ // signing key, a IssuerURL must also be specified.
+ //
+ // +immutable
+ // +kubebuilder:validation:Optional
+ // +optional
+ ServiceAccountSigningKey *corev1.LocalObjectReference `json:"serviceAccountSigningKey,omitempty"`
+
+ // Configuration specifies configuration for individual OCP components in the
+ // cluster, represented as embedded resources that correspond to the openshift
+ // configuration API.
+ //
+ // +kubebuilder:validation:Optional
+ // +optional
+ Configuration *ClusterConfiguration `json:"configuration,omitempty"`
+
+ // AuditWebhook contains metadata for configuring an audit webhook endpoint
+ // for a cluster to process cluster audit events. It references a secret that
+ // contains the webhook information for the audit webhook endpoint. It is a
+ // secret because if the endpoint has mTLS the kubeconfig will contain client
+ // keys. The kubeconfig needs to be stored in the secret with a secret key
+ // name that corresponds to the constant AuditWebhookKubeconfigKey.
+ //
+ // This field is currently only supported on the IBMCloud platform.
+ //
+ // +optional
+ // +immutable
+ AuditWebhook *corev1.LocalObjectReference `json:"auditWebhook,omitempty"`
+
+ // ImageContentSources specifies image mirrors that can be used by cluster
+ // nodes to pull content.
+ //
+ // +optional
+ // +immutable
+ ImageContentSources []ImageContentSource `json:"imageContentSources,omitempty"`
+
+ // AdditionalTrustBundle is a reference to a ConfigMap containing a
+ // PEM-encoded X.509 certificate bundle that will be added to the hosted control plane and nodes.
+ //
+ // +optional
+ AdditionalTrustBundle *corev1.LocalObjectReference `json:"additionalTrustBundle,omitempty"`
+
+ // SecretEncryption specifies a Kubernetes secret encryption strategy for the
+ // control plane.
+ //
+ // +optional
+ SecretEncryption *SecretEncryptionSpec `json:"secretEncryption,omitempty"`
+
+ // FIPS indicates whether this cluster's nodes will be running in FIPS mode.
+ // If set to true, the control plane's ignition server will be configured to
+ // expect that nodes joining the cluster will be FIPS-enabled.
+ //
+ // +optional
+ // +immutable
+ FIPS bool `json:"fips"`
+
+ // PausedUntil is a field that can be used to pause reconciliation on a resource.
+ // Either a date can be provided in RFC3339 format or a boolean. If a date is
+ // provided: reconciliation is paused on the resource until that date. If the boolean true is
+ // provided: reconciliation is paused on the resource until the field is removed.
+ // +optional
+ PausedUntil *string `json:"pausedUntil,omitempty"`
+
+ // OLMCatalogPlacement specifies the placement of OLM catalog components. By default,
+ // this is set to management and OLM catalog components are deployed onto the management
+ // cluster. If set to guest, the OLM catalog components will be deployed onto the guest
+ // cluster.
+ //
+ // +kubebuilder:default=management
+ // +optional
+ // +immutable
+ OLMCatalogPlacement OLMCatalogPlacement `json:"olmCatalogPlacement,omitempty"`
+
+ // NodeSelector when specified, must be true for the pods managed by the HostedCluster to be scheduled.
+ //
+ // +optional
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+}
+
+// OLMCatalogPlacement is an enum specifying the placement of OLM catalog components.
+// Valid values are the two constants below: "management" and "guest".
+// +kubebuilder:validation:Enum=management;guest
+type OLMCatalogPlacement string
+
+const (
+ // ManagementOLMCatalogPlacement indicates OLM catalog components will be placed in
+ // the management cluster.
+ ManagementOLMCatalogPlacement OLMCatalogPlacement = "management"
+
+ // GuestOLMCatalogPlacement indicates OLM catalog components will be placed in
+ // the guest cluster.
+ GuestOLMCatalogPlacement OLMCatalogPlacement = "guest"
+)
+
+// ImageContentSource specifies image mirrors that can be used by cluster nodes
+// to pull content. For cluster workloads, if a container image registry host of
+// the pullspec matches Source then one of the Mirrors are substituted as hosts
+// in the pullspec and tried in order to fetch the image.
+type ImageContentSource struct {
+ // Source is the repository that users refer to, e.g. in image pull
+ // specifications.
+ //
+ // +immutable
+ Source string `json:"source"`
+
+ // Mirrors are one or more repositories that may also contain the same images.
+ // Mirrors are tried in the order listed (see the type comment above).
+ //
+ // +optional
+ // +immutable
+ Mirrors []string `json:"mirrors,omitempty"`
+}
+
+// ServicePublishingStrategyMapping specifies how individual control plane
+// services are published from the hosting cluster of a control plane.
+type ServicePublishingStrategyMapping struct {
+ // Service identifies the type of service being published.
+ //
+ // +kubebuilder:validation:Enum=APIServer;OAuthServer;OIDC;Konnectivity;Ignition;OVNSbDb
+ // +immutable
+ Service ServiceType `json:"service"`
+
+ // ServicePublishingStrategy specifies how to publish Service. The type is
+ // embedded but is serialized under its own "servicePublishingStrategy" key.
+ ServicePublishingStrategy `json:"servicePublishingStrategy"`
+}
+
+// ServicePublishingStrategy specifies how to publish a ServiceType.
+//
+// NOTE(review): presumably only the strategy-specific field matching Type is
+// consulted (e.g. NodePort when Type is NodePort) — confirm against consumers.
+type ServicePublishingStrategy struct {
+ // Type is the publishing strategy used for the service.
+ //
+ // +kubebuilder:validation:Enum=LoadBalancer;NodePort;Route;None
+ // +immutable
+ Type PublishingStrategyType `json:"type"`
+
+ // NodePort configures exposing a service using a NodePort.
+ NodePort *NodePortPublishingStrategy `json:"nodePort,omitempty"`
+
+ // LoadBalancer configures exposing a service using a LoadBalancer.
+ LoadBalancer *LoadBalancerPublishingStrategy `json:"loadBalancer,omitempty"`
+
+ // Route configures exposing a service using a Route.
+ Route *RoutePublishingStrategy `json:"route,omitempty"`
+}
+
+// PublishingStrategyType defines publishing strategies for services.
+type PublishingStrategyType string
+
+// These values are declared as vars (addressable) rather than consts;
+// they must not be reassigned.
+var (
+ // LoadBalancer exposes a service with a LoadBalancer kube service.
+ LoadBalancer PublishingStrategyType = "LoadBalancer"
+ // NodePort exposes a service with a NodePort kube service.
+ NodePort PublishingStrategyType = "NodePort"
+ // Route exposes services with a Route + ClusterIP kube service.
+ Route PublishingStrategyType = "Route"
+ // None disables exposing the service
+ None PublishingStrategyType = "None"
+)
+
+// ServiceType defines what control plane services can be exposed from the
+// management control plane.
+type ServiceType string
+
+// These values are declared as vars (addressable) rather than consts;
+// they must not be reassigned.
+var (
+ // APIServer is the control plane API server.
+ APIServer ServiceType = "APIServer"
+
+ // Konnectivity is the control plane Konnectivity networking service.
+ Konnectivity ServiceType = "Konnectivity"
+
+ // OAuthServer is the control plane OAuth service.
+ OAuthServer ServiceType = "OAuthServer"
+
+ // OIDC is the control plane OIDC service.
+ OIDC ServiceType = "OIDC"
+
+ // Ignition is the control plane ignition service for nodes.
+ Ignition ServiceType = "Ignition"
+
+ // OVNSbDb is the optional control plane ovn southbound database service used by OVNKubernetes CNI.
+ OVNSbDb ServiceType = "OVNSbDb"
+)
+
+// NodePortPublishingStrategy specifies a NodePort used to expose a service.
+type NodePortPublishingStrategy struct {
+ // Address is the host/ip that the NodePort service is exposed over.
+ // Required: the field has no omitempty and no +optional marker.
+ Address string `json:"address"`
+
+ // Port is the port of the NodePort service. If <=0, the port is dynamically
+ // assigned when the service is created.
+ Port int32 `json:"port,omitempty"`
+}
+
+// LoadBalancerPublishingStrategy specifies setting used to expose a service as a LoadBalancer.
+type LoadBalancerPublishingStrategy struct {
+ // Hostname is the name of the DNS record that will be created pointing to the LoadBalancer.
+ // Optional; serialized only when set (omitempty).
+ // +optional
+ Hostname string `json:"hostname,omitempty"`
+}
+
+// RoutePublishingStrategy specifies options for exposing a service as a Route.
+type RoutePublishingStrategy struct {
+ // Hostname is the name of the DNS record that will be created pointing to the Route.
+ // Optional; serialized only when set (omitempty).
+ // +optional
+ Hostname string `json:"hostname,omitempty"`
+}
+
+// DNSSpec specifies the DNS configuration in the cluster.
+type DNSSpec struct {
+ // BaseDomain is the base domain of the cluster.
+ // Required: the field has no omitempty and no +optional marker.
+ //
+ // +immutable
+ BaseDomain string `json:"baseDomain"`
+
+ // PublicZoneID is the Hosted Zone ID where all the DNS records that are
+ // publicly accessible to the internet exist.
+ //
+ // +optional
+ // +immutable
+ PublicZoneID string `json:"publicZoneID,omitempty"`
+
+ // PrivateZoneID is the Hosted Zone ID where all the DNS records that are only
+ // available internally to the cluster exist.
+ //
+ // +optional
+ // +immutable
+ PrivateZoneID string `json:"privateZoneID,omitempty"`
+}
+
+// ClusterNetworking specifies network configuration for a cluster.
+type ClusterNetworking struct {
+ // MachineNetwork is the list of IP address pools for machines.
+ //
+ // +immutable
+ // +optional
+ MachineNetwork []MachineNetworkEntry `json:"machineNetwork,omitempty"`
+
+ // ClusterNetwork is the list of IP address pools for pods.
+ //
+ // +immutable
+ ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"`
+
+ // ServiceNetwork is the list of IP address pools for services.
+ // NOTE: currently only one entry is supported.
+ //
+ // NOTE(review): marked +optional but the json tag lacks omitempty — confirm
+ // whether an empty list should be serialized.
+ //
+ // +optional
+ ServiceNetwork []ServiceNetworkEntry `json:"serviceNetwork"`
+
+ // NetworkType specifies the SDN provider used for cluster networking.
+ //
+ // +kubebuilder:default:="OVNKubernetes"
+ // +immutable
+ NetworkType NetworkType `json:"networkType"`
+
+ // APIServer contains advanced network settings for the API server that affect
+ // how the APIServer is exposed inside a cluster node.
+ //
+ // +immutable
+ APIServer *APIServerNetworking `json:"apiServer,omitempty"`
+}
+
+// MachineNetworkEntry is a single IP address block for node IP blocks.
+// Used by ClusterNetworking.MachineNetwork.
+type MachineNetworkEntry struct {
+ // CIDR is the IP block address pool for machines within the cluster.
+ CIDR ipnet.IPNet `json:"cidr"`
+}
+
+// ClusterNetworkEntry is a single IP address block for pod IP blocks. IP blocks
+// are allocated with size 2^HostSubnetLength.
+type ClusterNetworkEntry struct {
+ // CIDR is the IP block address pool.
+ CIDR ipnet.IPNet `json:"cidr"`
+
+ // HostPrefix is the prefix size to allocate to each node from the CIDR.
+ // For example, 24 would allocate 2^8=256 addresses to each node. If this
+ // field is not used by the plugin, it can be left unset.
+ // +optional
+ HostPrefix int32 `json:"hostPrefix,omitempty"`
+}
+
+// ServiceNetworkEntry is a single IP address block for the service network.
+// Used by ClusterNetworking.ServiceNetwork (currently only one entry is supported).
+type ServiceNetworkEntry struct {
+ // CIDR is the IP block address pool for services within the cluster.
+ CIDR ipnet.IPNet `json:"cidr"`
+}
+
+// CIDRBlock is an IPv4 CIDR block in dotted-quad/prefix notation
+// (e.g. "10.0.0.0/16"). Per the validation pattern below, the /prefix
+// component (0-32) is required.
+//+kubebuilder:validation:Pattern:=`^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$`
+type CIDRBlock string
+
+// APIServerNetworking specifies how the APIServer is exposed inside a cluster
+// node. Pointer fields are used so that "unset" is distinguishable from a
+// zero value; defaults apply when unset.
+type APIServerNetworking struct {
+ // AdvertiseAddress is the address that nodes will use to talk to the API
+ // server. This is an address associated with the loopback adapter of each
+ // node. If not specified, 172.20.0.1 is used.
+ AdvertiseAddress *string `json:"advertiseAddress,omitempty"`
+
+ // Port is the port at which the APIServer is exposed inside a node. Other
+ // pods using host networking cannot listen on this port. If not specified,
+ // 6443 is used.
+ Port *int32 `json:"port,omitempty"`
+
+ // AllowedCIDRBlocks is an allow list of CIDR blocks that can access the APIServer
+ // If not specified, traffic is allowed from all addresses.
+ // This depends on underlying support by the cloud provider for Service LoadBalancerSourceRanges
+ AllowedCIDRBlocks []CIDRBlock `json:"allowedCIDRBlocks,omitempty"`
+}
+
+// NetworkType specifies the SDN provider used for cluster networking.
+// Valid values are the four constants below.
+//
+// +kubebuilder:validation:Enum=OpenShiftSDN;Calico;OVNKubernetes;Other
+type NetworkType string
+
+const (
+ // OpenShiftSDN specifies OpenShiftSDN as the SDN provider
+ OpenShiftSDN NetworkType = "OpenShiftSDN"
+
+ // Calico specifies Calico as the SDN provider
+ Calico NetworkType = "Calico"
+
+ // OVNKubernetes specifies OVN as the SDN provider
+ OVNKubernetes NetworkType = "OVNKubernetes"
+
+ // Other specifies an undefined SDN provider
+ Other NetworkType = "Other"
+)
+
+// PlatformType is a specific supported infrastructure provider.
+//
+// +kubebuilder:validation:Enum=AWS;None;IBMCloud;Agent;KubeVirt;Azure;PowerVS
+type PlatformType string
+
+const (
+ // AWSPlatform represents Amazon Web Services infrastructure.
+ AWSPlatform PlatformType = "AWS"
+
+ // NonePlatform represents user supplied (e.g. bare metal) infrastructure.
+ NonePlatform PlatformType = "None"
+
+ // IBMCloudPlatform represents IBM Cloud infrastructure.
+ IBMCloudPlatform PlatformType = "IBMCloud"
+
+ // AgentPlatform represents user supplied infrastructure booted with agents.
+ AgentPlatform PlatformType = "Agent"
+
+ // KubevirtPlatform represents Kubevirt infrastructure.
+ KubevirtPlatform PlatformType = "KubeVirt"
+
+ // AzurePlatform represents Azure infrastructure.
+ AzurePlatform PlatformType = "Azure"
+
+ // PowerVSPlatform represents PowerVS infrastructure.
+ PowerVSPlatform PlatformType = "PowerVS"
+)
+
+// PlatformSpec specifies the underlying infrastructure provider for the cluster
+// and is used to configure platform specific behavior. It is a discriminated
+// union: Type selects which platform-specific member applies.
+type PlatformSpec struct {
+ // Type is the type of infrastructure provider for the cluster.
+ //
+ // +unionDiscriminator
+ // +immutable
+ Type PlatformType `json:"type"`
+
+ // AWS specifies configuration for clusters running on Amazon Web Services.
+ //
+ // +optional
+ // +immutable
+ AWS *AWSPlatformSpec `json:"aws,omitempty"`
+
+ // Agent specifies configuration for agent-based installations.
+ //
+ // +optional
+ // +immutable
+ Agent *AgentPlatformSpec `json:"agent,omitempty"`
+
+ // IBMCloud defines IBMCloud specific settings for components
+ // NOTE(review): no +optional/+immutable markers here, unlike AWS/Agent — confirm intended.
+ IBMCloud *IBMCloudPlatformSpec `json:"ibmcloud,omitempty"`
+
+ // Azure defines azure specific settings
+ Azure *AzurePlatformSpec `json:"azure,omitempty"`
+
+ // PowerVS specifies configuration for clusters running on IBMCloud Power VS Service.
+ // This field is immutable. Once set, It can't be changed.
+ //
+ // +optional
+ // +immutable
+ PowerVS *PowerVSPlatformSpec `json:"powervs,omitempty"`
+}
+
+// AgentPlatformSpec specifies configuration for agent-based installations.
+type AgentPlatformSpec struct {
+ // AgentNamespace is the namespace where to search for Agents for this cluster.
+ // Required: the field has no omitempty and no +optional marker.
+ AgentNamespace string `json:"agentNamespace"`
+}
+
+// IBMCloudPlatformSpec defines IBMCloud specific settings for components
+type IBMCloudPlatformSpec struct {
+ // ProviderType is a specific supported infrastructure provider within IBM Cloud.
+ // Serialized only when set (omitempty).
+ ProviderType configv1.IBMCloudProviderType `json:"providerType,omitempty"`
+}
+
+// PowerVSPlatformSpec defines IBMCloud PowerVS specific settings for components
+type PowerVSPlatformSpec struct {
+ // AccountID is the IBMCloud account id.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +immutable
+ AccountID string `json:"accountID"`
+
+ // CISInstanceCRN is the IBMCloud CIS Service Instance's Cloud Resource Name
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +kubebuilder:validation:Pattern=`^crn:`
+ // +immutable
+ CISInstanceCRN string `json:"cisInstanceCRN"`
+
+ // ResourceGroup is the IBMCloud Resource Group in which the cluster resides.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +immutable
+ ResourceGroup string `json:"resourceGroup"`
+
+ // Region is the IBMCloud region in which the cluster resides. This configures the
+ // OCP control plane cloud integrations, and is used by NodePool to resolve
+ // the correct boot image for a given release.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +immutable
+ Region string `json:"region"`
+
+ // Zone is the availability zone where control plane cloud resources are
+ // created.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +immutable
+ Zone string `json:"zone"`
+
+ // Subnet is the subnet to use for control plane cloud resources.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +immutable
+ Subnet *PowerVSResourceReference `json:"subnet"`
+
+ // ServiceInstance is the reference to the Power VS service on which the server instance(VM) will be created.
+ // Power VS service is a container for all Power VS instances at a specific geographic region.
+ // serviceInstance can be created via IBM Cloud catalog or CLI.
+ // ServiceInstanceID is the unique identifier that can be obtained from IBM Cloud UI or IBM Cloud cli.
+ //
+ // More detail about Power VS service instance.
+ // https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server
+ //
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +immutable
+ ServiceInstanceID string `json:"serviceInstanceID"`
+
+ // VPC specifies IBM Cloud PowerVS Load Balancing configuration for the control
+ // plane.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +immutable
+ VPC *PowerVSVPC `json:"vpc"`
+
+ // KubeCloudControllerCreds is a reference to a secret containing cloud
+ // credentials with permissions matching the cloud controller policy.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // TODO(dan): document the "cloud controller policy"
+ //
+ // +immutable
+ KubeCloudControllerCreds corev1.LocalObjectReference `json:"kubeCloudControllerCreds"`
+
+ // NodePoolManagementCreds is a reference to a secret containing cloud
+ // credentials with permissions matching the node pool management policy.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // TODO(dan): document the "node pool management policy"
+ //
+ // +immutable
+ NodePoolManagementCreds corev1.LocalObjectReference `json:"nodePoolManagementCreds"`
+
+ // IngressOperatorCloudCreds is a reference to a secret containing IBM Cloud
+ // credentials for the ingress operator to authenticate with IBM Cloud.
+ //
+ // +immutable
+ IngressOperatorCloudCreds corev1.LocalObjectReference `json:"ingressOperatorCloudCreds"`
+
+ // StorageOperatorCloudCreds is a reference to a secret containing IBM Cloud
+ // credentials for the storage operator to authenticate with IBM Cloud.
+ //
+ // +immutable
+ StorageOperatorCloudCreds corev1.LocalObjectReference `json:"storageOperatorCloudCreds"`
+}
+
+// PowerVSVPC specifies IBM Cloud PowerVS LoadBalancer configuration for the control
+// plane.
+type PowerVSVPC struct {
+ // Name is the name of the VPC used for all the service load balancers.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +immutable
+ Name string `json:"name"`
+
+ // Region is the IBMCloud region in which the VPC is created; this VPC is used
+ // for all the ingress traffic into the OCP cluster.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +immutable
+ Region string `json:"region"`
+
+ // Zone is the availability zone where load balancer cloud resources are
+ // created.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +immutable
+ // +optional
+ Zone string `json:"zone,omitempty"`
+
+ // Subnet is the subnet to use for load balancer.
+ // This field is immutable. Once set, it can't be changed.
+ //
+ // +immutable
+ // +optional
+ Subnet string `json:"subnet,omitempty"`
+}
+
+// PowerVSResourceReference is a reference to a specific IBMCloud PowerVS resource by ID, or Name.
+// Only one of ID, or Name may be specified. Specifying more than one will result in
+// a validation error.
+type PowerVSResourceReference struct {
+ // ID of the resource.
+ // +optional
+ ID *string `json:"id,omitempty"`
+
+ // Name of the resource.
+ // +optional
+ Name *string `json:"name,omitempty"`
+}
+
+// AWSCloudProviderConfig specifies AWS networking configuration.
+type AWSCloudProviderConfig struct {
+ // Subnet is the subnet to use for control plane cloud resources.
+ //
+ // +optional
+ Subnet *AWSResourceReference `json:"subnet,omitempty"`
+
+ // Zone is the availability zone where control plane cloud resources are
+ // created.
+ //
+ // +optional
+ Zone string `json:"zone,omitempty"`
+
+ // VPC is the VPC to use for control plane cloud resources.
+ // Required: the field has no omitempty and no +optional marker.
+ VPC string `json:"vpc"`
+}
+
+// AWSEndpointAccessType specifies the publishing scope of cluster endpoints.
+// Valid values are the three constants below.
+type AWSEndpointAccessType string
+
+const (
+ // Public endpoint access allows public API server access and public node
+ // communication with the control plane.
+ Public AWSEndpointAccessType = "Public"
+
+ // PublicAndPrivate endpoint access allows public API server access and
+ // private node communication with the control plane.
+ PublicAndPrivate AWSEndpointAccessType = "PublicAndPrivate"
+
+ // Private endpoint access allows only private API server access and private
+ // node communication with the control plane.
+ Private AWSEndpointAccessType = "Private"
+)
+
+// AWSPlatformSpec specifies configuration for clusters running on Amazon Web Services.
+type AWSPlatformSpec struct {
+ // Region is the AWS region in which the cluster resides. This configures the
+ // OCP control plane cloud integrations, and is used by NodePool to resolve
+ // the correct boot AMI for a given release.
+ //
+ // +immutable
+ Region string `json:"region"`
+
+ // CloudProviderConfig specifies AWS networking configuration for the control
+ // plane.
+ // This is mainly used for cloud provider controller config:
+ // https://github.com/kubernetes/kubernetes/blob/f5be5052e3d0808abb904aebd3218fe4a5c2dd82/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go#L1347-L1364
+ // TODO(dan): should this be named AWSNetworkConfig?
+ //
+ // +optional
+ // +immutable
+ CloudProviderConfig *AWSCloudProviderConfig `json:"cloudProviderConfig,omitempty"`
+
+ // ServiceEndpoints specifies optional custom endpoints which will override
+ // the default service endpoint of specific AWS Services.
+ //
+ // There must be only one ServiceEndpoint for a given service name.
+ //
+ // +optional
+ // +immutable
+ ServiceEndpoints []AWSServiceEndpoint `json:"serviceEndpoints,omitempty"`
+
+ // RolesRef contains references to various AWS IAM roles required to enable
+ // integrations such as OIDC.
+ // Required: the field has no omitempty and no +optional marker.
+ //
+ // +immutable
+ RolesRef AWSRolesRef `json:"rolesRef"`
+
+ // ResourceTags is a list of additional tags to apply to AWS resources created
+ // for the cluster. See
+ // https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for
+ // information on tagging AWS resources. AWS supports a maximum of 50 tags per
+ // resource. OpenShift reserves 25 tags for its use, leaving 25 tags available
+ // for the user.
+ //
+ // +kubebuilder:validation:MaxItems=25
+ // +optional
+ ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"`
+
+ // EndpointAccess specifies the publishing scope of cluster endpoints. The
+ // default is Public.
+ //
+ // +kubebuilder:validation:Enum=Public;PublicAndPrivate;Private
+ // +kubebuilder:default=Public
+ // +optional
+ EndpointAccess AWSEndpointAccessType `json:"endpointAccess,omitempty"`
+}
+
+// AWSRoleCredentials identifies a secret (Namespace/Name) holding credentials
+// associated with an AWS IAM role (ARN).
+type AWSRoleCredentials struct {
+ // ARN is the AWS IAM role ARN.
+ ARN string `json:"arn"`
+ // Namespace is the namespace of the secret holding the credentials.
+ Namespace string `json:"namespace"`
+ // Name is the name of the secret holding the credentials.
+ Name string `json:"name"`
+}
+
+// AWSResourceTag is a tag to apply to AWS resources created for the cluster.
+type AWSResourceTag struct {
+ // Key is the key of the tag.
+ //
+ // The hyphen is placed last in the character class so it is matched
+ // literally; previously `+-@` formed an accidental range (0x2B-0x40)
+ // that also admitted `,`, `;`, `<`, `>` and `?`.
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=128
+ // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+@-]+$`
+ Key string `json:"key"`
+ // Value is the value of the tag.
+ //
+ // Some AWS services do not support empty values. Since tags are added to
+ // resources in many services, the length of the tag value must meet the
+ // requirements of all services.
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=256
+ // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+@-]+$`
+ Value string `json:"value"`
+}
+
+// AWSRolesRef contains references to various AWS IAM roles required for operators to make calls against the AWS API.
+// Each referenced role must allow assumption via web identity; the example trust
+// policy below (documented on IngressARN) applies to every ARN in this struct.
+type AWSRolesRef struct {
+ // The referenced role must have a trust relationship that allows it to be assumed via web identity.
+ // https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html.
+ // Example:
+ // {
+ //		"Version": "2012-10-17",
+ //		"Statement": [
+ //			{
+ //				"Effect": "Allow",
+ //				"Principal": {
+ //					"Federated": "{{ .ProviderARN }}"
+ //				},
+ //					"Action": "sts:AssumeRoleWithWebIdentity",
+ //				"Condition": {
+ //					"StringEquals": {
+ //						"{{ .ProviderName }}:sub": {{ .ServiceAccounts }}
+ //					}
+ //				}
+ //			}
+ //		]
+ //	}
+ //
+ // IngressARN is an ARN value referencing a role appropriate for the Ingress Operator.
+ //
+ // The following is an example of a valid policy document:
+ //
+ // {
+ //	"Version": "2012-10-17",
+ //	"Statement": [
+ //		{
+ //			"Effect": "Allow",
+ //			"Action": [
+ //				"elasticloadbalancing:DescribeLoadBalancers",
+ //				"tag:GetResources",
+ //				"route53:ListHostedZones"
+ //			],
+ //			"Resource": "*"
+ //		},
+ //		{
+ //			"Effect": "Allow",
+ //			"Action": [
+ //				"route53:ChangeResourceRecordSets"
+ //			],
+ //			"Resource": [
+ //				"arn:aws:route53:::PUBLIC_ZONE_ID",
+ //				"arn:aws:route53:::PRIVATE_ZONE_ID"
+ //			]
+ //		}
+ //	]
+ // }
+ IngressARN string `json:"ingressARN"`
+
+ // ImageRegistryARN is an ARN value referencing a role appropriate for the Image Registry Operator.
+ //
+ // The following is an example of a valid policy document:
+ //
+ // {
+ //	"Version": "2012-10-17",
+ //	"Statement": [
+ //		{
+ //			"Effect": "Allow",
+ //			"Action": [
+ //				"s3:CreateBucket",
+ //				"s3:DeleteBucket",
+ //				"s3:PutBucketTagging",
+ //				"s3:GetBucketTagging",
+ //				"s3:PutBucketPublicAccessBlock",
+ //				"s3:GetBucketPublicAccessBlock",
+ //				"s3:PutEncryptionConfiguration",
+ //				"s3:GetEncryptionConfiguration",
+ //				"s3:PutLifecycleConfiguration",
+ //				"s3:GetLifecycleConfiguration",
+ //				"s3:GetBucketLocation",
+ //				"s3:ListBucket",
+ //				"s3:GetObject",
+ //				"s3:PutObject",
+ //				"s3:DeleteObject",
+ //				"s3:ListBucketMultipartUploads",
+ //				"s3:AbortMultipartUpload",
+ //				"s3:ListMultipartUploadParts"
+ //			],
+ //			"Resource": "*"
+ //		}
+ //	]
+ // }
+ ImageRegistryARN string `json:"imageRegistryARN"`
+
+ // StorageARN is an ARN value referencing a role appropriate for the Storage Operator.
+ //
+ // The following is an example of a valid policy document:
+ //
+ // {
+ //	"Version": "2012-10-17",
+ //	"Statement": [
+ //		{
+ //			"Effect": "Allow",
+ //			"Action": [
+ //				"ec2:AttachVolume",
+ //				"ec2:CreateSnapshot",
+ //				"ec2:CreateTags",
+ //				"ec2:CreateVolume",
+ //				"ec2:DeleteSnapshot",
+ //				"ec2:DeleteTags",
+ //				"ec2:DeleteVolume",
+ //				"ec2:DescribeInstances",
+ //				"ec2:DescribeSnapshots",
+ //				"ec2:DescribeTags",
+ //				"ec2:DescribeVolumes",
+ //				"ec2:DescribeVolumesModifications",
+ //				"ec2:DetachVolume",
+ //				"ec2:ModifyVolume"
+ //			],
+ //			"Resource": "*"
+ //		}
+ //	]
+ // }
+ StorageARN string `json:"storageARN"`
+
+ // NetworkARN is an ARN value referencing a role appropriate for the Network Operator.
+ //
+ // The following is an example of a valid policy document:
+ //
+ // {
+ //	"Version": "2012-10-17",
+ //	"Statement": [
+ //		{
+ //			"Effect": "Allow",
+ //			"Action": [
+ //				"ec2:DescribeInstances",
+ //				"ec2:DescribeInstanceStatus",
+ //				"ec2:DescribeInstanceTypes",
+ //				"ec2:UnassignPrivateIpAddresses",
+ //				"ec2:AssignPrivateIpAddresses",
+ //				"ec2:UnassignIpv6Addresses",
+ //				"ec2:AssignIpv6Addresses",
+ //				"ec2:DescribeSubnets",
+ //				"ec2:DescribeNetworkInterfaces"
+ //			],
+ //			"Resource": "*"
+ //		}
+ //	]
+ // }
+ NetworkARN string `json:"networkARN"`
+
+ // KubeCloudControllerARN is an ARN value referencing a role appropriate for the KCM/KCC.
+ //
+ // The following is an example of a valid policy document:
+ //
+ // {
+ //	"Version": "2012-10-17",
+ //	"Statement": [
+ //		{
+ //			"Action": [
+ //				"ec2:DescribeInstances",
+ //				"ec2:DescribeImages",
+ //				"ec2:DescribeRegions",
+ //				"ec2:DescribeRouteTables",
+ //				"ec2:DescribeSecurityGroups",
+ //				"ec2:DescribeSubnets",
+ //				"ec2:DescribeVolumes",
+ //				"ec2:CreateSecurityGroup",
+ //				"ec2:CreateTags",
+ //				"ec2:CreateVolume",
+ //				"ec2:ModifyInstanceAttribute",
+ //				"ec2:ModifyVolume",
+ //				"ec2:AttachVolume",
+ //				"ec2:AuthorizeSecurityGroupIngress",
+ //				"ec2:CreateRoute",
+ //				"ec2:DeleteRoute",
+ //				"ec2:DeleteSecurityGroup",
+ //				"ec2:DeleteVolume",
+ //				"ec2:DetachVolume",
+ //				"ec2:RevokeSecurityGroupIngress",
+ //				"ec2:DescribeVpcs",
+ //				"elasticloadbalancing:AddTags",
+ //				"elasticloadbalancing:AttachLoadBalancerToSubnets",
+ //				"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
+ //				"elasticloadbalancing:CreateLoadBalancer",
+ //				"elasticloadbalancing:CreateLoadBalancerPolicy",
+ //				"elasticloadbalancing:CreateLoadBalancerListeners",
+ //				"elasticloadbalancing:ConfigureHealthCheck",
+ //				"elasticloadbalancing:DeleteLoadBalancer",
+ //				"elasticloadbalancing:DeleteLoadBalancerListeners",
+ //				"elasticloadbalancing:DescribeLoadBalancers",
+ //				"elasticloadbalancing:DescribeLoadBalancerAttributes",
+ //				"elasticloadbalancing:DetachLoadBalancerFromSubnets",
+ //				"elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
+ //				"elasticloadbalancing:ModifyLoadBalancerAttributes",
+ //				"elasticloadbalancing:RegisterInstancesWithLoadBalancer",
+ //				"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer",
+ //				"elasticloadbalancing:AddTags",
+ //				"elasticloadbalancing:CreateListener",
+ //				"elasticloadbalancing:CreateTargetGroup",
+ //				"elasticloadbalancing:DeleteListener",
+ //				"elasticloadbalancing:DeleteTargetGroup",
+ //				"elasticloadbalancing:DescribeListeners",
+ //				"elasticloadbalancing:DescribeLoadBalancerPolicies",
+ //				"elasticloadbalancing:DescribeTargetGroups",
+ //				"elasticloadbalancing:DescribeTargetHealth",
+ //				"elasticloadbalancing:ModifyListener",
+ //				"elasticloadbalancing:ModifyTargetGroup",
+ //				"elasticloadbalancing:RegisterTargets",
+ //				"elasticloadbalancing:SetLoadBalancerPoliciesOfListener",
+ //				"iam:CreateServiceLinkedRole",
+ //				"kms:DescribeKey"
+ //			],
+ //			"Resource": [
+ //				"*"
+ //			],
+ //			"Effect": "Allow"
+ //		}
+ //	]
+ // }
+ // +immutable
+ KubeCloudControllerARN string `json:"kubeCloudControllerARN"`
+
+ // NodePoolManagementARN is an ARN value referencing a role appropriate for the CAPI Controller.
+ //
+ // The following is an example of a valid policy document:
+ //
+ // {
+ //	"Version": "2012-10-17",
+ //	"Statement": [
+ //		{
+ //			"Action": [
+ //				"ec2:AllocateAddress",
+ //				"ec2:AssociateRouteTable",
+ //				"ec2:AttachInternetGateway",
+ //				"ec2:AuthorizeSecurityGroupIngress",
+ //				"ec2:CreateInternetGateway",
+ //				"ec2:CreateNatGateway",
+ //				"ec2:CreateRoute",
+ //				"ec2:CreateRouteTable",
+ //				"ec2:CreateSecurityGroup",
+ //				"ec2:CreateSubnet",
+ //				"ec2:CreateTags",
+ //				"ec2:DeleteInternetGateway",
+ //				"ec2:DeleteNatGateway",
+ //				"ec2:DeleteRouteTable",
+ //				"ec2:DeleteSecurityGroup",
+ //				"ec2:DeleteSubnet",
+ //				"ec2:DeleteTags",
+ //				"ec2:DescribeAccountAttributes",
+ //				"ec2:DescribeAddresses",
+ //				"ec2:DescribeAvailabilityZones",
+ //				"ec2:DescribeImages",
+ //				"ec2:DescribeInstances",
+ //				"ec2:DescribeInternetGateways",
+ //				"ec2:DescribeNatGateways",
+ //				"ec2:DescribeNetworkInterfaces",
+ //				"ec2:DescribeNetworkInterfaceAttribute",
+ //				"ec2:DescribeRouteTables",
+ //				"ec2:DescribeSecurityGroups",
+ //				"ec2:DescribeSubnets",
+ //				"ec2:DescribeVpcs",
+ //				"ec2:DescribeVpcAttribute",
+ //				"ec2:DescribeVolumes",
+ //				"ec2:DetachInternetGateway",
+ //				"ec2:DisassociateRouteTable",
+ //				"ec2:DisassociateAddress",
+ //				"ec2:ModifyInstanceAttribute",
+ //				"ec2:ModifyNetworkInterfaceAttribute",
+ //				"ec2:ModifySubnetAttribute",
+ //				"ec2:ReleaseAddress",
+ //				"ec2:RevokeSecurityGroupIngress",
+ //				"ec2:RunInstances",
+ //				"ec2:TerminateInstances",
+ //				"tag:GetResources",
+ //				"ec2:CreateLaunchTemplate",
+ //				"ec2:CreateLaunchTemplateVersion",
+ //				"ec2:DescribeLaunchTemplates",
+ //				"ec2:DescribeLaunchTemplateVersions",
+ //				"ec2:DeleteLaunchTemplate",
+ //				"ec2:DeleteLaunchTemplateVersions"
+ //			],
+ //			"Resource": [
+ //				"*"
+ //			],
+ //			"Effect": "Allow"
+ //		},
+ //		{
+ //			"Condition": {
+ //				"StringLike": {
+ //					"iam:AWSServiceName": "elasticloadbalancing.amazonaws.com"
+ //				}
+ //			},
+ //			"Action": [
+ //				"iam:CreateServiceLinkedRole"
+ //			],
+ //			"Resource": [
+ //				"arn:*:iam::*:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing"
+ //			],
+ //			"Effect": "Allow"
+ //		},
+ //		{
+ //			"Action": [
+ //				"iam:PassRole"
+ //			],
+ //			"Resource": [
+ //				"arn:*:iam::*:role/*-worker-role"
+ //			],
+ //			"Effect": "Allow"
+ //		}
+ //	]
+ // }
+ //
+ // +immutable
+ NodePoolManagementARN string `json:"nodePoolManagementARN"`
+
+ // ControlPlaneOperatorARN  is an ARN value referencing a role appropriate for the Control Plane Operator.
+ //
+ // The following is an example of a valid policy document:
+ //
+ // {
+ //	"Version": "2012-10-17",
+ //	"Statement": [
+ //		{
+ //			"Effect": "Allow",
+ //			"Action": [
+ //				"ec2:CreateVpcEndpoint",
+ //				"ec2:DescribeVpcEndpoints",
+ //				"ec2:ModifyVpcEndpoint",
+ //				"ec2:DeleteVpcEndpoints",
+ //				"ec2:CreateTags",
+ //				"route53:ListHostedZones"
+ //			],
+ //			"Resource": "*"
+ //		},
+ //		{
+ //			"Effect": "Allow",
+ //			"Action": [
+ //				"route53:ChangeResourceRecordSets",
+ //				"route53:ListResourceRecordSets"
+ //			],
+ //			"Resource": "arn:aws:route53:::%s"
+ //		}
+ //	]
+ // }
+ // +immutable
+ ControlPlaneOperatorARN string `json:"controlPlaneOperatorARN"`
+}
+
+// AWSServiceEndpoint stores the configuration for services to
+// override existing defaults of AWS Services.
+type AWSServiceEndpoint struct {
+ // Name is the name of the AWS service.
+ // This must be provided and cannot be empty.
+ Name string `json:"name"`
+
+ // URL is fully qualified URI with scheme https, that overrides the default generated
+ // endpoint for a client.
+ // This must be provided and cannot be empty.
+ //
+ // +kubebuilder:validation:Pattern=`^https://`
+ URL string `json:"url"`
+}
+
+type AzurePlatformSpec struct {
+ Credentials corev1.LocalObjectReference `json:"credentials"`
+ Location string `json:"location"`
+ ResourceGroupName string `json:"resourceGroup"`
+ VnetName string `json:"vnetName"`
+ VnetID string `json:"vnetID"`
+ SubnetName string `json:"subnetName"`
+ SubscriptionID string `json:"subscriptionID"`
+ MachineIdentityID string `json:"machineIdentityID"`
+ SecurityGroupName string `json:"securityGroupName"`
+}
+
+// Release represents the metadata for an OCP release payload image.
+type Release struct {
+ // Image is the image pullspec of an OCP release payload image.
+ //
+ // +kubebuilder:validation:Pattern=^(\w+\S+)$
+ Image string `json:"image"`
+}
+
+// ClusterAutoscaling specifies auto-scaling behavior that applies to all
+// NodePools associated with a control plane.
+type ClusterAutoscaling struct {
+ // MaxNodesTotal is the maximum allowable number of nodes across all NodePools
+ // for a HostedCluster. The autoscaler will not grow the cluster beyond this
+ // number.
+ //
+ // +kubebuilder:validation:Minimum=0
+ MaxNodesTotal *int32 `json:"maxNodesTotal,omitempty"`
+
+ // MaxPodGracePeriod is the maximum seconds to wait for graceful pod
+ // termination before scaling down a NodePool. The default is 600 seconds.
+ //
+ // +kubebuilder:validation:Minimum=0
+ MaxPodGracePeriod *int32 `json:"maxPodGracePeriod,omitempty"`
+
+ // MaxNodeProvisionTime is the maximum time to wait for node provisioning
+ // before considering the provisioning to be unsuccessful, expressed as a Go
+ // duration string. The default is 15 minutes.
+ //
+ // +kubebuilder:validation:Pattern=^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$
+ MaxNodeProvisionTime string `json:"maxNodeProvisionTime,omitempty"`
+
+ // PodPriorityThreshold enables users to schedule "best-effort" pods, which
+ // shouldn't trigger autoscaler actions, but only run when there are spare
+ // resources available. The default is -10.
+ //
+ // See the following for more details:
+ // https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-does-cluster-autoscaler-work-with-pod-priority-and-preemption
+ //
+ // +optional
+ PodPriorityThreshold *int32 `json:"podPriorityThreshold,omitempty"`
+}
+
+// EtcdManagementType is an enum specifying the strategy for managing the cluster's etcd instance
+// +kubebuilder:validation:Enum=Managed;Unmanaged
+type EtcdManagementType string
+
+const (
+	// Managed means HyperShift should provision and operate the etcd cluster
+ // automatically.
+ Managed EtcdManagementType = "Managed"
+
+ // Unmanaged means HyperShift will not provision or manage the etcd cluster,
+ // and the user is responsible for doing so.
+ Unmanaged EtcdManagementType = "Unmanaged"
+)
+
+// EtcdSpec specifies configuration for a control plane etcd cluster.
+type EtcdSpec struct {
+ // ManagementType defines how the etcd cluster is managed.
+ //
+ // +unionDiscriminator
+ // +immutable
+ ManagementType EtcdManagementType `json:"managementType"`
+
+ // Managed specifies the behavior of an etcd cluster managed by HyperShift.
+ //
+ // +optional
+ // +immutable
+ Managed *ManagedEtcdSpec `json:"managed,omitempty"`
+
+	// Unmanaged specifies configuration which enables the control plane to
+	// integrate with an externally managed etcd cluster.
+ //
+ // +optional
+ // +immutable
+ Unmanaged *UnmanagedEtcdSpec `json:"unmanaged,omitempty"`
+}
+
+// ManagedEtcdSpec specifies the behavior of an etcd cluster managed by
+// HyperShift.
+type ManagedEtcdSpec struct {
+ // Storage specifies how etcd data is persisted.
+ Storage ManagedEtcdStorageSpec `json:"storage"`
+}
+
+// ManagedEtcdStorageType is a storage type for an etcd cluster.
+//
+// +kubebuilder:validation:Enum=PersistentVolume
+type ManagedEtcdStorageType string
+
+const (
+ // PersistentVolumeEtcdStorage uses PersistentVolumes for etcd storage.
+ PersistentVolumeEtcdStorage ManagedEtcdStorageType = "PersistentVolume"
+)
+
+var (
+ DefaultPersistentVolumeEtcdStorageSize resource.Quantity = resource.MustParse("4Gi")
+)
+
+// ManagedEtcdStorageSpec describes the storage configuration for etcd data.
+type ManagedEtcdStorageSpec struct {
+ // Type is the kind of persistent storage implementation to use for etcd.
+ //
+ // +immutable
+ // +unionDiscriminator
+ Type ManagedEtcdStorageType `json:"type"`
+
+ // PersistentVolume is the configuration for PersistentVolume etcd storage.
+ // With this implementation, a PersistentVolume will be allocated for every
+ // etcd member (either 1 or 3 depending on the HostedCluster control plane
+ // availability configuration).
+ //
+ // +optional
+ PersistentVolume *PersistentVolumeEtcdStorageSpec `json:"persistentVolume,omitempty"`
+
+ // RestoreSnapshotURL allows an optional list of URLs to be provided where
+ // an etcd snapshot can be downloaded, for example a pre-signed URL
+ // referencing a storage service, one URL per replica.
+ // This snapshot will be restored on initial startup, only when the etcd PV
+ // is empty.
+ //
+ // +optional
+ // +immutable
+ RestoreSnapshotURL []string `json:"restoreSnapshotURL"`
+}
+
+// PersistentVolumeEtcdStorageSpec is the configuration for PersistentVolume
+// etcd storage.
+type PersistentVolumeEtcdStorageSpec struct {
+ // StorageClassName is the StorageClass of the data volume for each etcd member.
+ //
+ // See https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1.
+ //
+ // +optional
+ // +immutable
+ StorageClassName *string `json:"storageClassName,omitempty"`
+
+ // Size is the minimum size of the data volume for each etcd member.
+ //
+ // +optional
+ // +kubebuilder:default="4Gi"
+ Size *resource.Quantity `json:"size,omitempty"`
+}
+
+// UnmanagedEtcdSpec specifies configuration which enables the control plane to
+// integrate with an externally managed etcd cluster.
+type UnmanagedEtcdSpec struct {
+ // Endpoint is the full etcd cluster client endpoint URL. For example:
+ //
+ // https://etcd-client:2379
+ //
+ // If the URL uses an HTTPS scheme, the TLS field is required.
+ //
+ // +kubebuilder:validation:Pattern=`^https://`
+ Endpoint string `json:"endpoint"`
+
+ // TLS specifies TLS configuration for HTTPS etcd client endpoints.
+ TLS EtcdTLSConfig `json:"tls"`
+}
+
+// EtcdTLSConfig specifies TLS configuration for HTTPS etcd client endpoints.
+type EtcdTLSConfig struct {
+ // ClientSecret refers to a secret for client mTLS authentication with the etcd cluster. It
+ // may have the following key/value pairs:
+ //
+ // etcd-client-ca.crt: Certificate Authority value
+ // etcd-client.crt: Client certificate value
+ // etcd-client.key: Client certificate key value
+ ClientSecret corev1.LocalObjectReference `json:"clientSecret"`
+}
+
+// SecretEncryptionType defines the type of kube secret encryption being used.
+// +kubebuilder:validation:Enum=kms;aescbc
+type SecretEncryptionType string
+
+const (
+ // KMS integrates with a cloud provider's key management service to do secret encryption
+ KMS SecretEncryptionType = "kms"
+ // AESCBC uses AES-CBC with PKCS#7 padding to do secret encryption
+ AESCBC SecretEncryptionType = "aescbc"
+)
+
+// SecretEncryptionSpec contains metadata about the kubernetes secret encryption strategy being used for the
+// cluster when applicable.
+type SecretEncryptionSpec struct {
+ // Type defines the type of kube secret encryption being used
+ // +unionDiscriminator
+ Type SecretEncryptionType `json:"type"`
+
+ // KMS defines metadata about the kms secret encryption strategy
+ // +optional
+ KMS *KMSSpec `json:"kms,omitempty"`
+
+ // AESCBC defines metadata about the AESCBC secret encryption strategy
+ // +optional
+ AESCBC *AESCBCSpec `json:"aescbc,omitempty"`
+}
+
+// KMSProvider defines the supported KMS providers
+// +kubebuilder:validation:Enum=IBMCloud;AWS
+type KMSProvider string
+
+const (
+ IBMCloud KMSProvider = "IBMCloud"
+ AWS KMSProvider = "AWS"
+)
+
+// KMSSpec defines metadata about the kms secret encryption strategy
+type KMSSpec struct {
+ // Provider defines the KMS provider
+ // +unionDiscriminator
+ Provider KMSProvider `json:"provider"`
+ // IBMCloud defines metadata for the IBM Cloud KMS encryption strategy
+ // +optional
+ IBMCloud *IBMCloudKMSSpec `json:"ibmcloud,omitempty"`
+ // AWS defines metadata about the configuration of the AWS KMS Secret Encryption provider
+ // +optional
+ AWS *AWSKMSSpec `json:"aws,omitempty"`
+}
+
+// IBMCloudKMSSpec defines metadata for the IBM Cloud KMS encryption strategy
+type IBMCloudKMSSpec struct {
+ // Region is the IBM Cloud region
+ Region string `json:"region"`
+ // Auth defines metadata for how authentication is done with IBM Cloud KMS
+ Auth IBMCloudKMSAuthSpec `json:"auth"`
+ // KeyList defines the list of keys used for data encryption
+ KeyList []IBMCloudKMSKeyEntry `json:"keyList"`
+}
+
+// IBMCloudKMSKeyEntry defines metadata for an IBM Cloud KMS encryption key
+type IBMCloudKMSKeyEntry struct {
+	// CRKID is the customer root key ID
+ CRKID string `json:"crkID"`
+ // InstanceID is the id for the key protect instance
+ InstanceID string `json:"instanceID"`
+ // CorrelationID is an identifier used to track all api call usage from hypershift
+ CorrelationID string `json:"correlationID"`
+ // URL is the url to call key protect apis over
+ // +kubebuilder:validation:Pattern=`^https://`
+ URL string `json:"url"`
+ // KeyVersion is a unique number associated with the key. The number increments whenever a new
+ // key is enabled for data encryption.
+ KeyVersion int `json:"keyVersion"`
+}
+
+// IBMCloudKMSAuthSpec defines metadata for how authentication is done with IBM Cloud KMS
+type IBMCloudKMSAuthSpec struct {
+ // Type defines the IBM Cloud KMS authentication strategy
+ // +unionDiscriminator
+ Type IBMCloudKMSAuthType `json:"type"`
+ // Unmanaged defines the auth metadata the customer provides to interact with IBM Cloud KMS
+ // +optional
+ Unmanaged *IBMCloudKMSUnmanagedAuthSpec `json:"unmanaged,omitempty"`
+ // Managed defines metadata around the service to service authentication strategy for the IBM Cloud
+ // KMS system (all provider managed).
+ // +optional
+ Managed *IBMCloudKMSManagedAuthSpec `json:"managed,omitempty"`
+}
+
+// IBMCloudKMSAuthType defines the IBM Cloud KMS authentication strategy
+// +kubebuilder:validation:Enum=Managed;Unmanaged
+type IBMCloudKMSAuthType string
+
+const (
+ // IBMCloudKMSManagedAuth defines the KMS authentication strategy where the IKS/ROKS platform uses
+	// service to service auth to call IBM Cloud KMS APIs (no customer credentials required)
+ IBMCloudKMSManagedAuth IBMCloudKMSAuthType = "Managed"
+ // IBMCloudKMSUnmanagedAuth defines the KMS authentication strategy where a customer supplies IBM Cloud
+ // authentication to interact with IBM Cloud KMS APIs
+ IBMCloudKMSUnmanagedAuth IBMCloudKMSAuthType = "Unmanaged"
+)
+
+// IBMCloudKMSUnmanagedAuthSpec defines the auth metadata the customer provides to interact with IBM Cloud KMS
+type IBMCloudKMSUnmanagedAuthSpec struct {
+	// Credentials should reference a secret with a key field of IBMCloudIAMAPIKeySecretKey that contains an API key to
+ // call IBM Cloud KMS APIs
+ Credentials corev1.LocalObjectReference `json:"credentials"`
+}
+
+// IBMCloudKMSManagedAuthSpec defines metadata around the service to service authentication strategy for the IBM Cloud
+// KMS system (all provider managed).
+type IBMCloudKMSManagedAuthSpec struct {
+}
+
+// AWSKMSSpec defines metadata about the configuration of the AWS KMS Secret Encryption provider
+type AWSKMSSpec struct {
+ // Region contains the AWS region
+ Region string `json:"region"`
+ // ActiveKey defines the active key used to encrypt new secrets
+ ActiveKey AWSKMSKeyEntry `json:"activeKey"`
+ // BackupKey defines the old key during the rotation process so previously created
+ // secrets can continue to be decrypted until they are all re-encrypted with the active key.
+ // +optional
+ BackupKey *AWSKMSKeyEntry `json:"backupKey,omitempty"`
+ // Auth defines metadata about the management of credentials used to interact with AWS KMS
+ Auth AWSKMSAuthSpec `json:"auth"`
+}
+
+// AWSKMSAuthSpec defines metadata about the management of credentials used to interact with AWS KMS
+type AWSKMSAuthSpec struct {
+	// Credentials contains the name of the secret that holds the AWS credentials that can be used
+	// to make the necessary KMS calls. At the key AWSCredentialsFileSecretKey it should contain an
+	// AWS credentials file that can be used to configure AWS SDKs
+ Credentials corev1.LocalObjectReference `json:"credentials"`
+}
+
+// AWSKMSKeyEntry defines metadata to locate the encryption key in AWS
+type AWSKMSKeyEntry struct {
+ // ARN is the Amazon Resource Name for the encryption key
+ // +kubebuilder:validation:Pattern=`^arn:`
+ ARN string `json:"arn"`
+}
+
+// AESCBCSpec defines metadata about the AESCBC secret encryption strategy
+type AESCBCSpec struct {
+ // ActiveKey defines the active key used to encrypt new secrets
+ ActiveKey corev1.LocalObjectReference `json:"activeKey"`
+ // BackupKey defines the old key during the rotation process so previously created
+ // secrets can continue to be decrypted until they are all re-encrypted with the active key.
+ // +optional
+ BackupKey *corev1.LocalObjectReference `json:"backupKey,omitempty"`
+}
+
+// HostedClusterStatus is the latest observed status of a HostedCluster.
+type HostedClusterStatus struct {
+ // Version is the status of the release version applied to the
+ // HostedCluster.
+ // +optional
+ Version *ClusterVersionStatus `json:"version,omitempty"`
+
+ // KubeConfig is a reference to the secret containing the default kubeconfig
+ // for the cluster.
+ // +optional
+ KubeConfig *corev1.LocalObjectReference `json:"kubeconfig,omitempty"`
+
+ // KubeadminPassword is a reference to the secret that contains the initial
+ // kubeadmin user password for the guest cluster.
+ // +optional
+ KubeadminPassword *corev1.LocalObjectReference `json:"kubeadminPassword,omitempty"`
+
+ // IgnitionEndpoint is the endpoint injected in the ign config userdata.
+ // It exposes the config for instances to become kubernetes nodes.
+ // +optional
+ IgnitionEndpoint string `json:"ignitionEndpoint,omitempty"`
+
+ // ControlPlaneEndpoint contains the endpoint information by which
+ // external clients can access the control plane. This is populated
+ // after the infrastructure is ready.
+ // +kubebuilder:validation:Optional
+ ControlPlaneEndpoint APIEndpoint `json:"controlPlaneEndpoint,omitempty"`
+
+ // OAuthCallbackURLTemplate contains a template for the URL to use as a callback
+ // for identity providers. The [identity-provider-name] placeholder must be replaced
+ // with the name of an identity provider defined on the HostedCluster.
+ // This is populated after the infrastructure is ready.
+ // +kubebuilder:validation:Optional
+ OAuthCallbackURLTemplate string `json:"oauthCallbackURLTemplate,omitempty"`
+
+ // Conditions represents the latest available observations of a control
+ // plane's current state.
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// ClusterVersionStatus reports the status of the cluster versioning,
+// including any upgrades that are in progress. The current field will
+// be set to whichever version the cluster is reconciling to, and the
+// conditions array will report whether the update succeeded, is in
+// progress, or is failing.
+// +k8s:deepcopy-gen=true
+type ClusterVersionStatus struct {
+ // desired is the version that the cluster is reconciling towards.
+ // If the cluster is not yet fully initialized desired will be set
+ // with the information available, which may be an image or a tag.
+ Desired configv1.Release `json:"desired"`
+
+ // history contains a list of the most recent versions applied to the cluster.
+ // This value may be empty during cluster startup, and then will be updated
+ // when a new update is being applied. The newest update is first in the
+ // list and it is ordered by recency. Updates in the history have state
+ // Completed if the rollout completed - if an update was failing or halfway
+ // applied the state will be Partial. Only a limited amount of update history
+ // is preserved.
+ //
+ // +optional
+ History []configv1.UpdateHistory `json:"history,omitempty"`
+
+ // observedGeneration reports which version of the spec is being synced.
+ // If this value is not equal to metadata.generation, then the desired
+ // and conditions fields may represent a previous version.
+ ObservedGeneration int64 `json:"observedGeneration"`
+
+ // availableUpdates contains updates recommended for this
+ // cluster. Updates which appear in conditionalUpdates but not in
+ // availableUpdates may expose this cluster to known issues. This list
+ // may be empty if no updates are recommended, if the update service
+ // is unavailable, or if an invalid channel has been specified.
+ // +nullable
+ // +kubebuilder:validation:Required
+ // +required
+ AvailableUpdates []configv1.Release `json:"availableUpdates"`
+
+ // conditionalUpdates contains the list of updates that may be
+ // recommended for this cluster if it meets specific required
+ // conditions. Consumers interested in the set of updates that are
+ // actually recommended for this cluster should use
+ // availableUpdates. This list may be empty if no updates are
+ // recommended, if the update service is unavailable, or if an empty
+ // or invalid channel has been specified.
+ // +listType=atomic
+ // +optional
+ ConditionalUpdates []configv1.ConditionalUpdate `json:"conditionalUpdates,omitempty"`
+}
+
+// ClusterConfiguration specifies configuration for individual OCP components in the
+// cluster, represented as embedded resources that correspond to the openshift
+// configuration API.
+//
+// The API for individual configuration items is at:
+// https://docs.openshift.com/container-platform/4.7/rest_api/config_apis/config-apis-index.html
+type ClusterConfiguration struct {
+ // APIServer holds configuration (like serving certificates, client CA and CORS domains)
+ // shared by all API servers in the system, among them especially kube-apiserver
+ // and openshift-apiserver.
+ // +optional
+ APIServer *configv1.APIServerSpec `json:"apiServer,omitempty"`
+
+ // Authentication specifies cluster-wide settings for authentication (like OAuth and
+ // webhook token authenticators).
+ // +optional
+ Authentication *configv1.AuthenticationSpec `json:"authentication,omitempty"`
+
+ // FeatureGate holds cluster-wide information about feature gates.
+ // +optional
+ FeatureGate *configv1.FeatureGateSpec `json:"featureGate,omitempty"`
+
+ // Image governs policies related to imagestream imports and runtime configuration
+ // for external registries. It allows cluster admins to configure which registries
+ // OpenShift is allowed to import images from, extra CA trust bundles for external
+ // registries, and policies to block or allow registry hostnames.
+ // When exposing OpenShift's image registry to the public, this also lets cluster
+ // admins specify the external hostname.
+ // +optional
+ Image *configv1.ImageSpec `json:"image,omitempty"`
+
+ // Ingress holds cluster-wide information about ingress, including the default ingress domain
+ // used for routes.
+ // +optional
+ Ingress *configv1.IngressSpec `json:"ingress,omitempty"`
+
+ // Network holds cluster-wide information about the network. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc.
+ // Please view network.spec for an explanation on what applies when configuring this resource.
+ // TODO (csrwng): Add validation here to exclude changes that conflict with networking settings in the HostedCluster.Spec.Networking field.
+ // +optional
+ Network *configv1.NetworkSpec `json:"network,omitempty"`
+
+ // OAuth holds cluster-wide information about OAuth.
+ // It is used to configure the integrated OAuth server.
+ // This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth.
+ // +optional
+ OAuth *configv1.OAuthSpec `json:"oauth,omitempty"`
+
+ // Scheduler holds cluster-wide config information to run the Kubernetes Scheduler
+ // and influence its placement decisions. The canonical name for this config is `cluster`.
+ // +optional
+ Scheduler *configv1.SchedulerSpec `json:"scheduler,omitempty"`
+
+ // Proxy holds cluster-wide information on how to configure default proxies for the cluster.
+ // +optional
+ Proxy *configv1.ProxySpec `json:"proxy,omitempty"`
+}
+
+// +genclient
+
+// HostedCluster is the primary representation of a HyperShift cluster and encapsulates
+// the control plane and common data plane configuration. Creating a HostedCluster
+// results in a fully functional OpenShift control plane with no attached nodes.
+// To support workloads (e.g. pods), a HostedCluster may have one or more associated
+// NodePool resources.
+//
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=hostedclusters,shortName=hc;hcs,scope=Namespaced
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".status.version.history[?(@.state==\"Completed\")].version",description="Version"
+// +kubebuilder:printcolumn:name="KubeConfig",type="string",JSONPath=".status.kubeconfig.name",description="KubeConfig Secret"
+// +kubebuilder:printcolumn:name="Progress",type="string",JSONPath=".status.version.history[?(@.state!=\"\")].state",description="Progress"
+// +kubebuilder:printcolumn:name="Available",type="string",JSONPath=".status.conditions[?(@.type==\"Available\")].status",description="Available"
+// +kubebuilder:printcolumn:name="Progressing",type="string",JSONPath=".status.conditions[?(@.type==\"Progressing\")].status",description="Progressing"
+// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.conditions[?(@.type==\"Available\")].message",description="Message"
+type HostedCluster struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec is the desired behavior of the HostedCluster.
+ Spec HostedClusterSpec `json:"spec,omitempty"`
+
+ // Status is the latest observed status of the HostedCluster.
+ Status HostedClusterStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// HostedClusterList contains a list of HostedCluster
+type HostedClusterList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []HostedCluster `json:"items"`
+}
diff --git a/api/v1beta1/nodepool_types.go b/api/v1beta1/nodepool_types.go
new file mode 100644
index 00000000000..274fe56ca8d
--- /dev/null
+++ b/api/v1beta1/nodepool_types.go
@@ -0,0 +1,763 @@
+package v1beta1
+
+import (
+ "fmt"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+const (
+ NodePoolValidGeneratedPayloadConditionType = "ValidGeneratedPayload"
+ NodePoolValidPlatformImageType = "ValidPlatformImage"
+ NodePoolValidHostedClusterConditionType = "ValidHostedCluster"
+ NodePoolValidReleaseImageConditionType = "ValidReleaseImage"
+ NodePoolValidMachineConfigConditionType = "ValidMachineConfig"
+ NodePoolValidTuningConfigConditionType = "ValidTuningConfig"
+ NodePoolUpdateManagementEnabledConditionType = "UpdateManagementEnabled"
+ NodePoolAutoscalingEnabledConditionType = "AutoscalingEnabled"
+ NodePoolReadyConditionType = "Ready"
+ NodePoolReconciliationActiveConditionType = "ReconciliationActive"
+ NodePoolAutorepairEnabledConditionType = "AutorepairEnabled"
+ NodePoolUpdatingVersionConditionType = "UpdatingVersion"
+ NodePoolUpdatingConfigConditionType = "UpdatingConfig"
+ NodePoolAsExpectedConditionReason = "AsExpected"
+ NodePoolValidationFailedConditionReason = "ValidationFailed"
+ NodePoolInplaceUpgradeFailedConditionReason = "InplaceUpgradeFailed"
+ NodePoolNotFoundReason = "NotFound"
+ NodePoolFailedToGetReason = "FailedToGet"
+ // NodePoolLabel is used to label Nodes.
+ NodePoolLabel = "hypershift.openshift.io/nodePool"
+)
+
+// The following are reasons for the IgnitionEndpointAvailable condition.
+const (
+ IgnitionEndpointMissingReason string = "IgnitionEndpointMissing"
+ IgnitionCACertMissingReason string = "IgnitionCACertMissing"
+)
+
+const (
+	// IgnitionServerTokenExpirationTimestampAnnotation holds the time that an ignition token expires and should be
+ // removed from the cluster.
+ IgnitionServerTokenExpirationTimestampAnnotation = "hypershift.openshift.io/ignition-token-expiration-timestamp"
+)
+
+func init() {
+ SchemeBuilder.Register(&NodePool{})
+ SchemeBuilder.Register(&NodePoolList{})
+}
+
+// +genclient
+
+// NodePool is a scalable set of worker nodes attached to a HostedCluster.
+// NodePool machine architectures are uniform within a given pool, and are
+// independent of the control plane’s underlying machine architecture.
+//
+// +kubebuilder:resource:path=nodepools,shortName=np;nps,scope=Namespaced
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas
+// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.clusterName",description="Cluster"
+// +kubebuilder:printcolumn:name="Desired Nodes",type="integer",JSONPath=".spec.replicas",description="Desired Nodes"
+// +kubebuilder:printcolumn:name="Current Nodes",type="integer",JSONPath=".status.replicas",description="Available Nodes"
+// +kubebuilder:printcolumn:name="Autoscaling",type="string",JSONPath=".status.conditions[?(@.type==\"AutoscalingEnabled\")].status",description="Autoscaling Enabled"
+// +kubebuilder:printcolumn:name="Autorepair",type="string",JSONPath=".status.conditions[?(@.type==\"AutorepairEnabled\")].status",description="Node Autorepair Enabled"
+// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".status.version",description="Current version"
+// +kubebuilder:printcolumn:name="UpdatingVersion",type="string",JSONPath=".status.conditions[?(@.type==\"UpdatingVersion\")].status",description="UpdatingVersion in progress"
+// +kubebuilder:printcolumn:name="UpdatingConfig",type="string",JSONPath=".status.conditions[?(@.type==\"UpdatingConfig\")].status",description="UpdatingConfig in progress"
+// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="Message"
+type NodePool struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec is the desired behavior of the NodePool.
+ Spec NodePoolSpec `json:"spec,omitempty"`
+
+ // Status is the latest observed status of the NodePool.
+ Status NodePoolStatus `json:"status,omitempty"`
+}
+
+// NodePoolSpec is the desired behavior of a NodePool.
+type NodePoolSpec struct {
+ // ClusterName is the name of the HostedCluster this NodePool belongs to.
+ //
+ // TODO(dan): Should this be a LocalObjectReference?
+ //
+ // +immutable
+ ClusterName string `json:"clusterName"`
+
+ // Release specifies the OCP release used for the NodePool. This informs the
+ // ignition configuration for machines, as well as other platform specific
+ // machine properties (e.g. an AMI on the AWS platform).
+ Release Release `json:"release"`
+
+ // Platform specifies the underlying infrastructure provider for the NodePool
+ // and is used to configure platform specific behavior.
+ //
+ // +immutable
+ Platform NodePoolPlatform `json:"platform"`
+
+ // Replicas is the desired number of nodes the pool should maintain. If
+ // unset, the default value is 0.
+ //
+ // +optional
+ Replicas *int32 `json:"replicas,omitempty"`
+
+ // Management specifies behavior for managing nodes in the pool, such as
+ // upgrade strategies and auto-repair behaviors.
+ Management NodePoolManagement `json:"management"`
+
+ // Autoscaling specifies auto-scaling behavior for the NodePool.
+ //
+ // +optional
+ AutoScaling *NodePoolAutoScaling `json:"autoScaling,omitempty"`
+
+ // Config is a list of references to ConfigMaps containing serialized
+ // MachineConfig resources to be injected into the ignition configurations of
+ // nodes in the NodePool. The MachineConfig API schema is defined here:
+ //
+ // https://github.com/openshift/machine-config-operator/blob/18963e4f8fe66e8c513ca4b131620760a414997f/pkg/apis/machineconfiguration.openshift.io/v1/types.go#L185
+ //
+ // Each ConfigMap must have a single key named "config" whose value is the
+ // JSON or YAML of a serialized MachineConfig.
+ // +kubebuilder:validation:Optional
+ Config []corev1.LocalObjectReference `json:"config,omitempty"`
+
+ // NodeDrainTimeout is the total amount of time that the controller will spend on draining a node.
+ // The default value is 0, meaning that the node can be drained without any time limitations.
+ // NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`
+ // TODO (alberto): Today changing this field will trigger a recreate rolling update, which kind of defeats
+ // the purpose of the change. In future we plan to propagate this field in-place.
+ // https://github.com/kubernetes-sigs/cluster-api/issues/5880
+ // +optional
+ NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"`
+
+ // PausedUntil is a field that can be used to pause reconciliation on a resource.
+ // Either a date can be provided in RFC3339 format or a boolean. If a date is
+ // provided: reconciliation is paused on the resource until that date. If the boolean true is
+ // provided: reconciliation is paused on the resource until the field is removed.
+ // +optional
+ PausedUntil *string `json:"pausedUntil,omitempty"`
+
+ // TuningConfig is a list of references to ConfigMaps containing serialized
+ // Tuned resources to define the tuning configuration to be applied to
+ // nodes in the NodePool. The Tuned API is defined here:
+ //
+ // https://github.com/openshift/cluster-node-tuning-operator/blob/2c76314fb3cc8f12aef4a0dcd67ddc3677d5b54f/pkg/apis/tuned/v1/tuned_types.go
+ //
+ // Each ConfigMap must have a single key named "tuned" whose value is the
+ // JSON or YAML of a serialized Tuned.
+ // +kubebuilder:validation:Optional
+ TuningConfig []corev1.LocalObjectReference `json:"tuningConfig,omitempty"`
+}
+
+// NodePoolStatus is the latest observed status of a NodePool.
+type NodePoolStatus struct {
+ // Replicas is the latest observed number of nodes in the pool.
+ //
+ // +optional
+ Replicas int32 `json:"replicas"`
+
+ // Version is the semantic version of the latest applied release specified by
+ // the NodePool.
+ //
+ // +kubebuilder:validation:Optional
+ Version string `json:"version,omitempty"`
+
+ // Conditions represents the latest available observations of the node pool's
+ // current state.
+ // +optional
+ Conditions []NodePoolCondition `json:"conditions,omitempty"`
+}
+
+// NodePoolList contains a list of NodePools.
+//
+// +kubebuilder:object:root=true
+type NodePoolList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []NodePool `json:"items"`
+}
+
+// UpgradeType is a type of high-level upgrade behavior for nodes in a NodePool.
+type UpgradeType string
+
+const (
+ // UpgradeTypeReplace is a strategy which replaces nodes using surge node
+ // capacity.
+ UpgradeTypeReplace = UpgradeType("Replace")
+
+ // UpgradeTypeInPlace is a strategy which replaces nodes in-place with no
+ // additional node capacity requirements.
+ UpgradeTypeInPlace = UpgradeType("InPlace")
+)
+
+// UpgradeStrategy is a specific strategy for upgrading nodes in a NodePool.
+type UpgradeStrategy string
+
+const (
+ // UpgradeStrategyRollingUpdate means use a rolling update for nodes.
+ UpgradeStrategyRollingUpdate = UpgradeStrategy("RollingUpdate")
+
+ // UpgradeStrategyOnDelete replaces old nodes when the deletion of the
+ // associated node instances are completed.
+ UpgradeStrategyOnDelete = UpgradeStrategy("OnDelete")
+)
+
+// ReplaceUpgrade specifies upgrade behavior that replaces existing nodes
+// according to a given strategy.
+type ReplaceUpgrade struct {
+ // Strategy is the node replacement strategy for nodes in the pool.
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Enum=RollingUpdate;OnDelete
+ Strategy UpgradeStrategy `json:"strategy"`
+
+ // RollingUpdate specifies a rolling update strategy which upgrades nodes by
+ // creating new nodes and deleting the old ones.
+ //
+ // +kubebuilder:validation:Optional
+ RollingUpdate *RollingUpdate `json:"rollingUpdate,omitempty"`
+}
+
+// RollingUpdate specifies a rolling update strategy which upgrades nodes by
+// creating new nodes and deleting the old ones.
+type RollingUpdate struct {
+ // MaxUnavailable is the maximum number of nodes that can be unavailable
+ // during the update.
+ //
+ // Value can be an absolute number (ex: 5) or a percentage of desired nodes
+ // (ex: 10%).
+ //
+ // Absolute number is calculated from percentage by rounding down.
+ //
+ // This can not be 0 if MaxSurge is 0.
+ //
+ // Defaults to 0.
+ //
+ // Example: when this is set to 30%, old nodes can be deleted down to 70% of
+ // desired nodes immediately when the rolling update starts. Once new nodes
+	// are ready, more old nodes can be deleted, followed by provisioning new nodes,
+ // ensuring that the total number of nodes available at all times during the
+ // update is at least 70% of desired nodes.
+ //
+ // +optional
+ MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
+
+ // MaxSurge is the maximum number of nodes that can be provisioned above the
+ // desired number of nodes.
+ //
+ // Value can be an absolute number (ex: 5) or a percentage of desired nodes
+ // (ex: 10%).
+ //
+ // Absolute number is calculated from percentage by rounding up.
+ //
+ // This can not be 0 if MaxUnavailable is 0.
+ //
+ // Defaults to 1.
+ //
+ // Example: when this is set to 30%, new nodes can be provisioned immediately
+ // when the rolling update starts, such that the total number of old and new
+ // nodes do not exceed 130% of desired nodes. Once old nodes have been
+ // deleted, new nodes can be provisioned, ensuring that total number of nodes
+ // running at any time during the update is at most 130% of desired nodes.
+ //
+ // +optional
+ MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"`
+}
+
+// InPlaceUpgrade specifies an upgrade strategy which upgrades nodes in-place
+// without any new nodes being created or any old nodes being deleted.
+type InPlaceUpgrade struct {
+ // MaxUnavailable is the maximum number of nodes that can be unavailable
+ // during the update.
+ //
+ // Value can be an absolute number (ex: 5) or a percentage of desired nodes
+ // (ex: 10%).
+ //
+ // Absolute number is calculated from percentage by rounding down.
+ //
+ // Defaults to 1.
+ //
+ // Example: when this is set to 30%, a max of 30% of the nodes can be made
+ // unschedulable/unavailable immediately when the update starts. Once a set
+ // of nodes is updated, more nodes can be made unschedulable for update,
+ // ensuring that the total number of nodes schedulable at all times during
+ // the update is at least 70% of desired nodes.
+ //
+ // +optional
+ MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
+}
+
+// NodePoolManagement specifies behavior for managing nodes in a NodePool, such
+// as upgrade strategies and auto-repair behaviors.
+type NodePoolManagement struct {
+ // UpgradeType specifies the type of strategy for handling upgrades.
+ //
+ // +kubebuilder:validation:Enum=Replace;InPlace
+ UpgradeType UpgradeType `json:"upgradeType"`
+
+ // Replace is the configuration for rolling upgrades.
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default={strategy: "RollingUpdate", rollingUpdate: {maxSurge: 1, maxUnavailable: 0 }}
+ Replace *ReplaceUpgrade `json:"replace,omitempty"`
+
+ // InPlace is the configuration for in-place upgrades.
+ //
+ // +kubebuilder:validation:Optional
+ InPlace *InPlaceUpgrade `json:"inPlace,omitempty"`
+
+ // AutoRepair specifies whether health checks should be enabled for machines
+ // in the NodePool. The default is false.
+ //
+ // +optional
+ AutoRepair bool `json:"autoRepair"`
+}
+
+// NodePoolAutoScaling specifies auto-scaling behavior for a NodePool.
+type NodePoolAutoScaling struct {
+ // Min is the minimum number of nodes to maintain in the pool. Must be >= 1.
+ //
+ // +kubebuilder:validation:Minimum=1
+ Min int32 `json:"min"`
+
+ // Max is the maximum number of nodes allowed in the pool. Must be >= 1.
+ //
+ // +kubebuilder:validation:Minimum=1
+ Max int32 `json:"max"`
+}
+
+// NodePoolPlatform specifies the underlying infrastructure provider for the
+// NodePool and is used to configure platform specific behavior.
+type NodePoolPlatform struct {
+ // Type specifies the platform name.
+ //
+ // +unionDiscriminator
+ // +immutable
+ Type PlatformType `json:"type"`
+
+ // AWS specifies the configuration used when operating on AWS.
+ //
+ // +optional
+ AWS *AWSNodePoolPlatform `json:"aws,omitempty"`
+
+ // IBMCloud defines IBMCloud specific settings for components
+ IBMCloud *IBMCloudPlatformSpec `json:"ibmcloud,omitempty"`
+
+ // Kubevirt specifies the configuration used when operating on KubeVirt platform.
+ //
+ // +optional
+ Kubevirt *KubevirtNodePoolPlatform `json:"kubevirt,omitempty"`
+
+ // Agent specifies the configuration used when using Agent platform.
+ //
+ // +optional
+ Agent *AgentNodePoolPlatform `json:"agent,omitempty"`
+
+ Azure *AzureNodePoolPlatform `json:"azure,omitempty"`
+
+ // PowerVS specifies the configuration used when using IBMCloud PowerVS platform.
+ //
+ // +optional
+ PowerVS *PowerVSNodePoolPlatform `json:"powervs,omitempty"`
+}
+
+// PowerVSNodePoolProcType defines processor type to be used for PowerVSNodePoolPlatform
+type PowerVSNodePoolProcType string
+
+func (p *PowerVSNodePoolProcType) String() string {
+ return string(*p)
+}
+
+func (p *PowerVSNodePoolProcType) Set(s string) error {
+ switch s {
+ case string(PowerVSNodePoolSharedProcType), string(PowerVSNodePoolCappedProcType), string(PowerVSNodePoolDedicatedProcType):
+ *p = PowerVSNodePoolProcType(s)
+ return nil
+ default:
+ return fmt.Errorf("unknown processor type used %s", s)
+ }
+}
+
+func (p *PowerVSNodePoolProcType) Type() string {
+ return "PowerVSNodePoolProcType"
+}
+
+const (
+ // PowerVSNodePoolDedicatedProcType defines dedicated processor type
+ PowerVSNodePoolDedicatedProcType = PowerVSNodePoolProcType("dedicated")
+
+ // PowerVSNodePoolSharedProcType defines shared processor type
+ PowerVSNodePoolSharedProcType = PowerVSNodePoolProcType("shared")
+
+ // PowerVSNodePoolCappedProcType defines capped processor type
+ PowerVSNodePoolCappedProcType = PowerVSNodePoolProcType("capped")
+)
+
+// PowerVSNodePoolStorageType defines storage type to be used for PowerVSNodePoolPlatform
+type PowerVSNodePoolStorageType string
+
+// PowerVSNodePoolImageDeletePolicy defines image delete policy to be used for PowerVSNodePoolPlatform
+type PowerVSNodePoolImageDeletePolicy string
+
+// PowerVSNodePoolPlatform specifies the configuration of a NodePool when operating
+// on IBMCloud PowerVS platform.
+type PowerVSNodePoolPlatform struct {
+ // SystemType is the System type used to host the instance.
+ // systemType determines the number of cores and memory that is available.
+ // Few of the supported SystemTypes are s922,e880,e980.
+ // e880 systemType available only in Dallas Datacenters.
+ // e980 systemType available in Datacenters except Dallas and Washington.
+ // When omitted, this means that the user has no opinion and the platform is left to choose a
+ // reasonable default. The current default is s922 which is generally available.
+ //
+ // +optional
+ // +kubebuilder:default=s922
+ SystemType string `json:"systemType,omitempty"`
+
+ // ProcessorType is the VM instance processor type.
+ // It must be set to one of the following values: Dedicated, Capped or Shared.
+ //
+ // Dedicated: resources are allocated for a specific client, The hypervisor makes a 1:1 binding of a partition’s processor to a physical processor core.
+ // Shared: Shared among other clients.
+ // Capped: Shared, but resources do not expand beyond those that are requested, the amount of CPU time is Capped to the value specified for the entitlement.
+ //
+ // if the processorType is selected as Dedicated, then Processors value cannot be fractional.
+ // When omitted, this means that the user has no opinion and the platform is left to choose a
+ // reasonable default. The current default is Shared.
+ //
+ // +kubebuilder:default=shared
+ // +kubebuilder:validation:Enum=dedicated;shared;capped
+ // +optional
+ ProcessorType PowerVSNodePoolProcType `json:"processorType,omitempty"`
+
+ // Processors is the number of virtual processors in a virtual machine.
+ // when the processorType is selected as Dedicated the processors value cannot be fractional.
+ // maximum value for the Processors depends on the selected SystemType.
+ // when SystemType is set to e880 or e980 maximum Processors value is 143.
+ // when SystemType is set to s922 maximum Processors value is 15.
+ // minimum value for Processors depends on the selected ProcessorType.
+ // when ProcessorType is set as Shared or Capped, The minimum processors is 0.5.
+ // when ProcessorType is set as Dedicated, The minimum processors is 1.
+ // When omitted, this means that the user has no opinion and the platform is left to choose a
+ // reasonable default. The default is set based on the selected ProcessorType.
+ // when ProcessorType selected as Dedicated, the default is set to 1.
+ // when ProcessorType selected as Shared or Capped, the default is set to 0.5.
+ //
+ // +optional
+ // +kubebuilder:default="0.5"
+ Processors intstr.IntOrString `json:"processors,omitempty"`
+
+ // MemoryGiB is the size of a virtual machine's memory, in GiB.
+ // maximum value for the MemoryGiB depends on the selected SystemType.
+ // when SystemType is set to e880 maximum MemoryGiB value is 7463 GiB.
+ // when SystemType is set to e980 maximum MemoryGiB value is 15307 GiB.
+ // when SystemType is set to s922 maximum MemoryGiB value is 942 GiB.
+ // The minimum memory is 32 GiB.
+ //
+ // When omitted, this means the user has no opinion and the platform is left to choose a reasonable
+ // default. The current default is 32.
+ //
+ // +optional
+ // +kubebuilder:default=32
+ MemoryGiB int32 `json:"memoryGiB,omitempty"`
+
+ // Image used for deploying the nodes. If unspecified, the default
+ // is chosen based on the NodePool release payload image.
+ //
+ // +optional
+ Image *PowerVSResourceReference `json:"image,omitempty"`
+
+ // StorageType for the image and nodes, this will be ignored if Image is specified.
+ // The storage tiers in PowerVS are based on I/O operations per second (IOPS).
+ // It means that the performance of your storage volumes is limited to the maximum number of IOPS based on volume size and storage tier.
+ // Although, the exact numbers might change over time, the Tier 3 storage is currently set to 3 IOPS/GB, and the Tier 1 storage is currently set to 10 IOPS/GB.
+ //
+ // The default is tier1
+ //
+ // +kubebuilder:default=tier1
+ // +kubebuilder:validation:Enum=tier1;tier3
+ // +optional
+ StorageType PowerVSNodePoolStorageType `json:"storageType,omitempty"`
+
+ // ImageDeletePolicy is policy for the image deletion.
+ //
+ // delete: delete the image from the infrastructure.
+	// retain: delete the image from OpenShift but retain in the infrastructure.
+ //
+ // The default is delete
+ //
+ // +kubebuilder:default=delete
+ // +kubebuilder:validation:Enum=delete;retain
+ // +optional
+ ImageDeletePolicy PowerVSNodePoolImageDeletePolicy `json:"imageDeletePolicy,omitempty"`
+}
+
+// KubevirtCompute contains values associated with the virtual compute hardware requested for the VM.
+type KubevirtCompute struct {
+ // Memory represents how much guest memory the VM should have
+ //
+ // +optional
+ // +kubebuilder:default="4Gi"
+ Memory *resource.Quantity `json:"memory"`
+
+ // Cores represents how many cores the guest VM should have
+ //
+ // +optional
+ // +kubebuilder:default=2
+ Cores *uint32 `json:"cores"`
+}
+
+// +kubebuilder:validation:Enum=ReadWriteOnce;ReadWriteMany;ReadOnly;ReadWriteOncePod
+type PersistentVolumeAccessMode corev1.PersistentVolumeAccessMode
+
+// KubevirtPersistentVolume contains the values involved with provisioning persistent storage for a KubeVirt VM.
+type KubevirtPersistentVolume struct {
+ // Size is the size of the persistent storage volume
+ //
+ // +optional
+ // +kubebuilder:default="16Gi"
+ Size *resource.Quantity `json:"size"`
+ // StorageClass is the storageClass used for the underlying PVC that hosts the volume
+ //
+ // +optional
+ StorageClass *string `json:"storageClass,omitempty"`
+ // AccessModes is an array that contains the desired Access Modes the root volume should have.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes
+ //
+ // +optional
+ AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"`
+}
+
+// KubevirtRootVolume represents the volume that the rhcos disk will be stored and run from.
+type KubevirtRootVolume struct {
+ // Image represents what rhcos image to use for the node pool
+ //
+ // +optional
+ Image *KubevirtDiskImage `json:"diskImage,omitempty"`
+
+ // KubevirtVolume represents of type of storage to run the image on
+ KubevirtVolume `json:",inline"`
+}
+
+// KubevirtVolumeType is a specific supported KubeVirt volumes
+//
+// +kubebuilder:validation:Enum=Persistent
+type KubevirtVolumeType string
+
+const (
+ // KubevirtVolumeTypePersistent represents persistent volume for kubevirt VMs
+ KubevirtVolumeTypePersistent KubevirtVolumeType = "Persistent"
+)
+
+// KubevirtVolume represents what kind of storage to use for a KubeVirt VM volume
+type KubevirtVolume struct {
+ // Type represents the type of storage to associate with the kubevirt VMs.
+ //
+ // +optional
+ // +unionDiscriminator
+ // +kubebuilder:default=Persistent
+ Type KubevirtVolumeType `json:"type"`
+
+ // Persistent volume type means the VM's storage is backed by a PVC
+ // VMs that use persistent volumes can survive disruption events like restart and eviction
+ // This is the default type used when no storage type is defined.
+ //
+ // +optional
+ Persistent *KubevirtPersistentVolume `json:"persistent,omitempty"`
+}
+
+// KubevirtDiskImage contains values representing where the rhcos image is located
+type KubevirtDiskImage struct {
+ // ContainerDiskImage is a string representing the container image that holds the root disk
+ //
+ // +optional
+ ContainerDiskImage *string `json:"containerDiskImage,omitempty"`
+}
+
+// KubevirtNodePoolPlatform specifies the configuration of a NodePool when operating
+// on KubeVirt platform.
+type KubevirtNodePoolPlatform struct {
+ // RootVolume represents values associated with the VM volume that will host rhcos
+ RootVolume *KubevirtRootVolume `json:"rootVolume"`
+
+ // Compute contains values representing the virtual hardware requested for the VM
+ //
+ // +optional
+ // +kubebuilder:default={memory: "4Gi", cores: 2}
+ Compute *KubevirtCompute `json:"compute"`
+}
+
+// AWSNodePoolPlatform specifies the configuration of a NodePool when operating
+// on AWS.
+type AWSNodePoolPlatform struct {
+ // InstanceType is an ec2 instance type for node instances (e.g. m5.large).
+ InstanceType string `json:"instanceType"`
+
+ // InstanceProfile is the AWS EC2 instance profile, which is a container for an IAM role that the EC2 instance uses.
+ InstanceProfile string `json:"instanceProfile,omitempty"`
+
+ // Subnet is the subnet to use for node instances.
+ //
+ // +optional
+ Subnet *AWSResourceReference `json:"subnet,omitempty"`
+
+ // AMI is the image id to use for node instances. If unspecified, the default
+ // is chosen based on the NodePool release payload image.
+ //
+ // +optional
+ AMI string `json:"ami,omitempty"`
+
+ // SecurityGroups is an optional set of security groups to associate with node
+ // instances.
+ //
+ // +optional
+ SecurityGroups []AWSResourceReference `json:"securityGroups,omitempty"`
+
+ // RootVolume specifies configuration for the root volume of node instances.
+ //
+ // +optional
+ RootVolume *Volume `json:"rootVolume,omitempty"`
+
+ // ResourceTags is an optional list of additional tags to apply to AWS node
+ // instances.
+ //
+ // These will be merged with HostedCluster scoped tags, and HostedCluster tags
+ // take precedence in case of conflicts.
+ //
+ // See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for
+ // information on tagging AWS resources. AWS supports a maximum of 50 tags per
+ // resource. OpenShift reserves 25 tags for its use, leaving 25 tags available
+ // for the user.
+ //
+ // +kubebuilder:validation:MaxItems=25
+ // +optional
+ ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"`
+}
+
+// AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters.
+// Only one of ID, ARN or Filters may be specified. Specifying more than one will result in
+// a validation error.
+type AWSResourceReference struct {
+ // ID of resource
+ // +optional
+ ID *string `json:"id,omitempty"`
+
+ // ARN of resource
+ // +optional
+ ARN *string `json:"arn,omitempty"`
+
+ // Filters is a set of key/value pairs used to identify a resource
+ // They are applied according to the rules defined by the AWS API:
+ // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
+ // +optional
+ Filters []Filter `json:"filters,omitempty"`
+}
+
+// Filter is a filter used to identify an AWS resource
+type Filter struct {
+ // Name of the filter. Filter names are case-sensitive.
+ Name string `json:"name"`
+
+ // Values includes one or more filter values. Filter values are case-sensitive.
+ Values []string `json:"values"`
+}
+
+// Volume specifies the configuration options for node instance storage devices.
+type Volume struct {
+ // Size specifies size (in Gi) of the storage device.
+ //
+ // Must be greater than the image snapshot size or 8 (whichever is greater).
+ //
+ // +kubebuilder:validation:Minimum=8
+ Size int64 `json:"size"`
+
+ // Type is the type of the volume.
+ Type string `json:"type"`
+
+ // IOPS is the number of IOPS requested for the disk. This is only valid
+ // for type io1.
+ //
+ // +optional
+ IOPS int64 `json:"iops,omitempty"`
+}
+
+// AgentNodePoolPlatform specifies the configuration of a NodePool when operating
+// on the Agent platform.
+type AgentNodePoolPlatform struct {
+ // AgentLabelSelector contains labels that must be set on an Agent in order to
+ // be selected for a Machine.
+ // +optional
+ AgentLabelSelector *metav1.LabelSelector `json:"agentLabelSelector,omitempty"`
+}
+
+type AzureNodePoolPlatform struct {
+ VMSize string `json:"vmsize"`
+ // ImageID is the id of the image to boot from. If unset, the default image at the location below will be used:
+ // subscription/$subscriptionID/resourceGroups/$resourceGroupName/providers/Microsoft.Compute/images/rhcos.x86_64.vhd
+ // +optional
+ ImageID string `json:"imageID,omitempty"`
+ // +kubebuilder:default:=120
+ // +kubebuilder:validation:Minimum=16
+ // +optional
+ DiskSizeGB int32 `json:"diskSizeGB,omitempty"`
+ // DiskStorageAccountType is the disk storage account type to use. Valid values are:
+ // * Standard_LRS: HDD
+ // * StandardSSD_LRS: Standard SSD
+	// * Premium_LRS: Premium SSD
+	// * UltraSSD_LRS: Ultra SSD
+ //
+ // Defaults to Premium_LRS. For more details, visit the Azure documentation:
+ // https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types#disk-type-comparison
+ //
+ // +kubebuilder:default:=Premium_LRS
+ // +kubebuilder:validation:Enum=Standard_LRS;StandardSSD_LRS;Premium_LRS;UltraSSD_LRS
+ // +optional
+ DiskStorageAccountType string `json:"diskStorageAccountType,omitempty"`
+ // AvailabilityZone of the nodepool. Must not be specified for clusters
+ // in a location that does not support AvailabilityZone.
+ // +optional
+ AvailabilityZone string `json:"availabilityZone,omitempty"`
+}
+
+// NodePoolCondition defines an observation of NodePool resource operational state.
+// We define our own condition type since metav1.Condition has validation
+// for Reason that might be broken by what we bubble up from CAPI.
+type NodePoolCondition struct {
+ // Type of condition in CamelCase or in foo.example.com/CamelCase.
+ // Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ // can be useful (see .node.status.conditions), the ability to deconflict is important.
+ Type string `json:"type"`
+
+ // Status of the condition, one of True, False, Unknown.
+ Status corev1.ConditionStatus `json:"status"`
+
+ // Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ // understand the current situation and act accordingly.
+ // The Severity field MUST be set only when Status=False.
+ // +optional
+ Severity string `json:"severity,omitempty"`
+
+ // Last time the condition transitioned from one status to another.
+ // This should be when the underlying condition changed. If that is not known, then using the time when
+ // the API field changed is acceptable.
+ LastTransitionTime metav1.Time `json:"lastTransitionTime"`
+
+ // The reason for the condition's last transition in CamelCase.
+ // The specific API may choose whether or not this field is considered a guaranteed API.
+ // This field may not be empty.
+ // +optional
+ Reason string `json:"reason,omitempty"`
+
+ // A human readable message indicating details about the transition.
+ // This field may be empty.
+ // +optional
+ Message string `json:"message,omitempty"`
+
+ // +kubebuilder:validation:Minimum=0
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+}
diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 00000000000..9f0a7f2bd2d
--- /dev/null
+++ b/api/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,2114 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AESCBCSpec) DeepCopyInto(out *AESCBCSpec) {
+ *out = *in
+ out.ActiveKey = in.ActiveKey
+ if in.BackupKey != nil {
+ in, out := &in.BackupKey, &out.BackupKey
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AESCBCSpec.
+func (in *AESCBCSpec) DeepCopy() *AESCBCSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AESCBCSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIEndpoint) DeepCopyInto(out *APIEndpoint) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIEndpoint.
+func (in *APIEndpoint) DeepCopy() *APIEndpoint {
+ if in == nil {
+ return nil
+ }
+ out := new(APIEndpoint)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServerNetworking) DeepCopyInto(out *APIServerNetworking) {
+ *out = *in
+ if in.AdvertiseAddress != nil {
+ in, out := &in.AdvertiseAddress, &out.AdvertiseAddress
+ *out = new(string)
+ **out = **in
+ }
+ if in.Port != nil {
+ in, out := &in.Port, &out.Port
+ *out = new(int32)
+ **out = **in
+ }
+ if in.AllowedCIDRBlocks != nil {
+ in, out := &in.AllowedCIDRBlocks, &out.AllowedCIDRBlocks
+ *out = make([]CIDRBlock, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerNetworking.
+func (in *APIServerNetworking) DeepCopy() *APIServerNetworking {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServerNetworking)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSCloudProviderConfig) DeepCopyInto(out *AWSCloudProviderConfig) {
+ *out = *in
+ if in.Subnet != nil {
+ in, out := &in.Subnet, &out.Subnet
+ *out = new(AWSResourceReference)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSCloudProviderConfig.
+func (in *AWSCloudProviderConfig) DeepCopy() *AWSCloudProviderConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSCloudProviderConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSEndpointService) DeepCopyInto(out *AWSEndpointService) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSEndpointService.
+func (in *AWSEndpointService) DeepCopy() *AWSEndpointService {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSEndpointService)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AWSEndpointService) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSEndpointServiceList) DeepCopyInto(out *AWSEndpointServiceList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]AWSEndpointService, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSEndpointServiceList.
+func (in *AWSEndpointServiceList) DeepCopy() *AWSEndpointServiceList {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSEndpointServiceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AWSEndpointServiceList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSEndpointServiceSpec) DeepCopyInto(out *AWSEndpointServiceSpec) {
+ *out = *in
+ if in.SubnetIDs != nil {
+ in, out := &in.SubnetIDs, &out.SubnetIDs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ResourceTags != nil {
+ in, out := &in.ResourceTags, &out.ResourceTags
+ *out = make([]AWSResourceTag, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSEndpointServiceSpec.
+func (in *AWSEndpointServiceSpec) DeepCopy() *AWSEndpointServiceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSEndpointServiceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSEndpointServiceStatus) DeepCopyInto(out *AWSEndpointServiceStatus) {
+ *out = *in
+ if in.DNSNames != nil {
+ in, out := &in.DNSNames, &out.DNSNames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSEndpointServiceStatus.
+func (in *AWSEndpointServiceStatus) DeepCopy() *AWSEndpointServiceStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSEndpointServiceStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSKMSAuthSpec) DeepCopyInto(out *AWSKMSAuthSpec) {
+ *out = *in
+ out.Credentials = in.Credentials
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSKMSAuthSpec.
+func (in *AWSKMSAuthSpec) DeepCopy() *AWSKMSAuthSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSKMSAuthSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSKMSKeyEntry) DeepCopyInto(out *AWSKMSKeyEntry) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSKMSKeyEntry.
+func (in *AWSKMSKeyEntry) DeepCopy() *AWSKMSKeyEntry {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSKMSKeyEntry)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSKMSSpec) DeepCopyInto(out *AWSKMSSpec) {
+ *out = *in
+ out.ActiveKey = in.ActiveKey
+ if in.BackupKey != nil {
+ in, out := &in.BackupKey, &out.BackupKey
+ *out = new(AWSKMSKeyEntry)
+ **out = **in
+ }
+ out.Auth = in.Auth
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSKMSSpec.
+func (in *AWSKMSSpec) DeepCopy() *AWSKMSSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSKMSSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSNodePoolPlatform) DeepCopyInto(out *AWSNodePoolPlatform) {
+ *out = *in
+ if in.Subnet != nil {
+ in, out := &in.Subnet, &out.Subnet
+ *out = new(AWSResourceReference)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SecurityGroups != nil {
+ in, out := &in.SecurityGroups, &out.SecurityGroups
+ *out = make([]AWSResourceReference, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.RootVolume != nil {
+ in, out := &in.RootVolume, &out.RootVolume
+ *out = new(Volume)
+ **out = **in
+ }
+ if in.ResourceTags != nil {
+ in, out := &in.ResourceTags, &out.ResourceTags
+ *out = make([]AWSResourceTag, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSNodePoolPlatform.
+func (in *AWSNodePoolPlatform) DeepCopy() *AWSNodePoolPlatform {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSNodePoolPlatform)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSPlatformSpec) DeepCopyInto(out *AWSPlatformSpec) {
+ *out = *in
+ if in.CloudProviderConfig != nil {
+ in, out := &in.CloudProviderConfig, &out.CloudProviderConfig
+ *out = new(AWSCloudProviderConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ServiceEndpoints != nil {
+ in, out := &in.ServiceEndpoints, &out.ServiceEndpoints
+ *out = make([]AWSServiceEndpoint, len(*in))
+ copy(*out, *in)
+ }
+ out.RolesRef = in.RolesRef
+ if in.ResourceTags != nil {
+ in, out := &in.ResourceTags, &out.ResourceTags
+ *out = make([]AWSResourceTag, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformSpec.
+func (in *AWSPlatformSpec) DeepCopy() *AWSPlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSPlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSResourceReference) DeepCopyInto(out *AWSResourceReference) {
+ *out = *in
+ if in.ID != nil {
+ in, out := &in.ID, &out.ID
+ *out = new(string)
+ **out = **in
+ }
+ if in.ARN != nil {
+ in, out := &in.ARN, &out.ARN
+ *out = new(string)
+ **out = **in
+ }
+ if in.Filters != nil {
+ in, out := &in.Filters, &out.Filters
+ *out = make([]Filter, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceReference.
+func (in *AWSResourceReference) DeepCopy() *AWSResourceReference {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSResourceReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSResourceTag) DeepCopyInto(out *AWSResourceTag) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceTag.
+func (in *AWSResourceTag) DeepCopy() *AWSResourceTag {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSResourceTag)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSRoleCredentials) DeepCopyInto(out *AWSRoleCredentials) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSRoleCredentials.
+func (in *AWSRoleCredentials) DeepCopy() *AWSRoleCredentials {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSRoleCredentials)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSRolesRef) DeepCopyInto(out *AWSRolesRef) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSRolesRef.
+func (in *AWSRolesRef) DeepCopy() *AWSRolesRef {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSRolesRef)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSServiceEndpoint) DeepCopyInto(out *AWSServiceEndpoint) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSServiceEndpoint.
+func (in *AWSServiceEndpoint) DeepCopy() *AWSServiceEndpoint {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSServiceEndpoint)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AgentNodePoolPlatform) DeepCopyInto(out *AgentNodePoolPlatform) {
+ *out = *in
+ if in.AgentLabelSelector != nil {
+ in, out := &in.AgentLabelSelector, &out.AgentLabelSelector
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentNodePoolPlatform.
+func (in *AgentNodePoolPlatform) DeepCopy() *AgentNodePoolPlatform {
+ if in == nil {
+ return nil
+ }
+ out := new(AgentNodePoolPlatform)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AgentPlatformSpec) DeepCopyInto(out *AgentPlatformSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentPlatformSpec.
+func (in *AgentPlatformSpec) DeepCopy() *AgentPlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AgentPlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureNodePoolPlatform) DeepCopyInto(out *AzureNodePoolPlatform) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureNodePoolPlatform.
+func (in *AzureNodePoolPlatform) DeepCopy() *AzureNodePoolPlatform {
+ if in == nil {
+ return nil
+ }
+ out := new(AzureNodePoolPlatform)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzurePlatformSpec) DeepCopyInto(out *AzurePlatformSpec) {
+ *out = *in
+ out.Credentials = in.Credentials
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePlatformSpec.
+func (in *AzurePlatformSpec) DeepCopy() *AzurePlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AzurePlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterAutoscaling) DeepCopyInto(out *ClusterAutoscaling) {
+ *out = *in
+ if in.MaxNodesTotal != nil {
+ in, out := &in.MaxNodesTotal, &out.MaxNodesTotal
+ *out = new(int32)
+ **out = **in
+ }
+ if in.MaxPodGracePeriod != nil {
+ in, out := &in.MaxPodGracePeriod, &out.MaxPodGracePeriod
+ *out = new(int32)
+ **out = **in
+ }
+ if in.PodPriorityThreshold != nil {
+ in, out := &in.PodPriorityThreshold, &out.PodPriorityThreshold
+ *out = new(int32)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAutoscaling.
+func (in *ClusterAutoscaling) DeepCopy() *ClusterAutoscaling {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterAutoscaling)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterConfiguration) DeepCopyInto(out *ClusterConfiguration) {
+ *out = *in
+ if in.APIServer != nil {
+ in, out := &in.APIServer, &out.APIServer
+ *out = new(configv1.APIServerSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Authentication != nil {
+ in, out := &in.Authentication, &out.Authentication
+ *out = new(configv1.AuthenticationSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.FeatureGate != nil {
+ in, out := &in.FeatureGate, &out.FeatureGate
+ *out = new(configv1.FeatureGateSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Image != nil {
+ in, out := &in.Image, &out.Image
+ *out = new(configv1.ImageSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Ingress != nil {
+ in, out := &in.Ingress, &out.Ingress
+ *out = new(configv1.IngressSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Network != nil {
+ in, out := &in.Network, &out.Network
+ *out = new(configv1.NetworkSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.OAuth != nil {
+ in, out := &in.OAuth, &out.OAuth
+ *out = new(configv1.OAuthSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Scheduler != nil {
+ in, out := &in.Scheduler, &out.Scheduler
+ *out = new(configv1.SchedulerSpec)
+ **out = **in
+ }
+ if in.Proxy != nil {
+ in, out := &in.Proxy, &out.Proxy
+ *out = new(configv1.ProxySpec)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfiguration.
+func (in *ClusterConfiguration) DeepCopy() *ClusterConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) {
+ *out = *in
+ in.CIDR.DeepCopyInto(&out.CIDR)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry.
+func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterNetworkEntry)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterNetworking) DeepCopyInto(out *ClusterNetworking) {
+ *out = *in
+ if in.MachineNetwork != nil {
+ in, out := &in.MachineNetwork, &out.MachineNetwork
+ *out = make([]MachineNetworkEntry, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ClusterNetwork != nil {
+ in, out := &in.ClusterNetwork, &out.ClusterNetwork
+ *out = make([]ClusterNetworkEntry, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ServiceNetwork != nil {
+ in, out := &in.ServiceNetwork, &out.ServiceNetwork
+ *out = make([]ServiceNetworkEntry, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.APIServer != nil {
+ in, out := &in.APIServer, &out.APIServer
+ *out = new(APIServerNetworking)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworking.
+func (in *ClusterNetworking) DeepCopy() *ClusterNetworking {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterNetworking)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterVersionStatus) DeepCopyInto(out *ClusterVersionStatus) {
+ *out = *in
+ in.Desired.DeepCopyInto(&out.Desired)
+ if in.History != nil {
+ in, out := &in.History, &out.History
+ *out = make([]configv1.UpdateHistory, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.AvailableUpdates != nil {
+ in, out := &in.AvailableUpdates, &out.AvailableUpdates
+ *out = make([]configv1.Release, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ConditionalUpdates != nil {
+ in, out := &in.ConditionalUpdates, &out.ConditionalUpdates
+ *out = make([]configv1.ConditionalUpdate, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionStatus.
+func (in *ClusterVersionStatus) DeepCopy() *ClusterVersionStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterVersionStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSSpec) DeepCopyInto(out *DNSSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSpec.
+func (in *DNSSpec) DeepCopy() *DNSSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdSpec) DeepCopyInto(out *EtcdSpec) {
+ *out = *in
+ if in.Managed != nil {
+ in, out := &in.Managed, &out.Managed
+ *out = new(ManagedEtcdSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Unmanaged != nil {
+ in, out := &in.Unmanaged, &out.Unmanaged
+ *out = new(UnmanagedEtcdSpec)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdSpec.
+func (in *EtcdSpec) DeepCopy() *EtcdSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(EtcdSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdTLSConfig) DeepCopyInto(out *EtcdTLSConfig) {
+ *out = *in
+ out.ClientSecret = in.ClientSecret
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdTLSConfig.
+func (in *EtcdTLSConfig) DeepCopy() *EtcdTLSConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(EtcdTLSConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Filter) DeepCopyInto(out *Filter) {
+ *out = *in
+ if in.Values != nil {
+ in, out := &in.Values, &out.Values
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter.
+func (in *Filter) DeepCopy() *Filter {
+ if in == nil {
+ return nil
+ }
+ out := new(Filter)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostedCluster) DeepCopyInto(out *HostedCluster) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedCluster.
+func (in *HostedCluster) DeepCopy() *HostedCluster {
+ if in == nil {
+ return nil
+ }
+ out := new(HostedCluster)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HostedCluster) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostedClusterList) DeepCopyInto(out *HostedClusterList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]HostedCluster, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedClusterList.
+func (in *HostedClusterList) DeepCopy() *HostedClusterList {
+ if in == nil {
+ return nil
+ }
+ out := new(HostedClusterList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HostedClusterList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostedClusterSpec) DeepCopyInto(out *HostedClusterSpec) {
+ *out = *in
+ out.Release = in.Release
+ in.Platform.DeepCopyInto(&out.Platform)
+ out.DNS = in.DNS
+ in.Networking.DeepCopyInto(&out.Networking)
+ in.Autoscaling.DeepCopyInto(&out.Autoscaling)
+ in.Etcd.DeepCopyInto(&out.Etcd)
+ if in.Services != nil {
+ in, out := &in.Services, &out.Services
+ *out = make([]ServicePublishingStrategyMapping, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ out.PullSecret = in.PullSecret
+ out.SSHKey = in.SSHKey
+ if in.ServiceAccountSigningKey != nil {
+ in, out := &in.ServiceAccountSigningKey, &out.ServiceAccountSigningKey
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+ if in.Configuration != nil {
+ in, out := &in.Configuration, &out.Configuration
+ *out = new(ClusterConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.AuditWebhook != nil {
+ in, out := &in.AuditWebhook, &out.AuditWebhook
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+ if in.ImageContentSources != nil {
+ in, out := &in.ImageContentSources, &out.ImageContentSources
+ *out = make([]ImageContentSource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.AdditionalTrustBundle != nil {
+ in, out := &in.AdditionalTrustBundle, &out.AdditionalTrustBundle
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+ if in.SecretEncryption != nil {
+ in, out := &in.SecretEncryption, &out.SecretEncryption
+ *out = new(SecretEncryptionSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.PausedUntil != nil {
+ in, out := &in.PausedUntil, &out.PausedUntil
+ *out = new(string)
+ **out = **in
+ }
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedClusterSpec.
+func (in *HostedClusterSpec) DeepCopy() *HostedClusterSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(HostedClusterSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostedClusterStatus) DeepCopyInto(out *HostedClusterStatus) {
+ *out = *in
+ if in.Version != nil {
+ in, out := &in.Version, &out.Version
+ *out = new(ClusterVersionStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.KubeConfig != nil {
+ in, out := &in.KubeConfig, &out.KubeConfig
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+ if in.KubeadminPassword != nil {
+ in, out := &in.KubeadminPassword, &out.KubeadminPassword
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+ out.ControlPlaneEndpoint = in.ControlPlaneEndpoint
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedClusterStatus.
+func (in *HostedClusterStatus) DeepCopy() *HostedClusterStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(HostedClusterStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostedControlPlane) DeepCopyInto(out *HostedControlPlane) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedControlPlane.
+func (in *HostedControlPlane) DeepCopy() *HostedControlPlane {
+ if in == nil {
+ return nil
+ }
+ out := new(HostedControlPlane)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HostedControlPlane) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostedControlPlaneList) DeepCopyInto(out *HostedControlPlaneList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]HostedControlPlane, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedControlPlaneList.
+func (in *HostedControlPlaneList) DeepCopy() *HostedControlPlaneList {
+ if in == nil {
+ return nil
+ }
+ out := new(HostedControlPlaneList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HostedControlPlaneList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostedControlPlaneSpec) DeepCopyInto(out *HostedControlPlaneSpec) {
+ *out = *in
+ out.PullSecret = in.PullSecret
+ in.Networking.DeepCopyInto(&out.Networking)
+ out.SSHKey = in.SSHKey
+ in.Platform.DeepCopyInto(&out.Platform)
+ out.DNS = in.DNS
+ if in.ServiceAccountSigningKey != nil {
+ in, out := &in.ServiceAccountSigningKey, &out.ServiceAccountSigningKey
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+ if in.KubeConfig != nil {
+ in, out := &in.KubeConfig, &out.KubeConfig
+ *out = new(KubeconfigSecretRef)
+ **out = **in
+ }
+ if in.Services != nil {
+ in, out := &in.Services, &out.Services
+ *out = make([]ServicePublishingStrategyMapping, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.AuditWebhook != nil {
+ in, out := &in.AuditWebhook, &out.AuditWebhook
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+ in.Etcd.DeepCopyInto(&out.Etcd)
+ if in.Configuration != nil {
+ in, out := &in.Configuration, &out.Configuration
+ *out = new(ClusterConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ImageContentSources != nil {
+ in, out := &in.ImageContentSources, &out.ImageContentSources
+ *out = make([]ImageContentSource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.AdditionalTrustBundle != nil {
+ in, out := &in.AdditionalTrustBundle, &out.AdditionalTrustBundle
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+ if in.SecretEncryption != nil {
+ in, out := &in.SecretEncryption, &out.SecretEncryption
+ *out = new(SecretEncryptionSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.PausedUntil != nil {
+ in, out := &in.PausedUntil, &out.PausedUntil
+ *out = new(string)
+ **out = **in
+ }
+ in.Autoscaling.DeepCopyInto(&out.Autoscaling)
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedControlPlaneSpec.
+func (in *HostedControlPlaneSpec) DeepCopy() *HostedControlPlaneSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(HostedControlPlaneSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostedControlPlaneStatus) DeepCopyInto(out *HostedControlPlaneStatus) {
+ *out = *in
+ if in.ExternalManagedControlPlane != nil {
+ in, out := &in.ExternalManagedControlPlane, &out.ExternalManagedControlPlane
+ *out = new(bool)
+ **out = **in
+ }
+ out.ControlPlaneEndpoint = in.ControlPlaneEndpoint
+ if in.VersionStatus != nil {
+ in, out := &in.VersionStatus, &out.VersionStatus
+ *out = new(ClusterVersionStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.LastReleaseImageTransitionTime != nil {
+ in, out := &in.LastReleaseImageTransitionTime, &out.LastReleaseImageTransitionTime
+ *out = (*in).DeepCopy()
+ }
+ if in.KubeConfig != nil {
+ in, out := &in.KubeConfig, &out.KubeConfig
+ *out = new(KubeconfigSecretRef)
+ **out = **in
+ }
+ if in.KubeadminPassword != nil {
+ in, out := &in.KubeadminPassword, &out.KubeadminPassword
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedControlPlaneStatus.
+func (in *HostedControlPlaneStatus) DeepCopy() *HostedControlPlaneStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(HostedControlPlaneStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IBMCloudKMSAuthSpec) DeepCopyInto(out *IBMCloudKMSAuthSpec) {
+ *out = *in
+ if in.Unmanaged != nil {
+ in, out := &in.Unmanaged, &out.Unmanaged
+ *out = new(IBMCloudKMSUnmanagedAuthSpec)
+ **out = **in
+ }
+ if in.Managed != nil {
+ in, out := &in.Managed, &out.Managed
+ *out = new(IBMCloudKMSManagedAuthSpec)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudKMSAuthSpec.
+func (in *IBMCloudKMSAuthSpec) DeepCopy() *IBMCloudKMSAuthSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(IBMCloudKMSAuthSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IBMCloudKMSKeyEntry) DeepCopyInto(out *IBMCloudKMSKeyEntry) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudKMSKeyEntry.
+func (in *IBMCloudKMSKeyEntry) DeepCopy() *IBMCloudKMSKeyEntry {
+ if in == nil {
+ return nil
+ }
+ out := new(IBMCloudKMSKeyEntry)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IBMCloudKMSManagedAuthSpec) DeepCopyInto(out *IBMCloudKMSManagedAuthSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudKMSManagedAuthSpec.
+func (in *IBMCloudKMSManagedAuthSpec) DeepCopy() *IBMCloudKMSManagedAuthSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(IBMCloudKMSManagedAuthSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IBMCloudKMSSpec) DeepCopyInto(out *IBMCloudKMSSpec) {
+ *out = *in
+ in.Auth.DeepCopyInto(&out.Auth)
+ if in.KeyList != nil {
+ in, out := &in.KeyList, &out.KeyList
+ *out = make([]IBMCloudKMSKeyEntry, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudKMSSpec.
+func (in *IBMCloudKMSSpec) DeepCopy() *IBMCloudKMSSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(IBMCloudKMSSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IBMCloudKMSUnmanagedAuthSpec) DeepCopyInto(out *IBMCloudKMSUnmanagedAuthSpec) {
+ *out = *in
+ out.Credentials = in.Credentials
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudKMSUnmanagedAuthSpec.
+func (in *IBMCloudKMSUnmanagedAuthSpec) DeepCopy() *IBMCloudKMSUnmanagedAuthSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(IBMCloudKMSUnmanagedAuthSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IBMCloudPlatformSpec) DeepCopyInto(out *IBMCloudPlatformSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudPlatformSpec.
+func (in *IBMCloudPlatformSpec) DeepCopy() *IBMCloudPlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(IBMCloudPlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageContentSource) DeepCopyInto(out *ImageContentSource) {
+ *out = *in
+ if in.Mirrors != nil {
+ in, out := &in.Mirrors, &out.Mirrors
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentSource.
+func (in *ImageContentSource) DeepCopy() *ImageContentSource {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageContentSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InPlaceUpgrade) DeepCopyInto(out *InPlaceUpgrade) {
+ *out = *in
+ if in.MaxUnavailable != nil {
+ in, out := &in.MaxUnavailable, &out.MaxUnavailable
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InPlaceUpgrade.
+func (in *InPlaceUpgrade) DeepCopy() *InPlaceUpgrade {
+ if in == nil {
+ return nil
+ }
+ out := new(InPlaceUpgrade)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KMSSpec) DeepCopyInto(out *KMSSpec) {
+ *out = *in
+ if in.IBMCloud != nil {
+ in, out := &in.IBMCloud, &out.IBMCloud
+ *out = new(IBMCloudKMSSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.AWS != nil {
+ in, out := &in.AWS, &out.AWS
+ *out = new(AWSKMSSpec)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSSpec.
+func (in *KMSSpec) DeepCopy() *KMSSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(KMSSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeconfigSecretRef) DeepCopyInto(out *KubeconfigSecretRef) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeconfigSecretRef.
+func (in *KubeconfigSecretRef) DeepCopy() *KubeconfigSecretRef {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeconfigSecretRef)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubevirtCompute) DeepCopyInto(out *KubevirtCompute) {
+ *out = *in
+ if in.Memory != nil {
+ in, out := &in.Memory, &out.Memory
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.Cores != nil {
+ in, out := &in.Cores, &out.Cores
+ *out = new(uint32)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtCompute.
+func (in *KubevirtCompute) DeepCopy() *KubevirtCompute {
+ if in == nil {
+ return nil
+ }
+ out := new(KubevirtCompute)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubevirtDiskImage) DeepCopyInto(out *KubevirtDiskImage) {
+ *out = *in
+ if in.ContainerDiskImage != nil {
+ in, out := &in.ContainerDiskImage, &out.ContainerDiskImage
+ *out = new(string)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtDiskImage.
+func (in *KubevirtDiskImage) DeepCopy() *KubevirtDiskImage {
+ if in == nil {
+ return nil
+ }
+ out := new(KubevirtDiskImage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubevirtNodePoolPlatform) DeepCopyInto(out *KubevirtNodePoolPlatform) {
+ *out = *in
+ if in.RootVolume != nil {
+ in, out := &in.RootVolume, &out.RootVolume
+ *out = new(KubevirtRootVolume)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Compute != nil {
+ in, out := &in.Compute, &out.Compute
+ *out = new(KubevirtCompute)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtNodePoolPlatform.
+func (in *KubevirtNodePoolPlatform) DeepCopy() *KubevirtNodePoolPlatform {
+ if in == nil {
+ return nil
+ }
+ out := new(KubevirtNodePoolPlatform)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubevirtPersistentVolume) DeepCopyInto(out *KubevirtPersistentVolume) {
+ *out = *in
+ if in.Size != nil {
+ in, out := &in.Size, &out.Size
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ if in.StorageClass != nil {
+ in, out := &in.StorageClass, &out.StorageClass
+ *out = new(string)
+ **out = **in
+ }
+ if in.AccessModes != nil {
+ in, out := &in.AccessModes, &out.AccessModes
+ *out = make([]PersistentVolumeAccessMode, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtPersistentVolume.
+func (in *KubevirtPersistentVolume) DeepCopy() *KubevirtPersistentVolume {
+ if in == nil {
+ return nil
+ }
+ out := new(KubevirtPersistentVolume)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubevirtRootVolume) DeepCopyInto(out *KubevirtRootVolume) {
+ *out = *in
+ if in.Image != nil {
+ in, out := &in.Image, &out.Image
+ *out = new(KubevirtDiskImage)
+ (*in).DeepCopyInto(*out)
+ }
+ in.KubevirtVolume.DeepCopyInto(&out.KubevirtVolume)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtRootVolume.
+func (in *KubevirtRootVolume) DeepCopy() *KubevirtRootVolume {
+ if in == nil {
+ return nil
+ }
+ out := new(KubevirtRootVolume)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubevirtVolume) DeepCopyInto(out *KubevirtVolume) {
+ *out = *in
+ if in.Persistent != nil {
+ in, out := &in.Persistent, &out.Persistent
+ *out = new(KubevirtPersistentVolume)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtVolume.
+func (in *KubevirtVolume) DeepCopy() *KubevirtVolume {
+ if in == nil {
+ return nil
+ }
+ out := new(KubevirtVolume)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancerPublishingStrategy) DeepCopyInto(out *LoadBalancerPublishingStrategy) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerPublishingStrategy.
+func (in *LoadBalancerPublishingStrategy) DeepCopy() *LoadBalancerPublishingStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(LoadBalancerPublishingStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineNetworkEntry) DeepCopyInto(out *MachineNetworkEntry) {
+ *out = *in
+ in.CIDR.DeepCopyInto(&out.CIDR)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineNetworkEntry.
+func (in *MachineNetworkEntry) DeepCopy() *MachineNetworkEntry {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineNetworkEntry)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ManagedEtcdSpec) DeepCopyInto(out *ManagedEtcdSpec) {
+ *out = *in
+ in.Storage.DeepCopyInto(&out.Storage)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedEtcdSpec.
+func (in *ManagedEtcdSpec) DeepCopy() *ManagedEtcdSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ManagedEtcdSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ManagedEtcdStorageSpec) DeepCopyInto(out *ManagedEtcdStorageSpec) {
+ *out = *in
+ if in.PersistentVolume != nil {
+ in, out := &in.PersistentVolume, &out.PersistentVolume
+ *out = new(PersistentVolumeEtcdStorageSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.RestoreSnapshotURL != nil {
+ in, out := &in.RestoreSnapshotURL, &out.RestoreSnapshotURL
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedEtcdStorageSpec.
+func (in *ManagedEtcdStorageSpec) DeepCopy() *ManagedEtcdStorageSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ManagedEtcdStorageSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodePool) DeepCopyInto(out *NodePool) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePool.
+func (in *NodePool) DeepCopy() *NodePool {
+ if in == nil {
+ return nil
+ }
+ out := new(NodePool)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NodePool) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodePoolAutoScaling) DeepCopyInto(out *NodePoolAutoScaling) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolAutoScaling.
+func (in *NodePoolAutoScaling) DeepCopy() *NodePoolAutoScaling {
+ if in == nil {
+ return nil
+ }
+ out := new(NodePoolAutoScaling)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodePoolCondition) DeepCopyInto(out *NodePoolCondition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolCondition.
+func (in *NodePoolCondition) DeepCopy() *NodePoolCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(NodePoolCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodePoolList) DeepCopyInto(out *NodePoolList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]NodePool, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolList.
+func (in *NodePoolList) DeepCopy() *NodePoolList {
+ if in == nil {
+ return nil
+ }
+ out := new(NodePoolList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NodePoolList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodePoolManagement) DeepCopyInto(out *NodePoolManagement) {
+ *out = *in
+ if in.Replace != nil {
+ in, out := &in.Replace, &out.Replace
+ *out = new(ReplaceUpgrade)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.InPlace != nil {
+ in, out := &in.InPlace, &out.InPlace
+ *out = new(InPlaceUpgrade)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolManagement.
+func (in *NodePoolManagement) DeepCopy() *NodePoolManagement {
+ if in == nil {
+ return nil
+ }
+ out := new(NodePoolManagement)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodePoolPlatform) DeepCopyInto(out *NodePoolPlatform) {
+ *out = *in
+ if in.AWS != nil {
+ in, out := &in.AWS, &out.AWS
+ *out = new(AWSNodePoolPlatform)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.IBMCloud != nil {
+ in, out := &in.IBMCloud, &out.IBMCloud
+ *out = new(IBMCloudPlatformSpec)
+ **out = **in
+ }
+ if in.Kubevirt != nil {
+ in, out := &in.Kubevirt, &out.Kubevirt
+ *out = new(KubevirtNodePoolPlatform)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Agent != nil {
+ in, out := &in.Agent, &out.Agent
+ *out = new(AgentNodePoolPlatform)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Azure != nil {
+ in, out := &in.Azure, &out.Azure
+ *out = new(AzureNodePoolPlatform)
+ **out = **in
+ }
+ if in.PowerVS != nil {
+ in, out := &in.PowerVS, &out.PowerVS
+ *out = new(PowerVSNodePoolPlatform)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolPlatform.
+func (in *NodePoolPlatform) DeepCopy() *NodePoolPlatform {
+ if in == nil {
+ return nil
+ }
+ out := new(NodePoolPlatform)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodePoolSpec) DeepCopyInto(out *NodePoolSpec) {
+ *out = *in
+ out.Release = in.Release
+ in.Platform.DeepCopyInto(&out.Platform)
+ if in.Replicas != nil {
+ in, out := &in.Replicas, &out.Replicas
+ *out = new(int32)
+ **out = **in
+ }
+ in.Management.DeepCopyInto(&out.Management)
+ if in.AutoScaling != nil {
+ in, out := &in.AutoScaling, &out.AutoScaling
+ *out = new(NodePoolAutoScaling)
+ **out = **in
+ }
+ if in.Config != nil {
+ in, out := &in.Config, &out.Config
+ *out = make([]corev1.LocalObjectReference, len(*in))
+ copy(*out, *in)
+ }
+ if in.NodeDrainTimeout != nil {
+ in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout
+ *out = new(v1.Duration)
+ **out = **in
+ }
+ if in.PausedUntil != nil {
+ in, out := &in.PausedUntil, &out.PausedUntil
+ *out = new(string)
+ **out = **in
+ }
+ if in.TuningConfig != nil {
+ in, out := &in.TuningConfig, &out.TuningConfig
+ *out = make([]corev1.LocalObjectReference, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolSpec.
+func (in *NodePoolSpec) DeepCopy() *NodePoolSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(NodePoolSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodePoolStatus) DeepCopyInto(out *NodePoolStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]NodePoolCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolStatus.
+func (in *NodePoolStatus) DeepCopy() *NodePoolStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(NodePoolStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodePortPublishingStrategy) DeepCopyInto(out *NodePortPublishingStrategy) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePortPublishingStrategy.
+func (in *NodePortPublishingStrategy) DeepCopy() *NodePortPublishingStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(NodePortPublishingStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeEtcdStorageSpec) DeepCopyInto(out *PersistentVolumeEtcdStorageSpec) {
+ *out = *in
+ if in.StorageClassName != nil {
+ in, out := &in.StorageClassName, &out.StorageClassName
+ *out = new(string)
+ **out = **in
+ }
+ if in.Size != nil {
+ in, out := &in.Size, &out.Size
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeEtcdStorageSpec.
+func (in *PersistentVolumeEtcdStorageSpec) DeepCopy() *PersistentVolumeEtcdStorageSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PersistentVolumeEtcdStorageSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PlatformSpec) DeepCopyInto(out *PlatformSpec) {
+ *out = *in
+ if in.AWS != nil {
+ in, out := &in.AWS, &out.AWS
+ *out = new(AWSPlatformSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Agent != nil {
+ in, out := &in.Agent, &out.Agent
+ *out = new(AgentPlatformSpec)
+ **out = **in
+ }
+ if in.IBMCloud != nil {
+ in, out := &in.IBMCloud, &out.IBMCloud
+ *out = new(IBMCloudPlatformSpec)
+ **out = **in
+ }
+ if in.Azure != nil {
+ in, out := &in.Azure, &out.Azure
+ *out = new(AzurePlatformSpec)
+ **out = **in
+ }
+ if in.PowerVS != nil {
+ in, out := &in.PowerVS, &out.PowerVS
+ *out = new(PowerVSPlatformSpec)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformSpec.
+func (in *PlatformSpec) DeepCopy() *PlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PowerVSNodePoolPlatform) DeepCopyInto(out *PowerVSNodePoolPlatform) {
+ *out = *in
+ out.Processors = in.Processors
+ if in.Image != nil {
+ in, out := &in.Image, &out.Image
+ *out = new(PowerVSResourceReference)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSNodePoolPlatform.
+func (in *PowerVSNodePoolPlatform) DeepCopy() *PowerVSNodePoolPlatform {
+ if in == nil {
+ return nil
+ }
+ out := new(PowerVSNodePoolPlatform)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PowerVSPlatformSpec) DeepCopyInto(out *PowerVSPlatformSpec) {
+ *out = *in
+ if in.Subnet != nil {
+ in, out := &in.Subnet, &out.Subnet
+ *out = new(PowerVSResourceReference)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.VPC != nil {
+ in, out := &in.VPC, &out.VPC
+ *out = new(PowerVSVPC)
+ **out = **in
+ }
+ out.KubeCloudControllerCreds = in.KubeCloudControllerCreds
+ out.NodePoolManagementCreds = in.NodePoolManagementCreds
+ out.IngressOperatorCloudCreds = in.IngressOperatorCloudCreds
+ out.StorageOperatorCloudCreds = in.StorageOperatorCloudCreds
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSPlatformSpec.
+func (in *PowerVSPlatformSpec) DeepCopy() *PowerVSPlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PowerVSPlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PowerVSResourceReference) DeepCopyInto(out *PowerVSResourceReference) {
+ *out = *in
+ if in.ID != nil {
+ in, out := &in.ID, &out.ID
+ *out = new(string)
+ **out = **in
+ }
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = new(string)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSResourceReference.
+func (in *PowerVSResourceReference) DeepCopy() *PowerVSResourceReference {
+ if in == nil {
+ return nil
+ }
+ out := new(PowerVSResourceReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PowerVSVPC) DeepCopyInto(out *PowerVSVPC) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSVPC.
+func (in *PowerVSVPC) DeepCopy() *PowerVSVPC {
+ if in == nil {
+ return nil
+ }
+ out := new(PowerVSVPC)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Release) DeepCopyInto(out *Release) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Release.
+func (in *Release) DeepCopy() *Release {
+ if in == nil {
+ return nil
+ }
+ out := new(Release)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplaceUpgrade) DeepCopyInto(out *ReplaceUpgrade) {
+ *out = *in
+ if in.RollingUpdate != nil {
+ in, out := &in.RollingUpdate, &out.RollingUpdate
+ *out = new(RollingUpdate)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplaceUpgrade.
+func (in *ReplaceUpgrade) DeepCopy() *ReplaceUpgrade {
+ if in == nil {
+ return nil
+ }
+ out := new(ReplaceUpgrade)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdate) DeepCopyInto(out *RollingUpdate) {
+ *out = *in
+ if in.MaxUnavailable != nil {
+ in, out := &in.MaxUnavailable, &out.MaxUnavailable
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ if in.MaxSurge != nil {
+ in, out := &in.MaxSurge, &out.MaxSurge
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdate.
+func (in *RollingUpdate) DeepCopy() *RollingUpdate {
+ if in == nil {
+ return nil
+ }
+ out := new(RollingUpdate)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoutePublishingStrategy) DeepCopyInto(out *RoutePublishingStrategy) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutePublishingStrategy.
+func (in *RoutePublishingStrategy) DeepCopy() *RoutePublishingStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(RoutePublishingStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretEncryptionSpec) DeepCopyInto(out *SecretEncryptionSpec) {
+ *out = *in
+ if in.KMS != nil {
+ in, out := &in.KMS, &out.KMS
+ *out = new(KMSSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.AESCBC != nil {
+ in, out := &in.AESCBC, &out.AESCBC
+ *out = new(AESCBCSpec)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretEncryptionSpec.
+func (in *SecretEncryptionSpec) DeepCopy() *SecretEncryptionSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(SecretEncryptionSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceNetworkEntry) DeepCopyInto(out *ServiceNetworkEntry) {
+ *out = *in
+ in.CIDR.DeepCopyInto(&out.CIDR)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceNetworkEntry.
+func (in *ServiceNetworkEntry) DeepCopy() *ServiceNetworkEntry {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceNetworkEntry)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServicePublishingStrategy) DeepCopyInto(out *ServicePublishingStrategy) {
+ *out = *in
+ if in.NodePort != nil {
+ in, out := &in.NodePort, &out.NodePort
+ *out = new(NodePortPublishingStrategy)
+ **out = **in
+ }
+ if in.LoadBalancer != nil {
+ in, out := &in.LoadBalancer, &out.LoadBalancer
+ *out = new(LoadBalancerPublishingStrategy)
+ **out = **in
+ }
+ if in.Route != nil {
+ in, out := &in.Route, &out.Route
+ *out = new(RoutePublishingStrategy)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePublishingStrategy.
+func (in *ServicePublishingStrategy) DeepCopy() *ServicePublishingStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(ServicePublishingStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServicePublishingStrategyMapping) DeepCopyInto(out *ServicePublishingStrategyMapping) {
+ *out = *in
+ in.ServicePublishingStrategy.DeepCopyInto(&out.ServicePublishingStrategy)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePublishingStrategyMapping.
+func (in *ServicePublishingStrategyMapping) DeepCopy() *ServicePublishingStrategyMapping {
+ if in == nil {
+ return nil
+ }
+ out := new(ServicePublishingStrategyMapping)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UnmanagedEtcdSpec) DeepCopyInto(out *UnmanagedEtcdSpec) {
+ *out = *in
+ out.TLS = in.TLS
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnmanagedEtcdSpec.
+func (in *UnmanagedEtcdSpec) DeepCopy() *UnmanagedEtcdSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(UnmanagedEtcdSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Volume) DeepCopyInto(out *Volume) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume.
+func (in *Volume) DeepCopy() *Volume {
+ if in == nil {
+ return nil
+ }
+ out := new(Volume)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/cmd/bastion/aws/create.go b/cmd/bastion/aws/create.go
index 22d7b4e00d9..7bef281479d 100644
--- a/cmd/bastion/aws/create.go
+++ b/cmd/bastion/aws/create.go
@@ -17,7 +17,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/retry"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
awsutil "github.com/openshift/hypershift/cmd/infra/aws/util"
"github.com/openshift/hypershift/cmd/util"
)
diff --git a/cmd/bastion/aws/destroy.go b/cmd/bastion/aws/destroy.go
index faa22e47e59..db196789629 100644
--- a/cmd/bastion/aws/destroy.go
+++ b/cmd/bastion/aws/destroy.go
@@ -12,7 +12,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
awsutil "github.com/openshift/hypershift/cmd/infra/aws/util"
"github.com/openshift/hypershift/cmd/util"
)
diff --git a/cmd/cluster/aws/create.go b/cmd/cluster/aws/create.go
index bd9029f4ae8..f4213927de9 100644
--- a/cmd/cluster/aws/create.go
+++ b/cmd/cluster/aws/create.go
@@ -7,7 +7,7 @@ import (
"io/ioutil"
apifixtures "github.com/openshift/hypershift/api/fixtures"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/cmd/cluster/core"
awsinfra "github.com/openshift/hypershift/cmd/infra/aws"
"github.com/openshift/hypershift/cmd/util"
@@ -170,6 +170,7 @@ func applyPlatformSpecificsValues(ctx context.Context, exampleOptions *apifixtur
SubnetID: &outputZone.SubnetID,
})
}
+
exampleOptions.AWS = &apifixtures.ExampleAWSOptions{
Region: infra.Region,
Zones: zones,
diff --git a/cmd/cluster/core/create.go b/cmd/cluster/core/create.go
index dafb168eff6..04f6b83f3ae 100644
--- a/cmd/cluster/core/create.go
+++ b/cmd/cluster/core/create.go
@@ -25,7 +25,7 @@ import (
"github.com/blang/semver"
"github.com/go-logr/logr"
apifixtures "github.com/openshift/hypershift/api/fixtures"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/cmd/util"
"github.com/openshift/hypershift/cmd/version"
hyperapi "github.com/openshift/hypershift/support/api"
@@ -89,7 +89,7 @@ type PowerVSPlatformOptions struct {
// nodepool related options
SysType string
- ProcType string
+ ProcType hyperv1.PowerVSNodePoolProcType
Processors string
Memory int32
}
diff --git a/cmd/cluster/core/destroy.go b/cmd/cluster/core/destroy.go
index c2e781e17f1..510b368d440 100644
--- a/cmd/cluster/core/destroy.go
+++ b/cmd/cluster/core/destroy.go
@@ -6,7 +6,7 @@ import (
"time"
"github.com/go-logr/logr"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/cmd/util"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
diff --git a/cmd/cluster/core/dump.go b/cmd/cluster/core/dump.go
index bc789ea1d6a..1d9899a7bde 100644
--- a/cmd/cluster/core/dump.go
+++ b/cmd/cluster/core/dump.go
@@ -31,7 +31,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
hyperapi "github.com/openshift/hypershift/api"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/cmd/log"
"github.com/openshift/hypershift/cmd/util"
"github.com/openshift/hypershift/hypershift-operator/controllers/manifests"
diff --git a/cmd/cluster/powervs/create.go b/cmd/cluster/powervs/create.go
index 62f6b0dee31..8a62217ea56 100644
--- a/cmd/cluster/powervs/create.go
+++ b/cmd/cluster/powervs/create.go
@@ -45,7 +45,7 @@ func NewCreateCommand(opts *core.CreateOptions) *cobra.Command {
cmd.Flags().StringVar(&opts.PowerVSPlatform.VpcRegion, "vpc-region", opts.PowerVSPlatform.VpcRegion, "IBM Cloud VPC Region for VPC resources. Default is us-south")
cmd.Flags().StringVar(&opts.PowerVSPlatform.Vpc, "vpc", "", "IBM Cloud VPC Name")
cmd.Flags().StringVar(&opts.PowerVSPlatform.SysType, "sys-type", opts.PowerVSPlatform.SysType, "System type used to host the instance(e.g: s922, e980, e880). Default is s922")
- cmd.Flags().StringVar(&opts.PowerVSPlatform.ProcType, "proc-type", opts.PowerVSPlatform.ProcType, "Processor type (dedicated, shared, capped). Default is shared")
+ cmd.Flags().Var(&opts.PowerVSPlatform.ProcType, "proc-type", "Processor type (dedicated, shared, capped). Default is shared")
cmd.Flags().StringVar(&opts.PowerVSPlatform.Processors, "processors", opts.PowerVSPlatform.Processors, "Number of processors allocated. Default is 0.5")
cmd.Flags().Int32Var(&opts.PowerVSPlatform.Memory, "memory", opts.PowerVSPlatform.Memory, "Amount of memory allocated (in GB). Default is 32")
diff --git a/cmd/consolelogs/aws/getlogs.go b/cmd/consolelogs/aws/getlogs.go
index cac11ac0820..9d47a640902 100644
--- a/cmd/consolelogs/aws/getlogs.go
+++ b/cmd/consolelogs/aws/getlogs.go
@@ -15,7 +15,7 @@ import (
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
awsutil "github.com/openshift/hypershift/cmd/infra/aws/util"
"github.com/openshift/hypershift/cmd/log"
"github.com/openshift/hypershift/cmd/util"
diff --git a/cmd/infra/aws/create_iam.go b/cmd/infra/aws/create_iam.go
index c533931148d..9ba4d22be8e 100644
--- a/cmd/infra/aws/create_iam.go
+++ b/cmd/infra/aws/create_iam.go
@@ -16,7 +16,7 @@ import (
crclient "sigs.k8s.io/controller-runtime/pkg/client"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
awsutil "github.com/openshift/hypershift/cmd/infra/aws/util"
"github.com/openshift/hypershift/cmd/log"
"github.com/openshift/hypershift/cmd/util"
diff --git a/cmd/install/assets/assets.go b/cmd/install/assets/assets.go
index 4f8c2d3a24d..9132a613186 100644
--- a/cmd/install/assets/assets.go
+++ b/cmd/install/assets/assets.go
@@ -39,7 +39,7 @@ var capiResources = map[string]string{
"cluster-api-provider-ibmcloud/infrastructure.cluster.x-k8s.io_ibmpowervsmachines.yaml": "v1beta1",
"cluster-api-provider-ibmcloud/infrastructure.cluster.x-k8s.io_ibmpowervsmachinetemplates.yaml": "v1beta1",
"cluster-api-provider-ibmcloud/infrastructure.cluster.x-k8s.io_ibmvpcclusters.yaml": "v1alpha4",
- "hypershift-operator/hypershift.openshift.io_hostedcontrolplanes.yaml": "v1alpha1",
+ "hypershift-operator/hypershift.openshift.io_hostedcontrolplanes.yaml": "v1beta1",
"cluster-api-provider-kubevirt/infrastructure.cluster.x-k8s.io_kubevirtclusters.yaml": "v1alpha1",
"cluster-api-provider-kubevirt/infrastructure.cluster.x-k8s.io_kubevirtmachines.yaml": "v1alpha1",
"cluster-api-provider-kubevirt/infrastructure.cluster.x-k8s.io_kubevirtmachinetemplates.yaml": "v1alpha1",
@@ -69,7 +69,7 @@ func getContents(fs embed.FS, file string) []byte {
}
// CustomResourceDefinitions returns all existing CRDs as controller-runtime objects
-func CustomResourceDefinitions(include func(path string) bool) []crclient.Object {
+func CustomResourceDefinitions(include func(path string) bool, transform func(*apiextensionsv1.CustomResourceDefinition)) []crclient.Object {
var allCrds []crclient.Object
err := fs.WalkDir(crds, ".", func(path string, d fs.DirEntry, err error) error {
if err != nil {
@@ -79,7 +79,11 @@ func CustomResourceDefinitions(include func(path string) bool) []crclient.Object
return nil
}
if include(path) {
- allCrds = append(allCrds, getCustomResourceDefinition(crds, path))
+ crd := getCustomResourceDefinition(crds, path)
+ if transform != nil {
+ transform(crd)
+ }
+ allCrds = append(allCrds, crd)
}
return nil
})
diff --git a/cmd/install/assets/hypershift-operator/hypershift.openshift.io_awsendpointservices.yaml b/cmd/install/assets/hypershift-operator/hypershift.openshift.io_awsendpointservices.yaml
index 4b3c7bf289b..7a7e04f94a1 100644
--- a/cmd/install/assets/hypershift-operator/hypershift.openshift.io_awsendpointservices.yaml
+++ b/cmd/install/assets/hypershift-operator/hypershift.openshift.io_awsendpointservices.yaml
@@ -166,8 +166,165 @@ spec:
description: EndpointServiceName is the name of the Endpoint Service
created in the management VPC
type: string
+ type: object
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: AWSEndpointService specifies a request for an Endpoint Service
+ in AWS
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AWSEndpointServiceSpec defines the desired state of AWSEndpointService
+ properties:
+ networkLoadBalancerName:
+ description: The name of the NLB for which an Endpoint Service should
+ be configured
+ type: string
+ resourceTags:
+ description: Tags to apply to the EndpointService
+ items:
+ description: AWSResourceTag is a tag to apply to AWS resources created
+ for the cluster.
+ properties:
+ key:
+ description: Key is the key of the tag.
+ maxLength: 128
+ minLength: 1
+ pattern: ^[0-9A-Za-z_.:/=+-@]+$
+ type: string
+ value:
+              description: "Value is the value of the tag. \n Some AWS services
+ do not support empty values. Since tags are added to resources
+ in many services, the length of the tag value must meet the
+ requirements of all services."
+ maxLength: 256
+ minLength: 1
+ pattern: ^[0-9A-Za-z_.:/=+-@]+$
+ type: string
+ required:
+ - key
+ - value
+ type: object
+ type: array
+ subnetIDs:
+ description: SubnetIDs is the list of subnet IDs to which guest nodes
+ can attach
+ items:
+ type: string
+ type: array
required:
- - conditions
+ - networkLoadBalancerName
+ type: object
+ status:
+ description: AWSEndpointServiceStatus defines the observed state of AWSEndpointService
+ properties:
+ conditions:
+ description: "Conditions contains details for the current state of
+ the Endpoint Service request If there is an error processing the
+ request e.g. the NLB doesn't exist, then the Available condition
+ will be false, reason AWSErrorReason, and the error reported in
+ the message. \n Current condition types are: \"Available\""
+ items:
+ description: "Condition contains details for one aspect of the current
+ state of this API Resource. --- This struct is intended for direct
+ use as an array at the field path .status.conditions. For example,
+ type FooStatus struct{ // Represents the observations of a foo's
+ current state. // Known .status.conditions.type are: \"Available\",
+ \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge
+ // +listType=map // +listMapKey=type Conditions []metav1.Condition
+ `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\"
+ protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the last time the condition
+ transitioned from one status to another. This should be when
+ the underlying condition changed. If that is not known, then
+ using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: message is a human readable message indicating
+ details about the transition. This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: observedGeneration represents the .metadata.generation
+ that the condition was set based upon. For instance, if .metadata.generation
+ is currently 12, but the .status.conditions[x].observedGeneration
+ is 9, the condition is out of date with respect to the current
+ state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: reason contains a programmatic identifier indicating
+ the reason for the condition's last transition. Producers
+ of specific condition types may define expected values and
+ meanings for this field, and whether the values are considered
+ a guaranteed API. The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ --- Many .condition.type values are consistent across resources
+ like Available, but because arbitrary conditions can be useful
+ (see .node.status.conditions), the ability to deconflict is
+ important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ dnsNames:
+          description: DNSNames are the names for the records created in the
+ hypershift private zone
+ items:
+ type: string
+ type: array
+ dnsZoneID:
+ description: DNSZoneID is ID for the hypershift private zone
+ type: string
+ endpointID:
+ description: EndpointID is the ID of the Endpoint created in the guest
+ VPC
+ type: string
+ endpointServiceName:
+ description: EndpointServiceName is the name of the Endpoint Service
+ created in the management VPC
+ type: string
type: object
type: object
served: true
diff --git a/cmd/install/assets/hypershift-operator/hypershift.openshift.io_hostedclusters.yaml b/cmd/install/assets/hypershift-operator/hypershift.openshift.io_hostedclusters.yaml
index f8b626df61c..b0f4d0075c3 100644
--- a/cmd/install/assets/hypershift-operator/hypershift.openshift.io_hostedclusters.yaml
+++ b/cmd/install/assets/hypershift-operator/hypershift.openshift.io_hostedclusters.yaml
@@ -43,6 +43,8 @@ spec:
jsonPath: .status.conditions[?(@.type=="Available")].message
name: Message
type: string
+ deprecated: true
+ deprecationWarning: v1alpha1 is a deprecated version for HostedCluster
name: v1alpha1
schema:
openAPIV3Schema:
@@ -127,6 +129,12 @@ spec:
format: int32
type: integer
type: object
+ channel:
+ description: channel is an identifier for explicitly requesting that
+ a non-default set of updates be applied to this cluster. The default
+          channel will contain stable updates that are appropriate for
+ production clusters.
+ type: string
clusterID:
description: ClusterID uniquely identifies this cluster. This is expected
to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
@@ -3162,19 +3170,292 @@ spec:
description: Version is the status of the release version applied
to the HostedCluster.
properties:
+ availableUpdates:
+ description: availableUpdates contains updates recommended for
+ this cluster. Updates which appear in conditionalUpdates but
+ not in availableUpdates may expose this cluster to known issues.
+ This list may be empty if no updates are recommended, if the
+ update service is unavailable, or if an invalid channel has
+ been specified.
+ items:
+ description: Release represents an OpenShift release image and
+ associated metadata.
+ properties:
+ channels:
+ description: channels is the set of Cincinnati channels
+ to which the release currently belongs.
+ items:
+ type: string
+ type: array
+ image:
+ description: image is a container image location that contains
+ the update. When this field is part of spec, image is
+ optional if version is specified and the availableUpdates
+ field contains a matching version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on a release
+ or the metadata returned by the update API and should
+ be displayed as a link in user interfaces. The URL field
+ may not be set for test or nightly releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec, version
+ is optional if image is specified.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ conditionalUpdates:
+ description: conditionalUpdates contains the list of updates that
+ may be recommended for this cluster if it meets specific required
+ conditions. Consumers interested in the set of updates that
+ are actually recommended for this cluster should use availableUpdates.
+ This list may be empty if no updates are recommended, if the
+ update service is unavailable, or if an empty or invalid channel
+ has been specified.
+ items:
+ description: ConditionalUpdate represents an update which is
+ recommended to some clusters on the version the current cluster
+ is reconciling, but which may not be recommended for the current
+ cluster.
+ properties:
+ conditions:
+ description: 'conditions represents the observations of
+ the conditional update''s current status. Known types
+ are: * Evaluating, for whether the cluster-version operator
+ will attempt to evaluate any risks[].matchingRules. *
+ Recommended, for whether the update is recommended for
+ the current cluster.'
+ items:
+ description: "Condition contains details for one aspect
+ of the current state of this API Resource. --- This
+ struct is intended for direct use as an array at the
+ field path .status.conditions. For example, type FooStatus
+ struct{ // Represents the observations of a foo's current
+ state. // Known .status.conditions.type are: \"Available\",
+ \"Progressing\", and \"Degraded\" // +patchMergeKey=type
+ // +patchStrategy=merge // +listType=map // +listMapKey=type
+ Conditions []metav1.Condition `json:\"conditions,omitempty\"
+ patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
+ \n // other fields }"
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the last time the
+ condition transitioned from one status to another.
+ This should be when the underlying condition changed. If
+ that is not known, then using the time when the
+ API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: message is a human readable message indicating
+ details about the transition. This may be an empty
+ string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: observedGeneration represents the .metadata.generation
+ that the condition was set based upon. For instance,
+ if .metadata.generation is currently 12, but the
+ .status.conditions[x].observedGeneration is 9, the
+ condition is out of date with respect to the current
+ state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: reason contains a programmatic identifier
+ indicating the reason for the condition's last transition.
+ Producers of specific condition types may define
+ expected values and meanings for this field, and
+ whether the values are considered a guaranteed API.
+ The value should be a CamelCase string. This field
+ may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True,
+ False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in
+ foo.example.com/CamelCase. --- Many .condition.type
+ values are consistent across resources like Available,
+ but because arbitrary conditions can be useful (see
+ .node.status.conditions), the ability to deconflict
+ is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ release:
+ description: release is the target of the update.
+ properties:
+ channels:
+ description: channels is the set of Cincinnati channels
+ to which the release currently belongs.
+ items:
+ type: string
+ type: array
+ image:
+ description: image is a container image location that
+ contains the update. When this field is part of spec,
+ image is optional if version is specified and the
+ availableUpdates field contains a matching version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on
+ a release or the metadata returned by the update API
+ and should be displayed as a link in user interfaces.
+ The URL field may not be set for test or nightly releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec,
+ version is optional if image is specified.
+ type: string
+ type: object
+ risks:
+ description: risks represents the range of issues associated
+ with updating to the target release. The cluster-version
+ operator will evaluate all entries, and only recommend
+ the update if there is at least one entry and all entries
+ recommend the update.
+ items:
+ description: ConditionalUpdateRisk represents a reason
+ and cluster-state for not recommending a conditional
+ update.
+ properties:
+ matchingRules:
+ description: matchingRules is a slice of conditions
+ for deciding which clusters match the risk and which
+ do not. The slice is ordered by decreasing precedence.
+ The cluster-version operator will walk the slice
+ in order, and stop after the first it can successfully
+ evaluate. If no condition can be successfully evaluated,
+ the update will not be recommended.
+ items:
+ description: ClusterCondition is a union of typed
+ cluster conditions. The 'type' property determines
+ which of the type-specific properties are relevant.
+ When evaluated on a cluster, the condition may
+ match, not match, or fail to evaluate.
+ properties:
+ promql:
+ description: promQL represents a cluster condition
+ based on PromQL.
+ properties:
+ promql:
+ description: PromQL is a PromQL query classifying
+                                  clusters. This query should return
+ a 1 in the match case and a 0 in the does-not-match
+ case. Queries which return no time series,
+ or which return values besides 0 or 1,
+ are evaluation failures.
+ type: string
+ required:
+ - promql
+ type: object
+ type:
+ description: type represents the cluster-condition
+ type. This defines the members and semantics
+ of any additional properties.
+ enum:
+ - Always
+ - PromQL
+ type: string
+ required:
+ - type
+ type: object
+ minItems: 1
+ type: array
+ x-kubernetes-list-type: atomic
+ message:
+ description: message provides additional information
+ about the risk of updating, in the event that matchingRules
+ match the cluster state. This is only to be consumed
+ by humans. It may contain Line Feed characters (U+000A),
+ which should be rendered as new lines.
+ minLength: 1
+ type: string
+ name:
+ description: name is the CamelCase reason for not
+ recommending a conditional update, in the event
+ that matchingRules match the cluster state.
+ minLength: 1
+ type: string
+ url:
+ description: url contains information about this risk.
+ format: uri
+ minLength: 1
+ type: string
+ required:
+ - matchingRules
+ - message
+ - name
+ - url
+ type: object
+ minItems: 1
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - release
+ - risks
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
desired:
description: desired is the version that the cluster is reconciling
towards. If the cluster is not yet fully initialized desired
will be set with the information available, which may be an
image or a tag.
properties:
+ channels:
+ description: channels is the set of Cincinnati channels to
+ which the release currently belongs.
+ items:
+ type: string
+ type: array
image:
- description: Image is the image pullspec of an OCP release
- payload image.
- pattern: ^(\w+\S+)$
+ description: image is a container image location that contains
+ the update. When this field is part of spec, image is optional
+ if version is specified and the availableUpdates field contains
+ a matching version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on a release
+ or the metadata returned by the update API and should be
+ displayed as a link in user interfaces. The URL field may
+ not be set for test or nightly releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec, version
+ is optional if image is specified.
type: string
- required:
- - image
type: object
history:
description: history contains a list of the most recent versions
@@ -3251,11 +3532,3443 @@ spec:
format: int64
type: integer
required:
+ - availableUpdates
+ - desired
+ - observedGeneration
+ type: object
+ type: object
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: Version
+ jsonPath: .status.version.history[?(@.state=="Completed")].version
+ name: Version
+ type: string
+ - description: KubeConfig Secret
+ jsonPath: .status.kubeconfig.name
+ name: KubeConfig
+ type: string
+ - description: Progress
+ jsonPath: .status.version.history[?(@.state!="")].state
+ name: Progress
+ type: string
+ - description: Available
+ jsonPath: .status.conditions[?(@.type=="Available")].status
+ name: Available
+ type: string
+ - description: Progressing
+ jsonPath: .status.conditions[?(@.type=="Progressing")].status
+ name: Progressing
+ type: string
+ - description: Message
+ jsonPath: .status.conditions[?(@.type=="Available")].message
+ name: Message
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: HostedCluster is the primary representation of a HyperShift cluster
+ and encapsulates the control plane and common data plane configuration.
+ Creating a HostedCluster results in a fully functional OpenShift control
+ plane with no attached nodes. To support workloads (e.g. pods), a HostedCluster
+ may have one or more associated NodePool resources.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Spec is the desired behavior of the HostedCluster.
+ properties:
+ additionalTrustBundle:
+ description: AdditionalTrustBundle is a reference to a ConfigMap containing
+ a PEM-encoded X.509 certificate bundle that will be added to the
+ hosted controlplane and nodes
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ auditWebhook:
+ description: "AuditWebhook contains metadata for configuring an audit
+ webhook endpoint for a cluster to process cluster audit events.
+ It references a secret that contains the webhook information for
+ the audit webhook endpoint. It is a secret because if the endpoint
+ has mTLS the kubeconfig will contain client keys. The kubeconfig
+ needs to be stored in the secret with a secret key name that corresponds
+ to the constant AuditWebhookKubeconfigKey. \n This field is currently
+ only supported on the IBMCloud platform."
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ autoscaling:
+ description: Autoscaling specifies auto-scaling behavior that applies
+ to all NodePools associated with the control plane.
+ properties:
+ maxNodeProvisionTime:
+ description: MaxNodeProvisionTime is the maximum time to wait
+ for node provisioning before considering the provisioning to
+ be unsuccessful, expressed as a Go duration string. The default
+ is 15 minutes.
+ pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$
+ type: string
+ maxNodesTotal:
+ description: MaxNodesTotal is the maximum allowable number of
+ nodes across all NodePools for a HostedCluster. The autoscaler
+ will not grow the cluster beyond this number.
+ format: int32
+ minimum: 0
+ type: integer
+ maxPodGracePeriod:
+ description: MaxPodGracePeriod is the maximum seconds to wait
+ for graceful pod termination before scaling down a NodePool.
+ The default is 600 seconds.
+ format: int32
+ minimum: 0
+ type: integer
+ podPriorityThreshold:
+ description: "PodPriorityThreshold enables users to schedule \"best-effort\"
+ pods, which shouldn't trigger autoscaler actions, but only run
+ when there are spare resources available. The default is -10.
+ \n See the following for more details: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-does-cluster-autoscaler-work-with-pod-priority-and-preemption"
+ format: int32
+ type: integer
+ type: object
+ channel:
+ description: channel is an identifier for explicitly requesting that
+ a non-default set of updates be applied to this cluster. The default
+ channel will contain stable updates that are appropriate for
+ production clusters.
+ type: string
+ clusterID:
+ description: ClusterID uniquely identifies this cluster. This is expected
+ to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ in hexadecimal values). As with a Kubernetes metadata.uid, this
+ ID uniquely identifies this cluster in space and time. This value
+ identifies the cluster in metrics pushed to telemetry and metrics
+ produced by the control plane operators. If a value is not specified,
+ an ID is generated. After initial creation, the value is immutable.
+ pattern: '[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}'
+ type: string
+ configuration:
+ description: Configuration specifies configuration for individual
+ OCP components in the cluster, represented as embedded resources
+ that correspond to the openshift configuration API.
+ properties:
+ apiServer:
+ description: APIServer holds configuration (like serving certificates,
+ client CA and CORS domains) shared by all API servers in the
+ system, among them especially kube-apiserver and openshift-apiserver.
+ properties:
+ additionalCORSAllowedOrigins:
+ description: additionalCORSAllowedOrigins lists additional,
+ user-defined regular expressions describing hosts for which
+ the API server allows access using the CORS headers. This
+ may be needed to access the API and the integrated OAuth
+ server from JavaScript applications. The values are regular
+ expressions that correspond to the Golang regular expression
+ language.
+ items:
+ type: string
+ type: array
+ audit:
+ default:
+ profile: Default
+ description: audit specifies the settings for audit configuration
+ to be applied to all OpenShift-provided API servers in the
+ cluster.
+ properties:
+ customRules:
+ description: customRules specify profiles per group. These
+ profiles take precedence over the top-level profile field
+ if they apply. They are evaluated from top to bottom
+ and the first one that matches, applies.
+ items:
+ description: AuditCustomRule describes a custom rule
+ for an audit profile that takes precedence over the
+ top-level profile.
+ properties:
+ group:
+ description: group is the name of a group a request
+ user must be a member of in order for this profile
+ to apply.
+ minLength: 1
+ type: string
+ profile:
+ description: "profile specifies the name of the
+ desired audit policy configuration to be deployed
+ to all OpenShift-provided API servers in the cluster.
+ \n The following profiles are provided: - Default:
+ the existing default policy. - WriteRequestBodies:
+ like 'Default', but logs request and response
+ HTTP payloads for write requests (create, update,
+ patch). - AllRequestBodies: like 'WriteRequestBodies',
+ but also logs request and response HTTP payloads
+ for read requests (get, list). - None: no requests
+ are logged at all, not even oauthaccesstokens
+ and oauthauthorizetokens. \n If unset, the 'Default'
+ profile is used as the default."
+ enum:
+ - Default
+ - WriteRequestBodies
+ - AllRequestBodies
+ - None
+ type: string
+ required:
+ - group
+ - profile
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - group
+ x-kubernetes-list-type: map
+ profile:
+ default: Default
+ description: "profile specifies the name of the desired
+ top-level audit profile to be applied to all requests
+ sent to any of the OpenShift-provided API servers in
+ the cluster (kube-apiserver, openshift-apiserver and
+ oauth-apiserver), with the exception of those requests
+ that match one or more of the customRules. \n The following
+ profiles are provided: - Default: default policy which
+ means MetaData level logging with the exception of events
+ (not logged at all), oauthaccesstokens and oauthauthorizetokens
+ (both logged at RequestBody level). - WriteRequestBodies:
+ like 'Default', but logs request and response HTTP payloads
+ for write requests (create, update, patch). - AllRequestBodies:
+ like 'WriteRequestBodies', but also logs request and
+ response HTTP payloads for read requests (get, list).
+ - None: no requests are logged at all, not even oauthaccesstokens
+ and oauthauthorizetokens. \n Warning: It is not recommended
+ to disable audit logging by using the `None` profile
+ unless you are fully aware of the risks of not logging
+ data that can be beneficial when troubleshooting issues.
+ If you disable audit logging and a support situation
+ arises, you might need to enable audit logging and reproduce
+ the issue in order to troubleshoot properly. \n If unset,
+ the 'Default' profile is used as the default."
+ enum:
+ - Default
+ - WriteRequestBodies
+ - AllRequestBodies
+ - None
+ type: string
+ type: object
+ clientCA:
+ description: 'clientCA references a ConfigMap containing a
+ certificate bundle for the signers that will be recognized
+ for incoming client certificates in addition to the operator
+ managed signers. If this is empty, then only operator managed
+ signers are valid. You usually only have to set this if
+ you have your own PKI you wish to honor client certificates
+ from. The ConfigMap must exist in the openshift-config namespace
+ and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"]
+ - CA bundle.'
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ required:
+ - name
+ type: object
+ encryption:
+ description: encryption allows the configuration of encryption
+ of resources at the datastore layer.
+ properties:
+ type:
+ description: "type defines what encryption type should
+ be used to encrypt resources at the datastore layer.
+ When this field is unset (i.e. when it is set to the
+ empty string), identity is implied. The behavior of
+ unset can and will change over time. Even if encryption
+ is enabled by default, the meaning of unset may change
+ to a different encryption type based on changes in best
+ practices. \n When encryption is enabled, all sensitive
+ resources shipped with the platform are encrypted. This
+ list of sensitive resources can and will change over
+ time. The current authoritative list is: \n 1. secrets
+ 2. configmaps 3. routes.route.openshift.io 4. oauthaccesstokens.oauth.openshift.io
+ 5. oauthauthorizetokens.oauth.openshift.io"
+ enum:
+ - ""
+ - identity
+ - aescbc
+ type: string
+ type: object
+ servingCerts:
+ description: servingCert is the TLS cert info for serving
+ secure traffic. If not specified, operator managed certificates
+ will be used for serving secure traffic.
+ properties:
+ namedCertificates:
+ description: namedCertificates references secrets containing
+ the TLS cert info for serving secure traffic to specific
+ hostnames. If no named certificates are provided, or
+ no named certificates match the server name as understood
+ by a client, the defaultServingCertificate will be used.
+ items:
+ description: APIServerNamedServingCert maps a server
+ DNS name, as understood by a client, to a certificate.
+ properties:
+ names:
+ description: names is an optional list of explicit
+ DNS names (leading wildcards allowed) that should
+ use this certificate to serve secure traffic.
+ If no names are provided, the implicit names will
+ be extracted from the certificates. Exact names
+ trump over wildcard names. Explicit names defined
+ here trump over extracted implicit names.
+ items:
+ type: string
+ type: array
+ servingCertificate:
+ description: 'servingCertificate references a kubernetes.io/tls
+ type secret containing the TLS cert info for serving
+ secure traffic. The secret must exist in the openshift-config
+ namespace and contain the following required fields:
+ - Secret.Data["tls.key"] - TLS private key. -
+ Secret.Data["tls.crt"] - TLS certificate.'
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ type: object
+ type: array
+ type: object
+ tlsSecurityProfile:
+ description: "tlsSecurityProfile specifies settings for TLS
+ connections for externally exposed servers. \n If unset,
+ a default (which may change between releases) is chosen.
+ Note that only Old, Intermediate and Custom profiles are
+ currently supported, and the maximum available MinTLSVersions
+ is VersionTLS12."
+ properties:
+ custom:
+ description: "custom is a user-defined TLS security profile.
+ Be extremely careful using a custom profile as invalid
+ configurations can be catastrophic. An example custom
+ profile looks like this: \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305
+ - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256
+ - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: TLSv1.1"
+ nullable: true
+ properties:
+ ciphers:
+ description: "ciphers is used to specify the cipher
+ algorithms that are negotiated during the TLS handshake.
+ \ Operators may remove entries their operands do
+ not support. For example, to use DES-CBC3-SHA (yaml):
+ \n ciphers: - DES-CBC3-SHA"
+ items:
+ type: string
+ type: array
+ minTLSVersion:
+ description: "minTLSVersion is used to specify the
+ minimal version of the TLS protocol that is negotiated
+ during the TLS handshake. For example, to use TLS
+ versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion:
+ TLSv1.1 \n NOTE: currently the highest minTLSVersion
+ allowed is VersionTLS12"
+ enum:
+ - VersionTLS10
+ - VersionTLS11
+ - VersionTLS12
+ - VersionTLS13
+ type: string
+ type: object
+ intermediate:
+ description: "intermediate is a TLS security profile based
+ on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29
+ \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
+ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
+ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256
+ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384
+ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305
+ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384
+ minTLSVersion: TLSv1.2"
+ nullable: true
+ type: object
+ modern:
+ description: "modern is a TLS security profile based on:
+ \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+ \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
+ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
+ minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported."
+ nullable: true
+ type: object
+ old:
+ description: "old is a TLS security profile based on:
+ \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
+ \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
+ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
+ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256
+ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384
+ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305
+ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384
+ - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256
+ - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA -
+ ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384
+ - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256
+ - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384
+ - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA
+ - DES-CBC3-SHA minTLSVersion: TLSv1.0"
+ nullable: true
+ type: object
+ type:
+ description: "type is one of Old, Intermediate, Modern
+ or Custom. Custom provides the ability to specify individual
+ TLS security profile parameters. Old, Intermediate and
+ Modern are TLS security profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations
+ \n The profiles are intent based, so they may change
+ over time as new ciphers are developed and existing
+ ciphers are found to be insecure. Depending on precisely
+ which ciphers are available to a process, the list may
+ be reduced. \n Note that the Modern profile is currently
+ not supported because it is not yet well adopted by
+ common software libraries."
+ enum:
+ - Old
+ - Intermediate
+ - Modern
+ - Custom
+ type: string
+ type: object
+ type: object
+ authentication:
+ description: Authentication specifies cluster-wide settings for
+ authentication (like OAuth and webhook token authenticators).
+ properties:
+ oauthMetadata:
+ description: 'oauthMetadata contains the discovery endpoint
+ data for OAuth 2.0 Authorization Server Metadata for an
+ external OAuth server. This discovery document can be viewed
+ from its served location: oc get --raw ''/.well-known/oauth-authorization-server''
+ For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
+ If oauthMetadata.name is non-empty, this value has precedence
+ over any metadata reference stored in status. The key "oauthMetadata"
+ is used to locate the data. If specified and the config
+ map or expected key is not found, no metadata is served.
+ If the specified metadata is not valid, no metadata is served.
+ The namespace for this config map is openshift-config.'
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ required:
+ - name
+ type: object
+ serviceAccountIssuer:
+ description: 'serviceAccountIssuer is the identifier of the
+ bound service account token issuer. The default is https://kubernetes.default.svc
+ WARNING: Updating this field will result in the invalidation
+ of all bound tokens with the previous issuer value. Unless
+ the holder of a bound token has explicit support for a change
+ in issuer, they will not request a new bound token until
+ pod restart or until their existing token exceeds 80% of
+ its duration.'
+ type: string
+ type:
+ description: type identifies the cluster managed, user facing
+ authentication mode in use. Specifically, it manages the
+ component that responds to login attempts. The default is
+ IntegratedOAuth.
+ type: string
+ webhookTokenAuthenticator:
+ description: webhookTokenAuthenticator configures a remote
+ token reviewer. These remote authentication webhooks can
+ be used to verify bearer tokens via the tokenreviews.authentication.k8s.io
+ REST API. This is required to honor bearer tokens that are
+ provisioned by an external authentication service.
+ properties:
+ kubeConfig:
+ description: "kubeConfig references a secret that contains
+ kube config file data which describes how to access
+ the remote webhook service. The namespace for the referenced
+ secret is openshift-config. \n For further details,
+ see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
+ \n The key \"kubeConfig\" is used to locate the data.
+ If the secret or expected key is not found, the webhook
+ is not honored. If the specified kube config data is
+ not valid, the webhook is not honored."
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ required:
+ - name
+ type: object
+ required:
+ - kubeConfig
+ type: object
+ webhookTokenAuthenticators:
+ description: webhookTokenAuthenticators is DEPRECATED, setting
+ it has no effect.
+ items:
+ description: deprecatedWebhookTokenAuthenticator holds the
+ necessary configuration options for a remote token authenticator.
+ It's the same as WebhookTokenAuthenticator but it's missing
+ the 'required' validation on KubeConfig field.
+ properties:
+ kubeConfig:
+ description: 'kubeConfig contains kube config file data
+ which describes how to access the remote webhook service.
+ For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
+ The key "kubeConfig" is used to locate the data. If
+ the secret or expected key is not found, the webhook
+ is not honored. If the specified kube config data
+ is not valid, the webhook is not honored. The namespace
+ for this secret is determined by the point of use.'
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ required:
+ - name
+ type: object
+ type: object
+ type: array
+ type: object
+ featureGate:
+ description: FeatureGate holds cluster-wide information about
+ feature gates.
+ properties:
+ customNoUpgrade:
+ description: customNoUpgrade allows the enabling or disabling
+ of any feature. Turning this feature set on IS NOT SUPPORTED,
+ CANNOT BE UNDONE, and PREVENTS UPGRADES. Because of its
+ nature, this setting cannot be validated. If you have any
+ typos or accidentally apply invalid combinations your cluster
+ may fail in an unrecoverable way. featureSet must equal
+ "CustomNoUpgrade" must be set to use this field.
+ nullable: true
+ properties:
+ disabled:
+ description: disabled is a list of all feature gates that
+ you want to force off
+ items:
+ type: string
+ type: array
+ enabled:
+ description: enabled is a list of all feature gates that
+ you want to force on
+ items:
+ type: string
+ type: array
+ type: object
+ featureSet:
+ description: featureSet changes the list of features in the
+ cluster. The default is empty. Be very careful adjusting
+ this setting. Turning on or off features may cause irreversible
+ changes in your cluster which cannot be undone.
+ type: string
+ type: object
+ image:
+ description: Image governs policies related to imagestream imports
+ and runtime configuration for external registries. It allows
+ cluster admins to configure which registries OpenShift is allowed
+ to import images from, extra CA trust bundles for external registries,
+ and policies to block or allow registry hostnames. When exposing
+ OpenShift's image registry to the public, this also lets cluster
+ admins specify the external hostname.
+ properties:
+ additionalTrustedCA:
+ description: additionalTrustedCA is a reference to a ConfigMap
+ containing additional CAs that should be trusted during
+ imagestream import, pod image pull, build image pull, and
+ imageregistry pullthrough. The namespace for this config
+ map is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ required:
+ - name
+ type: object
+ allowedRegistriesForImport:
+ description: allowedRegistriesForImport limits the container
+ image registries that normal users may import images from.
+ Set this list to the registries that you trust to contain
+ valid Docker images and that you want applications to be
+ able to import from. Users with permission to create Images
+ or ImageStreamMappings via the API are not affected by this
+ policy - typically only administrators or system integrations
+ will have those permissions.
+ items:
+ description: RegistryLocation contains a location of the
+ registry specified by the registry domain name. The domain
+ name might include wildcards, like '*' or '??'.
+ properties:
+ domainName:
+ description: domainName specifies a domain name for
+ the registry. In case the registry uses a non-standard
+ (80 or 443) port, the port should be included in the
+ domain name as well.
+ type: string
+ insecure:
+ description: insecure indicates whether the registry
+ is secure (https) or insecure (http) By default (if
+ not specified) the registry is assumed as secure.
+ type: boolean
+ type: object
+ type: array
+ externalRegistryHostnames:
+ description: externalRegistryHostnames provides the hostnames
+ for the default external image registry. The external hostname
+ should be set only when the image registry is exposed externally.
+ The first value is used in 'publicDockerImageRepository'
+ field in ImageStreams. The value must be in "hostname[:port]"
+ format.
+ items:
+ type: string
+ type: array
+ registrySources:
+ description: registrySources contains configuration that determines
+ how the container runtime should treat individual registries
+ when accessing images for builds+pods. (e.g. whether or
+ not to allow insecure access). It does not contain configuration
+ for the internal cluster registry.
+ properties:
+ allowedRegistries:
+ description: "allowedRegistries are the only registries
+ permitted for image pull and push actions. All other
+ registries are denied. \n Only one of BlockedRegistries
+ or AllowedRegistries may be set."
+ items:
+ type: string
+ type: array
+ blockedRegistries:
+ description: "blockedRegistries cannot be used for image
+ pull and push actions. All other registries are permitted.
+ \n Only one of BlockedRegistries or AllowedRegistries
+ may be set."
+ items:
+ type: string
+ type: array
+ containerRuntimeSearchRegistries:
+ description: 'containerRuntimeSearchRegistries are registries
+ that will be searched when pulling images that do not
+ have fully qualified domains in their pull specs. Registries
+ will be searched in the order provided in the list.
+ Note: this search list only works with the container
+ runtime, i.e CRI-O. Will NOT work with builds or imagestream
+ imports.'
+ format: hostname
+ items:
+ type: string
+ minItems: 1
+ type: array
+ x-kubernetes-list-type: set
+ insecureRegistries:
+ description: insecureRegistries are registries which do
+ not have a valid TLS certificates or only support HTTP
+ connections.
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ ingress:
+ description: Ingress holds cluster-wide information about ingress,
+ including the default ingress domain used for routes.
+ properties:
+ appsDomain:
+ description: appsDomain is an optional domain to use instead
+ of the one specified in the domain field when a Route is
+ created without specifying an explicit host. If appsDomain
+ is nonempty, this value is used to generate default host
+ values for Route. Unlike domain, appsDomain may be modified
+ after installation. This assumes a new ingresscontroller
+ has been setup with a wildcard certificate.
+ type: string
+ componentRoutes:
+ description: "componentRoutes is an optional list of routes
+ that are managed by OpenShift components that a cluster-admin
+ is able to configure the hostname and serving certificate
+ for. The namespace and name of each route in this list should
+ match an existing entry in the status.componentRoutes list.
+ \n To determine the set of configurable Routes, look at
+ namespace and name of entries in the .status.componentRoutes
+ list, where participating operators write the status of
+ configurable routes."
+ items:
+ description: ComponentRouteSpec allows for configuration
+ of a route's hostname and serving certificate.
+ properties:
+ hostname:
+ description: hostname is the hostname that should be
+ used by the route.
+ pattern: ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$
+ type: string
+ name:
+ description: "name is the logical name of the route
+ to customize. \n The namespace and name of this componentRoute
+ must match a corresponding entry in the list of status.componentRoutes
+ if the route is to be customized."
+ maxLength: 256
+ minLength: 1
+ type: string
+ namespace:
+ description: "namespace is the namespace of the route
+ to customize. \n The namespace and name of this componentRoute
+ must match a corresponding entry in the list of status.componentRoutes
+ if the route is to be customized."
+ maxLength: 63
+ minLength: 1
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
+ type: string
+ servingCertKeyPairSecret:
+ description: servingCertKeyPairSecret is a reference
+ to a secret of type `kubernetes.io/tls` in the openshift-config
+ namespace. The serving cert/key pair must match and
+ will be used by the operator to fulfill the intent
+ of serving with this name. If the custom hostname
+ uses the default routing suffix of the cluster, the
+ Secret specification for a serving certificate will
+ not be needed.
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ required:
+ - name
+ type: object
+ required:
+ - hostname
+ - name
+ - namespace
+ type: object
+ type: array
+ domain:
+ description: "domain is used to generate a default host name
+ for a route when the route's host name is empty. The generated
+ host name will follow this pattern: \"..\".
+ \n It is also used as the default wildcard domain suffix
+ for ingress. The default ingresscontroller domain will follow
+ this pattern: \"*.\". \n Once set, changing domain
+ is not currently supported."
+ type: string
+ requiredHSTSPolicies:
+ description: "requiredHSTSPolicies specifies HSTS policies
+ that are required to be set on newly created or updated
+ routes matching the domainPattern/s and namespaceSelector/s
+ that are specified in the policy. Each requiredHSTSPolicy
+ must have at least a domainPattern and a maxAge to validate
+ a route HSTS Policy route annotation, and affect route admission.
+ \n A candidate route is checked for HSTS Policies if it
+ has the HSTS Policy route annotation: \"haproxy.router.openshift.io/hsts_header\"
+ E.g. haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains
+ \n - For each candidate route, if it matches a requiredHSTSPolicy
+ domainPattern and optional namespaceSelector, then the maxAge,
+ preloadPolicy, and includeSubdomainsPolicy must be valid
+ to be admitted. Otherwise, the route is rejected. - The
+ first match, by domainPattern and optional namespaceSelector,
+ in the ordering of the RequiredHSTSPolicies determines the
+ route's admission status. - If the candidate route doesn't
+ match any requiredHSTSPolicy domainPattern and optional
+ namespaceSelector, then it may use any HSTS Policy annotation.
+ \n The HSTS policy configuration may be changed after routes
+ have already been created. An update to a previously admitted
+ route may then fail if the updated route does not conform
+ to the updated HSTS policy configuration. However, changing
+ the HSTS policy configuration will not cause a route that
+ is already admitted to stop working. \n Note that if there
+ are no RequiredHSTSPolicies, any HSTS Policy annotation
+ on the route is valid."
+ items:
+ properties:
+ domainPatterns:
+ description: "domainPatterns is a list of domains for
+ which the desired HSTS annotations are required. If
+ domainPatterns is specified and a route is created
+ with a spec.host matching one of the domains, the
+ route must specify the HSTS Policy components described
+ in the matching RequiredHSTSPolicy. \n The use of
+ wildcards is allowed like this: *.foo.com matches
+ everything under foo.com. foo.com only matches foo.com,
+ so to cover foo.com and everything under it, you must
+ specify *both*."
+ items:
+ type: string
+ minItems: 1
+ type: array
+ includeSubDomainsPolicy:
+ description: 'includeSubDomainsPolicy means the HSTS
+ Policy should apply to any subdomains of the host''s
+ domain name. Thus, for the host bar.foo.com, if includeSubDomainsPolicy
+ was set to RequireIncludeSubDomains: - the host app.bar.foo.com
+ would inherit the HSTS Policy of bar.foo.com - the
+ host bar.foo.com would inherit the HSTS Policy of
+ bar.foo.com - the host foo.com would NOT inherit the
+ HSTS Policy of bar.foo.com - the host def.foo.com
+ would NOT inherit the HSTS Policy of bar.foo.com'
+ enum:
+ - RequireIncludeSubDomains
+ - RequireNoIncludeSubDomains
+ - NoOpinion
+ type: string
+ maxAge:
+ description: maxAge is the delta time range in seconds
+ during which hosts are regarded as HSTS hosts. If
+ set to 0, it negates the effect, and hosts are removed
+ as HSTS hosts. If set to 0 and includeSubdomains is
+ specified, all subdomains of the host are also removed
+ as HSTS hosts. maxAge is a time-to-live value, and
+ if this policy is not refreshed on a client, the HSTS
+ policy will eventually expire on that client.
+ properties:
+ largestMaxAge:
+ description: The largest allowed value (in seconds)
+ of the RequiredHSTSPolicy max-age This value can
+ be left unspecified, in which case no upper limit
+ is enforced.
+ format: int32
+ maximum: 2147483647
+ minimum: 0
+ type: integer
+ smallestMaxAge:
+ description: The smallest allowed value (in seconds)
+ of the RequiredHSTSPolicy max-age Setting max-age=0
+ allows the deletion of an existing HSTS header
+ from a host. This is a necessary tool for administrators
+ to quickly correct mistakes. This value can be
+ left unspecified, in which case no lower limit
+ is enforced.
+ format: int32
+ maximum: 2147483647
+ minimum: 0
+ type: integer
+ type: object
+ namespaceSelector:
+ description: namespaceSelector specifies a label selector
+ such that the policy applies only to those routes
+ that are in namespaces with labels that match the
+ selector, and are in one of the DomainPatterns. Defaults
+ to the empty LabelSelector, which matches everything.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a
+ selector that contains values, a key, and an
+ operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are
+ In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty. If the
+ operator is Exists or DoesNotExist, the
+ values array must be empty. This array is
+ replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is "In",
+ and the values array contains only "value". The
+ requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ preloadPolicy:
+ description: preloadPolicy directs the client to include
+ hosts in its host preload list so that it never needs
+ to do an initial load to get the HSTS header (note
+ that this is not defined in RFC 6797 and is therefore
+ client implementation-dependent).
+ enum:
+ - RequirePreload
+ - RequireNoPreload
+ - NoOpinion
+ type: string
+ required:
+ - domainPatterns
+ type: object
+ type: array
+ type: object
+ network:
+ description: 'Network holds cluster-wide information about the
+ network. It is used to configure the desired network configuration,
+ such as: IP address pools for services/pod IPs, network plugin,
+ etc. Please view network.spec for an explanation on what applies
+ when configuring this resource. TODO (csrwng): Add validation
+ here to exclude changes that conflict with networking settings
+ in the HostedCluster.Spec.Networking field.'
+ properties:
+ clusterNetwork:
+ description: IP address pool to use for pod IPs. This field
+ is immutable after installation.
+ items:
+ description: ClusterNetworkEntry is a contiguous block of
+ IP addresses from which pod IPs are allocated.
+ properties:
+ cidr:
+ description: The complete block for pod IPs.
+ type: string
+ hostPrefix:
+ description: The size (prefix) of block to allocate
+ to each node. If this field is not used by the plugin,
+ it can be left unset.
+ format: int32
+ minimum: 0
+ type: integer
+ type: object
+ type: array
+ externalIP:
+ description: externalIP defines configuration for controllers
+ that affect Service.ExternalIP. If nil, then ExternalIP
+ is not allowed to be set.
+ properties:
+ autoAssignCIDRs:
+ description: autoAssignCIDRs is a list of CIDRs from which
+ to automatically assign Service.ExternalIP. These are
+ assigned when the service is of type LoadBalancer. In
+ general, this is only useful for bare-metal clusters.
+ In Openshift 3.x, this was misleadingly called "IngressIPs".
+ Automatically assigned External IPs are not affected
+ by any ExternalIPPolicy rules. Currently, only one entry
+ may be provided.
+ items:
+ type: string
+ type: array
+ policy:
+ description: policy is a set of restrictions applied to
+ the ExternalIP field. If nil or empty, then ExternalIP
+ is not allowed to be set.
+ properties:
+ allowedCIDRs:
+ description: allowedCIDRs is the list of allowed CIDRs.
+ items:
+ type: string
+ type: array
+ rejectedCIDRs:
+ description: rejectedCIDRs is the list of disallowed
+ CIDRs. These take precedence over allowedCIDRs.
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ networkType:
+ description: 'NetworkType is the plugin that is to be deployed
+ (e.g. OpenShiftSDN). This should match a value that the
+ cluster-network-operator understands, or else no networking
+ will be installed. Currently supported values are: - OpenShiftSDN
+ This field is immutable after installation.'
+ type: string
+ serviceNetwork:
+ description: IP address pool for services. Currently, we only
+ support a single entry here. This field is immutable after
+ installation.
+ items:
+ type: string
+ type: array
+ serviceNodePortRange:
+ description: The port range allowed for Services of type NodePort.
+ If not specified, the default of 30000-32767 will be used.
+ Such Services without a NodePort specified will have one
+ automatically allocated from this range. This parameter
+ can be updated after the cluster is installed.
+ pattern: ^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: string
+ type: object
+ oauth:
+ description: OAuth holds cluster-wide information about OAuth.
+ It is used to configure the integrated OAuth server. This configuration
+ is only honored when the top level Authentication config has
+ type set to IntegratedOAuth.
+ properties:
+ identityProviders:
+ description: identityProviders is an ordered list of ways
+ for a user to identify themselves. When this list is empty,
+ no identities are provisioned for users.
+ items:
+ description: IdentityProvider provides identities for users
+ authenticating using credentials
+ properties:
+ basicAuth:
+ description: basicAuth contains configuration options
+ for the BasicAuth IdP
+ properties:
+ ca:
+ description: ca is an optional reference to a config
+ map by name containing the PEM-encoded CA bundle.
+ It is used as a trust anchor to validate the TLS
+ certificate presented by the remote server. The
+ key "ca.crt" is used to locate the data. If specified
+ and the config map or expected key is not found,
+ the identity provider is not honored. If the specified
+ ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots
+ are used. The namespace for this config map is
+ openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ tlsClientCert:
+ description: tlsClientCert is an optional reference
+ to a secret by name that contains the PEM-encoded
+ TLS client certificate to present when connecting
+ to the server. The key "tls.crt" is used to locate
+ the data. If specified and the secret or expected
+ key is not found, the identity provider is not
+ honored. If the specified certificate data is
+ not valid, the identity provider is not honored.
+ The namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ tlsClientKey:
+ description: tlsClientKey is an optional reference
+ to a secret by name that contains the PEM-encoded
+ TLS private key for the client certificate referenced
+ in tlsClientCert. The key "tls.key" is used to
+ locate the data. If specified and the secret or
+ expected key is not found, the identity provider
+ is not honored. If the specified certificate data
+ is not valid, the identity provider is not honored.
+ The namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ url:
+ description: url is the remote URL to connect to
+ type: string
+ type: object
+ github:
+ description: github enables user authentication using
+ GitHub credentials
+ properties:
+ ca:
+ description: ca is an optional reference to a config
+ map by name containing the PEM-encoded CA bundle.
+ It is used as a trust anchor to validate the TLS
+ certificate presented by the remote server. The
+ key "ca.crt" is used to locate the data. If specified
+ and the config map or expected key is not found,
+ the identity provider is not honored. If the specified
+ ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots
+ are used. This can only be configured when hostname
+ is set to a non-empty value. The namespace for
+ this config map is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference
+ to the secret by name containing the oauth client
+ secret. The key "clientSecret" is used to locate
+ the data. If the secret or expected key is not
+ found, the identity provider is not honored. The
+ namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ hostname:
+ description: hostname is the optional domain (e.g.
+ "mycompany.com") for use with a hosted instance
+ of GitHub Enterprise. It must match the GitHub
+ Enterprise settings value configured at /setup/settings#hostname.
+ type: string
+ organizations:
+ description: organizations optionally restricts
+ which organizations are allowed to log in
+ items:
+ type: string
+ type: array
+ teams:
+ description: teams optionally restricts which teams
+ are allowed to log in. Format is /.
+ items:
+ type: string
+ type: array
+ type: object
+ gitlab:
+ description: gitlab enables user authentication using
+ GitLab credentials
+ properties:
+ ca:
+ description: ca is an optional reference to a config
+ map by name containing the PEM-encoded CA bundle.
+ It is used as a trust anchor to validate the TLS
+ certificate presented by the remote server. The
+ key "ca.crt" is used to locate the data. If specified
+ and the config map or expected key is not found,
+ the identity provider is not honored. If the specified
+ ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots
+ are used. The namespace for this config map is
+ openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference
+ to the secret by name containing the oauth client
+ secret. The key "clientSecret" is used to locate
+ the data. If the secret or expected key is not
+ found, the identity provider is not honored. The
+ namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ url:
+ description: url is the oauth server base URL
+ type: string
+ type: object
+ google:
+ description: google enables user authentication using
+ Google credentials
+ properties:
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference
+ to the secret by name containing the oauth client
+ secret. The key "clientSecret" is used to locate
+ the data. If the secret or expected key is not
+ found, the identity provider is not honored. The
+ namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ hostedDomain:
+ description: hostedDomain is the optional Google
+ App domain (e.g. "mycompany.com") to restrict
+ logins to
+ type: string
+ type: object
+ htpasswd:
+ description: htpasswd enables user authentication using
+ an HTPasswd file to validate credentials
+ properties:
+ fileData:
+ description: fileData is a required reference to
+ a secret by name containing the data to use as
+ the htpasswd file. The key "htpasswd" is used
+ to locate the data. If the secret or expected
+ key is not found, the identity provider is not
+ honored. If the specified htpasswd data is not
+ valid, the identity provider is not honored. The
+ namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ type: object
+ keystone:
+ description: keystone enables user authentication using
+ keystone password credentials
+ properties:
+ ca:
+ description: ca is an optional reference to a config
+ map by name containing the PEM-encoded CA bundle.
+ It is used as a trust anchor to validate the TLS
+ certificate presented by the remote server. The
+ key "ca.crt" is used to locate the data. If specified
+ and the config map or expected key is not found,
+ the identity provider is not honored. If the specified
+ ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots
+ are used. The namespace for this config map is
+ openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ domainName:
+ description: domainName is required for keystone
+ v3
+ type: string
+ tlsClientCert:
+ description: tlsClientCert is an optional reference
+ to a secret by name that contains the PEM-encoded
+ TLS client certificate to present when connecting
+ to the server. The key "tls.crt" is used to locate
+ the data. If specified and the secret or expected
+ key is not found, the identity provider is not
+ honored. If the specified certificate data is
+ not valid, the identity provider is not honored.
+ The namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ tlsClientKey:
+ description: tlsClientKey is an optional reference
+ to a secret by name that contains the PEM-encoded
+ TLS private key for the client certificate referenced
+ in tlsClientCert. The key "tls.key" is used to
+ locate the data. If specified and the secret or
+ expected key is not found, the identity provider
+ is not honored. If the specified certificate data
+ is not valid, the identity provider is not honored.
+ The namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ url:
+ description: url is the remote URL to connect to
+ type: string
+ type: object
+ ldap:
+ description: ldap enables user authentication using
+ LDAP credentials
+ properties:
+ attributes:
+ description: attributes maps LDAP attributes to
+ identities
+ properties:
+ email:
+ description: email is the list of attributes
+ whose values should be used as the email address.
+ Optional. If unspecified, no email is set
+ for the identity
+ items:
+ type: string
+ type: array
+ id:
+ description: id is the list of attributes whose
+ values should be used as the user ID. Required.
+ First non-empty attribute is used. At least
+ one attribute is required. If none of the
+ listed attribute have a value, authentication
+ fails. LDAP standard identity attribute is
+ "dn"
+ items:
+ type: string
+ type: array
+ name:
+ description: name is the list of attributes
+ whose values should be used as the display
+ name. Optional. If unspecified, no display
+ name is set for the identity LDAP standard
+ display name attribute is "cn"
+ items:
+ type: string
+ type: array
+ preferredUsername:
+ description: preferredUsername is the list of
+ attributes whose values should be used as
+ the preferred username. LDAP standard login
+ attribute is "uid"
+ items:
+ type: string
+ type: array
+ type: object
+ bindDN:
+ description: bindDN is an optional DN to bind with
+ during the search phase.
+ type: string
+ bindPassword:
+ description: bindPassword is an optional reference
+ to a secret by name containing a password to bind
+ with during the search phase. The key "bindPassword"
+ is used to locate the data. If specified and the
+ secret or expected key is not found, the identity
+ provider is not honored. The namespace for this
+ secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ ca:
+ description: ca is an optional reference to a config
+ map by name containing the PEM-encoded CA bundle.
+ It is used as a trust anchor to validate the TLS
+ certificate presented by the remote server. The
+ key "ca.crt" is used to locate the data. If specified
+ and the config map or expected key is not found,
+ the identity provider is not honored. If the specified
+ ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots
+ are used. The namespace for this config map is
+ openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ insecure:
+ description: 'insecure, if true, indicates the connection
+ should not use TLS WARNING: Should not be set
+ to `true` with the URL scheme "ldaps://" as "ldaps://"
+ URLs always attempt to connect using TLS, even
+ when `insecure` is set to `true` When `true`,
+ "ldap://" URLS connect insecurely. When `false`,
+ "ldap://" URLs are upgraded to a TLS connection
+ using StartTLS as specified in https://tools.ietf.org/html/rfc2830.'
+ type: boolean
+ url:
+ description: 'url is an RFC 2255 URL which specifies
+ the LDAP search parameters to use. The syntax
+ of the URL is: ldap://host:port/basedn?attribute?scope?filter'
+ type: string
+ type: object
+ mappingMethod:
+ description: mappingMethod determines how identities
+ from this provider are mapped to users Defaults to
+ "claim"
+ type: string
+ name:
+ description: 'name is used to qualify the identities
+ returned by this provider. - It MUST be unique and
+ not shared by any other identity provider used - It
+ MUST be a valid path segment: name cannot equal "."
+ or ".." or contain "/" or "%" or ":" Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName'
+ type: string
+ openID:
+ description: openID enables user authentication using
+ OpenID credentials
+ properties:
+ ca:
+ description: ca is an optional reference to a config
+ map by name containing the PEM-encoded CA bundle.
+ It is used as a trust anchor to validate the TLS
+ certificate presented by the remote server. The
+ key "ca.crt" is used to locate the data. If specified
+ and the config map or expected key is not found,
+ the identity provider is not honored. If the specified
+ ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots
+ are used. The namespace for this config map is
+ openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ claims:
+ description: claims mappings
+ properties:
+ email:
+ description: email is the list of claims whose
+ values should be used as the email address.
+ Optional. If unspecified, no email is set
+ for the identity
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ groups:
+ description: groups is the list of claims value
+ of which should be used to synchronize groups
+ from the OIDC provider to OpenShift for the
+ user. If multiple claims are specified, the
+ first one with a non-empty value is used.
+ items:
+ description: OpenIDClaim represents a claim
+ retrieved from an OpenID provider's tokens
+ or userInfo responses
+ minLength: 1
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ description: name is the list of claims whose
+ values should be used as the display name.
+ Optional. If unspecified, no display name
+ is set for the identity
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ preferredUsername:
+ description: preferredUsername is the list of
+ claims whose values should be used as the
+ preferred username. If unspecified, the preferred
+ username is determined from the value of the
+ sub claim
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference
+ to the secret by name containing the oauth client
+ secret. The key "clientSecret" is used to locate
+ the data. If the secret or expected key is not
+ found, the identity provider is not honored. The
+ namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ extraAuthorizeParameters:
+ additionalProperties:
+ type: string
+ description: extraAuthorizeParameters are any custom
+ parameters to add to the authorize request.
+ type: object
+ extraScopes:
+ description: extraScopes are any scopes to request
+ in addition to the standard "openid" scope.
+ items:
+ type: string
+ type: array
+ issuer:
+ description: issuer is the URL that the OpenID Provider
+ asserts as its Issuer Identifier. It must use
+ the https scheme with no query or fragment component.
+ type: string
+ type: object
+ requestHeader:
+ description: requestHeader enables user authentication
+ using request header credentials
+ properties:
+ ca:
+ description: ca is a required reference to a config
+ map by name containing the PEM-encoded CA bundle.
+ It is used as a trust anchor to validate the TLS
+ certificate presented by the remote server. Specifically,
+ it allows verification of incoming requests to
+ prevent header spoofing. The key "ca.crt" is used
+ to locate the data. If the config map or expected
+ key is not found, the identity provider is not
+ honored. If the specified ca data is not valid,
+ the identity provider is not honored. The namespace
+ for this config map is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ challengeURL:
+ description: challengeURL is a URL to redirect unauthenticated
+ /authorize requests to Unauthenticated requests
+ from OAuth clients which expect WWW-Authenticate
+ challenges will be redirected here. ${url} is
+ replaced with the current URL, escaped to be safe
+ in a query parameter https://www.example.com/sso-login?then=${url}
+ ${query} is replaced with the current query string
+ https://www.example.com/auth-proxy/oauth/authorize?${query}
+ Required when challenge is set to true.
+ type: string
+ clientCommonNames:
+ description: clientCommonNames is an optional list
+ of common names to require a match from. If empty,
+ any client certificate validated against the clientCA
+ bundle is considered authoritative.
+ items:
+ type: string
+ type: array
+ emailHeaders:
+ description: emailHeaders is the set of headers
+ to check for the email address
+ items:
+ type: string
+ type: array
+ headers:
+ description: headers is the set of headers to check
+ for identity information
+ items:
+ type: string
+ type: array
+ loginURL:
+ description: loginURL is a URL to redirect unauthenticated
+ /authorize requests to Unauthenticated requests
+ from OAuth clients which expect interactive logins
+ will be redirected here ${url} is replaced with
+ the current URL, escaped to be safe in a query
+ parameter https://www.example.com/sso-login?then=${url}
+ ${query} is replaced with the current query string
+ https://www.example.com/auth-proxy/oauth/authorize?${query}
+ Required when login is set to true.
+ type: string
+ nameHeaders:
+ description: nameHeaders is the set of headers to
+ check for the display name
+ items:
+ type: string
+ type: array
+ preferredUsernameHeaders:
+ description: preferredUsernameHeaders is the set
+ of headers to check for the preferred username
+ items:
+ type: string
+ type: array
+ type: object
+ type:
+ description: type identifies the identity provider type
+ for this entry.
+ type: string
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ templates:
+ description: templates allow you to customize pages like the
+ login page.
+ properties:
+ error:
+ description: error is the name of a secret that specifies
+ a go template to use to render error pages during the
+ authentication or grant flow. The key "errors.html"
+ is used to locate the template data. If specified and
+ the secret or expected key is not found, the default
+ error page is used. If the specified template is not
+ valid, the default error page is used. If unspecified,
+ the default error page is used. The namespace for this
+ secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ required:
+ - name
+ type: object
+ login:
+ description: login is the name of a secret that specifies
+ a go template to use to render the login page. The key
+ "login.html" is used to locate the template data. If
+ specified and the secret or expected key is not found,
+ the default login page is used. If the specified template
+ is not valid, the default login page is used. If unspecified,
+ the default login page is used. The namespace for this
+ secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ required:
+ - name
+ type: object
+ providerSelection:
+ description: providerSelection is the name of a secret
+ that specifies a go template to use to render the provider
+ selection page. The key "providers.html" is used to
+ locate the template data. If specified and the secret
+ or expected key is not found, the default provider selection
+ page is used. If the specified template is not valid,
+ the default provider selection page is used. If unspecified,
+ the default provider selection page is used. The namespace
+ for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ required:
+ - name
+ type: object
+ type: object
+ tokenConfig:
+ description: tokenConfig contains options for authorization
+ and access tokens
+ properties:
+ accessTokenInactivityTimeout:
+ description: "accessTokenInactivityTimeout defines the
+ token inactivity timeout for tokens granted by any client.
+ The value represents the maximum amount of time that
+ can occur between consecutive uses of the token. Tokens
+ become invalid if they are not used within this temporal
+ window. The user will need to acquire a new token to
+ regain access once a token times out. Takes valid time
+ duration string such as \"5m\", \"1.5h\" or \"2h45m\".
+ The minimum allowed value for duration is 300s (5 minutes).
+ If the timeout is configured per client, then that value
+ takes precedence. If the timeout value is not specified
+ and the client does not override the value, then tokens
+ are valid until their lifetime. \n WARNING: existing
+ tokens' timeout will not be affected (lowered) by changing
+ this value"
+ type: string
+ accessTokenInactivityTimeoutSeconds:
+ description: 'accessTokenInactivityTimeoutSeconds - DEPRECATED:
+ setting this field has no effect.'
+ format: int32
+ type: integer
+ accessTokenMaxAgeSeconds:
+ description: accessTokenMaxAgeSeconds defines the maximum
+ age of access tokens
+ format: int32
+ type: integer
+ type: object
+ type: object
+ proxy:
+ description: Proxy holds cluster-wide information on how to configure
+ default proxies for the cluster.
+ properties:
+ httpProxy:
+ description: httpProxy is the URL of the proxy for HTTP requests. Empty
+ means unset and will not result in an env var.
+ type: string
+ httpsProxy:
+ description: httpsProxy is the URL of the proxy for HTTPS
+ requests. Empty means unset and will not result in an env
+ var.
+ type: string
+ noProxy:
+ description: noProxy is a comma-separated list of hostnames
+ and/or CIDRs and/or IPs for which the proxy should not be
+ used. Empty means unset and will not result in an env var.
+ type: string
+ readinessEndpoints:
+ description: readinessEndpoints is a list of endpoints used
+ to verify readiness of the proxy.
+ items:
+ type: string
+ type: array
+ trustedCA:
+ description: "trustedCA is a reference to a ConfigMap containing
+ a CA certificate bundle. The trustedCA field should only
+ be consumed by a proxy validator. The validator is responsible
+ for reading the certificate bundle from the required key
+ \"ca-bundle.crt\", merging it with the system default trust
+ bundle, and writing the merged trust bundle to a ConfigMap
+ named \"trusted-ca-bundle\" in the \"openshift-config-managed\"
+ namespace. Clients that expect to make proxy connections
+ must use the trusted-ca-bundle for all HTTPS requests to
+ the proxy, and may use the trusted-ca-bundle for non-proxy
+ HTTPS requests as well. \n The namespace for the ConfigMap
+ referenced by trustedCA is \"openshift-config\". Here is
+ an example ConfigMap (in yaml): \n apiVersion: v1 kind:
+ ConfigMap metadata: name: user-ca-bundle namespace: openshift-config
+ data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom
+ CA certificate bundle. -----END CERTIFICATE-----"
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ required:
+ - name
+ type: object
+ type: object
+ scheduler:
+ description: Scheduler holds cluster-wide config information to
+ run the Kubernetes Scheduler and influence its placement decisions.
+ The canonical name for this config is `cluster`.
+ properties:
+ defaultNodeSelector:
+ description: 'defaultNodeSelector helps set the cluster-wide
+ default node selector to restrict pod placement to specific
+ nodes. This is applied to the pods created in all namespaces
+ and creates an intersection with any existing nodeSelectors
+ already set on a pod, additionally constraining that pod''s
+ selector. For example, defaultNodeSelector: "type=user-node,region=east"
+ would set nodeSelector field in pod spec to "type=user-node,region=east"
+ to all pods created in all namespaces. Namespaces having
+ project-wide node selectors won''t be impacted even if this
+ field is set. This adds an annotation section to the namespace.
+ For example, if a new namespace is created with node-selector=''type=user-node,region=east'',
+ the annotation openshift.io/node-selector: type=user-node,region=east
+ gets added to the project. When the openshift.io/node-selector
+ annotation is set on the project the value is used in preference
+ to the value we are setting for defaultNodeSelector field.
+ For instance, openshift.io/node-selector: "type=user-node,region=west"
+ means that the default of "type=user-node,region=east" set
+ in defaultNodeSelector would not be applied.'
+ type: string
+ mastersSchedulable:
+            description: 'MastersSchedulable allows master nodes to be
+ schedulable. When this flag is turned on, all the master
+ nodes in the cluster will be made schedulable, so that workload
+ pods can run on them. The default value for this field is
+ false, meaning none of the master nodes are schedulable.
+ Important Note: Once the workload pods start running on
+ the master nodes, extreme care must be taken to ensure that
+ cluster-critical control plane components are not impacted.
+ Please turn on this field after doing due diligence.'
+ type: boolean
+ policy:
+ description: 'DEPRECATED: the scheduler Policy API has been
+ deprecated and will be removed in a future release. policy
+ is a reference to a ConfigMap containing scheduler policy
+ which has user specified predicates and priorities. If this
+ ConfigMap is not available scheduler will default to use
+ DefaultAlgorithmProvider. The namespace for this configmap
+ is openshift-config.'
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ required:
+ - name
+ type: object
+ profile:
+ description: "profile sets which scheduling profile should
+ be set in order to configure scheduling decisions for new
+ pods. \n Valid values are \"LowNodeUtilization\", \"HighNodeUtilization\",
+ \"NoScoring\" Defaults to \"LowNodeUtilization\""
+ enum:
+ - ""
+ - LowNodeUtilization
+ - HighNodeUtilization
+ - NoScoring
+ type: string
+ type: object
+ type: object
+ controllerAvailabilityPolicy:
+ default: SingleReplica
+ description: ControllerAvailabilityPolicy specifies the availability
+ policy applied to critical control plane components. The default
+ value is SingleReplica.
+ type: string
+ dns:
+ description: DNS specifies DNS configuration for the cluster.
+ properties:
+ baseDomain:
+ description: BaseDomain is the base domain of the cluster.
+ type: string
+ privateZoneID:
+ description: PrivateZoneID is the Hosted Zone ID where all the
+ DNS records that are only available internally to the cluster
+ exist.
+ type: string
+ publicZoneID:
+ description: PublicZoneID is the Hosted Zone ID where all the
+ DNS records that are publicly accessible to the internet exist.
+ type: string
+ required:
+ - baseDomain
+ type: object
+ etcd:
+ default:
+ managed:
+ storage:
+ persistentVolume:
+ size: 4Gi
+ type: PersistentVolume
+ managementType: Managed
+ description: Etcd specifies configuration for the control plane etcd
+ cluster. The default ManagementType is Managed. Once set, the ManagementType
+ cannot be changed.
+ properties:
+ managed:
+ description: Managed specifies the behavior of an etcd cluster
+ managed by HyperShift.
+ properties:
+ storage:
+ description: Storage specifies how etcd data is persisted.
+ properties:
+ persistentVolume:
+ description: PersistentVolume is the configuration for
+ PersistentVolume etcd storage. With this implementation,
+ a PersistentVolume will be allocated for every etcd
+ member (either 1 or 3 depending on the HostedCluster
+ control plane availability configuration).
+ properties:
+ size:
+ anyOf:
+ - type: integer
+ - type: string
+ default: 4Gi
+ description: Size is the minimum size of the data
+ volume for each etcd member.
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ storageClassName:
+ description: "StorageClassName is the StorageClass
+ of the data volume for each etcd member. \n See
+ https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1."
+ type: string
+ type: object
+ restoreSnapshotURL:
+ description: RestoreSnapshotURL allows an optional list
+ of URLs to be provided where an etcd snapshot can be
+ downloaded, for example a pre-signed URL referencing
+ a storage service, one URL per replica. This snapshot
+ will be restored on initial startup, only when the etcd
+ PV is empty.
+ items:
+ type: string
+ type: array
+ type:
+ description: Type is the kind of persistent storage implementation
+ to use for etcd.
+ enum:
+ - PersistentVolume
+ type: string
+ required:
+ - type
+ type: object
+ required:
+ - storage
+ type: object
+ managementType:
+ description: ManagementType defines how the etcd cluster is managed.
+ enum:
+ - Managed
+ - Unmanaged
+ type: string
+ unmanaged:
+ description: Unmanaged specifies configuration which enables the
+          control plane to integrate with an externally managed etcd cluster.
+ properties:
+ endpoint:
+ description: "Endpoint is the full etcd cluster client endpoint
+ URL. For example: \n https://etcd-client:2379 \n If the
+ URL uses an HTTPS scheme, the TLS field is required."
+ pattern: ^https://
+ type: string
+ tls:
+ description: TLS specifies TLS configuration for HTTPS etcd
+ client endpoints.
+ properties:
+ clientSecret:
+ description: "ClientSecret refers to a secret for client
+ mTLS authentication with the etcd cluster. It may have
+ the following key/value pairs: \n etcd-client-ca.crt:
+ Certificate Authority value etcd-client.crt: Client
+ certificate value etcd-client.key: Client certificate
+ key value"
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - clientSecret
+ type: object
+ required:
+ - endpoint
+ - tls
+ type: object
+ required:
+ - managementType
+ type: object
+ fips:
+ description: FIPS indicates whether this cluster's nodes will be running
+ in FIPS mode. If set to true, the control plane's ignition server
+ will be configured to expect that nodes joining the cluster will
+ be FIPS-enabled.
+ type: boolean
+ imageContentSources:
+ description: ImageContentSources specifies image mirrors that can
+ be used by cluster nodes to pull content.
+ items:
+ description: ImageContentSource specifies image mirrors that can
+ be used by cluster nodes to pull content. For cluster workloads,
+ if a container image registry host of the pullspec matches Source
+ then one of the Mirrors are substituted as hosts in the pullspec
+ and tried in order to fetch the image.
+ properties:
+ mirrors:
+ description: Mirrors are one or more repositories that may also
+ contain the same images.
+ items:
+ type: string
+ type: array
+ source:
+ description: Source is the repository that users refer to, e.g.
+ in image pull specifications.
+ type: string
+ required:
+ - source
+ type: object
+ type: array
+ infraID:
+ description: InfraID is a globally unique identifier for the cluster.
+ This identifier will be used to associate various cloud resources
+ with the HostedCluster and its associated NodePools.
+ type: string
+ infrastructureAvailabilityPolicy:
+ default: SingleReplica
+ description: InfrastructureAvailabilityPolicy specifies the availability
+ policy applied to infrastructure services which run on cluster nodes.
+ The default value is SingleReplica.
+ type: string
+ issuerURL:
+ default: https://kubernetes.default.svc
+ description: IssuerURL is an OIDC issuer URL which is used as the
+ issuer in all ServiceAccount tokens generated by the control plane
+ API server. The default value is kubernetes.default.svc, which only
+ works for in-cluster validation.
+ format: uri
+ type: string
+ networking:
+ description: Networking specifies network configuration for the cluster.
+ properties:
+ apiServer:
+ description: APIServer contains advanced network settings for
+ the API server that affect how the APIServer is exposed inside
+ a cluster node.
+ properties:
+ advertiseAddress:
+ description: AdvertiseAddress is the address that nodes will
+ use to talk to the API server. This is an address associated
+ with the loopback adapter of each node. If not specified,
+ 172.20.0.1 is used.
+ type: string
+ allowedCIDRBlocks:
+ description: AllowedCIDRBlocks is an allow list of CIDR blocks
+              that can access the APIServer. If not specified, traffic
+ is allowed from all addresses. This depends on underlying
+ support by the cloud provider for Service LoadBalancerSourceRanges
+ items:
+ pattern: ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$
+ type: string
+ type: array
+ port:
+ description: Port is the port at which the APIServer is exposed
+ inside a node. Other pods using host networking cannot listen
+ on this port. If not specified, 6443 is used.
+ format: int32
+ type: integer
+ type: object
+ clusterNetwork:
+ description: ClusterNetwork is the list of IP address pools for
+ pods.
+ items:
+ description: ClusterNetworkEntry is a single IP address block
+ for pod IP blocks. IP blocks are allocated with size 2^HostSubnetLength.
+ properties:
+ cidr:
+ description: CIDR is the IP block address pool.
+ type: string
+ hostPrefix:
+ description: HostPrefix is the prefix size to allocate to
+ each node from the CIDR. For example, 24 would allocate
+                2^8=256 addresses to each node. If this field is not used
+ by the plugin, it can be left unset.
+ format: int32
+ type: integer
+ required:
+ - cidr
+ type: object
+ type: array
+ machineNetwork:
+ description: MachineNetwork is the list of IP address pools for
+ machines.
+ items:
+ description: MachineNetworkEntry is a single IP address block
+ for node IP blocks.
+ properties:
+ cidr:
+ description: CIDR is the IP block address pool for machines
+ within the cluster.
+ type: string
+ required:
+ - cidr
+ type: object
+ type: array
+ networkType:
+ default: OVNKubernetes
+ description: NetworkType specifies the SDN provider used for cluster
+ networking.
+ enum:
+ - OpenShiftSDN
+ - Calico
+ - OVNKubernetes
+ - Other
+ type: string
+ serviceNetwork:
+ description: 'ServiceNetwork is the list of IP address pools for
+ services. NOTE: currently only one entry is supported.'
+ items:
+ description: ServiceNetworkEntry is a single IP address block
+ for the service network.
+ properties:
+ cidr:
+ description: CIDR is the IP block address pool for services
+ within the cluster.
+ type: string
+ required:
+ - cidr
+ type: object
+ type: array
+ required:
+ - clusterNetwork
+ - networkType
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector when specified, must be true for the pods
+ managed by the HostedCluster to be scheduled.
+ type: object
+ olmCatalogPlacement:
+ default: management
+ description: OLMCatalogPlacement specifies the placement of OLM catalog
+ components. By default, this is set to management and OLM catalog
+ components are deployed onto the management cluster. If set to guest,
+ the OLM catalog components will be deployed onto the guest cluster.
+ enum:
+ - management
+ - guest
+ type: string
+ pausedUntil:
+ description: 'PausedUntil is a field that can be used to pause reconciliation
+ on a resource. Either a date can be provided in RFC3339 format or
+ a boolean. If a date is provided: reconciliation is paused on the
+ resource until that date. If the boolean true is provided: reconciliation
+ is paused on the resource until the field is removed.'
+ type: string
+ platform:
+ description: Platform specifies the underlying infrastructure provider
+ for the cluster and is used to configure platform specific behavior.
+ properties:
+ agent:
+ description: Agent specifies configuration for agent-based installations.
+ properties:
+ agentNamespace:
+ description: AgentNamespace is the namespace where to search
+ for Agents for this cluster
+ type: string
+ required:
+ - agentNamespace
+ type: object
+ aws:
+ description: AWS specifies configuration for clusters running
+ on Amazon Web Services.
+ properties:
+ cloudProviderConfig:
+ description: 'CloudProviderConfig specifies AWS networking
+ configuration for the control plane. This is mainly used
+ for cloud provider controller config: https://github.com/kubernetes/kubernetes/blob/f5be5052e3d0808abb904aebd3218fe4a5c2dd82/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go#L1347-L1364
+ TODO(dan): should this be named AWSNetworkConfig?'
+ properties:
+ subnet:
+ description: Subnet is the subnet to use for control plane
+ cloud resources.
+ properties:
+ arn:
+ description: ARN of resource
+ type: string
+ filters:
+ description: 'Filters is a set of key/value pairs
+ used to identify a resource They are applied according
+ to the rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ items:
+ description: Filter is a filter used to identify
+ an AWS resource
+ properties:
+ name:
+ description: Name of the filter. Filter names
+ are case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter
+ values. Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ vpc:
+ description: VPC is the VPC to use for control plane cloud
+ resources.
+ type: string
+ zone:
+ description: Zone is the availability zone where control
+ plane cloud resources are created.
+ type: string
+ required:
+ - vpc
+ type: object
+ endpointAccess:
+ default: Public
+ description: EndpointAccess specifies the publishing scope
+ of cluster endpoints. The default is Public.
+ enum:
+ - Public
+ - PublicAndPrivate
+ - Private
+ type: string
+ region:
+ description: Region is the AWS region in which the cluster
+ resides. This configures the OCP control plane cloud integrations,
+ and is used by NodePool to resolve the correct boot AMI
+ for a given release.
+ type: string
+ resourceTags:
+ description: ResourceTags is a list of additional tags to
+ apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html
+ for information on tagging AWS resources. AWS supports a
+ maximum of 50 tags per resource. OpenShift reserves 25 tags
+ for its use, leaving 25 tags available for the user.
+ items:
+ description: AWSResourceTag is a tag to apply to AWS resources
+ created for the cluster.
+ properties:
+ key:
+ description: Key is the key of the tag.
+ maxLength: 128
+ minLength: 1
+                  pattern: ^[0-9A-Za-z_.:/=+@-]+$
+ type: string
+ value:
+ description: "Value is the value of the tag. \n Some
+                  AWS services do not support empty values. Since tags
+ are added to resources in many services, the length
+ of the tag value must meet the requirements of all
+ services."
+ maxLength: 256
+ minLength: 1
+                  pattern: ^[0-9A-Za-z_.:/=+@-]+$
+ type: string
+ required:
+ - key
+ - value
+ type: object
+ maxItems: 25
+ type: array
+ rolesRef:
+ description: RolesRef contains references to various AWS IAM
+ roles required to enable integrations such as OIDC.
+ properties:
+ controlPlaneOperatorARN:
+ description: "ControlPlaneOperatorARN is an ARN value
+ referencing a role appropriate for the Control Plane
+ Operator. \n The following is an example of a valid
+ policy document: \n { \"Version\": \"2012-10-17\", \"Statement\":
+ [ { \"Effect\": \"Allow\", \"Action\": [ \"ec2:CreateVpcEndpoint\",
+ \"ec2:DescribeVpcEndpoints\", \"ec2:ModifyVpcEndpoint\",
+ \"ec2:DeleteVpcEndpoints\", \"ec2:CreateTags\", \"route53:ListHostedZones\"
+ ], \"Resource\": \"*\" }, { \"Effect\": \"Allow\", \"Action\":
+ [ \"route53:ChangeResourceRecordSets\", \"route53:ListResourceRecordSets\"
+ ], \"Resource\": \"arn:aws:route53:::%s\" } ] }"
+ type: string
+ imageRegistryARN:
+ description: "ImageRegistryARN is an ARN value referencing
+ a role appropriate for the Image Registry Operator.
+ \n The following is an example of a valid policy document:
+ \n { \"Version\": \"2012-10-17\", \"Statement\": [ {
+ \"Effect\": \"Allow\", \"Action\": [ \"s3:CreateBucket\",
+ \"s3:DeleteBucket\", \"s3:PutBucketTagging\", \"s3:GetBucketTagging\",
+ \"s3:PutBucketPublicAccessBlock\", \"s3:GetBucketPublicAccessBlock\",
+ \"s3:PutEncryptionConfiguration\", \"s3:GetEncryptionConfiguration\",
+ \"s3:PutLifecycleConfiguration\", \"s3:GetLifecycleConfiguration\",
+ \"s3:GetBucketLocation\", \"s3:ListBucket\", \"s3:GetObject\",
+ \"s3:PutObject\", \"s3:DeleteObject\", \"s3:ListBucketMultipartUploads\",
+ \"s3:AbortMultipartUpload\", \"s3:ListMultipartUploadParts\"
+ ], \"Resource\": \"*\" } ] }"
+ type: string
+ ingressARN:
+ description: "The referenced role must have a trust relationship
+ that allows it to be assumed via web identity. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html.
+ Example: { \"Version\": \"2012-10-17\", \"Statement\":
+ [ { \"Effect\": \"Allow\", \"Principal\": { \"Federated\":
+ \"{{ .ProviderARN }}\" }, \"Action\": \"sts:AssumeRoleWithWebIdentity\",
+ \"Condition\": { \"StringEquals\": { \"{{ .ProviderName
+ }}:sub\": {{ .ServiceAccounts }} } } } ] } \n IngressARN
+ is an ARN value referencing a role appropriate for the
+ Ingress Operator. \n The following is an example of
+ a valid policy document: \n { \"Version\": \"2012-10-17\",
+ \"Statement\": [ { \"Effect\": \"Allow\", \"Action\":
+ [ \"elasticloadbalancing:DescribeLoadBalancers\", \"tag:GetResources\",
+ \"route53:ListHostedZones\" ], \"Resource\": \"*\" },
+ { \"Effect\": \"Allow\", \"Action\": [ \"route53:ChangeResourceRecordSets\"
+ ], \"Resource\": [ \"arn:aws:route53:::PUBLIC_ZONE_ID\",
+ \"arn:aws:route53:::PRIVATE_ZONE_ID\" ] } ] }"
+ type: string
+ kubeCloudControllerARN:
+ description: "KubeCloudControllerARN is an ARN value referencing
+ a role appropriate for the KCM/KCC. \n The following
+ is an example of a valid policy document: \n { \"Version\":
+ \"2012-10-17\", \"Statement\": [ { \"Action\": [ \"ec2:DescribeInstances\",
+ \"ec2:DescribeImages\", \"ec2:DescribeRegions\", \"ec2:DescribeRouteTables\",
+ \"ec2:DescribeSecurityGroups\", \"ec2:DescribeSubnets\",
+ \"ec2:DescribeVolumes\", \"ec2:CreateSecurityGroup\",
+ \"ec2:CreateTags\", \"ec2:CreateVolume\", \"ec2:ModifyInstanceAttribute\",
+ \"ec2:ModifyVolume\", \"ec2:AttachVolume\", \"ec2:AuthorizeSecurityGroupIngress\",
+ \"ec2:CreateRoute\", \"ec2:DeleteRoute\", \"ec2:DeleteSecurityGroup\",
+ \"ec2:DeleteVolume\", \"ec2:DetachVolume\", \"ec2:RevokeSecurityGroupIngress\",
+ \"ec2:DescribeVpcs\", \"elasticloadbalancing:AddTags\",
+ \"elasticloadbalancing:AttachLoadBalancerToSubnets\",
+ \"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer\",
+ \"elasticloadbalancing:CreateLoadBalancer\", \"elasticloadbalancing:CreateLoadBalancerPolicy\",
+ \"elasticloadbalancing:CreateLoadBalancerListeners\",
+ \"elasticloadbalancing:ConfigureHealthCheck\", \"elasticloadbalancing:DeleteLoadBalancer\",
+ \"elasticloadbalancing:DeleteLoadBalancerListeners\",
+ \"elasticloadbalancing:DescribeLoadBalancers\", \"elasticloadbalancing:DescribeLoadBalancerAttributes\",
+ \"elasticloadbalancing:DetachLoadBalancerFromSubnets\",
+ \"elasticloadbalancing:DeregisterInstancesFromLoadBalancer\",
+ \"elasticloadbalancing:ModifyLoadBalancerAttributes\",
+ \"elasticloadbalancing:RegisterInstancesWithLoadBalancer\",
+ \"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer\",
+ \"elasticloadbalancing:AddTags\", \"elasticloadbalancing:CreateListener\",
+ \"elasticloadbalancing:CreateTargetGroup\", \"elasticloadbalancing:DeleteListener\",
+ \"elasticloadbalancing:DeleteTargetGroup\", \"elasticloadbalancing:DescribeListeners\",
+ \"elasticloadbalancing:DescribeLoadBalancerPolicies\",
+ \"elasticloadbalancing:DescribeTargetGroups\", \"elasticloadbalancing:DescribeTargetHealth\",
+ \"elasticloadbalancing:ModifyListener\", \"elasticloadbalancing:ModifyTargetGroup\",
+ \"elasticloadbalancing:RegisterTargets\", \"elasticloadbalancing:SetLoadBalancerPoliciesOfListener\",
+ \"iam:CreateServiceLinkedRole\", \"kms:DescribeKey\"
+ ], \"Resource\": [ \"*\" ], \"Effect\": \"Allow\" }
+ ] }"
+ type: string
+ networkARN:
+ description: "NetworkARN is an ARN value referencing a
+ role appropriate for the Network Operator. \n The following
+ is an example of a valid policy document: \n { \"Version\":
+ \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\",
+ \"Action\": [ \"ec2:DescribeInstances\", \"ec2:DescribeInstanceStatus\",
+ \"ec2:DescribeInstanceTypes\", \"ec2:UnassignPrivateIpAddresses\",
+ \"ec2:AssignPrivateIpAddresses\", \"ec2:UnassignIpv6Addresses\",
+ \"ec2:AssignIpv6Addresses\", \"ec2:DescribeSubnets\",
+ \"ec2:DescribeNetworkInterfaces\" ], \"Resource\": \"*\"
+ } ] }"
+ type: string
+ nodePoolManagementARN:
+ description: "NodePoolManagementARN is an ARN value referencing
+ a role appropriate for the CAPI Controller. \n The following
+ is an example of a valid policy document: \n { \"Version\":
+ \"2012-10-17\", \"Statement\": [ { \"Action\": [ \"ec2:AllocateAddress\",
+ \"ec2:AssociateRouteTable\", \"ec2:AttachInternetGateway\",
+ \"ec2:AuthorizeSecurityGroupIngress\", \"ec2:CreateInternetGateway\",
+ \"ec2:CreateNatGateway\", \"ec2:CreateRoute\", \"ec2:CreateRouteTable\",
+ \"ec2:CreateSecurityGroup\", \"ec2:CreateSubnet\", \"ec2:CreateTags\",
+ \"ec2:DeleteInternetGateway\", \"ec2:DeleteNatGateway\",
+ \"ec2:DeleteRouteTable\", \"ec2:DeleteSecurityGroup\",
+ \"ec2:DeleteSubnet\", \"ec2:DeleteTags\", \"ec2:DescribeAccountAttributes\",
+ \"ec2:DescribeAddresses\", \"ec2:DescribeAvailabilityZones\",
+ \"ec2:DescribeImages\", \"ec2:DescribeInstances\", \"ec2:DescribeInternetGateways\",
+ \"ec2:DescribeNatGateways\", \"ec2:DescribeNetworkInterfaces\",
+ \"ec2:DescribeNetworkInterfaceAttribute\", \"ec2:DescribeRouteTables\",
+ \"ec2:DescribeSecurityGroups\", \"ec2:DescribeSubnets\",
+ \"ec2:DescribeVpcs\", \"ec2:DescribeVpcAttribute\",
+ \"ec2:DescribeVolumes\", \"ec2:DetachInternetGateway\",
+ \"ec2:DisassociateRouteTable\", \"ec2:DisassociateAddress\",
+ \"ec2:ModifyInstanceAttribute\", \"ec2:ModifyNetworkInterfaceAttribute\",
+ \"ec2:ModifySubnetAttribute\", \"ec2:ReleaseAddress\",
+ \"ec2:RevokeSecurityGroupIngress\", \"ec2:RunInstances\",
+ \"ec2:TerminateInstances\", \"tag:GetResources\", \"ec2:CreateLaunchTemplate\",
+ \"ec2:CreateLaunchTemplateVersion\", \"ec2:DescribeLaunchTemplates\",
+ \"ec2:DescribeLaunchTemplateVersions\", \"ec2:DeleteLaunchTemplate\",
+ \"ec2:DeleteLaunchTemplateVersions\" ], \"Resource\":
+ [ \"*\" ], \"Effect\": \"Allow\" }, { \"Condition\":
+ { \"StringLike\": { \"iam:AWSServiceName\": \"elasticloadbalancing.amazonaws.com\"
+ } }, \"Action\": [ \"iam:CreateServiceLinkedRole\" ],
+ \"Resource\": [ \"arn:*:iam::*:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing\"
+ ], \"Effect\": \"Allow\" }, { \"Action\": [ \"iam:PassRole\"
+ ], \"Resource\": [ \"arn:*:iam::*:role/*-worker-role\"
+ ], \"Effect\": \"Allow\" } ] }"
+ type: string
+ storageARN:
+ description: "StorageARN is an ARN value referencing a
+ role appropriate for the Storage Operator. \n The following
+ is an example of a valid policy document: \n { \"Version\":
+ \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\",
+ \"Action\": [ \"ec2:AttachVolume\", \"ec2:CreateSnapshot\",
+ \"ec2:CreateTags\", \"ec2:CreateVolume\", \"ec2:DeleteSnapshot\",
+ \"ec2:DeleteTags\", \"ec2:DeleteVolume\", \"ec2:DescribeInstances\",
+ \"ec2:DescribeSnapshots\", \"ec2:DescribeTags\", \"ec2:DescribeVolumes\",
+ \"ec2:DescribeVolumesModifications\", \"ec2:DetachVolume\",
+ \"ec2:ModifyVolume\" ], \"Resource\": \"*\" } ] }"
+ type: string
+ required:
+ - controlPlaneOperatorARN
+ - imageRegistryARN
+ - ingressARN
+ - kubeCloudControllerARN
+ - networkARN
+ - nodePoolManagementARN
+ - storageARN
+ type: object
+ serviceEndpoints:
+ description: "ServiceEndpoints specifies optional custom endpoints
+ which will override the default service endpoint of specific
+ AWS Services. \n There must be only one ServiceEndpoint
+ for a given service name."
+ items:
+ description: AWSServiceEndpoint stores the configuration
+ for services to override existing defaults of AWS Services.
+ properties:
+ name:
+ description: Name is the name of the AWS service. This
+ must be provided and cannot be empty.
+ type: string
+ url:
+ description: URL is fully qualified URI with scheme
+ https, that overrides the default generated endpoint
+ for a client. This must be provided and cannot be
+ empty.
+ pattern: ^https://
+ type: string
+ required:
+ - name
+ - url
+ type: object
+ type: array
+ required:
+ - region
+ - rolesRef
+ type: object
+ azure:
+ description: Azure defines azure specific settings
+ properties:
+ credentials:
+ description: LocalObjectReference contains enough information
+ to let you locate the referenced object inside the same
+ namespace.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ location:
+ type: string
+ machineIdentityID:
+ type: string
+ resourceGroup:
+ type: string
+ securityGroupName:
+ type: string
+ subnetName:
+ type: string
+ subscriptionID:
+ type: string
+ vnetID:
+ type: string
+ vnetName:
+ type: string
+ required:
+ - credentials
+ - location
+ - machineIdentityID
+ - resourceGroup
+ - securityGroupName
+ - subnetName
+ - subscriptionID
+ - vnetID
+ - vnetName
+ type: object
+ ibmcloud:
+ description: IBMCloud defines IBMCloud specific settings for components
+ properties:
+ providerType:
+ description: ProviderType is a specific supported infrastructure
+ provider within IBM Cloud.
+ type: string
+ type: object
+ powervs:
+ description: PowerVS specifies configuration for clusters running
+ on IBMCloud Power VS Service. This field is immutable. Once
+ set, It can't be changed.
+ properties:
+ accountID:
+ description: AccountID is the IBMCloud account id. This field
+ is immutable. Once set, It can't be changed.
+ type: string
+ cisInstanceCRN:
+ description: CISInstanceCRN is the IBMCloud CIS Service Instance's
+ Cloud Resource Name This field is immutable. Once set, It
+ can't be changed.
+ pattern: '^crn:'
+ type: string
+ ingressOperatorCloudCreds:
+ description: IngressOperatorCloudCreds is a reference to a
+ secret containing ibm cloud credentials for ingress operator
+ to get authenticated with ibm cloud.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ kubeCloudControllerCreds:
+ description: "KubeCloudControllerCreds is a reference to a
+ secret containing cloud credentials with permissions matching
+ the cloud controller policy. This field is immutable. Once
+ set, It can't be changed. \n TODO(dan): document the \"cloud
+ controller policy\""
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ nodePoolManagementCreds:
+ description: "NodePoolManagementCreds is a reference to a
+ secret containing cloud credentials with permissions matching
+ the node pool management policy. This field is immutable.
+ Once set, It can't be changed. \n TODO(dan): document the
+ \"node pool management policy\""
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ region:
+ description: Region is the IBMCloud region in which the cluster
+ resides. This configures the OCP control plane cloud integrations,
+ and is used by NodePool to resolve the correct boot image
+ for a given release. This field is immutable. Once set,
+ It can't be changed.
+ type: string
+ resourceGroup:
+ description: ResourceGroup is the IBMCloud Resource Group
+ in which the cluster resides. This field is immutable. Once
+ set, It can't be changed.
+ type: string
+ serviceInstanceID:
+ description: "ServiceInstance is the reference to the Power
+ VS service on which the server instance(VM) will be created.
+ Power VS service is a container for all Power VS instances
+ at a specific geographic region. serviceInstance can be
+ created via IBM Cloud catalog or CLI. ServiceInstanceID
+ is the unique identifier that can be obtained from IBM Cloud
+ UI or IBM Cloud cli. \n More detail about Power VS service
+ instance. https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server
+ \n This field is immutable. Once set, It can't be changed."
+ type: string
+ storageOperatorCloudCreds:
+ description: StorageOperatorCloudCreds is a reference to a
+ secret containing ibm cloud credentials for storage operator
+ to get authenticated with ibm cloud.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ subnet:
+ description: Subnet is the subnet to use for control plane
+ cloud resources. This field is immutable. Once set, It can't
+ be changed.
+ properties:
+ id:
+ description: ID of resource
+ type: string
+ name:
+ description: Name of resource
+ type: string
+ type: object
+ vpc:
+ description: VPC specifies IBM Cloud PowerVS Load Balancing
+ configuration for the control plane. This field is immutable.
+ Once set, It can't be changed.
+ properties:
+ name:
+ description: Name for VPC to used for all the service
+ load balancer. This field is immutable. Once set, It
+ can't be changed.
+ type: string
+ region:
+ description: Region is the IBMCloud region in which VPC
+ gets created, this VPC used for all the ingress traffic
+ into the OCP cluster. This field is immutable. Once
+ set, It can't be changed.
+ type: string
+ subnet:
+ description: Subnet is the subnet to use for load balancer.
+ This field is immutable. Once set, It can't be changed.
+ type: string
+ zone:
+ description: Zone is the availability zone where load
+ balancer cloud resources are created. This field is
+ immutable. Once set, It can't be changed.
+ type: string
+ required:
+ - name
+ - region
+ type: object
+ zone:
+ description: Zone is the availability zone where control plane
+ cloud resources are created. This field is immutable. Once
+ set, It can't be changed.
+ type: string
+ required:
+ - accountID
+ - cisInstanceCRN
+ - ingressOperatorCloudCreds
+ - kubeCloudControllerCreds
+ - nodePoolManagementCreds
+ - region
+ - resourceGroup
+ - serviceInstanceID
+ - storageOperatorCloudCreds
+ - subnet
+ - vpc
+ - zone
+ type: object
+ type:
+ description: Type is the type of infrastructure provider for the
+ cluster.
+ enum:
+ - AWS
+ - None
+ - IBMCloud
+ - Agent
+ - KubeVirt
+ - Azure
+ - PowerVS
+ type: string
+ required:
+ - type
+ type: object
+ pullSecret:
+ description: PullSecret references a pull secret to be injected into
+ the container runtime of all cluster nodes. The secret must have
+ a key named ".dockerconfigjson" whose value is the pull secret JSON.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ release:
+ description: "Release specifies the desired OCP release payload for
+ the hosted cluster. \n Updating this field will trigger a rollout
+ of the control plane. The behavior of the rollout will be driven
+ by the ControllerAvailabilityPolicy and InfrastructureAvailabilityPolicy."
+ properties:
+ image:
+ description: Image is the image pullspec of an OCP release payload
+ image.
+ pattern: ^(\w+\S+)$
+ type: string
+ required:
+ - image
+ type: object
+ secretEncryption:
+ description: SecretEncryption specifies a Kubernetes secret encryption
+ strategy for the control plane.
+ properties:
+ aescbc:
+ description: AESCBC defines metadata about the AESCBC secret encryption
+ strategy
+ properties:
+ activeKey:
+ description: ActiveKey defines the active key used to encrypt
+ new secrets
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ backupKey:
+ description: BackupKey defines the old key during the rotation
+ process so previously created secrets can continue to be
+ decrypted until they are all re-encrypted with the active
+ key.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - activeKey
+ type: object
+ kms:
+ description: KMS defines metadata about the kms secret encryption
+ strategy
+ properties:
+ aws:
+ description: AWS defines metadata about the configuration
+ of the AWS KMS Secret Encryption provider
+ properties:
+ activeKey:
+ description: ActiveKey defines the active key used to
+ encrypt new secrets
+ properties:
+ arn:
+ description: ARN is the Amazon Resource Name for the
+ encryption key
+ pattern: '^arn:'
+ type: string
+ required:
+ - arn
+ type: object
+ auth:
+ description: Auth defines metadata about the management
+ of credentials used to interact with AWS KMS
+ properties:
+ credentials:
+ description: Credentials contains the name of the
+ secret that holds the aws credentials that can be
+ used to make the necessary KMS calls. It should
+ at key AWSCredentialsFileSecretKey contain the aws
+ credentials file that can be used to configure AWS
+ SDKs
+ properties:
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - credentials
+ type: object
+ backupKey:
+ description: BackupKey defines the old key during the
+ rotation process so previously created secrets can continue
+ to be decrypted until they are all re-encrypted with
+ the active key.
+ properties:
+ arn:
+ description: ARN is the Amazon Resource Name for the
+ encryption key
+ pattern: '^arn:'
+ type: string
+ required:
+ - arn
+ type: object
+ region:
+ description: Region contains the AWS region
+ type: string
+ required:
+ - activeKey
+ - auth
+ - region
+ type: object
+ ibmcloud:
+ description: IBMCloud defines metadata for the IBM Cloud KMS
+ encryption strategy
+ properties:
+ auth:
+ description: Auth defines metadata for how authentication
+ is done with IBM Cloud KMS
+ properties:
+ managed:
+ description: Managed defines metadata around the service
+ to service authentication strategy for the IBM Cloud
+ KMS system (all provider managed).
+ type: object
+ type:
+ description: Type defines the IBM Cloud KMS authentication
+ strategy
+ enum:
+ - Managed
+ - Unmanaged
+ type: string
+ unmanaged:
+ description: Unmanaged defines the auth metadata the
+ customer provides to interact with IBM Cloud KMS
+ properties:
+ credentials:
+ description: Credentials should reference a secret
+ with a key field of IBMCloudIAMAPIKeySecretKey
+ that contains a apikey to call IBM Cloud KMS
+ APIs
+ properties:
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - credentials
+ type: object
+ required:
+ - type
+ type: object
+ keyList:
+ description: KeyList defines the list of keys used for
+ data encryption
+ items:
+ description: IBMCloudKMSKeyEntry defines metadata for
+ an IBM Cloud KMS encryption key
+ properties:
+ correlationID:
+ description: CorrelationID is an identifier used
+ to track all api call usage from hypershift
+ type: string
+ crkID:
+                              description: CRKID is the customer root key id
+ type: string
+ instanceID:
+ description: InstanceID is the id for the key protect
+ instance
+ type: string
+ keyVersion:
+ description: KeyVersion is a unique number associated
+ with the key. The number increments whenever a
+ new key is enabled for data encryption.
+ type: integer
+ url:
+ description: URL is the url to call key protect
+ apis over
+ pattern: ^https://
+ type: string
+ required:
+ - correlationID
+ - crkID
+ - instanceID
+ - keyVersion
+ - url
+ type: object
+ type: array
+ region:
+ description: Region is the IBM Cloud region
+ type: string
+ required:
+ - auth
+ - keyList
+ - region
+ type: object
+ provider:
+ description: Provider defines the KMS provider
+ enum:
+ - IBMCloud
+ - AWS
+ type: string
+ required:
+ - provider
+ type: object
+ type:
+ description: Type defines the type of kube secret encryption being
+ used
+ enum:
+ - kms
+ - aescbc
+ type: string
+ required:
+ - type
+ type: object
+ serviceAccountSigningKey:
+ description: ServiceAccountSigningKey is a reference to a secret containing
+ the private key used by the service account token issuer. The secret
+ is expected to contain a single key named "key". If not specified,
+ a service account signing key will be generated automatically for
+ the cluster. When specifying a service account signing key, a IssuerURL
+ must also be specified.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ services:
+ description: "Services specifies how individual control plane services
+ are published from the hosting cluster of the control plane. \n
+ If a given service is not present in this list, it will be exposed
+ publicly by default."
+ items:
+ description: ServicePublishingStrategyMapping specifies how individual
+ control plane services are published from the hosting cluster
+ of a control plane.
+ properties:
+ service:
+ description: Service identifies the type of service being published.
+ enum:
+ - APIServer
+ - OAuthServer
+ - OIDC
+ - Konnectivity
+ - Ignition
+ - OVNSbDb
+ type: string
+ servicePublishingStrategy:
+ description: ServicePublishingStrategy specifies how to publish
+ Service.
+ properties:
+ loadBalancer:
+ description: LoadBalancer configures exposing a service
+ using a LoadBalancer.
+ properties:
+ hostname:
+ description: Hostname is the name of the DNS record
+ that will be created pointing to the LoadBalancer.
+ type: string
+ type: object
+ nodePort:
+ description: NodePort configures exposing a service using
+ a NodePort.
+ properties:
+ address:
+ description: Address is the host/ip that the NodePort
+ service is exposed over.
+ type: string
+ port:
+ description: Port is the port of the NodePort service.
+ If <=0, the port is dynamically assigned when the
+ service is created.
+ format: int32
+ type: integer
+ required:
+ - address
+ type: object
+ route:
+ description: Route configures exposing a service using a
+ Route.
+ properties:
+ hostname:
+ description: Hostname is the name of the DNS record
+ that will be created pointing to the Route.
+ type: string
+ type: object
+ type:
+ description: Type is the publishing strategy used for the
+ service.
+ enum:
+ - LoadBalancer
+ - NodePort
+ - Route
+ - None
+ type: string
+ required:
+ - type
+ type: object
+ required:
+ - service
+ - servicePublishingStrategy
+ type: object
+ type: array
+ sshKey:
+ description: SSHKey references an SSH key to be injected into all
+ cluster node sshd servers. The secret must have a single key "id_rsa.pub"
+ whose value is the public part of an SSH key.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - networking
+ - platform
+ - pullSecret
+ - release
+ - services
+ - sshKey
+ type: object
+ status:
+ description: Status is the latest observed status of the HostedCluster.
+ properties:
+ conditions:
+ description: Conditions represents the latest available observations
+ of a control plane's current state.
+ items:
+ description: "Condition contains details for one aspect of the current
+ state of this API Resource. --- This struct is intended for direct
+ use as an array at the field path .status.conditions. For example,
+ type FooStatus struct{ // Represents the observations of a foo's
+ current state. // Known .status.conditions.type are: \"Available\",
+ \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge
+ // +listType=map // +listMapKey=type Conditions []metav1.Condition
+ `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\"
+ protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the last time the condition
+ transitioned from one status to another. This should be when
+ the underlying condition changed. If that is not known, then
+ using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: message is a human readable message indicating
+ details about the transition. This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: observedGeneration represents the .metadata.generation
+ that the condition was set based upon. For instance, if .metadata.generation
+ is currently 12, but the .status.conditions[x].observedGeneration
+ is 9, the condition is out of date with respect to the current
+ state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: reason contains a programmatic identifier indicating
+ the reason for the condition's last transition. Producers
+ of specific condition types may define expected values and
+ meanings for this field, and whether the values are considered
+ a guaranteed API. The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ --- Many .condition.type values are consistent across resources
+ like Available, but because arbitrary conditions can be useful
+ (see .node.status.conditions), the ability to deconflict is
+ important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ controlPlaneEndpoint:
+ description: ControlPlaneEndpoint contains the endpoint information
+ by which external clients can access the control plane. This is
+ populated after the infrastructure is ready.
+ properties:
+ host:
+ description: Host is the hostname on which the API server is serving.
+ type: string
+ port:
+ description: Port is the port on which the API server is serving.
+ format: int32
+ type: integer
+ required:
+ - host
+ - port
+ type: object
+ ignitionEndpoint:
+ description: IgnitionEndpoint is the endpoint injected in the ign
+ config userdata. It exposes the config for instances to become kubernetes
+ nodes.
+ type: string
+ kubeadminPassword:
+ description: KubeadminPassword is a reference to the secret that contains
+ the initial kubeadmin user password for the guest cluster.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ kubeconfig:
+ description: KubeConfig is a reference to the secret containing the
+ default kubeconfig for the cluster.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ oauthCallbackURLTemplate:
+ description: OAuthCallbackURLTemplate contains a template for the
+ URL to use as a callback for identity providers. The [identity-provider-name]
+ placeholder must be replaced with the name of an identity provider
+ defined on the HostedCluster. This is populated after the infrastructure
+ is ready.
+ type: string
+ version:
+ description: Version is the status of the release version applied
+ to the HostedCluster.
+ properties:
+ availableUpdates:
+ description: availableUpdates contains updates recommended for
+ this cluster. Updates which appear in conditionalUpdates but
+ not in availableUpdates may expose this cluster to known issues.
+ This list may be empty if no updates are recommended, if the
+ update service is unavailable, or if an invalid channel has
+ been specified.
+ items:
+ description: Release represents an OpenShift release image and
+ associated metadata.
+ properties:
+ channels:
+ description: channels is the set of Cincinnati channels
+ to which the release currently belongs.
+ items:
+ type: string
+ type: array
+ image:
+ description: image is a container image location that contains
+ the update. When this field is part of spec, image is
+ optional if version is specified and the availableUpdates
+ field contains a matching version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on a release
+ or the metadata returned by the update API and should
+ be displayed as a link in user interfaces. The URL field
+ may not be set for test or nightly releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec, version
+ is optional if image is specified.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ conditionalUpdates:
+ description: conditionalUpdates contains the list of updates that
+ may be recommended for this cluster if it meets specific required
+ conditions. Consumers interested in the set of updates that
+ are actually recommended for this cluster should use availableUpdates.
+ This list may be empty if no updates are recommended, if the
+ update service is unavailable, or if an empty or invalid channel
+ has been specified.
+ items:
+ description: ConditionalUpdate represents an update which is
+ recommended to some clusters on the version the current cluster
+ is reconciling, but which may not be recommended for the current
+ cluster.
+ properties:
+ conditions:
+ description: 'conditions represents the observations of
+ the conditional update''s current status. Known types
+ are: * Evaluating, for whether the cluster-version operator
+ will attempt to evaluate any risks[].matchingRules. *
+ Recommended, for whether the update is recommended for
+ the current cluster.'
+ items:
+ description: "Condition contains details for one aspect
+ of the current state of this API Resource. --- This
+ struct is intended for direct use as an array at the
+ field path .status.conditions. For example, type FooStatus
+ struct{ // Represents the observations of a foo's current
+ state. // Known .status.conditions.type are: \"Available\",
+ \"Progressing\", and \"Degraded\" // +patchMergeKey=type
+ // +patchStrategy=merge // +listType=map // +listMapKey=type
+ Conditions []metav1.Condition `json:\"conditions,omitempty\"
+ patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
+ \n // other fields }"
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the last time the
+ condition transitioned from one status to another.
+ This should be when the underlying condition changed. If
+ that is not known, then using the time when the
+ API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: message is a human readable message indicating
+ details about the transition. This may be an empty
+ string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: observedGeneration represents the .metadata.generation
+ that the condition was set based upon. For instance,
+ if .metadata.generation is currently 12, but the
+ .status.conditions[x].observedGeneration is 9, the
+ condition is out of date with respect to the current
+ state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: reason contains a programmatic identifier
+ indicating the reason for the condition's last transition.
+ Producers of specific condition types may define
+ expected values and meanings for this field, and
+ whether the values are considered a guaranteed API.
+ The value should be a CamelCase string. This field
+ may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True,
+ False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in
+ foo.example.com/CamelCase. --- Many .condition.type
+ values are consistent across resources like Available,
+ but because arbitrary conditions can be useful (see
+ .node.status.conditions), the ability to deconflict
+ is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ release:
+ description: release is the target of the update.
+ properties:
+ channels:
+ description: channels is the set of Cincinnati channels
+ to which the release currently belongs.
+ items:
+ type: string
+ type: array
+ image:
+ description: image is a container image location that
+ contains the update. When this field is part of spec,
+ image is optional if version is specified and the
+ availableUpdates field contains a matching version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on
+ a release or the metadata returned by the update API
+ and should be displayed as a link in user interfaces.
+ The URL field may not be set for test or nightly releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec,
+ version is optional if image is specified.
+ type: string
+ type: object
+ risks:
+ description: risks represents the range of issues associated
+ with updating to the target release. The cluster-version
+ operator will evaluate all entries, and only recommend
+ the update if there is at least one entry and all entries
+ recommend the update.
+ items:
+ description: ConditionalUpdateRisk represents a reason
+ and cluster-state for not recommending a conditional
+ update.
+ properties:
+ matchingRules:
+ description: matchingRules is a slice of conditions
+ for deciding which clusters match the risk and which
+ do not. The slice is ordered by decreasing precedence.
+ The cluster-version operator will walk the slice
+ in order, and stop after the first it can successfully
+ evaluate. If no condition can be successfully evaluated,
+ the update will not be recommended.
+ items:
+ description: ClusterCondition is a union of typed
+ cluster conditions. The 'type' property determines
+ which of the type-specific properties are relevant.
+ When evaluated on a cluster, the condition may
+ match, not match, or fail to evaluate.
+ properties:
+ promql:
+ description: promQL represents a cluster condition
+ based on PromQL.
+ properties:
+ promql:
+ description: PromQL is a PromQL query classifying
+                                        clusters. This query should return
+ a 1 in the match case and a 0 in the does-not-match
+ case. Queries which return no time series,
+ or which return values besides 0 or 1,
+ are evaluation failures.
+ type: string
+ required:
+ - promql
+ type: object
+ type:
+ description: type represents the cluster-condition
+ type. This defines the members and semantics
+ of any additional properties.
+ enum:
+ - Always
+ - PromQL
+ type: string
+ required:
+ - type
+ type: object
+ minItems: 1
+ type: array
+ x-kubernetes-list-type: atomic
+ message:
+ description: message provides additional information
+ about the risk of updating, in the event that matchingRules
+ match the cluster state. This is only to be consumed
+ by humans. It may contain Line Feed characters (U+000A),
+ which should be rendered as new lines.
+ minLength: 1
+ type: string
+ name:
+ description: name is the CamelCase reason for not
+ recommending a conditional update, in the event
+ that matchingRules match the cluster state.
+ minLength: 1
+ type: string
+ url:
+ description: url contains information about this risk.
+ format: uri
+ minLength: 1
+ type: string
+ required:
+ - matchingRules
+ - message
+ - name
+ - url
+ type: object
+ minItems: 1
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - release
+ - risks
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ desired:
+ description: desired is the version that the cluster is reconciling
+ towards. If the cluster is not yet fully initialized desired
+ will be set with the information available, which may be an
+ image or a tag.
+ properties:
+ channels:
+ description: channels is the set of Cincinnati channels to
+ which the release currently belongs.
+ items:
+ type: string
+ type: array
+ image:
+ description: image is a container image location that contains
+ the update. When this field is part of spec, image is optional
+ if version is specified and the availableUpdates field contains
+ a matching version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on a release
+ or the metadata returned by the update API and should be
+ displayed as a link in user interfaces. The URL field may
+ not be set for test or nightly releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec, version
+ is optional if image is specified.
+ type: string
+ type: object
+ history:
+ description: history contains a list of the most recent versions
+ applied to the cluster. This value may be empty during cluster
+ startup, and then will be updated when a new update is being
+ applied. The newest update is first in the list and it is ordered
+ by recency. Updates in the history have state Completed if the
+ rollout completed - if an update was failing or halfway applied
+ the state will be Partial. Only a limited amount of update history
+ is preserved.
+ items:
+ description: UpdateHistory is a single attempted update to the
+ cluster.
+ properties:
+ acceptedRisks:
+ description: acceptedRisks records risks which were accepted
+                            to initiate the update. For example, it may mention an
+                            Upgradeable=False or missing signature that was overridden
+ via desiredUpdate.force, or an update that was initiated
+ despite not being in the availableUpdates set of recommended
+ update targets.
+ type: string
+ completionTime:
+ description: completionTime, if set, is when the update
+ was fully applied. The update that is currently being
+ applied will have a null completion time. Completion time
+ will always be set for entries that are not the current
+ update (usually to the started time of the next update).
+ format: date-time
+ nullable: true
+ type: string
+ image:
+ description: image is a container image location that contains
+ the update. This value is always populated.
+ type: string
+ startedTime:
+ description: startedTime is the time at which the update
+ was started.
+ format: date-time
+ type: string
+ state:
+ description: state reflects whether the update was fully
+ applied. The Partial state indicates the update is not
+ fully applied, while the Completed state indicates the
+ update was successfully rolled out at least once (all
+ parts of the update successfully applied).
+ type: string
+ verified:
+ description: verified indicates whether the provided update
+ was properly verified before it was installed. If this
+ is false the cluster may not be trusted. Verified does
+ not cover upgradeable checks that depend on the cluster
+ state at the time when the update target was accepted.
+ type: boolean
+ version:
+ description: version is a semantic versioning identifying
+ the update version. If the requested image does not define
+ a version, or if a failure occurs retrieving the image,
+ this value may be empty.
+ type: string
+ required:
+ - completionTime
+ - image
+ - startedTime
+ - state
+ - verified
+ type: object
+ type: array
+ observedGeneration:
+ description: observedGeneration reports which version of the spec
+ is being synced. If this value is not equal to metadata.generation,
+ then the desired and conditions fields may represent a previous
+ version.
+ format: int64
+ type: integer
+ required:
+ - availableUpdates
- desired
- observedGeneration
type: object
- required:
- - conditions
type: object
type: object
served: true
diff --git a/cmd/install/assets/hypershift-operator/hypershift.openshift.io_hostedcontrolplanes.yaml b/cmd/install/assets/hypershift-operator/hypershift.openshift.io_hostedcontrolplanes.yaml
index 77c4ef3c986..e9fef901cd9 100644
--- a/cmd/install/assets/hypershift-operator/hypershift.openshift.io_hostedcontrolplanes.yaml
+++ b/cmd/install/assets/hypershift-operator/hypershift.openshift.io_hostedcontrolplanes.yaml
@@ -119,6 +119,12 @@ spec:
format: int32
type: integer
type: object
+ channel:
+ description: channel is an identifier for explicitly requesting that
+ a non-default set of updates be applied to this cluster. The default
+                        channel will contain stable updates that are appropriate for
+ production clusters.
+ type: string
clusterID:
description: ClusterID is the unique id that identifies the cluster
externally. Making it optional here allows us to keep compatibility
@@ -1990,6 +1996,10 @@ spec:
The default value is SingleReplica.
type: string
issuerURL:
+ description: IssuerURL is an OIDC issuer URL which is used as the
+ issuer in all ServiceAccount tokens generated by the control plane
+ API server. The default value is kubernetes.default.svc, which only
+ works for in-cluster validation.
type: string
kubeconfig:
description: KubeConfig specifies the name and key for the kubeconfig
@@ -2723,6 +2733,8 @@ spec:
type: object
x-kubernetes-map-type: atomic
releaseImage:
+ description: ReleaseImage is the release image applied to the hosted
+ control plane.
type: string
secretEncryption:
description: SecretEncryption contains metadata about the kubernetes
@@ -3166,8 +3178,9 @@ spec:
type: object
x-kubernetes-map-type: atomic
lastReleaseImageTransitionTime:
- description: lastReleaseImageTransitionTime is the time of the last
- update to the current releaseImage property.
+ description: "lastReleaseImageTransitionTime is the time of the last
+ update to the current releaseImage property. \n Deprecated: Use
+ versionStatus.history[0].startedTime instead."
format: date-time
type: string
oauthCallbackURLTemplate:
@@ -3183,15 +3196,3804 @@ spec:
is ready to receive requests This satisfies CAPI contract https://github.com/kubernetes-sigs/cluster-api/blob/cd3a694deac89d5ebeb888307deaa61487207aa0/controllers/cluster_controller_phases.go#L226-L230
type: boolean
releaseImage:
- description: ReleaseImage is the release image applied to the hosted
- control plane.
+ description: "ReleaseImage is the release image applied to the hosted
+ control plane. \n Deprecated: Use versionStatus.desired.image instead."
type: string
version:
- description: Version is the semantic version of the release applied
- by the hosted control plane operator
+ description: "Version is the semantic version of the release applied
+ by the hosted control plane operator \n Deprecated: Use versionStatus.desired.version
+ instead."
+ type: string
+ versionStatus:
+ description: versionStatus is the status of the release version applied
+ by the hosted control plane operator.
+ properties:
+ availableUpdates:
+ description: availableUpdates contains updates recommended for
+ this cluster. Updates which appear in conditionalUpdates but
+ not in availableUpdates may expose this cluster to known issues.
+ This list may be empty if no updates are recommended, if the
+ update service is unavailable, or if an invalid channel has
+ been specified.
+ items:
+ description: Release represents an OpenShift release image and
+ associated metadata.
+ properties:
+ channels:
+ description: channels is the set of Cincinnati channels
+ to which the release currently belongs.
+ items:
+ type: string
+ type: array
+ image:
+ description: image is a container image location that contains
+ the update. When this field is part of spec, image is
+ optional if version is specified and the availableUpdates
+ field contains a matching version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on a release
+ or the metadata returned by the update API and should
+ be displayed as a link in user interfaces. The URL field
+ may not be set for test or nightly releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec, version
+ is optional if image is specified.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ conditionalUpdates:
+ description: conditionalUpdates contains the list of updates that
+ may be recommended for this cluster if it meets specific required
+ conditions. Consumers interested in the set of updates that
+ are actually recommended for this cluster should use availableUpdates.
+ This list may be empty if no updates are recommended, if the
+ update service is unavailable, or if an empty or invalid channel
+ has been specified.
+ items:
+ description: ConditionalUpdate represents an update which is
+ recommended to some clusters on the version the current cluster
+ is reconciling, but which may not be recommended for the current
+ cluster.
+ properties:
+ conditions:
+ description: 'conditions represents the observations of
+ the conditional update''s current status. Known types
+ are: * Evaluating, for whether the cluster-version operator
+ will attempt to evaluate any risks[].matchingRules. *
+ Recommended, for whether the update is recommended for
+ the current cluster.'
+ items:
+ description: "Condition contains details for one aspect
+ of the current state of this API Resource. --- This
+ struct is intended for direct use as an array at the
+ field path .status.conditions. For example, type FooStatus
+ struct{ // Represents the observations of a foo's current
+ state. // Known .status.conditions.type are: \"Available\",
+ \"Progressing\", and \"Degraded\" // +patchMergeKey=type
+ // +patchStrategy=merge // +listType=map // +listMapKey=type
+ Conditions []metav1.Condition `json:\"conditions,omitempty\"
+ patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
+ \n // other fields }"
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the last time the
+ condition transitioned from one status to another.
+ This should be when the underlying condition changed. If
+ that is not known, then using the time when the
+ API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: message is a human readable message indicating
+ details about the transition. This may be an empty
+ string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: observedGeneration represents the .metadata.generation
+ that the condition was set based upon. For instance,
+ if .metadata.generation is currently 12, but the
+ .status.conditions[x].observedGeneration is 9, the
+ condition is out of date with respect to the current
+ state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: reason contains a programmatic identifier
+ indicating the reason for the condition's last transition.
+ Producers of specific condition types may define
+ expected values and meanings for this field, and
+ whether the values are considered a guaranteed API.
+ The value should be a CamelCase string. This field
+ may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True,
+ False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in
+ foo.example.com/CamelCase. --- Many .condition.type
+ values are consistent across resources like Available,
+ but because arbitrary conditions can be useful (see
+ .node.status.conditions), the ability to deconflict
+ is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ release:
+ description: release is the target of the update.
+ properties:
+ channels:
+ description: channels is the set of Cincinnati channels
+ to which the release currently belongs.
+ items:
+ type: string
+ type: array
+ image:
+ description: image is a container image location that
+ contains the update. When this field is part of spec,
+ image is optional if version is specified and the
+ availableUpdates field contains a matching version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on
+ a release or the metadata returned by the update API
+ and should be displayed as a link in user interfaces.
+ The URL field may not be set for test or nightly releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec,
+ version is optional if image is specified.
+ type: string
+ type: object
+ risks:
+ description: risks represents the range of issues associated
+ with updating to the target release. The cluster-version
+ operator will evaluate all entries, and only recommend
+ the update if there is at least one entry and all entries
+ recommend the update.
+ items:
+ description: ConditionalUpdateRisk represents a reason
+ and cluster-state for not recommending a conditional
+ update.
+ properties:
+ matchingRules:
+ description: matchingRules is a slice of conditions
+ for deciding which clusters match the risk and which
+ do not. The slice is ordered by decreasing precedence.
+ The cluster-version operator will walk the slice
+ in order, and stop after the first it can successfully
+ evaluate. If no condition can be successfully evaluated,
+ the update will not be recommended.
+ items:
+ description: ClusterCondition is a union of typed
+ cluster conditions. The 'type' property determines
+ which of the type-specific properties are relevant.
+ When evaluated on a cluster, the condition may
+ match, not match, or fail to evaluate.
+ properties:
+ promql:
+ description: promQL represents a cluster condition
+ based on PromQL.
+ properties:
+ promql:
+ description: PromQL is a PromQL query classifying
+                                  clusters. This query should return
+ a 1 in the match case and a 0 in the does-not-match
+ case. Queries which return no time series,
+ or which return values besides 0 or 1,
+ are evaluation failures.
+ type: string
+ required:
+ - promql
+ type: object
+ type:
+ description: type represents the cluster-condition
+ type. This defines the members and semantics
+ of any additional properties.
+ enum:
+ - Always
+ - PromQL
+ type: string
+ required:
+ - type
+ type: object
+ minItems: 1
+ type: array
+ x-kubernetes-list-type: atomic
+ message:
+ description: message provides additional information
+ about the risk of updating, in the event that matchingRules
+ match the cluster state. This is only to be consumed
+ by humans. It may contain Line Feed characters (U+000A),
+ which should be rendered as new lines.
+ minLength: 1
+ type: string
+ name:
+ description: name is the CamelCase reason for not
+ recommending a conditional update, in the event
+ that matchingRules match the cluster state.
+ minLength: 1
+ type: string
+ url:
+ description: url contains information about this risk.
+ format: uri
+ minLength: 1
+ type: string
+ required:
+ - matchingRules
+ - message
+ - name
+ - url
+ type: object
+ minItems: 1
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - release
+ - risks
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ desired:
+ description: desired is the version that the cluster is reconciling
+ towards. If the cluster is not yet fully initialized desired
+ will be set with the information available, which may be an
+ image or a tag.
+ properties:
+ channels:
+ description: channels is the set of Cincinnati channels to
+ which the release currently belongs.
+ items:
+ type: string
+ type: array
+ image:
+ description: image is a container image location that contains
+ the update. When this field is part of spec, image is optional
+ if version is specified and the availableUpdates field contains
+ a matching version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on a release
+ or the metadata returned by the update API and should be
+ displayed as a link in user interfaces. The URL field may
+ not be set for test or nightly releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec, version
+ is optional if image is specified.
+ type: string
+ type: object
+ history:
+ description: history contains a list of the most recent versions
+ applied to the cluster. This value may be empty during cluster
+ startup, and then will be updated when a new update is being
+ applied. The newest update is first in the list and it is ordered
+ by recency. Updates in the history have state Completed if the
+ rollout completed - if an update was failing or halfway applied
+ the state will be Partial. Only a limited amount of update history
+ is preserved.
+ items:
+ description: UpdateHistory is a single attempted update to the
+ cluster.
+ properties:
+ acceptedRisks:
+ description: acceptedRisks records risks which were accepted
+                    to initiate the update. For example, it may mention an
+                    Upgradeable=False or missing signature that was overridden
+ via desiredUpdate.force, or an update that was initiated
+ despite not being in the availableUpdates set of recommended
+ update targets.
+ type: string
+ completionTime:
+ description: completionTime, if set, is when the update
+ was fully applied. The update that is currently being
+ applied will have a null completion time. Completion time
+ will always be set for entries that are not the current
+ update (usually to the started time of the next update).
+ format: date-time
+ nullable: true
+ type: string
+ image:
+ description: image is a container image location that contains
+ the update. This value is always populated.
+ type: string
+ startedTime:
+ description: startedTime is the time at which the update
+ was started.
+ format: date-time
+ type: string
+ state:
+ description: state reflects whether the update was fully
+ applied. The Partial state indicates the update is not
+ fully applied, while the Completed state indicates the
+ update was successfully rolled out at least once (all
+ parts of the update successfully applied).
+ type: string
+ verified:
+ description: verified indicates whether the provided update
+ was properly verified before it was installed. If this
+ is false the cluster may not be trusted. Verified does
+ not cover upgradeable checks that depend on the cluster
+ state at the time when the update target was accepted.
+ type: boolean
+ version:
+ description: version is a semantic versioning identifying
+ the update version. If the requested image does not define
+ a version, or if a failure occurs retrieving the image,
+ this value may be empty.
+ type: string
+ required:
+ - completionTime
+ - image
+ - startedTime
+ - state
+ - verified
+ type: object
+ type: array
+ observedGeneration:
+ description: observedGeneration reports which version of the spec
+ is being synced. If this value is not equal to metadata.generation,
+ then the desired and conditions fields may represent a previous
+ version.
+ format: int64
+ type: integer
+ required:
+ - availableUpdates
+ - desired
+ - observedGeneration
+ type: object
+ required:
+ - initialized
+ - ready
+ type: object
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: HostedControlPlane defines the desired state of HostedControlPlane
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: HostedControlPlaneSpec defines the desired state of HostedControlPlane
+ properties:
+ additionalTrustBundle:
+ description: AdditionalTrustBundle references a ConfigMap containing
+ a PEM-encoded X.509 certificate bundle
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ auditWebhook:
+ description: AuditWebhook contains metadata for configuring an audit
+ webhook endpoint for a cluster to process cluster audit events.
+ It references a secret that contains the webhook information for
+ the audit webhook endpoint. It is a secret because if the endpoint
+ has MTLS the kubeconfig will contain client keys. This is currently
+ only supported in IBM Cloud. The kubeconfig needs to be stored in
+ the secret with a secret key name that corresponds to the constant
+ AuditWebhookKubeconfigKey.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ autoscaling:
+ description: Autoscaling specifies auto-scaling behavior that applies
+ to all NodePools associated with the control plane.
+ properties:
+ maxNodeProvisionTime:
+ description: MaxNodeProvisionTime is the maximum time to wait
+ for node provisioning before considering the provisioning to
+ be unsuccessful, expressed as a Go duration string. The default
+ is 15 minutes.
+ pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$
+ type: string
+ maxNodesTotal:
+ description: MaxNodesTotal is the maximum allowable number of
+ nodes across all NodePools for a HostedCluster. The autoscaler
+ will not grow the cluster beyond this number.
+ format: int32
+ minimum: 0
+ type: integer
+ maxPodGracePeriod:
+ description: MaxPodGracePeriod is the maximum seconds to wait
+ for graceful pod termination before scaling down a NodePool.
+ The default is 600 seconds.
+ format: int32
+ minimum: 0
+ type: integer
+ podPriorityThreshold:
+ description: "PodPriorityThreshold enables users to schedule \"best-effort\"
+ pods, which shouldn't trigger autoscaler actions, but only run
+ when there are spare resources available. The default is -10.
+ \n See the following for more details: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-does-cluster-autoscaler-work-with-pod-priority-and-preemption"
+ format: int32
+ type: integer
+ type: object
+ channel:
+ description: channel is an identifier for explicitly requesting that
+ a non-default set of updates be applied to this cluster. The default
+            channel will contain stable updates that are appropriate for
+ production clusters.
+ type: string
+ clusterID:
+ description: ClusterID is the unique id that identifies the cluster
+ externally. Making it optional here allows us to keep compatibility
+ with previous versions of the control-plane-operator that have no
+ knowledge of this field.
type: string
+ configuration:
+ description: 'Configuration embeds resources that correspond to the
+ openshift configuration API: https://docs.openshift.com/container-platform/4.7/rest_api/config_apis/config-apis-index.html'
+ properties:
+ apiServer:
+ description: APIServer holds configuration (like serving certificates,
+ client CA and CORS domains) shared by all API servers in the
+ system, among them especially kube-apiserver and openshift-apiserver.
+ properties:
+ additionalCORSAllowedOrigins:
+ description: additionalCORSAllowedOrigins lists additional,
+ user-defined regular expressions describing hosts for which
+ the API server allows access using the CORS headers. This
+ may be needed to access the API and the integrated OAuth
+ server from JavaScript applications. The values are regular
+ expressions that correspond to the Golang regular expression
+ language.
+ items:
+ type: string
+ type: array
+ audit:
+ default:
+ profile: Default
+ description: audit specifies the settings for audit configuration
+ to be applied to all OpenShift-provided API servers in the
+ cluster.
+ properties:
+ customRules:
+ description: customRules specify profiles per group. These
+ profile take precedence over the top-level profile field
+ if they apply. They are evaluation from top to bottom
+ and the first one that matches, applies.
+ items:
+ description: AuditCustomRule describes a custom rule
+ for an audit profile that takes precedence over the
+ top-level profile.
+ properties:
+ group:
+                              description: group is the name of a group that a
+                                request user must be a member of for this profile
+                                to apply.
+ minLength: 1
+ type: string
+ profile:
+ description: "profile specifies the name of the
+ desired audit policy configuration to be deployed
+ to all OpenShift-provided API servers in the cluster.
+ \n The following profiles are provided: - Default:
+ the existing default policy. - WriteRequestBodies:
+ like 'Default', but logs request and response
+ HTTP payloads for write requests (create, update,
+ patch). - AllRequestBodies: like 'WriteRequestBodies',
+ but also logs request and response HTTP payloads
+ for read requests (get, list). - None: no requests
+ are logged at all, not even oauthaccesstokens
+ and oauthauthorizetokens. \n If unset, the 'Default'
+ profile is used as the default."
+ enum:
+ - Default
+ - WriteRequestBodies
+ - AllRequestBodies
+ - None
+ type: string
+ required:
+ - group
+ - profile
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - group
+ x-kubernetes-list-type: map
+ profile:
+ default: Default
+ description: "profile specifies the name of the desired
+ top-level audit profile to be applied to all requests
+ sent to any of the OpenShift-provided API servers in
+ the cluster (kube-apiserver, openshift-apiserver and
+ oauth-apiserver), with the exception of those requests
+ that match one or more of the customRules. \n The following
+ profiles are provided: - Default: default policy which
+ means MetaData level logging with the exception of events
+ (not logged at all), oauthaccesstokens and oauthauthorizetokens
+ (both logged at RequestBody level). - WriteRequestBodies:
+ like 'Default', but logs request and response HTTP payloads
+ for write requests (create, update, patch). - AllRequestBodies:
+ like 'WriteRequestBodies', but also logs request and
+ response HTTP payloads for read requests (get, list).
+ - None: no requests are logged at all, not even oauthaccesstokens
+ and oauthauthorizetokens. \n Warning: It is not recommended
+ to disable audit logging by using the `None` profile
+ unless you are fully aware of the risks of not logging
+ data that can be beneficial when troubleshooting issues.
+ If you disable audit logging and a support situation
+ arises, you might need to enable audit logging and reproduce
+ the issue in order to troubleshoot properly. \n If unset,
+ the 'Default' profile is used as the default."
+ enum:
+ - Default
+ - WriteRequestBodies
+ - AllRequestBodies
+ - None
+ type: string
+ type: object
+ clientCA:
+ description: 'clientCA references a ConfigMap containing a
+ certificate bundle for the signers that will be recognized
+ for incoming client certificates in addition to the operator
+ managed signers. If this is empty, then only operator managed
+ signers are valid. You usually only have to set this if
+ you have your own PKI you wish to honor client certificates
+ from. The ConfigMap must exist in the openshift-config namespace
+ and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"]
+ - CA bundle.'
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ required:
+ - name
+ type: object
+ encryption:
+ description: encryption allows the configuration of encryption
+ of resources at the datastore layer.
+ properties:
+ type:
+ description: "type defines what encryption type should
+ be used to encrypt resources at the datastore layer.
+ When this field is unset (i.e. when it is set to the
+ empty string), identity is implied. The behavior of
+ unset can and will change over time. Even if encryption
+ is enabled by default, the meaning of unset may change
+ to a different encryption type based on changes in best
+ practices. \n When encryption is enabled, all sensitive
+ resources shipped with the platform are encrypted. This
+ list of sensitive resources can and will change over
+ time. The current authoritative list is: \n 1. secrets
+ 2. configmaps 3. routes.route.openshift.io 4. oauthaccesstokens.oauth.openshift.io
+ 5. oauthauthorizetokens.oauth.openshift.io"
+ enum:
+ - ""
+ - identity
+ - aescbc
+ type: string
+ type: object
+ servingCerts:
+ description: servingCert is the TLS cert info for serving
+ secure traffic. If not specified, operator managed certificates
+ will be used for serving secure traffic.
+ properties:
+ namedCertificates:
+ description: namedCertificates references secrets containing
+ the TLS cert info for serving secure traffic to specific
+ hostnames. If no named certificates are provided, or
+ no named certificates match the server name as understood
+ by a client, the defaultServingCertificate will be used.
+ items:
+ description: APIServerNamedServingCert maps a server
+ DNS name, as understood by a client, to a certificate.
+ properties:
+ names:
+                              description: names is an optional list of explicit
+ DNS names (leading wildcards allowed) that should
+ use this certificate to serve secure traffic.
+ If no names are provided, the implicit names will
+ be extracted from the certificates. Exact names
+ trump over wildcard names. Explicit names defined
+ here trump over extracted implicit names.
+ items:
+ type: string
+ type: array
+ servingCertificate:
+ description: 'servingCertificate references a kubernetes.io/tls
+ type secret containing the TLS cert info for serving
+ secure traffic. The secret must exist in the openshift-config
+ namespace and contain the following required fields:
+ - Secret.Data["tls.key"] - TLS private key. -
+ Secret.Data["tls.crt"] - TLS certificate.'
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ type: object
+ type: array
+ type: object
+ tlsSecurityProfile:
+ description: "tlsSecurityProfile specifies settings for TLS
+ connections for externally exposed servers. \n If unset,
+ a default (which may change between releases) is chosen.
+ Note that only Old, Intermediate and Custom profiles are
+ currently supported, and the maximum available MinTLSVersions
+ is VersionTLS12."
+ properties:
+ custom:
+ description: "custom is a user-defined TLS security profile.
+ Be extremely careful using a custom profile as invalid
+ configurations can be catastrophic. An example custom
+ profile looks like this: \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305
+ - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256
+ - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: TLSv1.1"
+ nullable: true
+ properties:
+ ciphers:
+ description: "ciphers is used to specify the cipher
+ algorithms that are negotiated during the TLS handshake.
+ \ Operators may remove entries their operands do
+ not support. For example, to use DES-CBC3-SHA (yaml):
+ \n ciphers: - DES-CBC3-SHA"
+ items:
+ type: string
+ type: array
+ minTLSVersion:
+ description: "minTLSVersion is used to specify the
+ minimal version of the TLS protocol that is negotiated
+ during the TLS handshake. For example, to use TLS
+ versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion:
+ TLSv1.1 \n NOTE: currently the highest minTLSVersion
+ allowed is VersionTLS12"
+ enum:
+ - VersionTLS10
+ - VersionTLS11
+ - VersionTLS12
+ - VersionTLS13
+ type: string
+ type: object
+ intermediate:
+ description: "intermediate is a TLS security profile based
+ on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29
+ \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
+ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
+ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256
+ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384
+ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305
+ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384
+ minTLSVersion: TLSv1.2"
+ nullable: true
+ type: object
+ modern:
+ description: "modern is a TLS security profile based on:
+ \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+ \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
+ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
+ minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported."
+ nullable: true
+ type: object
+ old:
+ description: "old is a TLS security profile based on:
+ \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
+ \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
+ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
+ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256
+ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384
+ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305
+ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384
+ - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256
+ - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA -
+ ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384
+ - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256
+ - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384
+ - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA
+ - DES-CBC3-SHA minTLSVersion: TLSv1.0"
+ nullable: true
+ type: object
+ type:
+ description: "type is one of Old, Intermediate, Modern
+ or Custom. Custom provides the ability to specify individual
+ TLS security profile parameters. Old, Intermediate and
+ Modern are TLS security profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations
+ \n The profiles are intent based, so they may change
+ over time as new ciphers are developed and existing
+ ciphers are found to be insecure. Depending on precisely
+ which ciphers are available to a process, the list may
+ be reduced. \n Note that the Modern profile is currently
+ not supported because it is not yet well adopted by
+ common software libraries."
+ enum:
+ - Old
+ - Intermediate
+ - Modern
+ - Custom
+ type: string
+ type: object
+ type: object
+ authentication:
+ description: Authentication specifies cluster-wide settings for
+ authentication (like OAuth and webhook token authenticators).
+ properties:
+ oauthMetadata:
+ description: 'oauthMetadata contains the discovery endpoint
+ data for OAuth 2.0 Authorization Server Metadata for an
+ external OAuth server. This discovery document can be viewed
+ from its served location: oc get --raw ''/.well-known/oauth-authorization-server''
+ For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
+ If oauthMetadata.name is non-empty, this value has precedence
+ over any metadata reference stored in status. The key "oauthMetadata"
+ is used to locate the data. If specified and the config
+ map or expected key is not found, no metadata is served.
+ If the specified metadata is not valid, no metadata is served.
+ The namespace for this config map is openshift-config.'
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ required:
+ - name
+ type: object
+ serviceAccountIssuer:
+ description: 'serviceAccountIssuer is the identifier of the
+ bound service account token issuer. The default is https://kubernetes.default.svc
+ WARNING: Updating this field will result in the invalidation
+ of all bound tokens with the previous issuer value. Unless
+ the holder of a bound token has explicit support for a change
+ in issuer, they will not request a new bound token until
+ pod restart or until their existing token exceeds 80% of
+ its duration.'
+ type: string
+ type:
+ description: type identifies the cluster managed, user facing
+ authentication mode in use. Specifically, it manages the
+ component that responds to login attempts. The default is
+ IntegratedOAuth.
+ type: string
+ webhookTokenAuthenticator:
+ description: webhookTokenAuthenticator configures a remote
+ token reviewer. These remote authentication webhooks can
+ be used to verify bearer tokens via the tokenreviews.authentication.k8s.io
+ REST API. This is required to honor bearer tokens that are
+ provisioned by an external authentication service.
+ properties:
+ kubeConfig:
+ description: "kubeConfig references a secret that contains
+ kube config file data which describes how to access
+ the remote webhook service. The namespace for the referenced
+ secret is openshift-config. \n For further details,
+ see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
+ \n The key \"kubeConfig\" is used to locate the data.
+ If the secret or expected key is not found, the webhook
+ is not honored. If the specified kube config data is
+ not valid, the webhook is not honored."
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ required:
+ - name
+ type: object
+ required:
+ - kubeConfig
+ type: object
+ webhookTokenAuthenticators:
+ description: webhookTokenAuthenticators is DEPRECATED, setting
+ it has no effect.
+ items:
+ description: deprecatedWebhookTokenAuthenticator holds the
+ necessary configuration options for a remote token authenticator.
+ It's the same as WebhookTokenAuthenticator but it's missing
+ the 'required' validation on KubeConfig field.
+ properties:
+ kubeConfig:
+ description: 'kubeConfig contains kube config file data
+ which describes how to access the remote webhook service.
+ For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
+ The key "kubeConfig" is used to locate the data. If
+ the secret or expected key is not found, the webhook
+ is not honored. If the specified kube config data
+ is not valid, the webhook is not honored. The namespace
+ for this secret is determined by the point of use.'
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ required:
+ - name
+ type: object
+ type: object
+ type: array
+ type: object
+ featureGate:
+ description: FeatureGate holds cluster-wide information about
+ feature gates.
+ properties:
+ customNoUpgrade:
+ description: customNoUpgrade allows the enabling or disabling
+ of any feature. Turning this feature set on IS NOT SUPPORTED,
+ CANNOT BE UNDONE, and PREVENTS UPGRADES. Because of its
+ nature, this setting cannot be validated. If you have any
+ typos or accidentally apply invalid combinations your cluster
+ may fail in an unrecoverable way. featureSet must equal
+ "CustomNoUpgrade" must be set to use this field.
+ nullable: true
+ properties:
+ disabled:
+ description: disabled is a list of all feature gates that
+ you want to force off
+ items:
+ type: string
+ type: array
+ enabled:
+ description: enabled is a list of all feature gates that
+ you want to force on
+ items:
+ type: string
+ type: array
+ type: object
+ featureSet:
+ description: featureSet changes the list of features in the
+ cluster. The default is empty. Be very careful adjusting
+ this setting. Turning on or off features may cause irreversible
+ changes in your cluster which cannot be undone.
+ type: string
+ type: object
+ image:
+ description: Image governs policies related to imagestream imports
+ and runtime configuration for external registries. It allows
+ cluster admins to configure which registries OpenShift is allowed
+ to import images from, extra CA trust bundles for external registries,
+ and policies to block or allow registry hostnames. When exposing
+ OpenShift's image registry to the public, this also lets cluster
+ admins specify the external hostname.
+ properties:
+ additionalTrustedCA:
+ description: additionalTrustedCA is a reference to a ConfigMap
+ containing additional CAs that should be trusted during
+ imagestream import, pod image pull, build image pull, and
+ imageregistry pullthrough. The namespace for this config
+ map is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ required:
+ - name
+ type: object
+ allowedRegistriesForImport:
+ description: allowedRegistriesForImport limits the container
+ image registries that normal users may import images from.
+ Set this list to the registries that you trust to contain
+ valid Docker images and that you want applications to be
+ able to import from. Users with permission to create Images
+ or ImageStreamMappings via the API are not affected by this
+ policy - typically only administrators or system integrations
+ will have those permissions.
+ items:
+ description: RegistryLocation contains a location of the
+ registry specified by the registry domain name. The domain
+ name might include wildcards, like '*' or '??'.
+ properties:
+ domainName:
+ description: domainName specifies a domain name for
+ the registry In case the registry use non-standard
+ (80 or 443) port, the port should be included in the
+ domain name as well.
+ type: string
+ insecure:
+ description: insecure indicates whether the registry
+ is secure (https) or insecure (http) By default (if
+ not specified) the registry is assumed as secure.
+ type: boolean
+ type: object
+ type: array
+ externalRegistryHostnames:
+ description: externalRegistryHostnames provides the hostnames
+ for the default external image registry. The external hostname
+ should be set only when the image registry is exposed externally.
+ The first value is used in 'publicDockerImageRepository'
+ field in ImageStreams. The value must be in "hostname[:port]"
+ format.
+ items:
+ type: string
+ type: array
+ registrySources:
+ description: registrySources contains configuration that determines
+ how the container runtime should treat individual registries
+ when accessing images for builds+pods. (e.g. whether or
+ not to allow insecure access). It does not contain configuration
+ for the internal cluster registry.
+ properties:
+ allowedRegistries:
+ description: "allowedRegistries are the only registries
+ permitted for image pull and push actions. All other
+ registries are denied. \n Only one of BlockedRegistries
+ or AllowedRegistries may be set."
+ items:
+ type: string
+ type: array
+ blockedRegistries:
+ description: "blockedRegistries cannot be used for image
+ pull and push actions. All other registries are permitted.
+ \n Only one of BlockedRegistries or AllowedRegistries
+ may be set."
+ items:
+ type: string
+ type: array
+ containerRuntimeSearchRegistries:
+ description: 'containerRuntimeSearchRegistries are registries
+ that will be searched when pulling images that do not
+ have fully qualified domains in their pull specs. Registries
+ will be searched in the order provided in the list.
+ Note: this search list only works with the container
+ runtime, i.e CRI-O. Will NOT work with builds or imagestream
+ imports.'
+ format: hostname
+ items:
+ type: string
+ minItems: 1
+ type: array
+ x-kubernetes-list-type: set
+ insecureRegistries:
+ description: insecureRegistries are registries which do
+ not have a valid TLS certificates or only support HTTP
+ connections.
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ ingress:
+ description: Ingress holds cluster-wide information about ingress,
+ including the default ingress domain used for routes.
+ properties:
+ appsDomain:
+ description: appsDomain is an optional domain to use instead
+ of the one specified in the domain field when a Route is
+ created without specifying an explicit host. If appsDomain
+ is nonempty, this value is used to generate default host
+ values for Route. Unlike domain, appsDomain may be modified
+ after installation. This assumes a new ingresscontroller
+ has been setup with a wildcard certificate.
+ type: string
+ componentRoutes:
+ description: "componentRoutes is an optional list of routes
+ that are managed by OpenShift components that a cluster-admin
+ is able to configure the hostname and serving certificate
+ for. The namespace and name of each route in this list should
+ match an existing entry in the status.componentRoutes list.
+ \n To determine the set of configurable Routes, look at
+ namespace and name of entries in the .status.componentRoutes
+ list, where participating operators write the status of
+ configurable routes."
+ items:
+ description: ComponentRouteSpec allows for configuration
+ of a route's hostname and serving certificate.
+ properties:
+ hostname:
+ description: hostname is the hostname that should be
+ used by the route.
+ pattern: ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$
+ type: string
+ name:
+ description: "name is the logical name of the route
+ to customize. \n The namespace and name of this componentRoute
+ must match a corresponding entry in the list of status.componentRoutes
+ if the route is to be customized."
+ maxLength: 256
+ minLength: 1
+ type: string
+ namespace:
+ description: "namespace is the namespace of the route
+ to customize. \n The namespace and name of this componentRoute
+ must match a corresponding entry in the list of status.componentRoutes
+ if the route is to be customized."
+ maxLength: 63
+ minLength: 1
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
+ type: string
+ servingCertKeyPairSecret:
+ description: servingCertKeyPairSecret is a reference
+ to a secret of type `kubernetes.io/tls` in the openshift-config
+ namespace. The serving cert/key pair must match and
+ will be used by the operator to fulfill the intent
+ of serving with this name. If the custom hostname
+ uses the default routing suffix of the cluster, the
+ Secret specification for a serving certificate will
+ not be needed.
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ required:
+ - name
+ type: object
+ required:
+ - hostname
+ - name
+ - namespace
+ type: object
+ type: array
+ domain:
+ description: "domain is used to generate a default host name
+ for a route when the route's host name is empty. The generated
+                        host name will follow this pattern: \"<route-name>.<route-namespace>.<domain>\".
+                        \n It is also used as the default wildcard domain suffix
+                        for ingress. The default ingresscontroller domain will follow
+                        this pattern: \"*.<domain>\". \n Once set, changing domain
+ is not currently supported."
+ type: string
+ requiredHSTSPolicies:
+ description: "requiredHSTSPolicies specifies HSTS policies
+ that are required to be set on newly created or updated
+ routes matching the domainPattern/s and namespaceSelector/s
+ that are specified in the policy. Each requiredHSTSPolicy
+ must have at least a domainPattern and a maxAge to validate
+ a route HSTS Policy route annotation, and affect route admission.
+ \n A candidate route is checked for HSTS Policies if it
+ has the HSTS Policy route annotation: \"haproxy.router.openshift.io/hsts_header\"
+ E.g. haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains
+ \n - For each candidate route, if it matches a requiredHSTSPolicy
+ domainPattern and optional namespaceSelector, then the maxAge,
+ preloadPolicy, and includeSubdomainsPolicy must be valid
+ to be admitted. Otherwise, the route is rejected. - The
+ first match, by domainPattern and optional namespaceSelector,
+ in the ordering of the RequiredHSTSPolicies determines the
+ route's admission status. - If the candidate route doesn't
+ match any requiredHSTSPolicy domainPattern and optional
+ namespaceSelector, then it may use any HSTS Policy annotation.
+ \n The HSTS policy configuration may be changed after routes
+ have already been created. An update to a previously admitted
+ route may then fail if the updated route does not conform
+ to the updated HSTS policy configuration. However, changing
+ the HSTS policy configuration will not cause a route that
+ is already admitted to stop working. \n Note that if there
+ are no RequiredHSTSPolicies, any HSTS Policy annotation
+ on the route is valid."
+ items:
+ properties:
+ domainPatterns:
+ description: "domainPatterns is a list of domains for
+ which the desired HSTS annotations are required. If
+ domainPatterns is specified and a route is created
+ with a spec.host matching one of the domains, the
+ route must specify the HSTS Policy components described
+ in the matching RequiredHSTSPolicy. \n The use of
+ wildcards is allowed like this: *.foo.com matches
+ everything under foo.com. foo.com only matches foo.com,
+ so to cover foo.com and everything under it, you must
+ specify *both*."
+ items:
+ type: string
+ minItems: 1
+ type: array
+ includeSubDomainsPolicy:
+ description: 'includeSubDomainsPolicy means the HSTS
+ Policy should apply to any subdomains of the host''s
+ domain name. Thus, for the host bar.foo.com, if includeSubDomainsPolicy
+ was set to RequireIncludeSubDomains: - the host app.bar.foo.com
+ would inherit the HSTS Policy of bar.foo.com - the
+ host bar.foo.com would inherit the HSTS Policy of
+ bar.foo.com - the host foo.com would NOT inherit the
+ HSTS Policy of bar.foo.com - the host def.foo.com
+ would NOT inherit the HSTS Policy of bar.foo.com'
+ enum:
+ - RequireIncludeSubDomains
+ - RequireNoIncludeSubDomains
+ - NoOpinion
+ type: string
+ maxAge:
+ description: maxAge is the delta time range in seconds
+ during which hosts are regarded as HSTS hosts. If
+ set to 0, it negates the effect, and hosts are removed
+ as HSTS hosts. If set to 0 and includeSubdomains is
+ specified, all subdomains of the host are also removed
+ as HSTS hosts. maxAge is a time-to-live value, and
+ if this policy is not refreshed on a client, the HSTS
+ policy will eventually expire on that client.
+ properties:
+ largestMaxAge:
+ description: The largest allowed value (in seconds)
+ of the RequiredHSTSPolicy max-age This value can
+ be left unspecified, in which case no upper limit
+ is enforced.
+ format: int32
+ maximum: 2147483647
+ minimum: 0
+ type: integer
+ smallestMaxAge:
+ description: The smallest allowed value (in seconds)
+ of the RequiredHSTSPolicy max-age Setting max-age=0
+ allows the deletion of an existing HSTS header
+ from a host. This is a necessary tool for administrators
+ to quickly correct mistakes. This value can be
+ left unspecified, in which case no lower limit
+ is enforced.
+ format: int32
+ maximum: 2147483647
+ minimum: 0
+ type: integer
+ type: object
+ namespaceSelector:
+ description: namespaceSelector specifies a label selector
+ such that the policy applies only to those routes
+ that are in namespaces with labels that match the
+ selector, and are in one of the DomainPatterns. Defaults
+ to the empty LabelSelector, which matches everything.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a
+ selector that contains values, a key, and an
+ operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are
+ In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty. If the
+ operator is Exists or DoesNotExist, the
+ values array must be empty. This array is
+ replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is "In",
+ and the values array contains only "value". The
+ requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ preloadPolicy:
+ description: preloadPolicy directs the client to include
+ hosts in its host preload list so that it never needs
+ to do an initial load to get the HSTS header (note
+ that this is not defined in RFC 6797 and is therefore
+ client implementation-dependent).
+ enum:
+ - RequirePreload
+ - RequireNoPreload
+ - NoOpinion
+ type: string
+ required:
+ - domainPatterns
+ type: object
+ type: array
+ type: object
+ network:
+ description: 'Network holds cluster-wide information about the
+ network. It is used to configure the desired network configuration,
+ such as: IP address pools for services/pod IPs, network plugin,
+ etc. Please view network.spec for an explanation on what applies
+ when configuring this resource. TODO (csrwng): Add validation
+ here to exclude changes that conflict with networking settings
+ in the HostedCluster.Spec.Networking field.'
+ properties:
+ clusterNetwork:
+ description: IP address pool to use for pod IPs. This field
+ is immutable after installation.
+ items:
+ description: ClusterNetworkEntry is a contiguous block of
+ IP addresses from which pod IPs are allocated.
+ properties:
+ cidr:
+ description: The complete block for pod IPs.
+ type: string
+ hostPrefix:
+ description: The size (prefix) of block to allocate
+ to each node. If this field is not used by the plugin,
+ it can be left unset.
+ format: int32
+ minimum: 0
+ type: integer
+ type: object
+ type: array
+ externalIP:
+ description: externalIP defines configuration for controllers
+ that affect Service.ExternalIP. If nil, then ExternalIP
+ is not allowed to be set.
+ properties:
+ autoAssignCIDRs:
+ description: autoAssignCIDRs is a list of CIDRs from which
+ to automatically assign Service.ExternalIP. These are
+ assigned when the service is of type LoadBalancer. In
+ general, this is only useful for bare-metal clusters.
+ In Openshift 3.x, this was misleadingly called "IngressIPs".
+ Automatically assigned External IPs are not affected
+ by any ExternalIPPolicy rules. Currently, only one entry
+ may be provided.
+ items:
+ type: string
+ type: array
+ policy:
+ description: policy is a set of restrictions applied to
+ the ExternalIP field. If nil or empty, then ExternalIP
+ is not allowed to be set.
+ properties:
+ allowedCIDRs:
+ description: allowedCIDRs is the list of allowed CIDRs.
+ items:
+ type: string
+ type: array
+ rejectedCIDRs:
+ description: rejectedCIDRs is the list of disallowed
+ CIDRs. These take precedence over allowedCIDRs.
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ networkType:
+ description: 'NetworkType is the plugin that is to be deployed
+ (e.g. OpenShiftSDN). This should match a value that the
+ cluster-network-operator understands, or else no networking
+ will be installed. Currently supported values are: - OpenShiftSDN
+ This field is immutable after installation.'
+ type: string
+ serviceNetwork:
+ description: IP address pool for services. Currently, we only
+ support a single entry here. This field is immutable after
+ installation.
+ items:
+ type: string
+ type: array
+ serviceNodePortRange:
+ description: The port range allowed for Services of type NodePort.
+ If not specified, the default of 30000-32767 will be used.
+ Such Services without a NodePort specified will have one
+ automatically allocated from this range. This parameter
+ can be updated after the cluster is installed.
+ pattern: ^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: string
+ type: object
+ oauth:
+ description: OAuth holds cluster-wide information about OAuth.
+ It is used to configure the integrated OAuth server. This configuration
+ is only honored when the top level Authentication config has
+ type set to IntegratedOAuth.
+ properties:
+ identityProviders:
+ description: identityProviders is an ordered list of ways
+ for a user to identify themselves. When this list is empty,
+ no identities are provisioned for users.
+ items:
+ description: IdentityProvider provides identities for users
+ authenticating using credentials
+ properties:
+ basicAuth:
+ description: basicAuth contains configuration options
+ for the BasicAuth IdP
+ properties:
+ ca:
+ description: ca is an optional reference to a config
+ map by name containing the PEM-encoded CA bundle.
+ It is used as a trust anchor to validate the TLS
+ certificate presented by the remote server. The
+ key "ca.crt" is used to locate the data. If specified
+ and the config map or expected key is not found,
+ the identity provider is not honored. If the specified
+ ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots
+ are used. The namespace for this config map is
+ openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ tlsClientCert:
+ description: tlsClientCert is an optional reference
+ to a secret by name that contains the PEM-encoded
+ TLS client certificate to present when connecting
+ to the server. The key "tls.crt" is used to locate
+ the data. If specified and the secret or expected
+ key is not found, the identity provider is not
+ honored. If the specified certificate data is
+ not valid, the identity provider is not honored.
+ The namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ tlsClientKey:
+ description: tlsClientKey is an optional reference
+ to a secret by name that contains the PEM-encoded
+ TLS private key for the client certificate referenced
+ in tlsClientCert. The key "tls.key" is used to
+ locate the data. If specified and the secret or
+ expected key is not found, the identity provider
+ is not honored. If the specified certificate data
+ is not valid, the identity provider is not honored.
+ The namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ url:
+ description: url is the remote URL to connect to
+ type: string
+ type: object
+ github:
+ description: github enables user authentication using
+ GitHub credentials
+ properties:
+ ca:
+ description: ca is an optional reference to a config
+ map by name containing the PEM-encoded CA bundle.
+ It is used as a trust anchor to validate the TLS
+ certificate presented by the remote server. The
+ key "ca.crt" is used to locate the data. If specified
+ and the config map or expected key is not found,
+ the identity provider is not honored. If the specified
+ ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots
+ are used. This can only be configured when hostname
+ is set to a non-empty value. The namespace for
+ this config map is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference
+ to the secret by name containing the oauth client
+ secret. The key "clientSecret" is used to locate
+ the data. If the secret or expected key is not
+ found, the identity provider is not honored. The
+ namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ hostname:
+ description: hostname is the optional domain (e.g.
+ "mycompany.com") for use with a hosted instance
+ of GitHub Enterprise. It must match the GitHub
+ Enterprise settings value configured at /setup/settings#hostname.
+ type: string
+ organizations:
+ description: organizations optionally restricts
+ which organizations are allowed to log in
+ items:
+ type: string
+ type: array
+ teams:
+ description: teams optionally restricts which teams
+                              are allowed to log in. Format is <org>/<team>.
+ items:
+ type: string
+ type: array
+ type: object
+ gitlab:
+ description: gitlab enables user authentication using
+ GitLab credentials
+ properties:
+ ca:
+ description: ca is an optional reference to a config
+ map by name containing the PEM-encoded CA bundle.
+ It is used as a trust anchor to validate the TLS
+ certificate presented by the remote server. The
+ key "ca.crt" is used to locate the data. If specified
+ and the config map or expected key is not found,
+ the identity provider is not honored. If the specified
+ ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots
+ are used. The namespace for this config map is
+ openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference
+ to the secret by name containing the oauth client
+ secret. The key "clientSecret" is used to locate
+ the data. If the secret or expected key is not
+ found, the identity provider is not honored. The
+ namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ url:
+ description: url is the oauth server base URL
+ type: string
+ type: object
+ google:
+ description: google enables user authentication using
+ Google credentials
+ properties:
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference
+ to the secret by name containing the oauth client
+ secret. The key "clientSecret" is used to locate
+ the data. If the secret or expected key is not
+ found, the identity provider is not honored. The
+ namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ hostedDomain:
+ description: hostedDomain is the optional Google
+ App domain (e.g. "mycompany.com") to restrict
+ logins to
+ type: string
+ type: object
+ htpasswd:
+ description: htpasswd enables user authentication using
+ an HTPasswd file to validate credentials
+ properties:
+ fileData:
+ description: fileData is a required reference to
+ a secret by name containing the data to use as
+ the htpasswd file. The key "htpasswd" is used
+ to locate the data. If the secret or expected
+ key is not found, the identity provider is not
+ honored. If the specified htpasswd data is not
+ valid, the identity provider is not honored. The
+ namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ type: object
+ keystone:
+ description: keystone enables user authentication using
+ keystone password credentials
+ properties:
+ ca:
+ description: ca is an optional reference to a config
+ map by name containing the PEM-encoded CA bundle.
+ It is used as a trust anchor to validate the TLS
+ certificate presented by the remote server. The
+ key "ca.crt" is used to locate the data. If specified
+ and the config map or expected key is not found,
+ the identity provider is not honored. If the specified
+ ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots
+ are used. The namespace for this config map is
+ openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ domainName:
+ description: domainName is required for keystone
+ v3
+ type: string
+ tlsClientCert:
+ description: tlsClientCert is an optional reference
+ to a secret by name that contains the PEM-encoded
+ TLS client certificate to present when connecting
+ to the server. The key "tls.crt" is used to locate
+ the data. If specified and the secret or expected
+ key is not found, the identity provider is not
+ honored. If the specified certificate data is
+ not valid, the identity provider is not honored.
+ The namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ tlsClientKey:
+ description: tlsClientKey is an optional reference
+ to a secret by name that contains the PEM-encoded
+ TLS private key for the client certificate referenced
+ in tlsClientCert. The key "tls.key" is used to
+ locate the data. If specified and the secret or
+ expected key is not found, the identity provider
+ is not honored. If the specified certificate data
+ is not valid, the identity provider is not honored.
+ The namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ url:
+ description: url is the remote URL to connect to
+ type: string
+ type: object
+ ldap:
+ description: ldap enables user authentication using
+ LDAP credentials
+ properties:
+ attributes:
+ description: attributes maps LDAP attributes to
+ identities
+ properties:
+ email:
+ description: email is the list of attributes
+ whose values should be used as the email address.
+ Optional. If unspecified, no email is set
+ for the identity
+ items:
+ type: string
+ type: array
+ id:
+ description: id is the list of attributes whose
+ values should be used as the user ID. Required.
+ First non-empty attribute is used. At least
+ one attribute is required. If none of the
+ listed attribute have a value, authentication
+ fails. LDAP standard identity attribute is
+ "dn"
+ items:
+ type: string
+ type: array
+ name:
+ description: name is the list of attributes
+ whose values should be used as the display
+ name. Optional. If unspecified, no display
+ name is set for the identity LDAP standard
+ display name attribute is "cn"
+ items:
+ type: string
+ type: array
+ preferredUsername:
+ description: preferredUsername is the list of
+ attributes whose values should be used as
+ the preferred username. LDAP standard login
+ attribute is "uid"
+ items:
+ type: string
+ type: array
+ type: object
+ bindDN:
+ description: bindDN is an optional DN to bind with
+ during the search phase.
+ type: string
+ bindPassword:
+ description: bindPassword is an optional reference
+ to a secret by name containing a password to bind
+ with during the search phase. The key "bindPassword"
+ is used to locate the data. If specified and the
+ secret or expected key is not found, the identity
+ provider is not honored. The namespace for this
+ secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ ca:
+ description: ca is an optional reference to a config
+ map by name containing the PEM-encoded CA bundle.
+ It is used as a trust anchor to validate the TLS
+ certificate presented by the remote server. The
+ key "ca.crt" is used to locate the data. If specified
+ and the config map or expected key is not found,
+ the identity provider is not honored. If the specified
+ ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots
+ are used. The namespace for this config map is
+ openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ insecure:
+ description: 'insecure, if true, indicates the connection
+ should not use TLS WARNING: Should not be set
+ to `true` with the URL scheme "ldaps://" as "ldaps://"
+ URLs always attempt to connect using TLS, even
+ when `insecure` is set to `true` When `true`,
+ "ldap://" URLS connect insecurely. When `false`,
+ "ldap://" URLs are upgraded to a TLS connection
+ using StartTLS as specified in https://tools.ietf.org/html/rfc2830.'
+ type: boolean
+ url:
+ description: 'url is an RFC 2255 URL which specifies
+ the LDAP search parameters to use. The syntax
+ of the URL is: ldap://host:port/basedn?attribute?scope?filter'
+ type: string
+ type: object
+ mappingMethod:
+ description: mappingMethod determines how identities
+ from this provider are mapped to users Defaults to
+ "claim"
+ type: string
+ name:
+ description: 'name is used to qualify the identities
+ returned by this provider. - It MUST be unique and
+ not shared by any other identity provider used - It
+ MUST be a valid path segment: name cannot equal "."
+ or ".." or contain "/" or "%" or ":" Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName'
+ type: string
+ openID:
+ description: openID enables user authentication using
+ OpenID credentials
+ properties:
+ ca:
+ description: ca is an optional reference to a config
+ map by name containing the PEM-encoded CA bundle.
+ It is used as a trust anchor to validate the TLS
+ certificate presented by the remote server. The
+ key "ca.crt" is used to locate the data. If specified
+ and the config map or expected key is not found,
+ the identity provider is not honored. If the specified
+ ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots
+ are used. The namespace for this config map is
+ openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ claims:
+ description: claims mappings
+ properties:
+ email:
+ description: email is the list of claims whose
+ values should be used as the email address.
+ Optional. If unspecified, no email is set
+ for the identity
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ groups:
+ description: groups is the list of claims value
+ of which should be used to synchronize groups
+ from the OIDC provider to OpenShift for the
+ user. If multiple claims are specified, the
+ first one with a non-empty value is used.
+ items:
+ description: OpenIDClaim represents a claim
+ retrieved from an OpenID provider's tokens
+ or userInfo responses
+ minLength: 1
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ description: name is the list of claims whose
+ values should be used as the display name.
+ Optional. If unspecified, no display name
+ is set for the identity
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ preferredUsername:
+ description: preferredUsername is the list of
+ claims whose values should be used as the
+ preferred username. If unspecified, the preferred
+ username is determined from the value of the
+ sub claim
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference
+ to the secret by name containing the oauth client
+ secret. The key "clientSecret" is used to locate
+ the data. If the secret or expected key is not
+ found, the identity provider is not honored. The
+ namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ extraAuthorizeParameters:
+ additionalProperties:
+ type: string
+ description: extraAuthorizeParameters are any custom
+ parameters to add to the authorize request.
+ type: object
+ extraScopes:
+ description: extraScopes are any scopes to request
+ in addition to the standard "openid" scope.
+ items:
+ type: string
+ type: array
+ issuer:
+ description: issuer is the URL that the OpenID Provider
+ asserts as its Issuer Identifier. It must use
+ the https scheme with no query or fragment component.
+ type: string
+ type: object
+ requestHeader:
+ description: requestHeader enables user authentication
+ using request header credentials
+ properties:
+ ca:
+ description: ca is a required reference to a config
+ map by name containing the PEM-encoded CA bundle.
+ It is used as a trust anchor to validate the TLS
+ certificate presented by the remote server. Specifically,
+ it allows verification of incoming requests to
+ prevent header spoofing. The key "ca.crt" is used
+ to locate the data. If the config map or expected
+ key is not found, the identity provider is not
+ honored. If the specified ca data is not valid,
+ the identity provider is not honored. The namespace
+ for this config map is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ challengeURL:
+ description: challengeURL is a URL to redirect unauthenticated
+ /authorize requests to Unauthenticated requests
+ from OAuth clients which expect WWW-Authenticate
+ challenges will be redirected here. ${url} is
+ replaced with the current URL, escaped to be safe
+ in a query parameter https://www.example.com/sso-login?then=${url}
+ ${query} is replaced with the current query string
+ https://www.example.com/auth-proxy/oauth/authorize?${query}
+ Required when challenge is set to true.
+ type: string
+ clientCommonNames:
+ description: clientCommonNames is an optional list
+ of common names to require a match from. If empty,
+ any client certificate validated against the clientCA
+ bundle is considered authoritative.
+ items:
+ type: string
+ type: array
+ emailHeaders:
+ description: emailHeaders is the set of headers
+ to check for the email address
+ items:
+ type: string
+ type: array
+ headers:
+ description: headers is the set of headers to check
+ for identity information
+ items:
+ type: string
+ type: array
+ loginURL:
+ description: loginURL is a URL to redirect unauthenticated
+ /authorize requests to Unauthenticated requests
+ from OAuth clients which expect interactive logins
+ will be redirected here ${url} is replaced with
+ the current URL, escaped to be safe in a query
+ parameter https://www.example.com/sso-login?then=${url}
+ ${query} is replaced with the current query string
+ https://www.example.com/auth-proxy/oauth/authorize?${query}
+ Required when login is set to true.
+ type: string
+ nameHeaders:
+ description: nameHeaders is the set of headers to
+ check for the display name
+ items:
+ type: string
+ type: array
+ preferredUsernameHeaders:
+ description: preferredUsernameHeaders is the set
+ of headers to check for the preferred username
+ items:
+ type: string
+ type: array
+ type: object
+ type:
+ description: type identifies the identity provider type
+ for this entry.
+ type: string
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ templates:
+ description: templates allow you to customize pages like the
+ login page.
+ properties:
+ error:
+ description: error is the name of a secret that specifies
+ a go template to use to render error pages during the
+ authentication or grant flow. The key "errors.html"
+ is used to locate the template data. If specified and
+ the secret or expected key is not found, the default
+ error page is used. If the specified template is not
+ valid, the default error page is used. If unspecified,
+ the default error page is used. The namespace for this
+ secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ required:
+ - name
+ type: object
+ login:
+ description: login is the name of a secret that specifies
+ a go template to use to render the login page. The key
+ "login.html" is used to locate the template data. If
+ specified and the secret or expected key is not found,
+ the default login page is used. If the specified template
+ is not valid, the default login page is used. If unspecified,
+ the default login page is used. The namespace for this
+ secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ required:
+ - name
+ type: object
+ providerSelection:
+ description: providerSelection is the name of a secret
+ that specifies a go template to use to render the provider
+ selection page. The key "providers.html" is used to
+ locate the template data. If specified and the secret
+ or expected key is not found, the default provider selection
+ page is used. If the specified template is not valid,
+ the default provider selection page is used. If unspecified,
+ the default provider selection page is used. The namespace
+ for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ required:
+ - name
+ type: object
+ type: object
+ tokenConfig:
+ description: tokenConfig contains options for authorization
+ and access tokens
+ properties:
+ accessTokenInactivityTimeout:
+ description: "accessTokenInactivityTimeout defines the
+ token inactivity timeout for tokens granted by any client.
+ The value represents the maximum amount of time that
+ can occur between consecutive uses of the token. Tokens
+ become invalid if they are not used within this temporal
+ window. The user will need to acquire a new token to
+ regain access once a token times out. Takes valid time
+ duration string such as \"5m\", \"1.5h\" or \"2h45m\".
+ The minimum allowed value for duration is 300s (5 minutes).
+ If the timeout is configured per client, then that value
+ takes precedence. If the timeout value is not specified
+ and the client does not override the value, then tokens
+ are valid until their lifetime. \n WARNING: existing
+ tokens' timeout will not be affected (lowered) by changing
+ this value"
+ type: string
+ accessTokenInactivityTimeoutSeconds:
+ description: 'accessTokenInactivityTimeoutSeconds - DEPRECATED:
+ setting this field has no effect.'
+ format: int32
+ type: integer
+ accessTokenMaxAgeSeconds:
+ description: accessTokenMaxAgeSeconds defines the maximum
+ age of access tokens
+ format: int32
+ type: integer
+ type: object
+ type: object
+ proxy:
+ description: Proxy holds cluster-wide information on how to configure
+ default proxies for the cluster.
+ properties:
+ httpProxy:
+ description: httpProxy is the URL of the proxy for HTTP requests. Empty
+ means unset and will not result in an env var.
+ type: string
+ httpsProxy:
+ description: httpsProxy is the URL of the proxy for HTTPS
+ requests. Empty means unset and will not result in an env
+ var.
+ type: string
+ noProxy:
+ description: noProxy is a comma-separated list of hostnames
+ and/or CIDRs and/or IPs for which the proxy should not be
+ used. Empty means unset and will not result in an env var.
+ type: string
+ readinessEndpoints:
+ description: readinessEndpoints is a list of endpoints used
+ to verify readiness of the proxy.
+ items:
+ type: string
+ type: array
+ trustedCA:
+ description: "trustedCA is a reference to a ConfigMap containing
+ a CA certificate bundle. The trustedCA field should only
+ be consumed by a proxy validator. The validator is responsible
+ for reading the certificate bundle from the required key
+ \"ca-bundle.crt\", merging it with the system default trust
+ bundle, and writing the merged trust bundle to a ConfigMap
+ named \"trusted-ca-bundle\" in the \"openshift-config-managed\"
+ namespace. Clients that expect to make proxy connections
+ must use the trusted-ca-bundle for all HTTPS requests to
+ the proxy, and may use the trusted-ca-bundle for non-proxy
+ HTTPS requests as well. \n The namespace for the ConfigMap
+ referenced by trustedCA is \"openshift-config\". Here is
+ an example ConfigMap (in yaml): \n apiVersion: v1 kind:
+ ConfigMap metadata: name: user-ca-bundle namespace: openshift-config
+ data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom
+ CA certificate bundle. -----END CERTIFICATE-----"
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ required:
+ - name
+ type: object
+ type: object
+ scheduler:
+ description: Scheduler holds cluster-wide config information to
+ run the Kubernetes Scheduler and influence its placement decisions.
+ The canonical name for this config is `cluster`.
+ properties:
+ defaultNodeSelector:
+ description: 'defaultNodeSelector helps set the cluster-wide
+ default node selector to restrict pod placement to specific
+ nodes. This is applied to the pods created in all namespaces
+ and creates an intersection with any existing nodeSelectors
+ already set on a pod, additionally constraining that pod''s
+ selector. For example, defaultNodeSelector: "type=user-node,region=east"
+ would set nodeSelector field in pod spec to "type=user-node,region=east"
+ to all pods created in all namespaces. Namespaces having
+ project-wide node selectors won''t be impacted even if this
+ field is set. This adds an annotation section to the namespace.
+ For example, if a new namespace is created with node-selector=''type=user-node,region=east'',
+ the annotation openshift.io/node-selector: type=user-node,region=east
+ gets added to the project. When the openshift.io/node-selector
+ annotation is set on the project the value is used in preference
+ to the value we are setting for defaultNodeSelector field.
+ For instance, openshift.io/node-selector: "type=user-node,region=west"
+ means that the default of "type=user-node,region=east" set
+ in defaultNodeSelector would not be applied.'
+ type: string
+ mastersSchedulable:
+ description: 'MastersSchedulable allows masters nodes to be
+ schedulable. When this flag is turned on, all the master
+ nodes in the cluster will be made schedulable, so that workload
+ pods can run on them. The default value for this field is
+ false, meaning none of the master nodes are schedulable.
+ Important Note: Once the workload pods start running on
+ the master nodes, extreme care must be taken to ensure that
+ cluster-critical control plane components are not impacted.
+ Please turn on this field after doing due diligence.'
+ type: boolean
+ policy:
+ description: 'DEPRECATED: the scheduler Policy API has been
+ deprecated and will be removed in a future release. policy
+ is a reference to a ConfigMap containing scheduler policy
+ which has user specified predicates and priorities. If this
+ ConfigMap is not available scheduler will default to use
+ DefaultAlgorithmProvider. The namespace for this configmap
+ is openshift-config.'
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ required:
+ - name
+ type: object
+ profile:
+ description: "profile sets which scheduling profile should
+ be set in order to configure scheduling decisions for new
+ pods. \n Valid values are \"LowNodeUtilization\", \"HighNodeUtilization\",
+ \"NoScoring\" Defaults to \"LowNodeUtilization\""
+ enum:
+ - ""
+ - LowNodeUtilization
+ - HighNodeUtilization
+ - NoScoring
+ type: string
+ type: object
+ type: object
+ controllerAvailabilityPolicy:
+ default: SingleReplica
+ description: ControllerAvailabilityPolicy specifies the availability
+ policy applied to critical control plane components. The default
+ value is SingleReplica.
+ type: string
+ dns:
+ description: DNSSpec specifies the DNS configuration in the cluster.
+ properties:
+ baseDomain:
+ description: BaseDomain is the base domain of the cluster.
+ type: string
+ privateZoneID:
+ description: PrivateZoneID is the Hosted Zone ID where all the
+ DNS records that are only available internally to the cluster
+ exist.
+ type: string
+ publicZoneID:
+ description: PublicZoneID is the Hosted Zone ID where all the
+ DNS records that are publicly accessible to the internet exist.
+ type: string
+ required:
+ - baseDomain
+ type: object
+ etcd:
+ description: Etcd contains metadata about the etcd cluster the hypershift
+ managed Openshift control plane components use to store data.
+ properties:
+ managed:
+ description: Managed specifies the behavior of an etcd cluster
+ managed by HyperShift.
+ properties:
+ storage:
+ description: Storage specifies how etcd data is persisted.
+ properties:
+ persistentVolume:
+ description: PersistentVolume is the configuration for
+ PersistentVolume etcd storage. With this implementation,
+ a PersistentVolume will be allocated for every etcd
+ member (either 1 or 3 depending on the HostedCluster
+ control plane availability configuration).
+ properties:
+ size:
+ anyOf:
+ - type: integer
+ - type: string
+ default: 4Gi
+ description: Size is the minimum size of the data
+ volume for each etcd member.
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ storageClassName:
+ description: "StorageClassName is the StorageClass
+ of the data volume for each etcd member. \n See
+ https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1."
+ type: string
+ type: object
+ restoreSnapshotURL:
+ description: RestoreSnapshotURL allows an optional list
+ of URLs to be provided where an etcd snapshot can be
+ downloaded, for example a pre-signed URL referencing
+ a storage service, one URL per replica. This snapshot
+ will be restored on initial startup, only when the etcd
+ PV is empty.
+ items:
+ type: string
+ type: array
+ type:
+ description: Type is the kind of persistent storage implementation
+ to use for etcd.
+ enum:
+ - PersistentVolume
+ type: string
+ required:
+ - type
+ type: object
+ required:
+ - storage
+ type: object
+ managementType:
+ description: ManagementType defines how the etcd cluster is managed.
+ enum:
+ - Managed
+ - Unmanaged
+ type: string
+ unmanaged:
+ description: Unmanaged specifies configuration which enables the
+            control plane to integrate with an externally managed etcd cluster.
+ properties:
+ endpoint:
+ description: "Endpoint is the full etcd cluster client endpoint
+ URL. For example: \n https://etcd-client:2379 \n If the
+ URL uses an HTTPS scheme, the TLS field is required."
+ pattern: ^https://
+ type: string
+ tls:
+ description: TLS specifies TLS configuration for HTTPS etcd
+ client endpoints.
+ properties:
+ clientSecret:
+ description: "ClientSecret refers to a secret for client
+ mTLS authentication with the etcd cluster. It may have
+ the following key/value pairs: \n etcd-client-ca.crt:
+ Certificate Authority value etcd-client.crt: Client
+ certificate value etcd-client.key: Client certificate
+ key value"
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - clientSecret
+ type: object
+ required:
+ - endpoint
+ - tls
+ type: object
+ required:
+ - managementType
+ type: object
+ fips:
+ description: FIPS specifies if the nodes for the cluster will be running
+ in FIPS mode
+ type: boolean
+ imageContentSources:
+ description: ImageContentSources lists sources/repositories for the
+ release-image content.
+ items:
+ description: ImageContentSource specifies image mirrors that can
+ be used by cluster nodes to pull content. For cluster workloads,
+ if a container image registry host of the pullspec matches Source
+ then one of the Mirrors are substituted as hosts in the pullspec
+ and tried in order to fetch the image.
+ properties:
+ mirrors:
+ description: Mirrors are one or more repositories that may also
+ contain the same images.
+ items:
+ type: string
+ type: array
+ source:
+ description: Source is the repository that users refer to, e.g.
+ in image pull specifications.
+ type: string
+ required:
+ - source
+ type: object
+ type: array
+ infraID:
+ type: string
+ infrastructureAvailabilityPolicy:
+ default: SingleReplica
+ description: InfrastructureAvailabilityPolicy specifies the availability
+ policy applied to infrastructure services which run on cluster nodes.
+ The default value is SingleReplica.
+ type: string
+ issuerURL:
+ description: IssuerURL is an OIDC issuer URL which is used as the
+ issuer in all ServiceAccount tokens generated by the control plane
+ API server. The default value is kubernetes.default.svc, which only
+ works for in-cluster validation.
+ type: string
+ kubeconfig:
+ description: KubeConfig specifies the name and key for the kubeconfig
+ secret
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ networking:
+ description: Networking specifies network configuration for the cluster.
+ Temporarily optional for backward compatibility, required in future
+ releases.
+ properties:
+ apiServer:
+ description: APIServer contains advanced network settings for
+ the API server that affect how the APIServer is exposed inside
+ a cluster node.
+ properties:
+ advertiseAddress:
+ description: AdvertiseAddress is the address that nodes will
+ use to talk to the API server. This is an address associated
+ with the loopback adapter of each node. If not specified,
+ 172.20.0.1 is used.
+ type: string
+ allowedCIDRBlocks:
+ description: AllowedCIDRBlocks is an allow list of CIDR blocks
+ that can access the APIServer If not specified, traffic
+ is allowed from all addresses. This depends on underlying
+ support by the cloud provider for Service LoadBalancerSourceRanges
+ items:
+ pattern: ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$
+ type: string
+ type: array
+ port:
+ description: Port is the port at which the APIServer is exposed
+ inside a node. Other pods using host networking cannot listen
+ on this port. If not specified, 6443 is used.
+ format: int32
+ type: integer
+ type: object
+ clusterNetwork:
+ description: ClusterNetwork is the list of IP address pools for
+ pods.
+ items:
+ description: ClusterNetworkEntry is a single IP address block
+ for pod IP blocks. IP blocks are allocated with size 2^HostSubnetLength.
+ properties:
+ cidr:
+ description: CIDR is the IP block address pool.
+ type: string
+ hostPrefix:
+ description: HostPrefix is the prefix size to allocate to
+ each node from the CIDR. For example, 24 would allocate
+                  2^8=256 addresses to each node. If this field is not used
+ by the plugin, it can be left unset.
+ format: int32
+ type: integer
+ required:
+ - cidr
+ type: object
+ type: array
+ machineNetwork:
+ description: MachineNetwork is the list of IP address pools for
+ machines.
+ items:
+ description: MachineNetworkEntry is a single IP address block
+ for node IP blocks.
+ properties:
+ cidr:
+ description: CIDR is the IP block address pool for machines
+ within the cluster.
+ type: string
+ required:
+ - cidr
+ type: object
+ type: array
+ networkType:
+ default: OVNKubernetes
+ description: NetworkType specifies the SDN provider used for cluster
+ networking.
+ enum:
+ - OpenShiftSDN
+ - Calico
+ - OVNKubernetes
+ - Other
+ type: string
+ serviceNetwork:
+ description: 'ServiceNetwork is the list of IP address pools for
+ services. NOTE: currently only one entry is supported.'
+ items:
+ description: ServiceNetworkEntry is a single IP address block
+ for the service network.
+ properties:
+ cidr:
+ description: CIDR is the IP block address pool for services
+ within the cluster.
+ type: string
+ required:
+ - cidr
+ type: object
+ type: array
+ required:
+ - clusterNetwork
+ - networkType
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector when specified, must be true for the pods
+ managed by the HostedCluster to be scheduled.
+ type: object
+ olmCatalogPlacement:
+ default: management
+ description: OLMCatalogPlacement specifies the placement of OLM catalog
+ components. By default, this is set to management and OLM catalog
+ components are deployed onto the management cluster. If set to guest,
+ the OLM catalog components will be deployed onto the guest cluster.
+ enum:
+ - management
+ - guest
+ type: string
+ pausedUntil:
+ description: 'PausedUntil is a field that can be used to pause reconciliation
+ on a resource. Either a date can be provided in RFC3339 format or
+ a boolean. If a date is provided: reconciliation is paused on the
+ resource until that date. If the boolean true is provided: reconciliation
+ is paused on the resource until the field is removed.'
+ type: string
+ platform:
+ description: PlatformSpec specifies the underlying infrastructure
+ provider for the cluster and is used to configure platform specific
+ behavior.
+ properties:
+ agent:
+ description: Agent specifies configuration for agent-based installations.
+ properties:
+ agentNamespace:
+ description: AgentNamespace is the namespace where to search
+ for Agents for this cluster
+ type: string
+ required:
+ - agentNamespace
+ type: object
+ aws:
+ description: AWS specifies configuration for clusters running
+ on Amazon Web Services.
+ properties:
+ cloudProviderConfig:
+ description: 'CloudProviderConfig specifies AWS networking
+ configuration for the control plane. This is mainly used
+ for cloud provider controller config: https://github.com/kubernetes/kubernetes/blob/f5be5052e3d0808abb904aebd3218fe4a5c2dd82/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go#L1347-L1364
+ TODO(dan): should this be named AWSNetworkConfig?'
+ properties:
+ subnet:
+ description: Subnet is the subnet to use for control plane
+ cloud resources.
+ properties:
+ arn:
+ description: ARN of resource
+ type: string
+ filters:
+ description: 'Filters is a set of key/value pairs
+ used to identify a resource They are applied according
+ to the rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ items:
+ description: Filter is a filter used to identify
+ an AWS resource
+ properties:
+ name:
+ description: Name of the filter. Filter names
+ are case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter
+ values. Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ vpc:
+ description: VPC is the VPC to use for control plane cloud
+ resources.
+ type: string
+ zone:
+ description: Zone is the availability zone where control
+ plane cloud resources are created.
+ type: string
+ required:
+ - vpc
+ type: object
+ endpointAccess:
+ default: Public
+ description: EndpointAccess specifies the publishing scope
+ of cluster endpoints. The default is Public.
+ enum:
+ - Public
+ - PublicAndPrivate
+ - Private
+ type: string
+ region:
+ description: Region is the AWS region in which the cluster
+ resides. This configures the OCP control plane cloud integrations,
+ and is used by NodePool to resolve the correct boot AMI
+ for a given release.
+ type: string
+ resourceTags:
+ description: ResourceTags is a list of additional tags to
+ apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html
+ for information on tagging AWS resources. AWS supports a
+ maximum of 50 tags per resource. OpenShift reserves 25 tags
+ for its use, leaving 25 tags available for the user.
+ items:
+ description: AWSResourceTag is a tag to apply to AWS resources
+ created for the cluster.
+ properties:
+ key:
+ description: Key is the key of the tag.
+ maxLength: 128
+ minLength: 1
+ pattern: ^[0-9A-Za-z_.:/=+-@]+$
+ type: string
+ value:
+ description: "Value is the value of the tag. \n Some
+ AWS service do not support empty values. Since tags
+ are added to resources in many services, the length
+ of the tag value must meet the requirements of all
+ services."
+ maxLength: 256
+ minLength: 1
+ pattern: ^[0-9A-Za-z_.:/=+-@]+$
+ type: string
+ required:
+ - key
+ - value
+ type: object
+ maxItems: 25
+ type: array
+ rolesRef:
+ description: RolesRef contains references to various AWS IAM
+ roles required to enable integrations such as OIDC.
+ properties:
+ controlPlaneOperatorARN:
+ description: "ControlPlaneOperatorARN is an ARN value
+ referencing a role appropriate for the Control Plane
+ Operator. \n The following is an example of a valid
+ policy document: \n { \"Version\": \"2012-10-17\", \"Statement\":
+ [ { \"Effect\": \"Allow\", \"Action\": [ \"ec2:CreateVpcEndpoint\",
+ \"ec2:DescribeVpcEndpoints\", \"ec2:ModifyVpcEndpoint\",
+ \"ec2:DeleteVpcEndpoints\", \"ec2:CreateTags\", \"route53:ListHostedZones\"
+ ], \"Resource\": \"*\" }, { \"Effect\": \"Allow\", \"Action\":
+ [ \"route53:ChangeResourceRecordSets\", \"route53:ListResourceRecordSets\"
+ ], \"Resource\": \"arn:aws:route53:::%s\" } ] }"
+ type: string
+ imageRegistryARN:
+ description: "ImageRegistryARN is an ARN value referencing
+ a role appropriate for the Image Registry Operator.
+ \n The following is an example of a valid policy document:
+ \n { \"Version\": \"2012-10-17\", \"Statement\": [ {
+ \"Effect\": \"Allow\", \"Action\": [ \"s3:CreateBucket\",
+ \"s3:DeleteBucket\", \"s3:PutBucketTagging\", \"s3:GetBucketTagging\",
+ \"s3:PutBucketPublicAccessBlock\", \"s3:GetBucketPublicAccessBlock\",
+ \"s3:PutEncryptionConfiguration\", \"s3:GetEncryptionConfiguration\",
+ \"s3:PutLifecycleConfiguration\", \"s3:GetLifecycleConfiguration\",
+ \"s3:GetBucketLocation\", \"s3:ListBucket\", \"s3:GetObject\",
+ \"s3:PutObject\", \"s3:DeleteObject\", \"s3:ListBucketMultipartUploads\",
+ \"s3:AbortMultipartUpload\", \"s3:ListMultipartUploadParts\"
+ ], \"Resource\": \"*\" } ] }"
+ type: string
+ ingressARN:
+ description: "The referenced role must have a trust relationship
+ that allows it to be assumed via web identity. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html.
+ Example: { \"Version\": \"2012-10-17\", \"Statement\":
+ [ { \"Effect\": \"Allow\", \"Principal\": { \"Federated\":
+ \"{{ .ProviderARN }}\" }, \"Action\": \"sts:AssumeRoleWithWebIdentity\",
+ \"Condition\": { \"StringEquals\": { \"{{ .ProviderName
+ }}:sub\": {{ .ServiceAccounts }} } } } ] } \n IngressARN
+ is an ARN value referencing a role appropriate for the
+ Ingress Operator. \n The following is an example of
+ a valid policy document: \n { \"Version\": \"2012-10-17\",
+ \"Statement\": [ { \"Effect\": \"Allow\", \"Action\":
+ [ \"elasticloadbalancing:DescribeLoadBalancers\", \"tag:GetResources\",
+ \"route53:ListHostedZones\" ], \"Resource\": \"*\" },
+ { \"Effect\": \"Allow\", \"Action\": [ \"route53:ChangeResourceRecordSets\"
+ ], \"Resource\": [ \"arn:aws:route53:::PUBLIC_ZONE_ID\",
+ \"arn:aws:route53:::PRIVATE_ZONE_ID\" ] } ] }"
+ type: string
+ kubeCloudControllerARN:
+ description: "KubeCloudControllerARN is an ARN value referencing
+ a role appropriate for the KCM/KCC. \n The following
+ is an example of a valid policy document: \n { \"Version\":
+ \"2012-10-17\", \"Statement\": [ { \"Action\": [ \"ec2:DescribeInstances\",
+ \"ec2:DescribeImages\", \"ec2:DescribeRegions\", \"ec2:DescribeRouteTables\",
+ \"ec2:DescribeSecurityGroups\", \"ec2:DescribeSubnets\",
+ \"ec2:DescribeVolumes\", \"ec2:CreateSecurityGroup\",
+ \"ec2:CreateTags\", \"ec2:CreateVolume\", \"ec2:ModifyInstanceAttribute\",
+ \"ec2:ModifyVolume\", \"ec2:AttachVolume\", \"ec2:AuthorizeSecurityGroupIngress\",
+ \"ec2:CreateRoute\", \"ec2:DeleteRoute\", \"ec2:DeleteSecurityGroup\",
+ \"ec2:DeleteVolume\", \"ec2:DetachVolume\", \"ec2:RevokeSecurityGroupIngress\",
+ \"ec2:DescribeVpcs\", \"elasticloadbalancing:AddTags\",
+ \"elasticloadbalancing:AttachLoadBalancerToSubnets\",
+ \"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer\",
+ \"elasticloadbalancing:CreateLoadBalancer\", \"elasticloadbalancing:CreateLoadBalancerPolicy\",
+ \"elasticloadbalancing:CreateLoadBalancerListeners\",
+ \"elasticloadbalancing:ConfigureHealthCheck\", \"elasticloadbalancing:DeleteLoadBalancer\",
+ \"elasticloadbalancing:DeleteLoadBalancerListeners\",
+ \"elasticloadbalancing:DescribeLoadBalancers\", \"elasticloadbalancing:DescribeLoadBalancerAttributes\",
+ \"elasticloadbalancing:DetachLoadBalancerFromSubnets\",
+ \"elasticloadbalancing:DeregisterInstancesFromLoadBalancer\",
+ \"elasticloadbalancing:ModifyLoadBalancerAttributes\",
+ \"elasticloadbalancing:RegisterInstancesWithLoadBalancer\",
+ \"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer\",
+ \"elasticloadbalancing:AddTags\", \"elasticloadbalancing:CreateListener\",
+ \"elasticloadbalancing:CreateTargetGroup\", \"elasticloadbalancing:DeleteListener\",
+ \"elasticloadbalancing:DeleteTargetGroup\", \"elasticloadbalancing:DescribeListeners\",
+ \"elasticloadbalancing:DescribeLoadBalancerPolicies\",
+ \"elasticloadbalancing:DescribeTargetGroups\", \"elasticloadbalancing:DescribeTargetHealth\",
+ \"elasticloadbalancing:ModifyListener\", \"elasticloadbalancing:ModifyTargetGroup\",
+ \"elasticloadbalancing:RegisterTargets\", \"elasticloadbalancing:SetLoadBalancerPoliciesOfListener\",
+ \"iam:CreateServiceLinkedRole\", \"kms:DescribeKey\"
+ ], \"Resource\": [ \"*\" ], \"Effect\": \"Allow\" }
+ ] }"
+ type: string
+ networkARN:
+ description: "NetworkARN is an ARN value referencing a
+ role appropriate for the Network Operator. \n The following
+ is an example of a valid policy document: \n { \"Version\":
+ \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\",
+ \"Action\": [ \"ec2:DescribeInstances\", \"ec2:DescribeInstanceStatus\",
+ \"ec2:DescribeInstanceTypes\", \"ec2:UnassignPrivateIpAddresses\",
+ \"ec2:AssignPrivateIpAddresses\", \"ec2:UnassignIpv6Addresses\",
+ \"ec2:AssignIpv6Addresses\", \"ec2:DescribeSubnets\",
+ \"ec2:DescribeNetworkInterfaces\" ], \"Resource\": \"*\"
+ } ] }"
+ type: string
+ nodePoolManagementARN:
+ description: "NodePoolManagementARN is an ARN value referencing
+ a role appropriate for the CAPI Controller. \n The following
+ is an example of a valid policy document: \n { \"Version\":
+ \"2012-10-17\", \"Statement\": [ { \"Action\": [ \"ec2:AllocateAddress\",
+ \"ec2:AssociateRouteTable\", \"ec2:AttachInternetGateway\",
+ \"ec2:AuthorizeSecurityGroupIngress\", \"ec2:CreateInternetGateway\",
+ \"ec2:CreateNatGateway\", \"ec2:CreateRoute\", \"ec2:CreateRouteTable\",
+ \"ec2:CreateSecurityGroup\", \"ec2:CreateSubnet\", \"ec2:CreateTags\",
+ \"ec2:DeleteInternetGateway\", \"ec2:DeleteNatGateway\",
+ \"ec2:DeleteRouteTable\", \"ec2:DeleteSecurityGroup\",
+ \"ec2:DeleteSubnet\", \"ec2:DeleteTags\", \"ec2:DescribeAccountAttributes\",
+ \"ec2:DescribeAddresses\", \"ec2:DescribeAvailabilityZones\",
+ \"ec2:DescribeImages\", \"ec2:DescribeInstances\", \"ec2:DescribeInternetGateways\",
+ \"ec2:DescribeNatGateways\", \"ec2:DescribeNetworkInterfaces\",
+ \"ec2:DescribeNetworkInterfaceAttribute\", \"ec2:DescribeRouteTables\",
+ \"ec2:DescribeSecurityGroups\", \"ec2:DescribeSubnets\",
+ \"ec2:DescribeVpcs\", \"ec2:DescribeVpcAttribute\",
+ \"ec2:DescribeVolumes\", \"ec2:DetachInternetGateway\",
+ \"ec2:DisassociateRouteTable\", \"ec2:DisassociateAddress\",
+ \"ec2:ModifyInstanceAttribute\", \"ec2:ModifyNetworkInterfaceAttribute\",
+ \"ec2:ModifySubnetAttribute\", \"ec2:ReleaseAddress\",
+ \"ec2:RevokeSecurityGroupIngress\", \"ec2:RunInstances\",
+ \"ec2:TerminateInstances\", \"tag:GetResources\", \"ec2:CreateLaunchTemplate\",
+ \"ec2:CreateLaunchTemplateVersion\", \"ec2:DescribeLaunchTemplates\",
+ \"ec2:DescribeLaunchTemplateVersions\", \"ec2:DeleteLaunchTemplate\",
+ \"ec2:DeleteLaunchTemplateVersions\" ], \"Resource\":
+ [ \"*\" ], \"Effect\": \"Allow\" }, { \"Condition\":
+ { \"StringLike\": { \"iam:AWSServiceName\": \"elasticloadbalancing.amazonaws.com\"
+ } }, \"Action\": [ \"iam:CreateServiceLinkedRole\" ],
+ \"Resource\": [ \"arn:*:iam::*:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing\"
+ ], \"Effect\": \"Allow\" }, { \"Action\": [ \"iam:PassRole\"
+ ], \"Resource\": [ \"arn:*:iam::*:role/*-worker-role\"
+ ], \"Effect\": \"Allow\" } ] }"
+ type: string
+ storageARN:
+ description: "StorageARN is an ARN value referencing a
+ role appropriate for the Storage Operator. \n The following
+ is an example of a valid policy document: \n { \"Version\":
+ \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\",
+ \"Action\": [ \"ec2:AttachVolume\", \"ec2:CreateSnapshot\",
+ \"ec2:CreateTags\", \"ec2:CreateVolume\", \"ec2:DeleteSnapshot\",
+ \"ec2:DeleteTags\", \"ec2:DeleteVolume\", \"ec2:DescribeInstances\",
+ \"ec2:DescribeSnapshots\", \"ec2:DescribeTags\", \"ec2:DescribeVolumes\",
+ \"ec2:DescribeVolumesModifications\", \"ec2:DetachVolume\",
+ \"ec2:ModifyVolume\" ], \"Resource\": \"*\" } ] }"
+ type: string
+ required:
+ - controlPlaneOperatorARN
+ - imageRegistryARN
+ - ingressARN
+ - kubeCloudControllerARN
+ - networkARN
+ - nodePoolManagementARN
+ - storageARN
+ type: object
+ serviceEndpoints:
+ description: "ServiceEndpoints specifies optional custom endpoints
+ which will override the default service endpoint of specific
+ AWS Services. \n There must be only one ServiceEndpoint
+ for a given service name."
+ items:
+ description: AWSServiceEndpoint stores the configuration
+ for services to override existing defaults of AWS Services.
+ properties:
+ name:
+ description: Name is the name of the AWS service. This
+ must be provided and cannot be empty.
+ type: string
+ url:
+ description: URL is fully qualified URI with scheme
+ https, that overrides the default generated endpoint
+ for a client. This must be provided and cannot be
+ empty.
+ pattern: ^https://
+ type: string
+ required:
+ - name
+ - url
+ type: object
+ type: array
+ required:
+ - region
+ - rolesRef
+ type: object
+ azure:
+ description: Azure defines azure specific settings
+ properties:
+ credentials:
+ description: LocalObjectReference contains enough information
+ to let you locate the referenced object inside the same
+ namespace.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ location:
+ type: string
+ machineIdentityID:
+ type: string
+ resourceGroup:
+ type: string
+ securityGroupName:
+ type: string
+ subnetName:
+ type: string
+ subscriptionID:
+ type: string
+ vnetID:
+ type: string
+ vnetName:
+ type: string
+ required:
+ - credentials
+ - location
+ - machineIdentityID
+ - resourceGroup
+ - securityGroupName
+ - subnetName
+ - subscriptionID
+ - vnetID
+ - vnetName
+ type: object
+ ibmcloud:
+ description: IBMCloud defines IBMCloud specific settings for components
+ properties:
+ providerType:
+ description: ProviderType is a specific supported infrastructure
+ provider within IBM Cloud.
+ type: string
+ type: object
+ powervs:
+ description: PowerVS specifies configuration for clusters running
+ on IBMCloud Power VS Service. This field is immutable. Once
+ set, It can't be changed.
+ properties:
+ accountID:
+ description: AccountID is the IBMCloud account id. This field
+ is immutable. Once set, It can't be changed.
+ type: string
+ cisInstanceCRN:
+ description: CISInstanceCRN is the IBMCloud CIS Service Instance's
+ Cloud Resource Name This field is immutable. Once set, It
+ can't be changed.
+ pattern: '^crn:'
+ type: string
+ ingressOperatorCloudCreds:
+ description: IngressOperatorCloudCreds is a reference to a
+ secret containing ibm cloud credentials for ingress operator
+ to get authenticated with ibm cloud.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ kubeCloudControllerCreds:
+ description: "KubeCloudControllerCreds is a reference to a
+ secret containing cloud credentials with permissions matching
+ the cloud controller policy. This field is immutable. Once
+ set, It can't be changed. \n TODO(dan): document the \"cloud
+ controller policy\""
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ nodePoolManagementCreds:
+ description: "NodePoolManagementCreds is a reference to a
+ secret containing cloud credentials with permissions matching
+ the node pool management policy. This field is immutable.
+ Once set, It can't be changed. \n TODO(dan): document the
+ \"node pool management policy\""
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ region:
+ description: Region is the IBMCloud region in which the cluster
+ resides. This configures the OCP control plane cloud integrations,
+ and is used by NodePool to resolve the correct boot image
+ for a given release. This field is immutable. Once set,
+ It can't be changed.
+ type: string
+ resourceGroup:
+ description: ResourceGroup is the IBMCloud Resource Group
+ in which the cluster resides. This field is immutable. Once
+ set, It can't be changed.
+ type: string
+ serviceInstanceID:
+ description: "ServiceInstance is the reference to the Power
+ VS service on which the server instance(VM) will be created.
+ Power VS service is a container for all Power VS instances
+ at a specific geographic region. serviceInstance can be
+ created via IBM Cloud catalog or CLI. ServiceInstanceID
+ is the unique identifier that can be obtained from IBM Cloud
+ UI or IBM Cloud cli. \n More detail about Power VS service
+ instance. https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server
+ \n This field is immutable. Once set, It can't be changed."
+ type: string
+ storageOperatorCloudCreds:
+ description: StorageOperatorCloudCreds is a reference to a
+ secret containing ibm cloud credentials for storage operator
+ to get authenticated with ibm cloud.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ subnet:
+ description: Subnet is the subnet to use for control plane
+ cloud resources. This field is immutable. Once set, It can't
+ be changed.
+ properties:
+ id:
+ description: ID of resource
+ type: string
+ name:
+ description: Name of resource
+ type: string
+ type: object
+ vpc:
+ description: VPC specifies IBM Cloud PowerVS Load Balancing
+ configuration for the control plane. This field is immutable.
+ Once set, It can't be changed.
+ properties:
+ name:
+                      description: Name of the VPC to be used for all the service
+                        load balancers. This field is immutable. Once set, it
+                        can't be changed.
+ type: string
+ region:
+                      description: Region is the IBMCloud region in which the
+                        VPC is created; this VPC is used for all ingress traffic
+                        into the OCP cluster. This field is immutable. Once
+                        set, it can't be changed.
+ type: string
+ subnet:
+ description: Subnet is the subnet to use for load balancer.
+ This field is immutable. Once set, It can't be changed.
+ type: string
+ zone:
+ description: Zone is the availability zone where load
+ balancer cloud resources are created. This field is
+ immutable. Once set, It can't be changed.
+ type: string
+ required:
+ - name
+ - region
+ type: object
+ zone:
+ description: Zone is the availability zone where control plane
+ cloud resources are created. This field is immutable. Once
+ set, It can't be changed.
+ type: string
+ required:
+ - accountID
+ - cisInstanceCRN
+ - ingressOperatorCloudCreds
+ - kubeCloudControllerCreds
+ - nodePoolManagementCreds
+ - region
+ - resourceGroup
+ - serviceInstanceID
+ - storageOperatorCloudCreds
+ - subnet
+ - vpc
+ - zone
+ type: object
+ type:
+ description: Type is the type of infrastructure provider for the
+ cluster.
+ enum:
+ - AWS
+ - None
+ - IBMCloud
+ - Agent
+ - KubeVirt
+ - Azure
+ - PowerVS
+ type: string
+ required:
+ - type
+ type: object
+ pullSecret:
+ description: LocalObjectReference contains enough information to let
+ you locate the referenced object inside the same namespace.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ releaseImage:
+ description: ReleaseImage is the release image applied to the hosted
+ control plane.
+ type: string
+ secretEncryption:
+ description: SecretEncryption contains metadata about the kubernetes
+ secret encryption strategy being used for the cluster when applicable.
+ properties:
+ aescbc:
+ description: AESCBC defines metadata about the AESCBC secret encryption
+ strategy
+ properties:
+ activeKey:
+ description: ActiveKey defines the active key used to encrypt
+ new secrets
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ backupKey:
+ description: BackupKey defines the old key during the rotation
+ process so previously created secrets can continue to be
+ decrypted until they are all re-encrypted with the active
+ key.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - activeKey
+ type: object
+ kms:
+ description: KMS defines metadata about the kms secret encryption
+ strategy
+ properties:
+ aws:
+ description: AWS defines metadata about the configuration
+ of the AWS KMS Secret Encryption provider
+ properties:
+ activeKey:
+ description: ActiveKey defines the active key used to
+ encrypt new secrets
+ properties:
+ arn:
+ description: ARN is the Amazon Resource Name for the
+ encryption key
+ pattern: '^arn:'
+ type: string
+ required:
+ - arn
+ type: object
+ auth:
+ description: Auth defines metadata about the management
+ of credentials used to interact with AWS KMS
+ properties:
+ credentials:
+ description: Credentials contains the name of the
+ secret that holds the aws credentials that can be
+ used to make the necessary KMS calls. It should
+ at key AWSCredentialsFileSecretKey contain the aws
+ credentials file that can be used to configure AWS
+ SDKs
+ properties:
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - credentials
+ type: object
+ backupKey:
+ description: BackupKey defines the old key during the
+ rotation process so previously created secrets can continue
+ to be decrypted until they are all re-encrypted with
+ the active key.
+ properties:
+ arn:
+ description: ARN is the Amazon Resource Name for the
+ encryption key
+ pattern: '^arn:'
+ type: string
+ required:
+ - arn
+ type: object
+ region:
+ description: Region contains the AWS region
+ type: string
+ required:
+ - activeKey
+ - auth
+ - region
+ type: object
+ ibmcloud:
+ description: IBMCloud defines metadata for the IBM Cloud KMS
+ encryption strategy
+ properties:
+ auth:
+ description: Auth defines metadata for how authentication
+ is done with IBM Cloud KMS
+ properties:
+ managed:
+ description: Managed defines metadata around the service
+ to service authentication strategy for the IBM Cloud
+ KMS system (all provider managed).
+ type: object
+ type:
+ description: Type defines the IBM Cloud KMS authentication
+ strategy
+ enum:
+ - Managed
+ - Unmanaged
+ type: string
+ unmanaged:
+ description: Unmanaged defines the auth metadata the
+ customer provides to interact with IBM Cloud KMS
+ properties:
+ credentials:
+ description: Credentials should reference a secret
+ with a key field of IBMCloudIAMAPIKeySecretKey
+ that contains a apikey to call IBM Cloud KMS
+ APIs
+ properties:
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - credentials
+ type: object
+ required:
+ - type
+ type: object
+ keyList:
+ description: KeyList defines the list of keys used for
+ data encryption
+ items:
+ description: IBMCloudKMSKeyEntry defines metadata for
+ an IBM Cloud KMS encryption key
+ properties:
+ correlationID:
+ description: CorrelationID is an identifier used
+ to track all api call usage from hypershift
+ type: string
+ crkID:
+                        description: CRKID is the customer root key ID
+ type: string
+ instanceID:
+ description: InstanceID is the id for the key protect
+ instance
+ type: string
+ keyVersion:
+ description: KeyVersion is a unique number associated
+ with the key. The number increments whenever a
+ new key is enabled for data encryption.
+ type: integer
+ url:
+ description: URL is the url to call key protect
+ apis over
+ pattern: ^https://
+ type: string
+ required:
+ - correlationID
+ - crkID
+ - instanceID
+ - keyVersion
+ - url
+ type: object
+ type: array
+ region:
+ description: Region is the IBM Cloud region
+ type: string
+ required:
+ - auth
+ - keyList
+ - region
+ type: object
+ provider:
+ description: Provider defines the KMS provider
+ enum:
+ - IBMCloud
+ - AWS
+ type: string
+ required:
+ - provider
+ type: object
+ type:
+ description: Type defines the type of kube secret encryption being
+ used
+ enum:
+ - kms
+ - aescbc
+ type: string
+ required:
+ - type
+ type: object
+ serviceAccountSigningKey:
+ description: ServiceAccountSigningKey is a reference to a secret containing
+ the private key used by the service account token issuer. The secret
+ is expected to contain a single key named "key". If not specified,
+ a service account signing key will be generated automatically for
+ the cluster.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ services:
+ description: Services defines metadata about how control plane services
+ are published in the management cluster.
+ items:
+ description: ServicePublishingStrategyMapping specifies how individual
+ control plane services are published from the hosting cluster
+ of a control plane.
+ properties:
+ service:
+ description: Service identifies the type of service being published.
+ enum:
+ - APIServer
+ - OAuthServer
+ - OIDC
+ - Konnectivity
+ - Ignition
+ - OVNSbDb
+ type: string
+ servicePublishingStrategy:
+ description: ServicePublishingStrategy specifies how to publish
+ Service.
+ properties:
+ loadBalancer:
+ description: LoadBalancer configures exposing a service
+ using a LoadBalancer.
+ properties:
+ hostname:
+ description: Hostname is the name of the DNS record
+ that will be created pointing to the LoadBalancer.
+ type: string
+ type: object
+ nodePort:
+ description: NodePort configures exposing a service using
+ a NodePort.
+ properties:
+ address:
+ description: Address is the host/ip that the NodePort
+ service is exposed over.
+ type: string
+ port:
+ description: Port is the port of the NodePort service.
+ If <=0, the port is dynamically assigned when the
+ service is created.
+ format: int32
+ type: integer
+ required:
+ - address
+ type: object
+ route:
+ description: Route configures exposing a service using a
+ Route.
+ properties:
+ hostname:
+ description: Hostname is the name of the DNS record
+ that will be created pointing to the Route.
+ type: string
+ type: object
+ type:
+ description: Type is the publishing strategy used for the
+ service.
+ enum:
+ - LoadBalancer
+ - NodePort
+ - Route
+ - None
+ type: string
+ required:
+ - type
+ type: object
+ required:
+ - service
+ - servicePublishingStrategy
+ type: object
+ type: array
+ sshKey:
+ description: LocalObjectReference contains enough information to let
+ you locate the referenced object inside the same namespace.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - dns
+ - etcd
+ - infraID
+ - issuerURL
+ - platform
+ - pullSecret
+ - releaseImage
+ - services
+ - sshKey
+ type: object
+ status:
+ description: HostedControlPlaneStatus defines the observed state of HostedControlPlane
+ properties:
+ conditions:
+ description: 'Condition contains details for one aspect of the current
+ state of the HostedControlPlane. Current condition types are: "Available"'
+ items:
+ description: "Condition contains details for one aspect of the current
+ state of this API Resource. --- This struct is intended for direct
+ use as an array at the field path .status.conditions. For example,
+ type FooStatus struct{ // Represents the observations of a foo's
+ current state. // Known .status.conditions.type are: \"Available\",
+ \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge
+ // +listType=map // +listMapKey=type Conditions []metav1.Condition
+ `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\"
+ protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the last time the condition
+ transitioned from one status to another. This should be when
+ the underlying condition changed. If that is not known, then
+ using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: message is a human readable message indicating
+ details about the transition. This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: observedGeneration represents the .metadata.generation
+ that the condition was set based upon. For instance, if .metadata.generation
+ is currently 12, but the .status.conditions[x].observedGeneration
+ is 9, the condition is out of date with respect to the current
+ state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: reason contains a programmatic identifier indicating
+ the reason for the condition's last transition. Producers
+ of specific condition types may define expected values and
+ meanings for this field, and whether the values are considered
+ a guaranteed API. The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ --- Many .condition.type values are consistent across resources
+ like Available, but because arbitrary conditions can be useful
+ (see .node.status.conditions), the ability to deconflict is
+ important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ controlPlaneEndpoint:
+ description: ControlPlaneEndpoint contains the endpoint information
+ by which external clients can access the control plane. This is
+ populated after the infrastructure is ready.
+ properties:
+ host:
+ description: Host is the hostname on which the API server is serving.
+ type: string
+ port:
+ description: Port is the port on which the API server is serving.
+ format: int32
+ type: integer
+ required:
+ - host
+ - port
+ type: object
+ externalManagedControlPlane:
+ default: true
+ description: ExternalManagedControlPlane indicates to cluster-api
+ that the control plane is managed by an external service. https://github.com/kubernetes-sigs/cluster-api/blob/65e5385bffd71bf4aad3cf34a537f11b217c7fab/controllers/machine_controller.go#L468
+ type: boolean
+ initialized:
+ default: false
+ description: Initialized denotes whether or not the control plane
+ has provided a kubeadm-config. Once this condition is marked true,
+ its value is never changed. See the Ready condition for an indication
+ of the current readiness of the cluster's control plane. This satisfies
+ CAPI contract https://github.com/kubernetes-sigs/cluster-api/blob/cd3a694deac89d5ebeb888307deaa61487207aa0/controllers/cluster_controller_phases.go#L238-L252
+ type: boolean
+ kubeConfig:
+ description: KubeConfig is a reference to the secret containing the
+ default kubeconfig for this control plane.
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ kubeadminPassword:
+ description: KubeadminPassword is a reference to the secret containing
+ the initial kubeadmin password for the guest cluster.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ lastReleaseImageTransitionTime:
+ description: "lastReleaseImageTransitionTime is the time of the last
+ update to the current releaseImage property. \n Deprecated: Use
+ versionStatus.history[0].startedTime instead."
+ format: date-time
+ type: string
+ oauthCallbackURLTemplate:
+ description: OAuthCallbackURLTemplate contains a template for the
+ URL to use as a callback for identity providers. The [identity-provider-name]
+ placeholder must be replaced with the name of an identity provider
+ defined on the HostedCluster. This is populated after the infrastructure
+ is ready.
+ type: string
+ ready:
+ default: false
+ description: Ready denotes that the HostedControlPlane API Server
+ is ready to receive requests This satisfies CAPI contract https://github.com/kubernetes-sigs/cluster-api/blob/cd3a694deac89d5ebeb888307deaa61487207aa0/controllers/cluster_controller_phases.go#L226-L230
+ type: boolean
+ releaseImage:
+ description: "ReleaseImage is the release image applied to the hosted
+ control plane. \n Deprecated: Use versionStatus.desired.image instead."
+ type: string
+ version:
+ description: "Version is the semantic version of the release applied
+ by the hosted control plane operator \n Deprecated: Use versionStatus.desired.version
+ instead."
+ type: string
+ versionStatus:
+ description: versionStatus is the status of the release version applied
+ by the hosted control plane operator.
+ properties:
+ availableUpdates:
+ description: availableUpdates contains updates recommended for
+ this cluster. Updates which appear in conditionalUpdates but
+ not in availableUpdates may expose this cluster to known issues.
+ This list may be empty if no updates are recommended, if the
+ update service is unavailable, or if an invalid channel has
+ been specified.
+ items:
+ description: Release represents an OpenShift release image and
+ associated metadata.
+ properties:
+ channels:
+ description: channels is the set of Cincinnati channels
+ to which the release currently belongs.
+ items:
+ type: string
+ type: array
+ image:
+ description: image is a container image location that contains
+ the update. When this field is part of spec, image is
+ optional if version is specified and the availableUpdates
+ field contains a matching version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on a release
+ or the metadata returned by the update API and should
+ be displayed as a link in user interfaces. The URL field
+ may not be set for test or nightly releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec, version
+ is optional if image is specified.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ conditionalUpdates:
+ description: conditionalUpdates contains the list of updates that
+ may be recommended for this cluster if it meets specific required
+ conditions. Consumers interested in the set of updates that
+ are actually recommended for this cluster should use availableUpdates.
+ This list may be empty if no updates are recommended, if the
+ update service is unavailable, or if an empty or invalid channel
+ has been specified.
+ items:
+ description: ConditionalUpdate represents an update which is
+ recommended to some clusters on the version the current cluster
+ is reconciling, but which may not be recommended for the current
+ cluster.
+ properties:
+ conditions:
+ description: 'conditions represents the observations of
+ the conditional update''s current status. Known types
+ are: * Evaluating, for whether the cluster-version operator
+ will attempt to evaluate any risks[].matchingRules. *
+ Recommended, for whether the update is recommended for
+ the current cluster.'
+ items:
+ description: "Condition contains details for one aspect
+ of the current state of this API Resource. --- This
+ struct is intended for direct use as an array at the
+ field path .status.conditions. For example, type FooStatus
+ struct{ // Represents the observations of a foo's current
+ state. // Known .status.conditions.type are: \"Available\",
+ \"Progressing\", and \"Degraded\" // +patchMergeKey=type
+ // +patchStrategy=merge // +listType=map // +listMapKey=type
+ Conditions []metav1.Condition `json:\"conditions,omitempty\"
+ patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
+ \n // other fields }"
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the last time the
+ condition transitioned from one status to another.
+ This should be when the underlying condition changed. If
+ that is not known, then using the time when the
+ API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: message is a human readable message indicating
+ details about the transition. This may be an empty
+ string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: observedGeneration represents the .metadata.generation
+ that the condition was set based upon. For instance,
+ if .metadata.generation is currently 12, but the
+ .status.conditions[x].observedGeneration is 9, the
+ condition is out of date with respect to the current
+ state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: reason contains a programmatic identifier
+ indicating the reason for the condition's last transition.
+ Producers of specific condition types may define
+ expected values and meanings for this field, and
+ whether the values are considered a guaranteed API.
+ The value should be a CamelCase string. This field
+ may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True,
+ False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in
+ foo.example.com/CamelCase. --- Many .condition.type
+ values are consistent across resources like Available,
+ but because arbitrary conditions can be useful (see
+ .node.status.conditions), the ability to deconflict
+ is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ release:
+ description: release is the target of the update.
+ properties:
+ channels:
+ description: channels is the set of Cincinnati channels
+ to which the release currently belongs.
+ items:
+ type: string
+ type: array
+ image:
+ description: image is a container image location that
+ contains the update. When this field is part of spec,
+ image is optional if version is specified and the
+ availableUpdates field contains a matching version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on
+ a release or the metadata returned by the update API
+ and should be displayed as a link in user interfaces.
+ The URL field may not be set for test or nightly releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec,
+ version is optional if image is specified.
+ type: string
+ type: object
+ risks:
+ description: risks represents the range of issues associated
+ with updating to the target release. The cluster-version
+ operator will evaluate all entries, and only recommend
+ the update if there is at least one entry and all entries
+ recommend the update.
+ items:
+ description: ConditionalUpdateRisk represents a reason
+ and cluster-state for not recommending a conditional
+ update.
+ properties:
+ matchingRules:
+ description: matchingRules is a slice of conditions
+ for deciding which clusters match the risk and which
+ do not. The slice is ordered by decreasing precedence.
+ The cluster-version operator will walk the slice
+ in order, and stop after the first it can successfully
+ evaluate. If no condition can be successfully evaluated,
+ the update will not be recommended.
+ items:
+ description: ClusterCondition is a union of typed
+ cluster conditions. The 'type' property determines
+ which of the type-specific properties are relevant.
+ When evaluated on a cluster, the condition may
+ match, not match, or fail to evaluate.
+ properties:
+ promql:
+ description: promQL represents a cluster condition
+ based on PromQL.
+ properties:
+ promql:
+ description: PromQL is a PromQL query classifying
+                                      clusters. This query should return
+ a 1 in the match case and a 0 in the does-not-match
+ case. Queries which return no time series,
+ or which return values besides 0 or 1,
+ are evaluation failures.
+ type: string
+ required:
+ - promql
+ type: object
+ type:
+ description: type represents the cluster-condition
+ type. This defines the members and semantics
+ of any additional properties.
+ enum:
+ - Always
+ - PromQL
+ type: string
+ required:
+ - type
+ type: object
+ minItems: 1
+ type: array
+ x-kubernetes-list-type: atomic
+ message:
+ description: message provides additional information
+ about the risk of updating, in the event that matchingRules
+ match the cluster state. This is only to be consumed
+ by humans. It may contain Line Feed characters (U+000A),
+ which should be rendered as new lines.
+ minLength: 1
+ type: string
+ name:
+ description: name is the CamelCase reason for not
+ recommending a conditional update, in the event
+ that matchingRules match the cluster state.
+ minLength: 1
+ type: string
+ url:
+ description: url contains information about this risk.
+ format: uri
+ minLength: 1
+ type: string
+ required:
+ - matchingRules
+ - message
+ - name
+ - url
+ type: object
+ minItems: 1
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - release
+ - risks
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ desired:
+ description: desired is the version that the cluster is reconciling
+ towards. If the cluster is not yet fully initialized desired
+ will be set with the information available, which may be an
+ image or a tag.
+ properties:
+ channels:
+ description: channels is the set of Cincinnati channels to
+ which the release currently belongs.
+ items:
+ type: string
+ type: array
+ image:
+ description: image is a container image location that contains
+ the update. When this field is part of spec, image is optional
+ if version is specified and the availableUpdates field contains
+ a matching version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on a release
+ or the metadata returned by the update API and should be
+ displayed as a link in user interfaces. The URL field may
+ not be set for test or nightly releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec, version
+ is optional if image is specified.
+ type: string
+ type: object
+ history:
+ description: history contains a list of the most recent versions
+ applied to the cluster. This value may be empty during cluster
+ startup, and then will be updated when a new update is being
+ applied. The newest update is first in the list and it is ordered
+ by recency. Updates in the history have state Completed if the
+ rollout completed - if an update was failing or halfway applied
+ the state will be Partial. Only a limited amount of update history
+ is preserved.
+ items:
+ description: UpdateHistory is a single attempted update to the
+ cluster.
+ properties:
+ acceptedRisks:
+ description: acceptedRisks records risks which were accepted
+                            to initiate the update. For example, it may mention an
+                            Upgradeable=False or missing signature that was overridden
+ via desiredUpdate.force, or an update that was initiated
+ despite not being in the availableUpdates set of recommended
+ update targets.
+ type: string
+ completionTime:
+ description: completionTime, if set, is when the update
+ was fully applied. The update that is currently being
+ applied will have a null completion time. Completion time
+ will always be set for entries that are not the current
+ update (usually to the started time of the next update).
+ format: date-time
+ nullable: true
+ type: string
+ image:
+ description: image is a container image location that contains
+ the update. This value is always populated.
+ type: string
+ startedTime:
+ description: startedTime is the time at which the update
+ was started.
+ format: date-time
+ type: string
+ state:
+ description: state reflects whether the update was fully
+ applied. The Partial state indicates the update is not
+ fully applied, while the Completed state indicates the
+ update was successfully rolled out at least once (all
+ parts of the update successfully applied).
+ type: string
+ verified:
+ description: verified indicates whether the provided update
+ was properly verified before it was installed. If this
+ is false the cluster may not be trusted. Verified does
+ not cover upgradeable checks that depend on the cluster
+ state at the time when the update target was accepted.
+ type: boolean
+ version:
+ description: version is a semantic versioning identifying
+ the update version. If the requested image does not define
+ a version, or if a failure occurs retrieving the image,
+ this value may be empty.
+ type: string
+ required:
+ - completionTime
+ - image
+ - startedTime
+ - state
+ - verified
+ type: object
+ type: array
+ observedGeneration:
+ description: observedGeneration reports which version of the spec
+ is being synced. If this value is not equal to metadata.generation,
+ then the desired and conditions fields may represent a previous
+ version.
+ format: int64
+ type: integer
+ required:
+ - availableUpdates
+ - desired
+ - observedGeneration
+ type: object
required:
- - conditions
- initialized
- ready
type: object
diff --git a/cmd/install/assets/hypershift-operator/hypershift.openshift.io_nodepools.yaml b/cmd/install/assets/hypershift-operator/hypershift.openshift.io_nodepools.yaml
index 12c176e41a9..93e7290e6ee 100644
--- a/cmd/install/assets/hypershift-operator/hypershift.openshift.io_nodepools.yaml
+++ b/cmd/install/assets/hypershift-operator/hypershift.openshift.io_nodepools.yaml
@@ -55,6 +55,8 @@ spec:
jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Message
type: string
+ deprecated: true
+ deprecationWarning: v1alpha1 is a deprecated version for NodePool
name: v1alpha1
schema:
openAPIV3Schema:
@@ -756,8 +758,789 @@ spec:
description: Version is the semantic version of the latest applied
release specified by the NodePool.
type: string
+ type: object
+ type: object
+ served: true
+ storage: false
+ subresources:
+ scale:
+ specReplicasPath: .spec.replicas
+ statusReplicasPath: .status.replicas
+ status: {}
+ - additionalPrinterColumns:
+ - description: Cluster
+ jsonPath: .spec.clusterName
+ name: Cluster
+ type: string
+ - description: Desired Nodes
+ jsonPath: .spec.replicas
+ name: Desired Nodes
+ type: integer
+ - description: Available Nodes
+ jsonPath: .status.replicas
+ name: Current Nodes
+ type: integer
+ - description: Autoscaling Enabled
+ jsonPath: .status.conditions[?(@.type=="AutoscalingEnabled")].status
+ name: Autoscaling
+ type: string
+ - description: Node Autorepair Enabled
+ jsonPath: .status.conditions[?(@.type=="AutorepairEnabled")].status
+ name: Autorepair
+ type: string
+ - description: Current version
+ jsonPath: .status.version
+ name: Version
+ type: string
+ - description: UpdatingVersion in progress
+ jsonPath: .status.conditions[?(@.type=="UpdatingVersion")].status
+ name: UpdatingVersion
+ type: string
+ - description: UpdatingConfig in progress
+ jsonPath: .status.conditions[?(@.type=="UpdatingConfig")].status
+ name: UpdatingConfig
+ type: string
+ - description: Message
+ jsonPath: .status.conditions[?(@.type=="Ready")].message
+ name: Message
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: NodePool is a scalable set of worker nodes attached to a HostedCluster.
+ NodePool machine architectures are uniform within a given pool, and are
+ independent of the control plane’s underlying machine architecture.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Spec is the desired behavior of the NodePool.
+ properties:
+ autoScaling:
+ description: Autoscaling specifies auto-scaling behavior for the NodePool.
+ properties:
+ max:
+ description: Max is the maximum number of nodes allowed in the
+ pool. Must be >= 1.
+ format: int32
+ minimum: 1
+ type: integer
+ min:
+ description: Min is the minimum number of nodes to maintain in
+ the pool. Must be >= 1.
+ format: int32
+ minimum: 1
+ type: integer
+ required:
+ - max
+ - min
+ type: object
+ clusterName:
+ description: "ClusterName is the name of the HostedCluster this NodePool
+ belongs to. \n TODO(dan): Should this be a LocalObjectReference?"
+ type: string
+ config:
+ description: "Config is a list of references to ConfigMaps containing
+ serialized MachineConfig resources to be injected into the ignition
+ configurations of nodes in the NodePool. The MachineConfig API schema
+ is defined here: \n https://github.com/openshift/machine-config-operator/blob/18963e4f8fe66e8c513ca4b131620760a414997f/pkg/apis/machineconfiguration.openshift.io/v1/types.go#L185
+ \n Each ConfigMap must have a single key named \"config\" whose
+ value is the JSON or YAML of a serialized MachineConfig."
+ items:
+ description: LocalObjectReference contains enough information to
+ let you locate the referenced object inside the same namespace.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ management:
+ description: Management specifies behavior for managing nodes in the
+ pool, such as upgrade strategies and auto-repair behaviors.
+ properties:
+ autoRepair:
+ description: AutoRepair specifies whether health checks should
+ be enabled for machines in the NodePool. The default is false.
+ type: boolean
+ inPlace:
+ description: InPlace is the configuration for in-place upgrades.
+ properties:
+ maxUnavailable:
+ anyOf:
+ - type: integer
+ - type: string
+ description: "MaxUnavailable is the maximum number of nodes
+ that can be unavailable during the update. \n Value can
+ be an absolute number (ex: 5) or a percentage of desired
+ nodes (ex: 10%). \n Absolute number is calculated from percentage
+ by rounding down. \n Defaults to 1. \n Example: when this
+ is set to 30%, a max of 30% of the nodes can be made unschedulable/unavailable
+ immediately when the update starts. Once a set of nodes
+ is updated, more nodes can be made unschedulable for update,
+ ensuring that the total number of nodes schedulable at all
+ times during the update is at least 70% of desired nodes."
+ x-kubernetes-int-or-string: true
+ type: object
+ replace:
+ default:
+ rollingUpdate:
+ maxSurge: 1
+ maxUnavailable: 0
+ strategy: RollingUpdate
+ description: Replace is the configuration for rolling upgrades.
+ properties:
+ rollingUpdate:
+ description: RollingUpdate specifies a rolling update strategy
+ which upgrades nodes by creating new nodes and deleting
+ the old ones.
+ properties:
+ maxSurge:
+ anyOf:
+ - type: integer
+ - type: string
+ description: "MaxSurge is the maximum number of nodes
+ that can be provisioned above the desired number of
+ nodes. \n Value can be an absolute number (ex: 5) or
+ a percentage of desired nodes (ex: 10%). \n Absolute
+ number is calculated from percentage by rounding up.
+ \n This can not be 0 if MaxUnavailable is 0. \n Defaults
+ to 1. \n Example: when this is set to 30%, new nodes
+ can be provisioned immediately when the rolling update
+ starts, such that the total number of old and new nodes
+ do not exceed 130% of desired nodes. Once old nodes
+ have been deleted, new nodes can be provisioned, ensuring
+ that total number of nodes running at any time during
+ the update is at most 130% of desired nodes."
+ x-kubernetes-int-or-string: true
+ maxUnavailable:
+ anyOf:
+ - type: integer
+ - type: string
+ description: "MaxUnavailable is the maximum number of
+ nodes that can be unavailable during the update. \n
+ Value can be an absolute number (ex: 5) or a percentage
+ of desired nodes (ex: 10%). \n Absolute number is calculated
+ from percentage by rounding down. \n This can not be
+ 0 if MaxSurge is 0. \n Defaults to 0. \n Example: when
+ this is set to 30%, old nodes can be deleted down to
+ 70% of desired nodes immediately when the rolling update
+                        starts. Once new nodes are ready, more old nodes can be
+ deleted, followed by provisioning new nodes, ensuring
+ that the total number of nodes available at all times
+ during the update is at least 70% of desired nodes."
+ x-kubernetes-int-or-string: true
+ type: object
+ strategy:
+ description: Strategy is the node replacement strategy for
+ nodes in the pool.
+ enum:
+ - RollingUpdate
+ - OnDelete
+ type: string
+ type: object
+ upgradeType:
+ description: UpgradeType specifies the type of strategy for handling
+ upgrades.
+ enum:
+ - Replace
+ - InPlace
+ type: string
+ required:
+ - upgradeType
+ type: object
+ nodeDrainTimeout:
+ description: 'NodeDrainTimeout is the total amount of time that the
+ controller will spend on draining a node. The default value is 0,
+ meaning that the node can be drained without any time limitations.
+ NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`
+ TODO (alberto): Today changing this field will trigger a recreate
+ rolling update, which kind of defeats the purpose of the change.
+ In future we plan to propagate this field in-place. https://github.com/kubernetes-sigs/cluster-api/issues/5880'
+ type: string
+ pausedUntil:
+ description: 'PausedUntil is a field that can be used to pause reconciliation
+ on a resource. Either a date can be provided in RFC3339 format or
+ a boolean. If a date is provided: reconciliation is paused on the
+ resource until that date. If the boolean true is provided: reconciliation
+ is paused on the resource until the field is removed.'
+ type: string
+ platform:
+ description: Platform specifies the underlying infrastructure provider
+ for the NodePool and is used to configure platform specific behavior.
+ properties:
+ agent:
+ description: Agent specifies the configuration used when using
+ Agent platform.
+ properties:
+ agentLabelSelector:
+ description: AgentLabelSelector contains labels that must
+ be set on an Agent in order to be selected for a Machine.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn,
+ Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists or
+ DoesNotExist, the values array must be empty.
+ This array is replaced during a strategic merge
+ patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field is
+ "key", the operator is "In", and the values array contains
+ only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ aws:
+ description: AWS specifies the configuration used when operating
+ on AWS.
+ properties:
+ ami:
+ description: AMI is the image id to use for node instances.
+ If unspecified, the default is chosen based on the NodePool
+ release payload image.
+ type: string
+ instanceProfile:
+ description: InstanceProfile is the AWS EC2 instance profile,
+ which is a container for an IAM role that the EC2 instance
+ uses.
+ type: string
+ instanceType:
+ description: InstanceType is an ec2 instance type for node
+ instances (e.g. m5.large).
+ type: string
+ resourceTags:
+ description: "ResourceTags is an optional list of additional
+ tags to apply to AWS node instances. \n These will be merged
+ with HostedCluster scoped tags, and HostedCluster tags take
+ precedence in case of conflicts. \n See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html
+ for information on tagging AWS resources. AWS supports a
+ maximum of 50 tags per resource. OpenShift reserves 25 tags
+ for its use, leaving 25 tags available for the user."
+ items:
+ description: AWSResourceTag is a tag to apply to AWS resources
+ created for the cluster.
+ properties:
+ key:
+ description: Key is the key of the tag.
+ maxLength: 128
+ minLength: 1
+ pattern: ^[0-9A-Za-z_.:/=+-@]+$
+ type: string
+ value:
+ description: "Value is the value of the tag. \n Some
+ AWS service do not support empty values. Since tags
+ are added to resources in many services, the length
+ of the tag value must meet the requirements of all
+ services."
+ maxLength: 256
+ minLength: 1
+ pattern: ^[0-9A-Za-z_.:/=+-@]+$
+ type: string
+ required:
+ - key
+ - value
+ type: object
+ maxItems: 25
+ type: array
+ rootVolume:
+ description: RootVolume specifies configuration for the root
+ volume of node instances.
+ properties:
+ iops:
+ description: IOPS is the number of IOPS requested for
+ the disk. This is only valid for type io1.
+ format: int64
+ type: integer
+ size:
+ description: "Size specifies size (in Gi) of the storage
+ device. \n Must be greater than the image snapshot size
+ or 8 (whichever is greater)."
+ format: int64
+ minimum: 8
+ type: integer
+ type:
+ description: Type is the type of the volume.
+ type: string
+ required:
+ - size
+ - type
+ type: object
+ securityGroups:
+ description: SecurityGroups is an optional set of security
+ groups to associate with node instances.
+ items:
+ description: AWSResourceReference is a reference to a specific
+ AWS resource by ID, ARN, or filters. Only one of ID, ARN
+ or Filters may be specified. Specifying more than one
+ will result in a validation error.
+ properties:
+ arn:
+ description: ARN of resource
+ type: string
+ filters:
+ description: 'Filters is a set of key/value pairs used
+ to identify a resource They are applied according
+ to the rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ items:
+ description: Filter is a filter used to identify an
+ AWS resource
+ properties:
+ name:
+ description: Name of the filter. Filter names
+ are case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter
+ values. Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ type: array
+ subnet:
+ description: Subnet is the subnet to use for node instances.
+ properties:
+ arn:
+ description: ARN of resource
+ type: string
+ filters:
+ description: 'Filters is a set of key/value pairs used
+ to identify a resource They are applied according to
+ the rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ items:
+ description: Filter is a filter used to identify an
+ AWS resource
+ properties:
+ name:
+ description: Name of the filter. Filter names are
+ case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter
+ values. Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ required:
+ - instanceType
+ type: object
+ azure:
+ properties:
+ availabilityZone:
+ description: AvailabilityZone of the nodepool. Must not be
+ specified for clusters in a location that does not support
+ AvailabilityZone.
+ type: string
+ diskSizeGB:
+ default: 120
+ format: int32
+ minimum: 16
+ type: integer
+ diskStorageAccountType:
+ default: Premium_LRS
+ description: "DiskStorageAccountType is the disk storage account
+ type to use. Valid values are: * Standard_LRS: HDD * StandardSSD_LRS:
+ Standard SSD * Premium_LRS: Premium SDD * UltraSSD_LRS:
+ Ultra SDD \n Defaults to Premium_LRS. For more details,
+ visit the Azure documentation: https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types#disk-type-comparison"
+ enum:
+ - Standard_LRS
+ - StandardSSD_LRS
+ - Premium_LRS
+ - UltraSSD_LRS
+ type: string
+ imageID:
+ description: 'ImageID is the id of the image to boot from.
+ If unset, the default image at the location below will be
+ used: subscription/$subscriptionID/resourceGroups/$resourceGroupName/providers/Microsoft.Compute/images/rhcos.x86_64.vhd'
+ type: string
+ vmsize:
+ type: string
+ required:
+ - vmsize
+ type: object
+ ibmcloud:
+ description: IBMCloud defines IBMCloud specific settings for components
+ properties:
+ providerType:
+ description: ProviderType is a specific supported infrastructure
+ provider within IBM Cloud.
+ type: string
+ type: object
+ kubevirt:
+ description: Kubevirt specifies the configuration used when operating
+ on KubeVirt platform.
+ properties:
+ compute:
+ default:
+ cores: 2
+ memory: 4Gi
+ description: Compute contains values representing the virtual
+ hardware requested for the VM
+ properties:
+ cores:
+ default: 2
+ description: Cores represents how many cores the guest
+ VM should have
+ format: int32
+ type: integer
+ memory:
+ anyOf:
+ - type: integer
+ - type: string
+ default: 4Gi
+ description: Memory represents how much guest memory the
+ VM should have
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ rootVolume:
+ description: RootVolume represents values associated with
+ the VM volume that will host rhcos
+ properties:
+ diskImage:
+ description: Image represents what rhcos image to use
+ for the node pool
+ properties:
+ containerDiskImage:
+ description: ContainerDiskImage is a string representing
+ the container image that holds the root disk
+ type: string
+ type: object
+ persistent:
+ description: Persistent volume type means the VM's storage
+ is backed by a PVC VMs that use persistent volumes can
+ survive disruption events like restart and eviction
+ This is the default type used when no storage type is
+ defined.
+ properties:
+ accessModes:
+ description: 'AccessModes is an array that contains
+ the desired Access Modes the root volume should
+ have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes'
+ items:
+ enum:
+ - ReadWriteOnce
+ - ReadWriteMany
+ - ReadOnly
+ - ReadWriteOncePod
+ type: string
+ type: array
+ size:
+ anyOf:
+ - type: integer
+ - type: string
+ default: 16Gi
+ description: Size is the size of the persistent storage
+ volume
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ storageClass:
+ description: StorageClass is the storageClass used
+ for the underlying PVC that hosts the volume
+ type: string
+ type: object
+ type:
+ default: Persistent
+ description: Type represents the type of storage to associate
+ with the kubevirt VMs.
+ enum:
+ - Persistent
+ type: string
+ type: object
+ required:
+ - rootVolume
+ type: object
+ powervs:
+ description: PowerVS specifies the configuration used when using
+ IBMCloud PowerVS platform.
+ properties:
+ image:
+ description: Image used for deploying the nodes. If unspecified,
+ the default is chosen based on the NodePool release payload
+ image.
+ properties:
+ id:
+ description: ID of resource
+ type: string
+ name:
+ description: Name of resource
+ type: string
+ type: object
+ imageDeletePolicy:
+ default: delete
+ description: "ImageDeletePolicy is policy for the image deletion.
+ \n delete: delete the image from the infrastructure. retain:
+ delete the image from the openshift but retain in the infrastructure.
+ \n The default is delete"
+ enum:
+ - delete
+ - retain
+ type: string
+ memoryGiB:
+ default: 32
+ description: "MemoryGiB is the size of a virtual machine's
+ memory, in GiB. maximum value for the MemoryGiB depends
+ on the selected SystemType. when SystemType is set to e880
+ maximum MemoryGiB value is 7463 GiB. when SystemType is
+ set to e980 maximum MemoryGiB value is 15307 GiB. when SystemType
+ is set to s922 maximum MemoryGiB value is 942 GiB. The minimum
+ memory is 32 GiB. \n When omitted, this means the user has
+ no opinion and the platform is left to choose a reasonable
+ default. The current default is 32."
+ format: int32
+ type: integer
+ processorType:
+ default: shared
+ description: "ProcessorType is the VM instance processor type.
+ It must be set to one of the following values: Dedicated,
+ Capped or Shared. \n Dedicated: resources are allocated
+ for a specific client, The hypervisor makes a 1:1 binding
+ of a partition’s processor to a physical processor core.
+ Shared: Shared among other clients. Capped: Shared, but
+ resources do not expand beyond those that are requested,
+ the amount of CPU time is Capped to the value specified
+ for the entitlement. \n if the processorType is selected
+ as Dedicated, then Processors value cannot be fractional.
+ When omitted, this means that the user has no opinion and
+ the platform is left to choose a reasonable default. The
+ current default is Shared."
+ enum:
+ - dedicated
+ - shared
+ - capped
+ type: string
+ processors:
+ anyOf:
+ - type: integer
+ - type: string
+ default: "0.5"
+ description: Processors is the number of virtual processors
+ in a virtual machine. when the processorType is selected
+ as Dedicated the processors value cannot be fractional.
+ maximum value for the Processors depends on the selected
+ SystemType. when SystemType is set to e880 or e980 maximum
+ Processors value is 143. when SystemType is set to s922
+ maximum Processors value is 15. minimum value for Processors
+ depends on the selected ProcessorType. when ProcessorType
+ is set as Shared or Capped, The minimum processors is 0.5.
+ when ProcessorType is set as Dedicated, The minimum processors
+ is 1. When omitted, this means that the user has no opinion
+ and the platform is left to choose a reasonable default.
+ The default is set based on the selected ProcessorType.
+ when ProcessorType selected as Dedicated, the default is
+ set to 1. when ProcessorType selected as Shared or Capped,
+ the default is set to 0.5.
+ x-kubernetes-int-or-string: true
+ storageType:
+ default: tier1
+ description: "StorageType for the image and nodes, this will
+ be ignored if Image is specified. The storage tiers in PowerVS
+ are based on I/O operations per second (IOPS). It means
+ that the performance of your storage volumes is limited
+ to the maximum number of IOPS based on volume size and storage
+ tier. Although, the exact numbers might change over time,
+ the Tier 3 storage is currently set to 3 IOPS/GB, and the
+ Tier 1 storage is currently set to 10 IOPS/GB. \n The default
+ is tier1"
+ enum:
+ - tier1
+ - tier3
+ type: string
+ systemType:
+ default: s922
+ description: SystemType is the System type used to host the
+ instance. systemType determines the number of cores and
+ memory that is available. Few of the supported SystemTypes
+ are s922,e880,e980. e880 systemType available only in Dallas
+ Datacenters. e980 systemType available in Datacenters except
+ Dallas and Washington. When omitted, this means that the
+ user has no opinion and the platform is left to choose a
+ reasonable default. The current default is s922 which is
+ generally available.
+ type: string
+ type: object
+ type:
+ description: Type specifies the platform name.
+ enum:
+ - AWS
+ - None
+ - IBMCloud
+ - Agent
+ - KubeVirt
+ - Azure
+ - PowerVS
+ type: string
+ required:
+ - type
+ type: object
+ release:
+ description: Release specifies the OCP release used for the NodePool.
+ This informs the ignition configuration for machines, as well as
+ other platform specific machine properties (e.g. an AMI on the AWS
+ platform).
+ properties:
+ image:
+ description: Image is the image pullspec of an OCP release payload
+ image.
+ pattern: ^(\w+\S+)$
+ type: string
+ required:
+ - image
+ type: object
+ replicas:
+ description: Replicas is the desired number of nodes the pool should
+ maintain. If unset, the default value is 0.
+ format: int32
+ type: integer
+ tuningConfig:
+ description: "TuningConfig is a list of references to ConfigMaps containing
+ serialized Tuned resources to define the tuning configuration to
+ be applied to nodes in the NodePool. The Tuned API is defined here:
+ \n https://github.com/openshift/cluster-node-tuning-operator/blob/2c76314fb3cc8f12aef4a0dcd67ddc3677d5b54f/pkg/apis/tuned/v1/tuned_types.go
+ \n Each ConfigMap must have a single key named \"tuned\" whose value
+ is the JSON or YAML of a serialized Tuned."
+ items:
+ description: LocalObjectReference contains enough information to
+ let you locate the referenced object inside the same namespace.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
required:
- - conditions
+ - clusterName
+ - management
+ - platform
+ - release
+ type: object
+ status:
+ description: Status is the latest observed status of the NodePool.
+ properties:
+ conditions:
+ description: Conditions represents the latest available observations
+ of the node pool's current state.
+ items:
+ description: We define our own condition type since metav1.Condition
+ has validation for Reason that might be broken by what we bubble
+ up from CAPI. NodePoolCondition defines an observation of NodePool
+ resource operational state.
+ properties:
+ lastTransitionTime:
+ description: Last time the condition transitioned from one status
+ to another. This should be when the underlying condition changed.
+ If that is not known, then using the time when the API field
+ changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: A human readable message indicating details about
+ the transition. This field may be empty.
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: The reason for the condition's last transition
+ in CamelCase. The specific API may choose whether or not this
+ field is considered a guaranteed API. This field may not be
+ empty.
+ type: string
+ severity:
+ description: Severity provides an explicit classification of
+ Reason code, so the users or machines can immediately understand
+ the current situation and act accordingly. The Severity field
+ MUST be set only when Status=False.
+ type: string
+ status:
+ description: Status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources
+ like Available, but because arbitrary conditions can be useful
+ (see .node.status.conditions), the ability to deconflict is
+ important.
+ type: string
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ type: object
+ type: array
+ replicas:
+ description: Replicas is the latest observed number of nodes in the
+ pool.
+ format: int32
+ type: integer
+ version:
+ description: Version is the semantic version of the latest applied
+ release specified by the NodePool.
+ type: string
type: object
type: object
served: true
diff --git a/cmd/install/assets/hypershift_operator.go b/cmd/install/assets/hypershift_operator.go
index a02ea3b4e28..19bcc3a067d 100644
--- a/cmd/install/assets/hypershift_operator.go
+++ b/cmd/install/assets/hypershift_operator.go
@@ -4,7 +4,7 @@ import (
"fmt"
"github.com/google/uuid"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/pkg/version"
"github.com/openshift/hypershift/support/images"
"github.com/openshift/hypershift/support/metrics"
@@ -1336,7 +1336,7 @@ type HyperShiftValidatingWebhookConfiguration struct {
func (o HyperShiftValidatingWebhookConfiguration) Build() *admissionregistrationv1.ValidatingWebhookConfiguration {
scope := admissionregistrationv1.NamespacedScope
- path := "/validate-hypershift-openshift-io-v1alpha1-hostedcluster"
+ path := "/validate-hypershift-openshift-io-v1beta1-hostedcluster"
sideEffects := admissionregistrationv1.SideEffectClassNone
timeout := int32(10)
validatingWebhookConfiguration := &admissionregistrationv1.ValidatingWebhookConfiguration{
@@ -1363,7 +1363,7 @@ func (o HyperShiftValidatingWebhookConfiguration) Build() *admissionregistration
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{"hypershift.openshift.io"},
- APIVersions: []string{"v1alpha1"},
+ APIVersions: []string{"v1beta1"},
Resources: []string{"hostedclusters"},
Scope: &scope,
},
diff --git a/cmd/install/assets/hypershift_operator_test.go b/cmd/install/assets/hypershift_operator_test.go
index 5d6a01dad5f..eca7306d5ac 100644
--- a/cmd/install/assets/hypershift_operator_test.go
+++ b/cmd/install/assets/hypershift_operator_test.go
@@ -5,7 +5,7 @@ import (
"testing"
. "github.com/onsi/gomega"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
diff --git a/cmd/install/install.go b/cmd/install/install.go
index 9706c6c0c33..b0a57b8cb73 100644
--- a/cmd/install/install.go
+++ b/cmd/install/install.go
@@ -24,18 +24,20 @@ import (
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/yaml"
+ "k8s.io/utils/pointer"
crclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
imageapi "github.com/openshift/api/image/v1"
hyperapi "github.com/openshift/hypershift/api"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/cmd/install/assets"
"github.com/openshift/hypershift/cmd/util"
"github.com/openshift/hypershift/cmd/version"
@@ -49,7 +51,8 @@ type Options struct {
ImageRefsFile string
HyperShiftOperatorReplicas int32
Development bool
- EnableWebhook bool
+ EnableValidatingWebhook bool
+ EnableConversionWebhook bool
Template bool
Format string
ExcludeEtcdManifests bool
@@ -119,7 +122,7 @@ func (o *Options) ApplyDefaults() {
switch {
case o.Development:
o.HyperShiftOperatorReplicas = 0
- case o.EnableWebhook:
+ case o.EnableValidatingWebhook || o.EnableConversionWebhook:
o.HyperShiftOperatorReplicas = 2
default:
o.HyperShiftOperatorReplicas = 1
@@ -140,11 +143,13 @@ func NewCommand() *cobra.Command {
}
opts.PrivatePlatform = string(hyperv1.NonePlatform)
opts.MetricsSet = metrics.DefaultMetricsSet
+ opts.EnableConversionWebhook = true // default to enabling the conversion webhook
cmd.PersistentFlags().StringVar(&opts.Namespace, "namespace", "hypershift", "The namespace in which to install HyperShift")
cmd.PersistentFlags().StringVar(&opts.HyperShiftImage, "hypershift-image", version.HyperShiftImage, "The HyperShift image to deploy")
cmd.PersistentFlags().BoolVar(&opts.Development, "development", false, "Enable tweaks to facilitate local development")
- cmd.PersistentFlags().BoolVar(&opts.EnableWebhook, "enable-webhook", false, "Enable webhook for hypershift API types")
+ cmd.PersistentFlags().BoolVar(&opts.EnableValidatingWebhook, "enable-validating-webhook", false, "Enable webhook for validating hypershift API types")
+ cmd.PersistentFlags().BoolVar(&opts.EnableConversionWebhook, "enable-conversion-webhook", true, "Enable webhook for converting hypershift API types")
cmd.PersistentFlags().BoolVar(&opts.ExcludeEtcdManifests, "exclude-etcd", false, "Leave out etcd manifests")
cmd.PersistentFlags().Var(&opts.PlatformMonitoring, "platform-monitoring", "Select an option for enabling platform cluster monitoring. Valid values are: None, OperatorOnly, All")
cmd.PersistentFlags().BoolVar(&opts.EnableCIDebugOutput, "enable-ci-debug-output", opts.EnableCIDebugOutput, "If extra CI debug output should be enabled")
@@ -289,7 +294,7 @@ func hyperShiftOperatorManifests(opts Options) ([]crclient.Object, error) {
}.Build()
objects = append(objects, operatorRoleBinding)
- if opts.EnableWebhook {
+ if opts.EnableValidatingWebhook {
validatingWebhookConfiguration := assets.HyperShiftValidatingWebhookConfiguration{
Namespace: operatorNamespace,
}.Build()
@@ -417,7 +422,7 @@ func hyperShiftOperatorManifests(opts Options) ([]crclient.Object, error) {
Replicas: opts.HyperShiftOperatorReplicas,
EnableOCPClusterMonitoring: opts.PlatformMonitoring == metrics.PlatformMonitoringAll,
EnableCIDebugOutput: opts.EnableCIDebugOutput,
- EnableWebhook: opts.EnableWebhook,
+ EnableWebhook: opts.EnableValidatingWebhook || opts.EnableConversionWebhook,
PrivatePlatform: opts.PrivatePlatform,
AWSPrivateRegion: opts.AWSPrivateRegion,
AWSPrivateSecret: operatorCredentialsSecret,
@@ -465,6 +470,30 @@ func hyperShiftOperatorManifests(opts Options) ([]crclient.Object, error) {
return false
}
return true
+	}, func(crd *apiextensionsv1.CustomResourceDefinition) {
+		if crd.Spec.Group == "hypershift.openshift.io" {
+			if !opts.EnableConversionWebhook {
+				return
+			}
+			if crd.Annotations == nil { // fix: condition was inverted — writing to a nil map panics, and existing annotations were being wiped
+				crd.Annotations = map[string]string{}
+			}
+			crd.Annotations["service.beta.openshift.io/inject-cabundle"] = "true"
+			crd.Spec.Conversion = &apiextensionsv1.CustomResourceConversion{
+				Strategy: apiextensionsv1.WebhookConverter,
+				Webhook: &apiextensionsv1.WebhookConversion{
+					ClientConfig: &apiextensionsv1.WebhookClientConfig{
+						Service: &apiextensionsv1.ServiceReference{
+							Namespace: operatorNamespace.Name,
+							Name:      operatorService.Name,
+							Port:      pointer.Int32(443),
+							Path:      pointer.String("/convert"),
+						},
+					},
+					ConversionReviewVersions: []string{"v1beta1", "v1alpha1"},
+				},
+			}
+		}
+	})...)
if opts.EnableAdminRBACGeneration {
diff --git a/cmd/install/install_test.go b/cmd/install/install_test.go
index ed37f82fa18..9ee32692442 100644
--- a/cmd/install/install_test.go
+++ b/cmd/install/install_test.go
@@ -4,7 +4,7 @@ import (
"testing"
. "github.com/onsi/gomega"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
)
func TestOptions_Validate(t *testing.T) {
diff --git a/cmd/kubeconfig/create.go b/cmd/kubeconfig/create.go
index 76ee61287cc..e8863663d2d 100644
--- a/cmd/kubeconfig/create.go
+++ b/cmd/kubeconfig/create.go
@@ -16,7 +16,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/cmd/util"
)
diff --git a/cmd/nodepool/agent/create.go b/cmd/nodepool/agent/create.go
index d4a065c23c0..8fc8d8092f5 100644
--- a/cmd/nodepool/agent/create.go
+++ b/cmd/nodepool/agent/create.go
@@ -3,7 +3,7 @@ package agent
import (
"context"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/cmd/nodepool/core"
"github.com/spf13/cobra"
crclient "sigs.k8s.io/controller-runtime/pkg/client"
diff --git a/cmd/nodepool/aws/create.go b/cmd/nodepool/aws/create.go
index e4245da20ba..02b9f6cded4 100644
--- a/cmd/nodepool/aws/create.go
+++ b/cmd/nodepool/aws/create.go
@@ -4,7 +4,7 @@ import (
"context"
"fmt"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/cmd/nodepool/core"
"github.com/spf13/cobra"
crclient "sigs.k8s.io/controller-runtime/pkg/client"
diff --git a/cmd/nodepool/azure/create.go b/cmd/nodepool/azure/create.go
index de104e790f1..59aaed92548 100644
--- a/cmd/nodepool/azure/create.go
+++ b/cmd/nodepool/azure/create.go
@@ -6,7 +6,7 @@ import (
"os/signal"
"syscall"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/cmd/log"
"github.com/openshift/hypershift/cmd/nodepool/core"
"github.com/spf13/cobra"
diff --git a/cmd/nodepool/core/create.go b/cmd/nodepool/core/create.go
index 2d424c036c5..2e1d6907e64 100644
--- a/cmd/nodepool/core/create.go
+++ b/cmd/nodepool/core/create.go
@@ -5,7 +5,7 @@ import (
"fmt"
"os"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/cmd/log"
"github.com/openshift/hypershift/cmd/util"
hyperapi "github.com/openshift/hypershift/support/api"
diff --git a/cmd/nodepool/kubevirt/create.go b/cmd/nodepool/kubevirt/create.go
index f4029920801..c56e78b565d 100644
--- a/cmd/nodepool/kubevirt/create.go
+++ b/cmd/nodepool/kubevirt/create.go
@@ -7,7 +7,7 @@ import (
"github.com/spf13/cobra"
crclient "sigs.k8s.io/controller-runtime/pkg/client"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/cmd/nodepool/core"
)
diff --git a/cmd/util/client.go b/cmd/util/client.go
index f6feb926229..16b660ef80d 100644
--- a/cmd/util/client.go
+++ b/cmd/util/client.go
@@ -4,6 +4,9 @@ import (
"fmt"
"strings"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/discovery"
"k8s.io/client-go/rest"
cr "sigs.k8s.io/controller-runtime"
crclient "sigs.k8s.io/controller-runtime/pkg/client"
@@ -32,10 +35,25 @@ func GetClient() (crclient.Client, error) {
if err != nil {
return nil, fmt.Errorf("unable to get kubernetes config: %w", err)
}
+ discoveryClient, err := discovery.NewDiscoveryClientForConfig(config)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get discovery client: %w", err)
+ }
+ list, err := discoveryClient.ServerResourcesForGroupVersion(schema.GroupVersion{Group: "hypershift.openshift.io", Version: "v1beta1"}.String())
+ if err != nil && !errors.IsNotFound(err) {
+ return nil, fmt.Errorf("cannot discover HyperShift API version: %w", err)
+ }
+ wrapClient := false
+ if err != nil || len(list.APIResources) == 0 {
+ wrapClient = true
+ }
client, err := crclient.New(config, crclient.Options{Scheme: hyperapi.Scheme})
if err != nil {
return nil, fmt.Errorf("unable to get kubernetes client: %w", err)
}
+ if wrapClient {
+ return v1alpha1Client(client), nil
+ }
return client, nil
}
diff --git a/cmd/util/v1alpha1_wrapper.go b/cmd/util/v1alpha1_wrapper.go
new file mode 100644
index 00000000000..6789065937b
--- /dev/null
+++ b/cmd/util/v1alpha1_wrapper.go
@@ -0,0 +1,227 @@
+package util
+
+import (
+ "context"
+ "fmt"
+
+ hyperv1alpha1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime"
+ crclient "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/conversion"
+)
+
+func v1alpha1Client(client crclient.Client) crclient.Client { // wraps client so CLI code can keep using v1beta1 types against a v1alpha1-only cluster
+	return &v1alpha1Wrapper{
+		innerClient: client,
+	}
+}
+
+// v1alpha1Wrapper is a client-side converter that uses the v1beta1 version of the
+// HyperShift API but translates it to v1alpha1 when sending/receiving from the cluster.
+// This allows us to have client v1beta1 code but still be able to interact with clusters
+// where v1beta1 does not exist yet.
+type v1alpha1Wrapper struct {
+ innerClient crclient.Client
+}
+
+func (w *v1alpha1Wrapper) Scheme() *runtime.Scheme { // pure delegation; the scheme is shared with the inner client
+	return w.innerClient.Scheme()
+}
+
+func (w *v1alpha1Wrapper) RESTMapper() meta.RESTMapper { // pure delegation to the inner client's mapper
+	return w.innerClient.RESTMapper()
+}
+
+func (w *v1alpha1Wrapper) Get(ctx context.Context, key crclient.ObjectKey, obj crclient.Object) error { // Get fetches the v1alpha1 form of obj and converts the result back into obj (v1beta1).
+	serverResource := v1alpha1Resource(obj) // empty v1alpha1 stand-in for known hypershift types; obj itself otherwise
+	if err := w.innerClient.Get(ctx, key, serverResource); err != nil {
+		return err
+	}
+	if serverResource != obj { // pointer inequality: a stand-in was substituted, so convert back
+		return convertToV1Beta1(serverResource, obj)
+	}
+	return nil
+}
+
+func (w *v1alpha1Wrapper) List(ctx context.Context, list crclient.ObjectList, opts ...crclient.ListOption) error { // List fetches v1alpha1 items and converts them into the caller's v1beta1 list.
+	serverResource := v1alpha1ListResource(list) // empty v1alpha1 list stand-in for known hypershift list types
+	if err := w.innerClient.List(ctx, serverResource, opts...); err != nil {
+		return err
+	}
+	if serverResource != list { // a stand-in was substituted; copy the items across versions
+		return convertListToV1Beta1(serverResource, list)
+	}
+	return nil
+}
+
+func (w *v1alpha1Wrapper) Create(ctx context.Context, obj crclient.Object, opts ...crclient.CreateOption) error { // Create downgrades obj to v1alpha1, creates it, then converts the server's response back into obj.
+	serverResource := v1alpha1Resource(obj)
+	if serverResource != obj {
+		if err := convertToV1Alpha1(obj, serverResource); err != nil {
+			return err
+		}
+	}
+	if err := w.innerClient.Create(ctx, serverResource, opts...); err != nil {
+		return err
+	}
+	if serverResource != obj { // convert back so the caller sees server-populated fields
+		return convertToV1Beta1(serverResource, obj)
+	}
+	return nil
+}
+
+func (w *v1alpha1Wrapper) Delete(ctx context.Context, obj crclient.Object, opts ...crclient.DeleteOption) error { // Delete downgrades obj to v1alpha1 (for its identifying fields) and issues the delete; no convert-back is needed.
+	serverResource := v1alpha1Resource(obj)
+	if serverResource != obj {
+		if err := convertToV1Alpha1(obj, serverResource); err != nil {
+			return err
+		}
+	}
+	return w.innerClient.Delete(ctx, serverResource, opts...)
+}
+
+func (w *v1alpha1Wrapper) Update(ctx context.Context, obj crclient.Object, opts ...crclient.UpdateOption) error { // Update downgrades obj to v1alpha1, updates it, then converts the server's response back into obj.
+	serverResource := v1alpha1Resource(obj)
+	if serverResource != obj {
+		if err := convertToV1Alpha1(obj, serverResource); err != nil {
+			return err
+		}
+	}
+	if err := w.innerClient.Update(ctx, serverResource, opts...); err != nil {
+		return err
+	}
+	if serverResource != obj { // convert back so the caller sees the updated server state
+		return convertToV1Beta1(serverResource, obj)
+	}
+	return nil
+}
+
+func (w *v1alpha1Wrapper) Patch(ctx context.Context, obj crclient.Object, patch crclient.Patch, opts ...crclient.PatchOption) error { // Patch downgrades obj to v1alpha1, applies the patch, then converts the response back into obj.
+	serverResource := v1alpha1Resource(obj)
+	if serverResource != obj {
+		if err := convertToV1Alpha1(obj, serverResource); err != nil {
+			return err
+		}
+	}
+	if err := w.innerClient.Patch(ctx, serverResource, patch, opts...); err != nil { // NOTE(review): patch data was computed from v1beta1 objects — confirm it applies cleanly to the v1alpha1 shape
+		return err
+	}
+	if serverResource != obj {
+		return convertToV1Beta1(serverResource, obj)
+	}
+	return nil
+}
+
+func (w *v1alpha1Wrapper) DeleteAllOf(ctx context.Context, obj crclient.Object, opts ...crclient.DeleteAllOfOption) error { // obj serves only as a type token here, so the empty v1alpha1 stand-in needs no field conversion
+	serverResource := v1alpha1Resource(obj)
+	return w.innerClient.DeleteAllOf(ctx, serverResource, opts...)
+}
+
+// Status returns the inner client since no CLI code sets status on resources.
+// If this client is ever used for anything other than the CLI, the Status client
+// would also need to handle conversion.
+func (w *v1alpha1Wrapper) Status() crclient.StatusWriter {
+ return w.innerClient.Status()
+}
+
+func v1alpha1Resource(obj crclient.Object) crclient.Object { // returns an empty v1alpha1 counterpart for known v1beta1 types, or obj itself when no mapping exists
+	result := obj
+	switch obj.(type) {
+	case *hyperv1.HostedCluster:
+		result = &hyperv1alpha1.HostedCluster{}
+	case *hyperv1.NodePool:
+		result = &hyperv1alpha1.NodePool{}
+	case *hyperv1.AWSEndpointService:
+		result = &hyperv1alpha1.AWSEndpointService{}
+	case *hyperv1.HostedControlPlane:
+		result = &hyperv1alpha1.HostedControlPlane{} // fix: was hyperv1 (v1beta1), which made convertToV1Beta1 fail for HostedControlPlane
+	}
+	return result
+}
+
+func v1alpha1ListResource(list crclient.ObjectList) crclient.ObjectList { // returns an empty v1alpha1 list counterpart for known v1beta1 list types, or list itself
+	result := list
+	switch list.(type) {
+	case *hyperv1.HostedClusterList:
+		result = &hyperv1alpha1.HostedClusterList{}
+	case *hyperv1.NodePoolList:
+		result = &hyperv1alpha1.NodePoolList{}
+	case *hyperv1.AWSEndpointServiceList:
+		result = &hyperv1alpha1.AWSEndpointServiceList{}
+	case *hyperv1.HostedControlPlaneList:
+		result = &hyperv1alpha1.HostedControlPlaneList{} // fix: was hyperv1 (v1beta1), which made convertListToV1Beta1 a silent no-op for this type
+	}
+	return result
+}
+
+func convertToV1Beta1(src, dest crclient.Object) error { // src must be v1alpha1 (Convertible), dest must be v1beta1 (the conversion Hub)
+	destHub, isHub := dest.(conversion.Hub)
+	if !isHub {
+		return fmt.Errorf("destination resource is not of type v1beta1: %T", dest) // fix: message read "is of not of type"
+	}
+	convertible, isConvertible := src.(conversion.Convertible)
+	if !isConvertible {
+		return fmt.Errorf("source resource is not of type v1alpha1: %T", src)
+	}
+	return convertible.ConvertTo(destHub)
+}
+
+func convertToV1Alpha1(src, dest crclient.Object) error { // src must be v1beta1 (the conversion Hub), dest must be v1alpha1 (Convertible)
+	convertible, isConvertible := dest.(conversion.Convertible)
+	if !isConvertible {
+		return fmt.Errorf("destination resource is not of type v1alpha1: %T", dest)
+	}
+	hub, isHub := src.(conversion.Hub)
+	if !isHub {
+		return fmt.Errorf("source resource is not of type v1beta1: %T", src)
+	}
+	return convertible.ConvertFrom(hub)
+}
+
+func convertListToV1Beta1(src, dest crclient.ObjectList) error { // appends each v1alpha1 item in src to dest as its v1beta1 equivalent; unknown list types pass through untouched
+	switch srcList := src.(type) {
+	case *hyperv1alpha1.HostedClusterList:
+		destList, ok := dest.(*hyperv1.HostedClusterList)
+		if !ok {
+			return fmt.Errorf("unexpected destination list type: %T", dest)
+		}
+		for i := range srcList.Items {
+			destItem := &hyperv1.HostedCluster{}
+			if err := srcList.Items[i].ConvertTo(destItem); err != nil { return err } // fix: conversion error was silently dropped
+			destList.Items = append(destList.Items, *destItem)
+		}
+	case *hyperv1alpha1.NodePoolList:
+		destList, ok := dest.(*hyperv1.NodePoolList)
+		if !ok {
+			return fmt.Errorf("unexpected destination list type: %T", dest)
+		}
+		for i := range srcList.Items {
+			destItem := &hyperv1.NodePool{}
+			if err := srcList.Items[i].ConvertTo(destItem); err != nil { return err } // fix: conversion error was silently dropped
+			destList.Items = append(destList.Items, *destItem)
+		}
+	case *hyperv1alpha1.AWSEndpointServiceList:
+		destList, ok := dest.(*hyperv1.AWSEndpointServiceList)
+		if !ok {
+			return fmt.Errorf("unexpected destination list type: %T", dest)
+		}
+		for i := range srcList.Items {
+			destItem := &hyperv1.AWSEndpointService{}
+			if err := srcList.Items[i].ConvertTo(destItem); err != nil { return err } // fix: conversion error was silently dropped
+			destList.Items = append(destList.Items, *destItem)
+		}
+	case *hyperv1alpha1.HostedControlPlaneList:
+		destList, ok := dest.(*hyperv1.HostedControlPlaneList)
+		if !ok {
+			return fmt.Errorf("unexpected destination list type: %T", dest)
+		}
+		for i := range srcList.Items {
+			destItem := &hyperv1.HostedControlPlane{}
+			if err := srcList.Items[i].ConvertTo(destItem); err != nil { return err } // fix: conversion error was silently dropped
+			destList.Items = append(destList.Items, *destItem)
+		}
+	}
+	return nil
+}
diff --git a/contrib/admission-tracer/main.go b/contrib/admission-tracer/main.go
index 66df46f9524..01069c1a8ea 100644
--- a/contrib/admission-tracer/main.go
+++ b/contrib/admission-tracer/main.go
@@ -9,7 +9,7 @@ import (
"github.com/google/go-cmp/cmp"
hyperapi "github.com/openshift/hypershift/api"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook"
diff --git a/control-plane-operator/controllers/awsprivatelink/awsprivatelink_controller.go b/control-plane-operator/controllers/awsprivatelink/awsprivatelink_controller.go
index 97a232d605b..544f6f0ae9f 100644
--- a/control-plane-operator/controllers/awsprivatelink/awsprivatelink_controller.go
+++ b/control-plane-operator/controllers/awsprivatelink/awsprivatelink_controller.go
@@ -35,7 +35,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
awsutil "github.com/openshift/hypershift/cmd/infra/aws/util"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
"github.com/openshift/hypershift/support/upsert"
@@ -496,7 +496,7 @@ func reconcileAWSEndpointService(ctx context.Context, awsEndpointService *hyperv
}
log.Info("DNS record created", "fqdn", fqdn)
- awsEndpointService.Status.DNSName = fqdn
+ awsEndpointService.Status.DNSNames = []string{fqdn}
awsEndpointService.Status.DNSZoneID = zoneID
return nil
@@ -538,27 +538,26 @@ func (r *AWSEndpointServiceReconciler) delete(ctx context.Context, awsEndpointSe
log.Info("endpoint deleted", "endpointID", endpointID)
}
- fqdn := awsEndpointService.Status.DNSName
zoneID := awsEndpointService.Status.DNSZoneID
if err != nil {
return false, err
}
- if fqdn != "" && zoneID != "" {
- record, err := findRecord(ctx, route53Client, zoneID, fqdn)
- if err != nil {
- return false, err
- }
- if record != nil {
- err = deleteRecord(ctx, route53Client, zoneID, record)
+ for _, fqdn := range awsEndpointService.Status.DNSNames {
+ if fqdn != "" && zoneID != "" {
+ record, err := findRecord(ctx, route53Client, zoneID, fqdn)
if err != nil {
return false, err
}
- log.Info("DNS record deleted", "fqdn", fqdn)
- } else {
- log.Info("no DNS record found", "fqdn", fqdn)
+ if record != nil {
+ err = deleteRecord(ctx, route53Client, zoneID, record)
+ if err != nil {
+ return false, err
+ }
+ log.Info("DNS record deleted", "fqdn", fqdn)
+ } else {
+ log.Info("no DNS record found", "fqdn", fqdn)
+ }
}
- } else {
- log.Info("no DNS status set in AWSEndpointService", "name", awsEndpointService.Name)
}
return true, nil
diff --git a/control-plane-operator/controllers/hostedcontrolplane/autoscaler/reconcile.go b/control-plane-operator/controllers/hostedcontrolplane/autoscaler/reconcile.go
index 30f9da53a3b..273c00c5857 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/autoscaler/reconcile.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/autoscaler/reconcile.go
@@ -3,7 +3,7 @@ package autoscaler
import (
"fmt"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/kas"
"github.com/openshift/hypershift/support/config"
"github.com/openshift/hypershift/support/util"
@@ -16,8 +16,8 @@ import (
k8sutilspointer "k8s.io/utils/pointer"
)
-func ReconcileAutoscalerDeployment(deployment *appsv1.Deployment, hcp *hyperv1.HostedControlPlane, sa *corev1.ServiceAccount, kubeConfigSecret *corev1.Secret, options hyperv1.ClusterAutoscaling, clusterAutoscalerImage, availabilityProberImage string, setDefaultSecurityContext bool) error {
- config.OwnerRefFrom(hcp).ApplyTo(deployment)
+func ReconcileAutoscalerDeployment(deployment *appsv1.Deployment, hcp *hyperv1.HostedControlPlane, sa *corev1.ServiceAccount, kubeConfigSecret *corev1.Secret, options hyperv1.ClusterAutoscaling, clusterAutoscalerImage, availabilityProberImage string, setDefaultSecurityContext bool, ownerRef config.OwnerRef) error {
+ ownerRef.ApplyTo(deployment)
args := []string{
"--cloud-provider=clusterapi",
"--node-group-auto-discovery=clusterapi:namespace=$(MY_NAMESPACE)",
diff --git a/control-plane-operator/controllers/hostedcontrolplane/cloud/aws/params.go b/control-plane-operator/controllers/hostedcontrolplane/cloud/aws/params.go
index 9f68eb2bf0e..3a7781c3059 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/cloud/aws/params.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/cloud/aws/params.go
@@ -3,7 +3,7 @@ package aws
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/config"
)
diff --git a/control-plane-operator/controllers/hostedcontrolplane/cloud/azure/azure.go b/control-plane-operator/controllers/hostedcontrolplane/cloud/azure/azure.go
index 29cab28853f..d730a98be9a 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/cloud/azure/azure.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/cloud/azure/azure.go
@@ -4,7 +4,7 @@ import (
"encoding/json"
"fmt"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
corev1 "k8s.io/api/core/v1"
)
diff --git a/control-plane-operator/controllers/hostedcontrolplane/cloud/powervs/powervs.go b/control-plane-operator/controllers/hostedcontrolplane/cloud/powervs/powervs.go
index aa09b2e9d74..080f5f820bb 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/cloud/powervs/powervs.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/cloud/powervs/powervs.go
@@ -5,7 +5,7 @@ import (
"fmt"
"text/template"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/releaseinfo"
appsv1 "k8s.io/api/apps/v1"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/clusterpolicy/deployment.go b/control-plane-operator/controllers/hostedcontrolplane/clusterpolicy/deployment.go
index 963fb224732..5dfb320a857 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/clusterpolicy/deployment.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/clusterpolicy/deployment.go
@@ -3,7 +3,7 @@ package clusterpolicy
import (
"path"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/clusterpolicy/params.go b/control-plane-operator/controllers/hostedcontrolplane/clusterpolicy/params.go
index 0144c3b1654..dad6e076cd9 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/clusterpolicy/params.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/clusterpolicy/params.go
@@ -5,7 +5,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
configv1 "github.com/openshift/api/config/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/config"
"github.com/openshift/hypershift/support/util"
)
diff --git a/control-plane-operator/controllers/hostedcontrolplane/cno/clusternetworkoperator.go b/control-plane-operator/controllers/hostedcontrolplane/cno/clusternetworkoperator.go
index 854fbce27e1..24beb935091 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/cno/clusternetworkoperator.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/cno/clusternetworkoperator.go
@@ -5,7 +5,7 @@ import (
"github.com/blang/semver"
routev1 "github.com/openshift/api/route/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/kas"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
"github.com/openshift/hypershift/support/config"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/configoperator/params.go b/control-plane-operator/controllers/hostedcontrolplane/configoperator/params.go
index 854252d6af7..07bf54f5574 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/configoperator/params.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/configoperator/params.go
@@ -9,7 +9,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/intstr"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/config"
"github.com/openshift/hypershift/support/util"
)
diff --git a/control-plane-operator/controllers/hostedcontrolplane/configoperator/reconcile.go b/control-plane-operator/controllers/hostedcontrolplane/configoperator/reconcile.go
index 56b45b2204e..714c3fb7694 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/configoperator/reconcile.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/configoperator/reconcile.go
@@ -4,7 +4,7 @@ import (
"fmt"
"path"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/common"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/kas"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/creatorupdate_ownerref_enforcer.go b/control-plane-operator/controllers/hostedcontrolplane/creatorupdate_ownerref_enforcer.go
index bf613635de9..adfff30122a 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/creatorupdate_ownerref_enforcer.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/creatorupdate_ownerref_enforcer.go
@@ -3,7 +3,7 @@ package hostedcontrolplane
import (
"context"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/config"
"github.com/openshift/hypershift/support/upsert"
crclient "sigs.k8s.io/controller-runtime/pkg/client"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/creatorupdate_ownerref_enforcer_test.go b/control-plane-operator/controllers/hostedcontrolplane/creatorupdate_ownerref_enforcer_test.go
index 45f2f20d41b..5c0b60f9193 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/creatorupdate_ownerref_enforcer_test.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/creatorupdate_ownerref_enforcer_test.go
@@ -5,7 +5,7 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/config"
"github.com/openshift/hypershift/support/upsert"
corev1 "k8s.io/api/core/v1"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/cvo/params.go b/control-plane-operator/controllers/hostedcontrolplane/cvo/params.go
index 3b2a6393b99..2ea8d8fea04 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/cvo/params.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/cvo/params.go
@@ -5,7 +5,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
utilpointer "k8s.io/utils/pointer"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/config"
"github.com/openshift/hypershift/support/util"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/cvo/reconcile.go b/control-plane-operator/controllers/hostedcontrolplane/cvo/reconcile.go
index c6ef8725f38..ab20f1aa392 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/cvo/reconcile.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/cvo/reconcile.go
@@ -5,7 +5,7 @@ import (
"path"
"strings"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
prometheusoperatorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/etcd/params.go b/control-plane-operator/controllers/hostedcontrolplane/etcd/params.go
index 5fc5e98514b..9f2ef5d71eb 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/etcd/params.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/etcd/params.go
@@ -5,7 +5,7 @@ import (
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/resource"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/config"
)
diff --git a/control-plane-operator/controllers/hostedcontrolplane/etcd/params_test.go b/control-plane-operator/controllers/hostedcontrolplane/etcd/params_test.go
index 67c70899ca3..2bb3aff5ce6 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/etcd/params_test.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/etcd/params_test.go
@@ -6,7 +6,7 @@ import (
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
)
func TestNewEtcdParams(t *testing.T) {
diff --git a/control-plane-operator/controllers/hostedcontrolplane/etcd/reconcile.go b/control-plane-operator/controllers/hostedcontrolplane/etcd/reconcile.go
index caa5aa201ec..c8b89f33767 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/etcd/reconcile.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/etcd/reconcile.go
@@ -6,7 +6,7 @@ import (
"strconv"
"strings"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
"github.com/openshift/hypershift/support/config"
"github.com/openshift/hypershift/support/metrics"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/etcd/reconcile_test.go b/control-plane-operator/controllers/hostedcontrolplane/etcd/reconcile_test.go
index 94675a97668..1d992938c7d 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/etcd/reconcile_test.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/etcd/reconcile_test.go
@@ -6,7 +6,7 @@ import (
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/config"
)
diff --git a/control-plane-operator/controllers/hostedcontrolplane/hostedcontrolplane_controller.go b/control-plane-operator/controllers/hostedcontrolplane/hostedcontrolplane_controller.go
index 7cf2b009569..012d5fec022 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/hostedcontrolplane_controller.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/hostedcontrolplane_controller.go
@@ -13,7 +13,7 @@ import (
"github.com/blang/semver"
"github.com/go-logr/logr"
routev1 "github.com/openshift/api/route/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/autoscaler"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/cloud/aws"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/cloud/azure"
@@ -227,7 +227,7 @@ func (r *HostedControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
} else {
condition.Status = metav1.ConditionTrue
condition.Message = "Configuration passes validation"
- condition.Reason = hyperv1.HostedClusterAsExpectedReason
+ condition.Reason = hyperv1.AsExpectedReason
}
meta.SetStatusCondition(&hostedControlPlane.Status.Conditions, condition)
}
@@ -237,7 +237,7 @@ func (r *HostedControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
newCondition := metav1.Condition{
Type: string(hyperv1.EtcdAvailable),
Status: metav1.ConditionUnknown,
- Reason: hyperv1.EtcdStatusUnknownReason,
+ Reason: hyperv1.StatusUnknownReason,
}
switch hostedControlPlane.Spec.Etcd.ManagementType {
case hyperv1.Managed:
@@ -305,7 +305,7 @@ func (r *HostedControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
newCondition = metav1.Condition{
Type: string(hyperv1.KubeAPIServerAvailable),
Status: metav1.ConditionFalse,
- Reason: hyperv1.DeploymentNotFoundReason,
+ Reason: hyperv1.NotFoundReason,
Message: "Kube APIServer deployment not found",
}
} else {
@@ -316,7 +316,7 @@ func (r *HostedControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
newCondition = metav1.Condition{
Type: string(hyperv1.KubeAPIServerAvailable),
Status: metav1.ConditionFalse,
- Reason: hyperv1.DeploymentStatusUnknownReason,
+ Reason: hyperv1.StatusUnknownReason,
}
for _, cond := range deployment.Status.Conditions {
if cond.Type == appsv1.DeploymentAvailable {
@@ -331,7 +331,7 @@ func (r *HostedControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
newCondition = metav1.Condition{
Type: string(hyperv1.KubeAPIServerAvailable),
Status: metav1.ConditionFalse,
- Reason: hyperv1.DeploymentWaitingForAvailableReason,
+ Reason: hyperv1.WaitingForAvailableReason,
Message: "Waiting for Kube APIServer deployment to become available",
}
}
@@ -595,6 +595,7 @@ func (r *HostedControlPlaneReconciler) update(ctx context.Context, hostedControl
true,
r.ReleaseProvider.GetRegistryOverrides(),
r.ManagementClusterCapabilities.Has(capabilities.CapabilitySecurityContextConstraint),
+ config.OwnerRefFrom(hostedControlPlane),
); err != nil {
return fmt.Errorf("failed to reconcile ignition server: %w", err)
}
@@ -2695,7 +2696,7 @@ func (r *HostedControlPlaneReconciler) reconcileAutoscaler(ctx context.Context,
autoscalerDeployment := manifests.AutoscalerDeployment(hcp.Namespace)
_, err = createOrUpdate(ctx, r.Client, autoscalerDeployment, func() error {
- return autoscaler.ReconcileAutoscalerDeployment(autoscalerDeployment, hcp, autoscalerServiceAccount, capiKubeConfigSecret, hcp.Spec.Autoscaling, autoscalerImage, availabilityProberImage, r.SetDefaultSecurityContext)
+ return autoscaler.ReconcileAutoscalerDeployment(autoscalerDeployment, hcp, autoscalerServiceAccount, capiKubeConfigSecret, hcp.Spec.Autoscaling, autoscalerImage, availabilityProberImage, r.SetDefaultSecurityContext, config.OwnerRefFrom(hcp))
})
if err != nil {
return fmt.Errorf("failed to reconcile autoscaler deployment: %w", err)
@@ -2751,7 +2752,7 @@ func (r *HostedControlPlaneReconciler) reconcileMachineApprover(ctx context.Cont
deployment := manifests.MachineApproverDeployment(hcp.Namespace)
if _, err := createOrUpdate(ctx, r.Client, deployment, func() error {
- return machineapprover.ReconcileMachineApproverDeployment(deployment, hcp, sa, kubeconfigSecretName, cm, machineApproverImage, availabilityProberImage, r.SetDefaultSecurityContext)
+ return machineapprover.ReconcileMachineApproverDeployment(deployment, hcp, sa, kubeconfigSecretName, cm, machineApproverImage, availabilityProberImage, r.SetDefaultSecurityContext, config.OwnerRefFrom(hcp))
}); err != nil {
return fmt.Errorf("failed to reconcile machine-approver deployment: %w", err)
}
diff --git a/control-plane-operator/controllers/hostedcontrolplane/hostedcontrolplane_controller_test.go b/control-plane-operator/controllers/hostedcontrolplane/hostedcontrolplane_controller_test.go
index 78aa0eea9d7..464658ea567 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/hostedcontrolplane_controller_test.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/hostedcontrolplane_controller_test.go
@@ -7,13 +7,14 @@ import (
"github.com/go-logr/zapr"
. "github.com/onsi/gomega"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ configv1 "github.com/openshift/api/config/v1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/autoscaler"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/common"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
"github.com/openshift/hypershift/support/api"
fakecapabilities "github.com/openshift/hypershift/support/capabilities/fake"
- "github.com/openshift/hypershift/support/globalconfig"
+ "github.com/openshift/hypershift/support/config"
fakereleaseprovider "github.com/openshift/hypershift/support/releaseinfo/fake"
"go.uber.org/zap/zaptest"
appsv1 "k8s.io/api/apps/v1"
@@ -21,7 +22,6 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
@@ -41,29 +41,6 @@ import (
func TestReconcileKubeadminPassword(t *testing.T) {
targetNamespace := "test"
- OAuthConfig := `
-apiVersion: config.openshift.io/v1
-kind: OAuth
-metadata:
- name: "example"
-spec:
- identityProviders:
- - openID:
- claims:
- email:
- - email
- name:
- - clientid1-secret-name
- preferredUsername:
- - preferred_username
- clientID: clientid1
- clientSecret:
- name: clientid1-secret-name
- issuer: https://example.com/identity
- mappingMethod: lookup
- name: IAM
- type: OpenID
-`
testsCases := []struct {
name string
@@ -80,9 +57,27 @@ spec:
},
Spec: hyperv1.HostedControlPlaneSpec{
Configuration: &hyperv1.ClusterConfiguration{
- Items: []runtime.RawExtension{
- {
- Raw: []byte(OAuthConfig),
+ OAuth: &configv1.OAuthSpec{
+ IdentityProviders: []configv1.IdentityProvider{
+ {
+ IdentityProviderConfig: configv1.IdentityProviderConfig{
+ Type: configv1.IdentityProviderTypeOpenID,
+ OpenID: &configv1.OpenIDIdentityProvider{
+ ClientID: "clientid1",
+ ClientSecret: configv1.SecretNameReference{
+ Name: "clientid1-secret-name",
+ },
+ Issuer: "https://example.com/identity",
+ Claims: configv1.OpenIDClaims{
+ Email: []string{"email"},
+ Name: []string{"clientid1-secret-name"},
+ PreferredUsername: []string{"preferred_username"},
+ },
+ },
+ },
+ Name: "IAM",
+ MappingMethod: "lookup",
+ },
},
},
},
@@ -111,11 +106,7 @@ spec:
Client: fakeClient,
Log: ctrl.LoggerFrom(context.TODO()),
}
-
- globalConfig, err := globalconfig.ParseGlobalConfig(context.Background(), tc.hcp.Spec.Configuration)
- g.Expect(err).NotTo(HaveOccurred())
-
- err = r.reconcileKubeadminPassword(context.Background(), tc.hcp, globalConfig.OAuth != nil, controllerutil.CreateOrUpdate)
+ err := r.reconcileKubeadminPassword(context.Background(), tc.hcp, tc.hcp.Spec.Configuration != nil && tc.hcp.Spec.Configuration.OAuth != nil, controllerutil.CreateOrUpdate)
g.Expect(err).NotTo(HaveOccurred())
actualSecret := common.KubeadminPasswordSecret(targetNamespace)
@@ -139,6 +130,7 @@ func TestReconcileAPIServerService(t *testing.T) {
hostname := "test.example.com"
allowCIDR := []hyperv1.CIDRBlock{"1.2.3.4/24"}
allowCIDRString := []string{"1.2.3.4/24"}
+
testsCases := []struct {
name string
hcp *hyperv1.HostedControlPlane
@@ -311,7 +303,7 @@ func TestClusterAutoscalerArgs(t *testing.T) {
hcp := &hyperv1.HostedControlPlane{}
hcp.Name = "name"
hcp.Namespace = "namespace"
- err := autoscaler.ReconcileAutoscalerDeployment(deployment, hcp, sa, secret, test.AutoscalerOptions, "clusterAutoscalerImage", "availabilityProberImage", false)
+ err := autoscaler.ReconcileAutoscalerDeployment(deployment, hcp, sa, secret, test.AutoscalerOptions, "clusterAutoscalerImage", "availabilityProberImage", false, config.OwnerRefFrom(hcp))
if err != nil {
t.Error(err)
}
@@ -522,7 +514,7 @@ func TestEtcdRestoredCondition(t *testing.T) {
func TestEventHandling(t *testing.T) {
t.Parallel()
- rawHCP := `apiVersion: hypershift.openshift.io/v1alpha1
+ rawHCP := `apiVersion: hypershift.openshift.io/v1beta1
kind: HostedControlPlane
metadata:
annotations:
diff --git a/control-plane-operator/controllers/hostedcontrolplane/ignition/params.go b/control-plane-operator/controllers/hostedcontrolplane/ignition/params.go
index 2d66a08bb90..ce87102c017 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/ignition/params.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/ignition/params.go
@@ -1,7 +1,7 @@
package ignition
import (
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/config"
)
diff --git a/control-plane-operator/controllers/hostedcontrolplane/ignitionserver/ignitionserver.go b/control-plane-operator/controllers/hostedcontrolplane/ignitionserver/ignitionserver.go
index a039e8aeb0b..451246f92f4 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/ignitionserver/ignitionserver.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/ignitionserver/ignitionserver.go
@@ -9,7 +9,7 @@ import (
"strings"
routev1 "github.com/openshift/api/route/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/hypershift-operator/controllers/manifests/controlplaneoperator"
"github.com/openshift/hypershift/hypershift-operator/controllers/manifests/ignitionserver"
hyperutil "github.com/openshift/hypershift/hypershift-operator/controllers/util"
@@ -39,6 +39,7 @@ func ReconcileIgnitionServer(ctx context.Context,
hasHealthzHandler bool,
registryOverrides map[string]string,
managementClusterHasCapabilitySecurityContextConstraint bool,
+ ownerRef config.OwnerRef,
) error {
log := ctrl.LoggerFrom(ctx)
@@ -397,6 +398,7 @@ func ReconcileIgnitionServer(ctx context.Context,
// Reconcile PodMonitor
podMonitor := ignitionserver.PodMonitor(controlPlaneNamespace)
+ ownerRef.ApplyTo(podMonitor)
if result, err := createOrUpdate(ctx, c, podMonitor, func() error {
podMonitor.Spec.Selector = *ignitionServerDeployment.Spec.Selector
podMonitor.Spec.PodMetricsEndpoints = []prometheusoperatorv1.PodMetricsEndpoint{{
@@ -404,12 +406,6 @@ func ReconcileIgnitionServer(ctx context.Context,
Port: "metrics",
}}
podMonitor.Spec.NamespaceSelector = prometheusoperatorv1.NamespaceSelector{MatchNames: []string{controlPlaneNamespace}}
- podMonitor.SetOwnerReferences([]metav1.OwnerReference{{
- APIVersion: hyperv1.GroupVersion.String(),
- Kind: "HostedControlPlane",
- Name: hcp.Name,
- UID: hcp.UID,
- }})
if podMonitor.Annotations == nil {
podMonitor.Annotations = map[string]string{}
}
diff --git a/control-plane-operator/controllers/hostedcontrolplane/ignitionserver/ignitionserver_test.go b/control-plane-operator/controllers/hostedcontrolplane/ignitionserver/ignitionserver_test.go
index 7e03221a66f..8e5f4cfb9f4 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/ignitionserver/ignitionserver_test.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/ignitionserver/ignitionserver_test.go
@@ -4,7 +4,7 @@ import (
"testing"
. "github.com/onsi/gomega"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/hypershift-operator/controllers/manifests/ignitionserver"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/ingress/router.go b/control-plane-operator/controllers/hostedcontrolplane/ingress/router.go
index 09b4a29a337..13b784f0d64 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/ingress/router.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/ingress/router.go
@@ -11,7 +11,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
utilpointer "k8s.io/utils/pointer"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/common"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
"github.com/openshift/hypershift/support/config"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/ingressoperator/ingressoperator.go b/control-plane-operator/controllers/hostedcontrolplane/ingressoperator/ingressoperator.go
index b930b110746..043f28c90c1 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/ingressoperator/ingressoperator.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/ingressoperator/ingressoperator.go
@@ -3,7 +3,7 @@ package ingressoperator
import (
"fmt"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/kas"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/konnectivity"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/kas/aws_kms.go b/control-plane-operator/controllers/hostedcontrolplane/kas/aws_kms.go
index 4d849a394d9..18e2b9d9613 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/kas/aws_kms.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/kas/aws_kms.go
@@ -7,7 +7,7 @@ import (
"path"
"time"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/api"
"github.com/openshift/hypershift/support/util"
corev1 "k8s.io/api/core/v1"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/kas/config.go b/control-plane-operator/controllers/hostedcontrolplane/kas/config.go
index 0e671cc4238..f6aa99e496c 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/kas/config.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/kas/config.go
@@ -6,7 +6,7 @@ import (
"path"
"github.com/blang/semver"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/kas/deployment.go b/control-plane-operator/controllers/hostedcontrolplane/kas/deployment.go
index 6a1fa962e8e..b67d251dc7c 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/kas/deployment.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/kas/deployment.go
@@ -4,7 +4,7 @@ import (
"fmt"
"path"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/kas/ibmcloud_kms.go b/control-plane-operator/controllers/hostedcontrolplane/kas/ibmcloud_kms.go
index 3c0fa7b5a53..273610fa5c4 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/kas/ibmcloud_kms.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/kas/ibmcloud_kms.go
@@ -8,7 +8,7 @@ import (
"strconv"
"time"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
"github.com/openshift/hypershift/support/api"
"github.com/openshift/hypershift/support/util"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/kas/params.go b/control-plane-operator/controllers/hostedcontrolplane/kas/params.go
index 07b1068df92..778b037d46f 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/kas/params.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/kas/params.go
@@ -5,7 +5,7 @@ import (
"fmt"
configv1 "github.com/openshift/api/config/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/cloud/aws"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/cloud/azure"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/kas/params_test.go b/control-plane-operator/controllers/hostedcontrolplane/kas/params_test.go
index a516052a275..fbd5cebd769 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/kas/params_test.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/kas/params_test.go
@@ -8,7 +8,7 @@ import (
"k8s.io/utils/pointer"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/config"
)
diff --git a/control-plane-operator/controllers/hostedcontrolplane/kas/pdb.go b/control-plane-operator/controllers/hostedcontrolplane/kas/pdb.go
index 396d853e5bb..a40cc185660 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/kas/pdb.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/kas/pdb.go
@@ -1,7 +1,7 @@
package kas
import (
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/kas/secretencryption.go b/control-plane-operator/controllers/hostedcontrolplane/kas/secretencryption.go
index 22b47472a00..4de755bf25d 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/kas/secretencryption.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/kas/secretencryption.go
@@ -2,7 +2,8 @@ package kas
import (
"fmt"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
hcpconfig "github.com/openshift/hypershift/support/config"
corev1 "k8s.io/api/core/v1"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/kas/service.go b/control-plane-operator/controllers/hostedcontrolplane/kas/service.go
index 8166cb3374c..2e7471c32b9 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/kas/service.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/kas/service.go
@@ -10,7 +10,7 @@ import (
"k8s.io/apimachinery/pkg/util/duration"
"k8s.io/apimachinery/pkg/util/intstr"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/events"
"github.com/openshift/hypershift/support/util"
)
diff --git a/control-plane-operator/controllers/hostedcontrolplane/kcm/deployment.go b/control-plane-operator/controllers/hostedcontrolplane/kcm/deployment.go
index 18e4af44449..af66e8a19fe 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/kcm/deployment.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/kcm/deployment.go
@@ -5,7 +5,7 @@ import (
"path"
"strings"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/kcm/params.go b/control-plane-operator/controllers/hostedcontrolplane/kcm/params.go
index 8dcfec8df5b..59217589df0 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/kcm/params.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/kcm/params.go
@@ -8,7 +8,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
configv1 "github.com/openshift/api/config/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/cloud/aws"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/cloud/azure"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/konnectivity/params.go b/control-plane-operator/controllers/hostedcontrolplane/konnectivity/params.go
index 3be79554a24..2fa59df4adf 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/konnectivity/params.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/konnectivity/params.go
@@ -6,7 +6,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/pointer"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/config"
)
diff --git a/control-plane-operator/controllers/hostedcontrolplane/konnectivity/reconcile.go b/control-plane-operator/controllers/hostedcontrolplane/konnectivity/reconcile.go
index 60232093e58..4008ae8dc97 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/konnectivity/reconcile.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/konnectivity/reconcile.go
@@ -18,7 +18,7 @@ import (
routev1 "github.com/openshift/api/route/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/ingress"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/pki"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/machineapprover/reconcile.go b/control-plane-operator/controllers/hostedcontrolplane/machineapprover/reconcile.go
index 619b31e19a2..b73c8b7f24f 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/machineapprover/reconcile.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/machineapprover/reconcile.go
@@ -1,7 +1,7 @@
package machineapprover
import (
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/kas"
"github.com/openshift/hypershift/support/config"
"github.com/openshift/hypershift/support/util"
@@ -72,8 +72,8 @@ func ReconcileMachineApproverRoleBinding(binding *rbacv1.RoleBinding, role *rbac
return nil
}
-func ReconcileMachineApproverDeployment(deployment *appsv1.Deployment, hcp *hyperv1.HostedControlPlane, sa *corev1.ServiceAccount, kubeconfigSecretName string, cm *corev1.ConfigMap, machineApproverImage, availabilityProberImage string, setDefaultSecurityContext bool) error {
- config.OwnerRefFrom(hcp).ApplyTo(deployment)
+func ReconcileMachineApproverDeployment(deployment *appsv1.Deployment, hcp *hyperv1.HostedControlPlane, sa *corev1.ServiceAccount, kubeconfigSecretName string, cm *corev1.ConfigMap, machineApproverImage, availabilityProberImage string, setDefaultSecurityContext bool, ownerRef config.OwnerRef) error {
+ ownerRef.ApplyTo(deployment)
// TODO: enable leader election when the flag is added in machine-approver
args := []string{
diff --git a/control-plane-operator/controllers/hostedcontrolplane/manifests/kas.go b/control-plane-operator/controllers/hostedcontrolplane/manifests/kas.go
index 8ee899f3573..84279256351 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/manifests/kas.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/manifests/kas.go
@@ -10,7 +10,7 @@ import (
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
)
const (
diff --git a/control-plane-operator/controllers/hostedcontrolplane/mcs/params.go b/control-plane-operator/controllers/hostedcontrolplane/mcs/params.go
index 30a903ad694..1e49451028a 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/mcs/params.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/mcs/params.go
@@ -4,7 +4,7 @@ import (
configv1 "github.com/openshift/api/config/v1"
corev1 "k8s.io/api/core/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/config"
"github.com/openshift/hypershift/support/globalconfig"
)
diff --git a/control-plane-operator/controllers/hostedcontrolplane/oapi/deployment.go b/control-plane-operator/controllers/hostedcontrolplane/oapi/deployment.go
index 302e01f9f14..427c7d29201 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/oapi/deployment.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/oapi/deployment.go
@@ -6,7 +6,7 @@ import (
"path"
"strings"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/oapi/oauth_deployment.go b/control-plane-operator/controllers/hostedcontrolplane/oapi/oauth_deployment.go
index d5a102abeb4..69d0e3fb53c 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/oapi/oauth_deployment.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/oapi/oauth_deployment.go
@@ -4,7 +4,7 @@ import (
"fmt"
"path"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/oapi/params.go b/control-plane-operator/controllers/hostedcontrolplane/oapi/params.go
index 8449465802e..9f8a649dd65 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/oapi/params.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/oapi/params.go
@@ -6,7 +6,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
configv1 "github.com/openshift/api/config/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/config"
"github.com/openshift/hypershift/support/globalconfig"
"github.com/openshift/hypershift/support/util"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/oapi/pdb.go b/control-plane-operator/controllers/hostedcontrolplane/oapi/pdb.go
index a3f35aa2482..532d3e1e660 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/oapi/pdb.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/oapi/pdb.go
@@ -1,7 +1,7 @@
package oapi
import (
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/oapi/service.go b/control-plane-operator/controllers/hostedcontrolplane/oapi/service.go
index f278b5deea3..7acd383dc71 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/oapi/service.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/oapi/service.go
@@ -1,7 +1,7 @@
package oapi
import (
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/oauth/deployment.go b/control-plane-operator/controllers/hostedcontrolplane/oauth/deployment.go
index 3ab9d81f666..a93687c7874 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/oauth/deployment.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/oauth/deployment.go
@@ -6,7 +6,7 @@ import (
"path"
"strings"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/globalconfig"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/oauth/params.go b/control-plane-operator/controllers/hostedcontrolplane/oauth/params.go
index f1d66bc02be..c0fa56f06be 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/oauth/params.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/oauth/params.go
@@ -11,7 +11,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
configv1 "github.com/openshift/api/config/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
"github.com/openshift/hypershift/support/config"
"github.com/openshift/hypershift/support/util"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/oauth/pdb.go b/control-plane-operator/controllers/hostedcontrolplane/oauth/pdb.go
index 2ea2242688c..a4697ff3d7a 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/oauth/pdb.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/oauth/pdb.go
@@ -1,7 +1,7 @@
package oauth
import (
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/oauth/route.go b/control-plane-operator/controllers/hostedcontrolplane/oauth/route.go
index a9d06720225..29e478728d9 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/oauth/route.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/oauth/route.go
@@ -3,7 +3,7 @@ package oauth
import (
routev1 "github.com/openshift/api/route/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
"github.com/openshift/hypershift/support/config"
"github.com/openshift/hypershift/support/util"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/oauth/service.go b/control-plane-operator/controllers/hostedcontrolplane/oauth/service.go
index e435b8ec31b..0e44dc186fe 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/oauth/service.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/oauth/service.go
@@ -9,7 +9,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
routev1 "github.com/openshift/api/route/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/config"
)
diff --git a/control-plane-operator/controllers/hostedcontrolplane/ocm/deployment.go b/control-plane-operator/controllers/hostedcontrolplane/ocm/deployment.go
index f68df253c71..37948475665 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/ocm/deployment.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/ocm/deployment.go
@@ -4,7 +4,7 @@ import (
"fmt"
"path"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/ocm/params.go b/control-plane-operator/controllers/hostedcontrolplane/ocm/params.go
index 3c11878ccd5..4e269d39021 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/ocm/params.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/ocm/params.go
@@ -5,7 +5,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
configv1 "github.com/openshift/api/config/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/config"
"github.com/openshift/hypershift/support/globalconfig"
)
diff --git a/control-plane-operator/controllers/hostedcontrolplane/olm/catalogs.go b/control-plane-operator/controllers/hostedcontrolplane/olm/catalogs.go
index 9e33732cab1..6101b1a61a7 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/olm/catalogs.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/olm/catalogs.go
@@ -4,7 +4,7 @@ import (
"fmt"
"math/big"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
prometheusoperatorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
appsv1 "k8s.io/api/apps/v1"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/olm/operator.go b/control-plane-operator/controllers/hostedcontrolplane/olm/operator.go
index 4211b52255d..216f375662e 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/olm/operator.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/olm/operator.go
@@ -3,7 +3,7 @@ package olm
import (
"strings"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
prometheusoperatorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
appsv1 "k8s.io/api/apps/v1"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/olm/params.go b/control-plane-operator/controllers/hostedcontrolplane/olm/params.go
index eb1779c7279..063004c8847 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/olm/params.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/olm/params.go
@@ -1,7 +1,7 @@
package olm
import (
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/config"
"github.com/openshift/hypershift/support/util"
"k8s.io/utils/pointer"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/pki/params.go b/control-plane-operator/controllers/hostedcontrolplane/pki/params.go
index 824831bbc82..d6d97139274 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/pki/params.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/pki/params.go
@@ -3,7 +3,7 @@ package pki
import (
"fmt"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/config"
"github.com/openshift/hypershift/support/util"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/scheduler/deployment.go b/control-plane-operator/controllers/hostedcontrolplane/scheduler/deployment.go
index 6859551e63a..94fa3dc4a1f 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/scheduler/deployment.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/scheduler/deployment.go
@@ -5,7 +5,7 @@ import (
"path"
"strings"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
diff --git a/control-plane-operator/controllers/hostedcontrolplane/scheduler/params.go b/control-plane-operator/controllers/hostedcontrolplane/scheduler/params.go
index 91e924002c3..a20b5825a81 100644
--- a/control-plane-operator/controllers/hostedcontrolplane/scheduler/params.go
+++ b/control-plane-operator/controllers/hostedcontrolplane/scheduler/params.go
@@ -8,7 +8,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
configv1 "github.com/openshift/api/config/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
"github.com/openshift/hypershift/support/config"
"github.com/openshift/hypershift/support/util"
diff --git a/control-plane-operator/hostedclusterconfigoperator/api/scheme.go b/control-plane-operator/hostedclusterconfigoperator/api/scheme.go
index 4d2ff4f27e7..f6997e06673 100644
--- a/control-plane-operator/hostedclusterconfigoperator/api/scheme.go
+++ b/control-plane-operator/hostedclusterconfigoperator/api/scheme.go
@@ -10,7 +10,7 @@ import (
osinv1 "github.com/openshift/api/osin/v1"
routev1 "github.com/openshift/api/route/v1"
securityv1 "github.com/openshift/api/security/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
prometheusoperatorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
corev1 "k8s.io/api/core/v1"
diff --git a/control-plane-operator/hostedclusterconfigoperator/cmd.go b/control-plane-operator/hostedclusterconfigoperator/cmd.go
index 3bb0f4cce1b..780c0e2f416 100644
--- a/control-plane-operator/hostedclusterconfigoperator/cmd.go
+++ b/control-plane-operator/hostedclusterconfigoperator/cmd.go
@@ -19,7 +19,7 @@ import (
"io/ioutil"
"os"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/api"
"github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/configmetrics"
"github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/controllers/cmca"
diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/hcpstatus/hcpstatus.go b/control-plane-operator/hostedclusterconfigoperator/controllers/hcpstatus/hcpstatus.go
index 467204dc2b2..cfcf8e8358f 100644
--- a/control-plane-operator/hostedclusterconfigoperator/controllers/hcpstatus/hcpstatus.go
+++ b/control-plane-operator/hostedclusterconfigoperator/controllers/hcpstatus/hcpstatus.go
@@ -4,14 +4,11 @@ import (
"context"
"fmt"
"reflect"
- "time"
configv1 "github.com/openshift/api/config/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
- "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/common"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/operator"
"github.com/openshift/hypershift/support/releaseinfo"
- corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@@ -81,12 +78,26 @@ func (h *hcpStatusReconciler) reconcile(ctx context.Context, hcp *hyperv1.Hosted
log.Info("Reconciling hosted cluster version conditions")
var clusterVersion configv1.ClusterVersion
err := h.hostedClusterClient.Get(ctx, crclient.ObjectKey{Name: "version"}, &clusterVersion)
+ // We check err in loop below to build conditions with ConditionUnknown status for all types.
+ if err == nil {
+ hcp.Status.VersionStatus = &hyperv1.ClusterVersionStatus{
+ Desired: clusterVersion.Status.Desired,
+ History: clusterVersion.Status.History,
+ ObservedGeneration: clusterVersion.Status.ObservedGeneration,
+ AvailableUpdates: clusterVersion.Status.AvailableUpdates,
+ ConditionalUpdates: clusterVersion.Status.ConditionalUpdates,
+ }
+ //lint:ignore SA1019 populate the deprecated property until we can drop it in a later API version
+ hcp.Status.Version = hcp.Status.VersionStatus.Desired.Version
+ //lint:ignore SA1019 populate the deprecated property until we can drop it in a later API version
+ hcp.Status.ReleaseImage = hcp.Status.VersionStatus.Desired.Image
+ }
failingCondition := func() metav1.Condition {
if err != nil {
return metav1.Condition{
Type: string(hyperv1.ClusterVersionFailing),
Status: metav1.ConditionUnknown,
- Reason: hyperv1.ClusterVersionStatusUnknownReason,
+ Reason: hyperv1.StatusUnknownReason,
Message: fmt.Sprintf("failed to get clusterversion: %v", err),
}
}
@@ -118,7 +129,7 @@ func (h *hcpStatusReconciler) reconcile(ctx context.Context, hcp *hyperv1.Hosted
return metav1.Condition{
Type: string(hyperv1.ClusterVersionUpgradeable),
Status: metav1.ConditionUnknown,
- Reason: hyperv1.ClusterVersionStatusUnknownReason,
+ Reason: hyperv1.StatusUnknownReason,
Message: fmt.Sprintf("failed to get clusterversion: %v", err),
}
}
@@ -146,45 +157,5 @@ func (h *hcpStatusReconciler) reconcile(ctx context.Context, hcp *hyperv1.Hosted
meta.SetStatusCondition(&hcp.Status.Conditions, upgradeableCondition)
log.Info("Finished reconciling hosted cluster version conditions")
- // If a rollout is in progress, compute and record the rollout status. The
- // image version will be considered rolled out if the hosted CVO reports
- // having completed the rollout of the semantic version matching the release
- // image specified on the HCP.
- if hcp.Status.ReleaseImage != hcp.Spec.ReleaseImage {
- releaseImage, err := h.lookupReleaseImage(ctx, hcp)
- if err != nil {
- return fmt.Errorf("failed to look up release image: %w", err)
- }
-
- timeout, cancel := context.WithTimeout(ctx, 2*time.Second)
- defer cancel()
- var clusterVersion configv1.ClusterVersion
- if err := h.hostedClusterClient.Get(timeout, crclient.ObjectKey{Name: "version"}, &clusterVersion); err != nil {
- log.Info("failed to get clusterversion, can't determine image version rollout status", "error", err)
- } else {
- versionHistory := clusterVersion.Status.History
- if len(versionHistory) > 0 &&
- versionHistory[0].Version == releaseImage.Version() &&
- versionHistory[0].State == configv1.CompletedUpdate {
- // Rollout to the desired release image version is complete, so record
- // that fact on the HCP status.
- now := metav1.NewTime(time.Now())
- hcp.Status.ReleaseImage = hcp.Spec.ReleaseImage
- hcp.Status.Version = releaseImage.Version()
- hcp.Status.LastReleaseImageTransitionTime = &now
- }
- }
- }
-
return nil
}
-
-func (h *hcpStatusReconciler) lookupReleaseImage(ctx context.Context, hcp *hyperv1.HostedControlPlane) (*releaseinfo.ReleaseImage, error) {
- pullSecret := common.PullSecret(hcp.Namespace)
- if err := h.mgtClusterClient.Get(ctx, crclient.ObjectKeyFromObject(pullSecret), pullSecret); err != nil {
- return nil, err
- }
- lookupCtx, lookupCancel := context.WithTimeout(ctx, 2*time.Minute)
- defer lookupCancel()
- return h.releaseProvider.Lookup(lookupCtx, hcp.Spec.ReleaseImage, pullSecret.Data[corev1.DockerConfigJsonKey])
-}
diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/clusteroperators.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/clusteroperators.go
index da0ccaf01f3..3fff6e5830d 100644
--- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/clusteroperators.go
+++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/clusteroperators.go
@@ -6,7 +6,7 @@ import (
"sort"
configv1 "github.com/openshift/api/config/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
)
diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/params.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/params.go
index c221015e0d4..d1a003adbda 100644
--- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/params.go
+++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/params.go
@@ -2,7 +2,7 @@ package ingress
import (
configv1 "github.com/openshift/api/config/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/globalconfig"
)
diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/reconcile.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/reconcile.go
index 7822e5f33fd..a55549c6e88 100644
--- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/reconcile.go
+++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/reconcile.go
@@ -6,7 +6,7 @@ import (
corev1 "k8s.io/api/core/v1"
operatorv1 "github.com/openshift/api/operator/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests"
)
diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/reconcile_test.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/reconcile_test.go
index f29293d8fd0..f4d1341a139 100644
--- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/reconcile_test.go
+++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/reconcile_test.go
@@ -5,7 +5,7 @@ import (
. "github.com/onsi/gomega"
operatorv1 "github.com/openshift/api/operator/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
corev1 "k8s.io/api/core/v1"
)
diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/konnectivity/params.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/konnectivity/params.go
index dd9677368ba..fb4695b08c8 100644
--- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/konnectivity/params.go
+++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/konnectivity/params.go
@@ -5,7 +5,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/intstr"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/config"
)
diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/konnectivity/reconcile.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/konnectivity/reconcile.go
index dde219dfe2b..5ac1103af3d 100644
--- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/konnectivity/reconcile.go
+++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/konnectivity/reconcile.go
@@ -11,7 +11,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
"github.com/openshift/hypershift/support/config"
"github.com/openshift/hypershift/support/util"
diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/hostedcp.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/hostedcp.go
index 923204f5320..c88e3c8e7f6 100644
--- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/hostedcp.go
+++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/hostedcp.go
@@ -1,7 +1,7 @@
package manifests
import (
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/network/reconcile.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/network/reconcile.go
index 8f59ebb8819..a5f91de798d 100644
--- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/network/reconcile.go
+++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/network/reconcile.go
@@ -4,7 +4,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
operatorv1 "github.com/openshift/api/operator/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
)
func NetworkOperator() *operatorv1.Network {
diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/network/reconcile_test.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/network/reconcile_test.go
index 640d304c6eb..16da7702112 100644
--- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/network/reconcile_test.go
+++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/network/reconcile_test.go
@@ -5,7 +5,7 @@ import (
. "github.com/onsi/gomega"
operatorv1 "github.com/openshift/api/operator/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
)
func TestReconcileDefaultIngressController(t *testing.T) {
diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/olm/catalogs.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/olm/catalogs.go
index c3913c4ed01..1ea0544d88e 100644
--- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/olm/catalogs.go
+++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/olm/catalogs.go
@@ -5,7 +5,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
)
diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/olm/params.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/olm/params.go
index 73e4dcdee52..087bee11cee 100644
--- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/olm/params.go
+++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/olm/params.go
@@ -3,7 +3,7 @@ package olm
import (
"strings"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
)
type OperatorLifecycleManagerParams struct {
diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/registry/registry.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/registry/registry.go
index 31e2d173f78..7457c17113b 100644
--- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/registry/registry.go
+++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/registry/registry.go
@@ -6,7 +6,7 @@ import (
imageregistryv1 "github.com/openshift/api/imageregistry/v1"
operatorv1 "github.com/openshift/api/operator/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
)
func ReconcileRegistryConfig(cfg *imageregistryv1.Config, platform hyperv1.PlatformType, availabilityPolicy hyperv1.AvailabilityPolicy) {
diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/resources.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/resources.go
index 159099ede30..f7a41feac88 100644
--- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/resources.go
+++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/resources.go
@@ -27,7 +27,7 @@ import (
configv1 "github.com/openshift/api/config/v1"
imageregistryv1 "github.com/openshift/api/imageregistry/v1"
operatorv1 "github.com/openshift/api/operator/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/cloud/azure"
cpomanifests "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
alerts "github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/controllers/resources/alerts"
@@ -187,7 +187,7 @@ func (r *reconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result
}
log.Info("reconciling clusterversion")
- if err := r.reconcileClusterVersion(ctx); err != nil {
+ if err := r.reconcileClusterVersion(ctx, hcp); err != nil {
errs = append(errs, fmt.Errorf("failed to reconcile clusterversion: %w", err))
}
@@ -653,11 +653,11 @@ func (r *reconciler) reconcileKonnectivityAgent(ctx context.Context, hcp *hyperv
return errors.NewAggregate(errs)
}
-func (r *reconciler) reconcileClusterVersion(ctx context.Context) error {
+func (r *reconciler) reconcileClusterVersion(ctx context.Context, hcp *hyperv1.HostedControlPlane) error { // hcp supplies spec.channel, which is mirrored into the guest ClusterVersion below.
clusterVersion := &configv1.ClusterVersion{ObjectMeta: metav1.ObjectMeta{Name: "version"}}
if _, err := r.CreateOrUpdate(ctx, r.client, clusterVersion, func() error {
clusterVersion.Spec.Upstream = ""
- clusterVersion.Spec.Channel = ""
+ clusterVersion.Spec.Channel = hcp.Spec.Channel
clusterVersion.Spec.DesiredUpdate = nil
return nil
}); err != nil {
diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/resources_test.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/resources_test.go
index f5b7c53ce6a..07aa5373e2b 100644
--- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/resources_test.go
+++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/resources_test.go
@@ -13,7 +13,7 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
cpomanifests "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
"github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/api"
"github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests"
diff --git a/control-plane-operator/hostedclusterconfigoperator/operator/config.go b/control-plane-operator/hostedclusterconfigoperator/operator/config.go
index 5d1e6278bcc..2b8a606d9c6 100644
--- a/control-plane-operator/hostedclusterconfigoperator/operator/config.go
+++ b/control-plane-operator/hostedclusterconfigoperator/operator/config.go
@@ -21,7 +21,7 @@ import (
configv1 "github.com/openshift/api/config/v1"
operatorv1 "github.com/openshift/api/operator/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/api"
"github.com/openshift/hypershift/support/labelenforcingclient"
"github.com/openshift/hypershift/support/releaseinfo"
diff --git a/docs/content/reference/api.md b/docs/content/reference/api.md
index 24fee94446d..e2b60c70e20 100644
--- a/docs/content/reference/api.md
+++ b/docs/content/reference/api.md
@@ -8,12 +8,12 @@ title: API
Packages:
-hypershift.openshift.io/v1alpha1
+hypershift.openshift.io/v1beta1
-
Package v1alpha1 contains the HyperShift API.
+Package v1beta1 contains the HyperShift API.
The HyperShift API enables creating and managing lightweight, flexible, heterogeneous
OpenShift clusters at scale.
HyperShift clusters are deployed in a topology which isolates the “control plane”
@@ -21,7 +21,7 @@ OpenShift clusters at scale.
worker nodes and their kubelets, and the infrastructure on which they run). This
enables “hosted control plane as a service” use cases.
-##HostedCluster { #hypershift.openshift.io/v1alpha1.HostedCluster }
+##HostedCluster { #hypershift.openshift.io/v1beta1.HostedCluster }
HostedCluster is the primary representation of a HyperShift cluster and encapsulates
the control plane and common data plane configuration. Creating a HostedCluster
@@ -43,7 +43,7 @@ NodePool resources.
string
-hypershift.openshift.io/v1alpha1
+hypershift.openshift.io/v1beta1
|
@@ -72,7 +72,7 @@ Refer to the Kubernetes API documentation for the fields of the
spec
-
+
HostedClusterSpec
@@ -86,7 +86,7 @@ HostedClusterSpec
|
release
-
+
Release
@@ -120,6 +120,20 @@ immutable.
|
+channel
+
+string
+
+ |
+
+(Optional)
+ channel is an identifier for explicitly requesting that a non-default
set of updates be applied to this cluster. The default channel will
contain stable updates that are appropriate for production clusters.
+ |
+
+
+
infraID
string
@@ -136,7 +150,7 @@ and its associated NodePools.
platform
-
+
PlatformSpec
@@ -150,7 +164,7 @@ and is used to configure platform specific behavior.
|
controllerAvailabilityPolicy
-
+
AvailabilityPolicy
@@ -159,18 +173,13 @@ AvailabilityPolicy
(Optional)
ControllerAvailabilityPolicy specifies the availability policy applied to
critical control plane components. The default value is SingleReplica.
-
-Value must be one of:
-"HighlyAvailable",
-"SingleReplica"
-
|
|
infrastructureAvailabilityPolicy
-
+
AvailabilityPolicy
@@ -180,18 +189,13 @@ AvailabilityPolicy
InfrastructureAvailabilityPolicy specifies the availability policy applied
to infrastructure services which run on cluster nodes. The default value is
SingleReplica.
-
-Value must be one of:
-"HighlyAvailable",
-"SingleReplica"
-
|
dns
-
+
DNSSpec
@@ -204,7 +208,7 @@ DNSSpec
|
networking
-
+
ClusterNetworking
@@ -217,7 +221,7 @@ ClusterNetworking
|
autoscaling
-
+
ClusterAutoscaling
@@ -232,7 +236,7 @@ associated with the control plane.
|
etcd
-
+
EtcdSpec
@@ -247,7 +251,7 @@ changed.
|
services
-
+
[]ServicePublishingStrategyMapping
@@ -326,7 +330,7 @@ signing key, a IssuerURL must also be specified.
|
configuration
-
+
ClusterConfiguration
@@ -362,7 +366,7 @@ name that corresponds to the constant AuditWebhookKubeconfigKey.
|
imageContentSources
-
+
[]ImageContentSource
@@ -392,7 +396,7 @@ PEM-encoded X.509 certificate bundle that will be added to the hosted controlpla
|
secretEncryption
-
+
SecretEncryptionSpec
@@ -436,7 +440,7 @@ provided: reconciliation is paused on the resource until the field is removed.
|
olmCatalogPlacement
-
+
OLMCatalogPlacement
@@ -447,11 +451,6 @@ OLMCatalogPlacement
this is set to management and OLM catalog components are deployed onto the management
cluster. If set to guest, the OLM catalog components will be deployed onto the guest
cluster.
-
-Value must be one of:
-"guest",
-"management"
-
|
@@ -473,7 +472,7 @@ map[string]string
status
-
+
HostedClusterStatus
@@ -484,7 +483,7 @@ HostedClusterStatus
|
-##NodePool { #hypershift.openshift.io/v1alpha1.NodePool }
+##NodePool { #hypershift.openshift.io/v1beta1.NodePool }
NodePool is a scalable set of worker nodes attached to a HostedCluster.
NodePool machine architectures are uniform within a given pool, and are
@@ -504,7 +503,7 @@ independent of the control plane’s underlying machine architecture.
string
-hypershift.openshift.io/v1alpha1
+hypershift.openshift.io/v1beta1
|
@@ -533,7 +532,7 @@ Refer to the Kubernetes API documentation for the fields of the
spec
-
+
NodePoolSpec
@@ -559,7 +558,7 @@ string
|
release
-
+
Release
@@ -574,7 +573,7 @@ machine properties (e.g. an AMI on the AWS platform).
|
platform
-
+
NodePoolPlatform
@@ -586,19 +585,6 @@ and is used to configure platform specific behavior.
|
-nodeCount
-
-int32
-
- |
-
-(Optional)
- Deprecated: Use Replicas instead. NodeCount will be dropped in the next
-api release.
- |
-
-
-
replicas
int32
@@ -614,7 +600,7 @@ unset, the default value is 0.
management
-
+
NodePoolManagement
@@ -628,7 +614,7 @@ upgrade strategies and auto-repair behaviors.
|
autoScaling
-
+
NodePoolAutoScaling
@@ -675,6 +661,39 @@ the purpose of the change. In future we plan to propagate this field in-place.
https://github.com/kubernetes-sigs/cluster-api/issues/5880
|
|
+
+
+pausedUntil
+
+string
+
+ |
+
+(Optional)
+ PausedUntil is a field that can be used to pause reconciliation on a resource.
+Either a date can be provided in RFC3339 format or a boolean. If a date is
+provided: reconciliation is paused on the resource until that date. If the boolean true is
+provided: reconciliation is paused on the resource until the field is removed.
+ |
+
+
+
+tuningConfig
+
+
+[]Kubernetes core/v1.LocalObjectReference
+
+
+ |
+
+ TuningConfig is a list of references to ConfigMaps containing serialized
+Tuned resources to define the tuning configuration to be applied to
+nodes in the NodePool. The Tuned API is defined here:
+https://github.com/openshift/cluster-node-tuning-operator/blob/2c76314fb3cc8f12aef4a0dcd67ddc3677d5b54f/pkg/apis/tuned/v1/tuned_types.go
+Each ConfigMap must have a single key named “tuned” whose value is the
+JSON or YAML of a serialized Tuned.
+ |
+
@@ -682,7 +701,7 @@ the purpose of the change. In future we plan to propagate this field in-place.
status
-
+
NodePoolStatus
@@ -693,10 +712,10 @@ NodePoolStatus
-###AESCBCSpec { #hypershift.openshift.io/v1alpha1.AESCBCSpec }
+###AESCBCSpec { #hypershift.openshift.io/v1beta1.AESCBCSpec }
(Appears on:
-SecretEncryptionSpec)
+SecretEncryptionSpec)
AESCBCSpec defines metadata about the AESCBC secret encryption strategy
@@ -739,10 +758,10 @@ secrets can continue to be decrypted until they are all re-encrypted with the ac
-###APIServerNetworking { #hypershift.openshift.io/v1alpha1.APIServerNetworking }
+###APIServerNetworking { #hypershift.openshift.io/v1beta1.APIServerNetworking }
(Appears on:
-ClusterNetworking)
+ClusterNetworking)
APIServerNetworking specifies how the APIServer is exposed inside a cluster
@@ -786,7 +805,7 @@ pods using host networking cannot listen on this port. If not specified,
|
allowedCIDRBlocks
-
+
[]CIDRBlock
@@ -799,10 +818,10 @@ This depends on underlying support by the cloud provider for Service LoadBalance
-###AWSCloudProviderConfig { #hypershift.openshift.io/v1alpha1.AWSCloudProviderConfig }
+###AWSCloudProviderConfig { #hypershift.openshift.io/v1beta1.AWSCloudProviderConfig }
(Appears on:
-AWSPlatformSpec)
+AWSPlatformSpec)
AWSCloudProviderConfig specifies AWS networking configuration.
@@ -819,7 +838,7 @@ This depends on underlying support by the cloud provider for Service LoadBalance
|
subnet
-
+
AWSResourceReference
@@ -855,10 +874,10 @@ string
-###AWSEndpointAccessType { #hypershift.openshift.io/v1alpha1.AWSEndpointAccessType }
+###AWSEndpointAccessType { #hypershift.openshift.io/v1beta1.AWSEndpointAccessType }
(Appears on:
-AWSPlatformSpec)
+AWSPlatformSpec)
AWSEndpointAccessType specifies the publishing scope of cluster endpoints.
@@ -884,10 +903,10 @@ private node communication with the control plane.
|
-###AWSKMSAuthSpec { #hypershift.openshift.io/v1alpha1.AWSKMSAuthSpec }
+###AWSKMSAuthSpec { #hypershift.openshift.io/v1beta1.AWSKMSAuthSpec }
(Appears on:
-AWSKMSSpec)
+AWSKMSSpec)
AWSKMSAuthSpec defines metadata about the management of credentials used to interact with AWS KMS
@@ -917,10 +936,10 @@ aws credentials file that can be used to configure AWS SDKs
-###AWSKMSKeyEntry { #hypershift.openshift.io/v1alpha1.AWSKMSKeyEntry }
+###AWSKMSKeyEntry { #hypershift.openshift.io/v1beta1.AWSKMSKeyEntry }
(Appears on:
-AWSKMSSpec)
+AWSKMSSpec)
AWSKMSKeyEntry defines metadata to locate the encryption key in AWS
@@ -946,10 +965,10 @@ string
-###AWSKMSSpec { #hypershift.openshift.io/v1alpha1.AWSKMSSpec }
+###AWSKMSSpec { #hypershift.openshift.io/v1beta1.AWSKMSSpec }
(Appears on:
-KMSSpec)
+KMSSpec)
AWSKMSSpec defines metadata about the configuration of the AWS KMS Secret Encryption provider
@@ -977,7 +996,7 @@ string
activeKey
-
+
AWSKMSKeyEntry
@@ -990,7 +1009,7 @@ AWSKMSKeyEntry
|
backupKey
-
+
AWSKMSKeyEntry
@@ -1005,7 +1024,7 @@ secrets can continue to be decrypted until they are all re-encrypted with the ac
|
auth
-
+
AWSKMSAuthSpec
@@ -1016,10 +1035,10 @@ AWSKMSAuthSpec
-###AWSNodePoolPlatform { #hypershift.openshift.io/v1alpha1.AWSNodePoolPlatform }
+###AWSNodePoolPlatform { #hypershift.openshift.io/v1beta1.AWSNodePoolPlatform }
(Appears on:
-NodePoolPlatform)
+NodePoolPlatform)
AWSNodePoolPlatform specifies the configuration of a NodePool when operating
@@ -1059,7 +1078,7 @@ string
|
subnet
-
+
AWSResourceReference
@@ -1086,7 +1105,7 @@ is chosen based on the NodePool release payload image.
|
securityGroups
-
+
[]AWSResourceReference
@@ -1101,7 +1120,7 @@ instances.
|
rootVolume
-
+
Volume
@@ -1115,7 +1134,7 @@ Volume
|
resourceTags
-
+
[]AWSResourceTag
@@ -1134,10 +1153,10 @@ for the user.
-###AWSPlatformSpec { #hypershift.openshift.io/v1alpha1.AWSPlatformSpec }
+###AWSPlatformSpec { #hypershift.openshift.io/v1beta1.AWSPlatformSpec }
(Appears on:
-PlatformSpec)
+PlatformSpec)
AWSPlatformSpec specifies configuration for clusters running on Amazon Web Services.
@@ -1167,7 +1186,7 @@ the correct boot AMI for a given release.
|
cloudProviderConfig
-
+
AWSCloudProviderConfig
@@ -1185,7 +1204,7 @@ TODO(dan): should this be named AWSNetworkConfig?
|
serviceEndpoints
-
+
[]AWSServiceEndpoint
@@ -1201,7 +1220,7 @@ the default service endpoint of specific AWS Services.
|
rolesRef
-
+
AWSRolesRef
@@ -1213,69 +1232,9 @@ integrations such as OIDC.
|
-roles
-
-
-[]AWSRoleCredentials
-
-
- |
-
- Deprecated
-This field will be removed in the next API release.
-Use RolesRef instead.
- |
-
-
-
-kubeCloudControllerCreds
-
-
-Kubernetes core/v1.LocalObjectReference
-
-
- |
-
- Deprecated
-This field will be removed in the next API release.
-Use RolesRef instead.
- |
-
-
-
-nodePoolManagementCreds
-
-
-Kubernetes core/v1.LocalObjectReference
-
-
- |
-
- Deprecated
-This field will be removed in the next API release.
-Use RolesRef instead.
- |
-
-
-
-controlPlaneOperatorCreds
-
-
-Kubernetes core/v1.LocalObjectReference
-
-
- |
-
- Deprecated
-This field will be removed in the next API release.
-Use RolesRef instead.
- |
-
-
-
resourceTags
-
+
[]AWSResourceTag
@@ -1294,7 +1253,7 @@ for the user.
|
endpointAccess
-
+
AWSEndpointAccessType
@@ -1303,21 +1262,15 @@ AWSEndpointAccessType
(Optional)
EndpointAccess specifies the publishing scope of cluster endpoints. The
default is Public.
-
-Value must be one of:
-"Private",
-"Public",
-"PublicAndPrivate"
-
|
-###AWSResourceReference { #hypershift.openshift.io/v1alpha1.AWSResourceReference }
+###AWSResourceReference { #hypershift.openshift.io/v1beta1.AWSResourceReference }
(Appears on:
-AWSCloudProviderConfig,
-AWSNodePoolPlatform)
+AWSCloudProviderConfig,
+AWSNodePoolPlatform)
AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters.
@@ -1360,7 +1313,7 @@ string
filters
-
+
[]Filter
@@ -1374,11 +1327,11 @@ They are applied according to the rules defined by the AWS API:
-###AWSResourceTag { #hypershift.openshift.io/v1alpha1.AWSResourceTag }
+###AWSResourceTag { #hypershift.openshift.io/v1beta1.AWSResourceTag }
(Appears on:
-AWSNodePoolPlatform,
-AWSPlatformSpec)
+AWSNodePoolPlatform,
+AWSPlatformSpec)
AWSResourceTag is a tag to apply to AWS resources created for the cluster.
@@ -1418,11 +1371,7 @@ requirements of all services.
-###AWSRoleCredentials { #hypershift.openshift.io/v1alpha1.AWSRoleCredentials }
-
-(Appears on:
-AWSPlatformSpec)
-
+###AWSRoleCredentials { #hypershift.openshift.io/v1beta1.AWSRoleCredentials }
@@ -1465,10 +1414,10 @@ string
-###AWSRolesRef { #hypershift.openshift.io/v1alpha1.AWSRolesRef }
+###AWSRolesRef { #hypershift.openshift.io/v1beta1.AWSRolesRef }
(Appears on:
-AWSPlatformSpec)
+AWSPlatformSpec)
AWSRolesRef contains references to various AWS IAM roles required for operators to make calls against the AWS API.
@@ -1860,10 +1809,10 @@ string
-###AWSServiceEndpoint { #hypershift.openshift.io/v1alpha1.AWSServiceEndpoint }
+###AWSServiceEndpoint { #hypershift.openshift.io/v1beta1.AWSServiceEndpoint }
(Appears on:
-AWSPlatformSpec)
+AWSPlatformSpec)
AWSServiceEndpoint stores the configuration for services to
@@ -1904,10 +1853,10 @@ This must be provided and cannot be empty.
-###AgentNodePoolPlatform { #hypershift.openshift.io/v1alpha1.AgentNodePoolPlatform }
+###AgentNodePoolPlatform { #hypershift.openshift.io/v1beta1.AgentNodePoolPlatform }
(Appears on:
-NodePoolPlatform)
+NodePoolPlatform)
AgentNodePoolPlatform specifies the configuration of a NodePool when operating
@@ -1938,10 +1887,10 @@ be selected for a Machine.
-###AgentPlatformSpec { #hypershift.openshift.io/v1alpha1.AgentPlatformSpec }
+###AgentPlatformSpec { #hypershift.openshift.io/v1beta1.AgentPlatformSpec }
(Appears on:
-PlatformSpec)
+PlatformSpec)
AgentPlatformSpec specifies configuration for agent-based installations.
@@ -1967,11 +1916,11 @@ string
-###AvailabilityPolicy { #hypershift.openshift.io/v1alpha1.AvailabilityPolicy }
+###AvailabilityPolicy { #hypershift.openshift.io/v1beta1.AvailabilityPolicy }
(Appears on:
-HostedClusterSpec,
-HostedControlPlaneSpec)
+HostedClusterSpec,
+HostedControlPlaneSpec)
AvailabilityPolicy specifies a high level availability policy for components.
@@ -1997,10 +1946,10 @@ toleration of full disruption of the component.
|
-###AzureNodePoolPlatform { #hypershift.openshift.io/v1alpha1.AzureNodePoolPlatform }
+###AzureNodePoolPlatform { #hypershift.openshift.io/v1beta1.AzureNodePoolPlatform }
(Appears on:
-NodePoolPlatform)
+NodePoolPlatform)
@@ -2079,10 +2028,10 @@ in a location that does not support AvailabilityZone.
-###AzurePlatformSpec { #hypershift.openshift.io/v1alpha1.AzurePlatformSpec }
+###AzurePlatformSpec { #hypershift.openshift.io/v1beta1.AzurePlatformSpec }
(Appears on:
-PlatformSpec)
+PlatformSpec)
@@ -2188,19 +2137,18 @@ string
-###CIDRBlock { #hypershift.openshift.io/v1alpha1.CIDRBlock }
+###CIDRBlock { #hypershift.openshift.io/v1beta1.CIDRBlock }
(Appears on:
-APIServerNetworking,
-HostedControlPlaneSpec)
+APIServerNetworking)
-###ClusterAutoscaling { #hypershift.openshift.io/v1alpha1.ClusterAutoscaling }
+###ClusterAutoscaling { #hypershift.openshift.io/v1beta1.ClusterAutoscaling }
(Appears on:
-HostedClusterSpec,
-HostedControlPlaneSpec)
+HostedClusterSpec,
+HostedControlPlaneSpec)
ClusterAutoscaling specifies auto-scaling behavior that applies to all
@@ -2270,11 +2218,11 @@ resources available. The default is -10.
-###ClusterConfiguration { #hypershift.openshift.io/v1alpha1.ClusterConfiguration }
+###ClusterConfiguration { #hypershift.openshift.io/v1beta1.ClusterConfiguration }
(Appears on:
-HostedClusterSpec,
-HostedControlPlaneSpec)
+HostedClusterSpec,
+HostedControlPlaneSpec)
ClusterConfiguration specifies configuration for individual OCP components in the
@@ -2293,57 +2241,6 @@ configuration API.
-secretRefs
-
-
-[]Kubernetes core/v1.LocalObjectReference
-
-
- |
-
-(Optional)
- SecretRefs holds references to any secrets referenced by configuration
-entries. Entries can reference the secrets using local object references.
-Deprecated
-This field is deprecated and will be removed in a future release
- |
-
-
-
-configMapRefs
-
-
-[]Kubernetes core/v1.LocalObjectReference
-
-
- |
-
-(Optional)
- ConfigMapRefs holds references to any configmaps referenced by
-configuration entries. Entries can reference the configmaps using local
-object references.
-Deprecated
-This field is deprecated and will be removed in a future release
- |
-
-
-
-items
-
-
-[]k8s.io/apimachinery/pkg/runtime.RawExtension
-
-
- |
-
-(Optional)
- Items embeds the serialized configuration resources.
-Deprecated
-This field is deprecated and will be removed in a future release
- |
-
-
-
apiServer
@@ -2484,10 +2381,10 @@ github.com/openshift/api/config/v1.ProxySpec
|
-###ClusterNetworkEntry { #hypershift.openshift.io/v1alpha1.ClusterNetworkEntry }
+###ClusterNetworkEntry { #hypershift.openshift.io/v1beta1.ClusterNetworkEntry }
(Appears on:
-ClusterNetworking)
+ClusterNetworking)
ClusterNetworkEntry is a single IP address block for pod IP blocks. IP blocks
@@ -2530,11 +2427,11 @@ field is not used by the plugin, it can be left unset.
-###ClusterNetworking { #hypershift.openshift.io/v1alpha1.ClusterNetworking }
+###ClusterNetworking { #hypershift.openshift.io/v1beta1.ClusterNetworking }
(Appears on:
-HostedClusterSpec,
-HostedControlPlaneSpec)
+HostedClusterSpec,
+HostedControlPlaneSpec)
ClusterNetworking specifies network configuration for a cluster.
@@ -2549,81 +2446,36 @@ field is not used by the plugin, it can be left unset.
-serviceCIDR
-
-string
-
- |
-
-(Optional)
- Deprecated
-This field will be removed in the next API release.
-Use ServiceNetwork instead
- |
-
-
-
-podCIDR
-
-string
-
- |
-
-(Optional)
- Deprecated
-This field will be removed in the next API release.
-Use ClusterNetwork instead
- |
-
-
-
-machineCIDR
-
-string
-
- |
-
-(Optional)
- Deprecated
-This field will be removed in the next API release.
-Use MachineNetwork instead
- |
-
-
-
machineNetwork
-
+
[]MachineNetworkEntry
|
(Optional)
- MachineNetwork is the list of IP address pools for machines.
-TODO: make this required in the next version of the API
+MachineNetwork is the list of IP address pools for machines.
|
clusterNetwork
-
+
[]ClusterNetworkEntry
|
-(Optional)
- ClusterNetwork is the list of IP address pools for pods.
-TODO: make this required in the next version of the API
+ClusterNetwork is the list of IP address pools for pods.
|
serviceNetwork
-
+
[]ServiceNetworkEntry
@@ -2631,35 +2483,27 @@ TODO: make this required in the next version of the API
|
(Optional)
ServiceNetwork is the list of IP address pools for services.
-NOTE: currently only one entry is supported.
-TODO: make this required in the next version of the API
+NOTE: currently only one entry is supported.
|
networkType
-
+
NetworkType
|
NetworkType specifies the SDN provider used for cluster networking.
-
-Value must be one of:
-"Calico",
-"OVNKubernetes",
-"OpenShiftSDN",
-"Other"
-
|
apiServer
-
+
APIServerNetworking
@@ -2671,10 +2515,11 @@ how the APIServer is exposed inside a cluster node.
|
-###ClusterVersionStatus { #hypershift.openshift.io/v1alpha1.ClusterVersionStatus }
+###ClusterVersionStatus { #hypershift.openshift.io/v1beta1.ClusterVersionStatus }
(Appears on:
-HostedClusterStatus)
+HostedClusterStatus,
+HostedControlPlaneStatus)
ClusterVersionStatus reports the status of the cluster versioning,
@@ -2695,8 +2540,8 @@ progress, or is failing.
desired
-
-Release
+
+github.com/openshift/api/config/v1.Release
|
@@ -2739,9 +2584,46 @@ If this value is not equal to metadata.generation, then the desired
and conditions fields may represent a previous version.
+
+
+availableUpdates
+
+
+[]github.com/openshift/api/config/v1.Release
+
+
+ |
+
+ availableUpdates contains updates recommended for this
+cluster. Updates which appear in conditionalUpdates but not in
+availableUpdates may expose this cluster to known issues. This list
+may be empty if no updates are recommended, if the update service
+is unavailable, or if an invalid channel has been specified.
+ |
+
+
+
+conditionalUpdates
+
+
+[]github.com/openshift/api/config/v1.ConditionalUpdate
+
+
+ |
+
+(Optional)
+ conditionalUpdates contains the list of updates that may be
+recommended for this cluster if it meets specific required
+conditions. Consumers interested in the set of updates that are
+actually recommended for this cluster should use
+availableUpdates. This list may be empty if no updates are
+recommended, if the update service is unavailable, or if an empty
+or invalid channel has been specified.
+ |
+
-###ConditionType { #hypershift.openshift.io/v1alpha1.ConditionType }
+###ConditionType { #hypershift.openshift.io/v1beta1.ConditionType }
@@ -2759,8 +2641,22 @@ created in the guest VPC
AWSEndpointServiceAvailable indicates whether the AWS Endpoint Service
has been created for the specified NLB in the management VPC
|
-"ClusterVersionFailing" |
+
"CVOScaledDown" |
|
+
"CloudResourcesDestroyed" |
+ |
+
"ClusterVersionAvailable" |
+ClusterVersionAvailable bubbles up Failing configv1.OperatorAvailable from the CVO.
+ |
+
"ClusterVersionFailing" |
+ClusterVersionFailing bubbles up Failing from the CVO.
+ |
+
"ClusterVersionProgressing" |
+ClusterVersionProgressing bubbles up configv1.OperatorProgressing from the CVO.
+ |
+
"ClusterVersionReleaseAccepted" |
+ClusterVersionReleaseAccepted bubbles up Failing ReleaseAccepted from the CVO.
+ |
"ClusterVersionSucceeding" |
ClusterVersionSucceeding indicates the current status of the desired release
version of the HostedCluster as indicated by the Failing condition in the
@@ -2771,7 +2667,8 @@ underlying cluster’s ClusterVersion.
underlying cluster’s ClusterVersion.
|
"EtcdAvailable" |
- |
+EtcdAvailable bubbles up the same condition from HCP.
+ |
"EtcdSnapshotRestored" |
|
"Available" |
@@ -2795,19 +2692,21 @@ an initial deployment or upgrade.
HostedCluster is available to handle ignition requests.
"InfrastructureReady" |
- |
+InfrastructureReady bubbles up the same condition from HCP.
+ |
"KubeAPIServerAvailable" |
- |
+KubeAPIServerAvailable bubbles up the same condition from HCP.
+ |
"PlatformCredentialsFound" |
PlatformCredentialsFound indicates that credentials required for the
desired platform are valid.
|
"ReconciliationActive" |
-ReconciliationActive indicates if reconciliation of the hostedcluster is
+ | ReconciliationActive indicates if reconciliation of the HostedCluster is
active or paused.
|
"ReconciliationSucceeded" |
-ReconciliationSucceeded indicates if the hostedcluster reconciliation
+ | ReconciliationSucceeded indicates if the HostedCluster reconciliation
succeeded.
|
"SupportedHostedCluster" |
@@ -2826,7 +2725,8 @@ healthy.
ClusterConfiguration specified for the HostedCluster is valid.
"ValidHostedControlPlaneConfiguration" |
- |
+ValidHostedControlPlaneConfiguration bubbles up the same condition from HCP.
+ |
"ValidOIDCConfiguration" |
ValidOIDCConfiguration indicates if an AWS cluster’s OIDC condition is
detected as invalid.
@@ -2839,11 +2739,11 @@ unsupported upgrade e.g y-stream upgrade before 4.11.
|
-###DNSSpec { #hypershift.openshift.io/v1alpha1.DNSSpec }
+###DNSSpec { #hypershift.openshift.io/v1beta1.DNSSpec }
(Appears on:
-HostedClusterSpec,
-HostedControlPlaneSpec)
+HostedClusterSpec,
+HostedControlPlaneSpec)
DNSSpec specifies the DNS configuration in the cluster.
@@ -2895,10 +2795,10 @@ available internally to the cluster exist.
-###EtcdManagementType { #hypershift.openshift.io/v1alpha1.EtcdManagementType }
+###EtcdManagementType { #hypershift.openshift.io/v1beta1.EtcdManagementType }
(Appears on:
-EtcdSpec)
+EtcdSpec)
EtcdManagementType is a enum specifying the strategy for managing the cluster’s etcd instance
@@ -2920,11 +2820,11 @@ and the user is responsible for doing so.
-###EtcdSpec { #hypershift.openshift.io/v1alpha1.EtcdSpec }
+###EtcdSpec { #hypershift.openshift.io/v1beta1.EtcdSpec }
(Appears on:
-HostedClusterSpec,
-HostedControlPlaneSpec)
+HostedClusterSpec,
+HostedControlPlaneSpec)
EtcdSpec specifies configuration for a control plane etcd cluster.
@@ -2941,25 +2841,20 @@ and the user is responsible for doing so.
managementType
-
+
EtcdManagementType
|
ManagementType defines how the etcd cluster is managed.
-
-Value must be one of:
-"Managed",
-"Unmanaged"
-
|
managed
-
+
ManagedEtcdSpec
@@ -2973,7 +2868,7 @@ ManagedEtcdSpec
|
unmanaged
-
+
UnmanagedEtcdSpec
@@ -2986,10 +2881,10 @@ integrate with an eternally managed etcd cluster.
|
-###EtcdTLSConfig { #hypershift.openshift.io/v1alpha1.EtcdTLSConfig }
+###EtcdTLSConfig { #hypershift.openshift.io/v1beta1.EtcdTLSConfig }
(Appears on:
-UnmanagedEtcdSpec)
+UnmanagedEtcdSpec)
EtcdTLSConfig specifies TLS configuration for HTTPS etcd client endpoints.
@@ -3022,10 +2917,10 @@ etcd-client.key: Client certificate key value
-###Filter { #hypershift.openshift.io/v1alpha1.Filter }
+###Filter { #hypershift.openshift.io/v1beta1.Filter }
(Appears on:
-AWSResourceReference)
+AWSResourceReference)
Filter is a filter used to identify an AWS resource
@@ -3062,10 +2957,10 @@ string
-###HostedClusterSpec { #hypershift.openshift.io/v1alpha1.HostedClusterSpec }
+###HostedClusterSpec { #hypershift.openshift.io/v1beta1.HostedClusterSpec }
(Appears on:
-HostedCluster)
+HostedCluster)
HostedClusterSpec is the desired behavior of a HostedCluster.
@@ -3082,7 +2977,7 @@ string
release
-
+
Release
@@ -3116,6 +3011,20 @@ immutable.
|
+channel
+
+string
+
+ |
+
+(Optional)
+ channel is an identifier for explicitly requesting that a non-default
+set of updates be applied to this cluster. The default channel will be
+contain stable updates that are appropriate for production clusters.
+ |
+
+
+
infraID
string
@@ -3132,7 +3041,7 @@ and its associated NodePools.
platform
-
+
PlatformSpec
@@ -3146,7 +3055,7 @@ and is used to configure platform specific behavior.
|
controllerAvailabilityPolicy
-
+
AvailabilityPolicy
@@ -3155,18 +3064,13 @@ AvailabilityPolicy
(Optional)
ControllerAvailabilityPolicy specifies the availability policy applied to
critical control plane components. The default value is SingleReplica.
-
-Value must be one of:
-"HighlyAvailable",
-"SingleReplica"
-
|
|
infrastructureAvailabilityPolicy
-
+
AvailabilityPolicy
@@ -3176,18 +3080,13 @@ AvailabilityPolicy
InfrastructureAvailabilityPolicy specifies the availability policy applied
to infrastructure services which run on cluster nodes. The default value is
SingleReplica.
-
-Value must be one of:
-"HighlyAvailable",
-"SingleReplica"
-
|
dns
-
+
DNSSpec
@@ -3200,7 +3099,7 @@ DNSSpec
|
networking
-
+
ClusterNetworking
@@ -3213,7 +3112,7 @@ ClusterNetworking
|
autoscaling
-
+
ClusterAutoscaling
@@ -3228,7 +3127,7 @@ associated with the control plane.
|
etcd
-
+
EtcdSpec
@@ -3243,7 +3142,7 @@ changed.
|
services
-
+
[]ServicePublishingStrategyMapping
@@ -3322,7 +3221,7 @@ signing key, a IssuerURL must also be specified.
|
configuration
-
+
ClusterConfiguration
@@ -3358,7 +3257,7 @@ name that corresponds to the constant AuditWebhookKubeconfigKey.
|
imageContentSources
-
+
[]ImageContentSource
@@ -3388,7 +3287,7 @@ PEM-encoded X.509 certificate bundle that will be added to the hosted controlpla
|
secretEncryption
-
+
SecretEncryptionSpec
@@ -3432,7 +3331,7 @@ provided: reconciliation is paused on the resource until the field is removed.
|
olmCatalogPlacement
-
+
OLMCatalogPlacement
@@ -3443,11 +3342,6 @@ OLMCatalogPlacement
this is set to management and OLM catalog components are deployed onto the management
cluster. If set to guest, the OLM catalog components will be deployed onto the guest
cluster.
-
-Value must be one of:
-"guest",
-"management"
-
|
@@ -3464,10 +3358,10 @@ map[string]string
-###HostedClusterStatus { #hypershift.openshift.io/v1alpha1.HostedClusterStatus }
+###HostedClusterStatus { #hypershift.openshift.io/v1beta1.HostedClusterStatus }
(Appears on:
-HostedCluster)
+HostedCluster)
HostedClusterStatus is the latest observed status of a HostedCluster.
@@ -3484,7 +3378,7 @@ map[string]string
version
-
+
ClusterVersionStatus
@@ -3540,6 +3434,21 @@ It exposes the config for instances to become kubernetes nodes.
|
+controlPlaneEndpoint
+
+
+APIEndpoint
+
+
+ |
+
+ ControlPlaneEndpoint contains the endpoint information by which
+external clients can access the control plane. This is populated
+after the infrastructure is ready.
+ |
+
+
+
oauthCallbackURLTemplate
string
@@ -3562,13 +3471,14 @@ This is populated after the infrastructure is ready.
|
+(Optional)
Conditions represents the latest available observations of a control
plane’s current state.
|
-###HostedControlPlaneSpec { #hypershift.openshift.io/v1alpha1.HostedControlPlaneSpec }
+###HostedControlPlaneSpec { #hypershift.openshift.io/v1beta1.HostedControlPlaneSpec }
HostedControlPlaneSpec defines the desired state of HostedControlPlane
@@ -3588,105 +3498,62 @@ string
+ ReleaseImage is the release image applied to the hosted control plane.
|
-pullSecret
-
-
-Kubernetes core/v1.LocalObjectReference
-
-
- |
-
- |
-
-
-
-issuerURL
-
-string
-
- |
-
- |
-
-
-
-networking
-
-
-ClusterNetworking
-
-
- |
-
-(Optional)
- Networking specifies network configuration for the cluster.
-Temporarily optional for backward compatibility, required in future releases.
- |
-
-
-
-serviceCIDR
+channel
string
|
(Optional)
- deprecated
-use networking.ServiceNetwork
+channel is an identifier for explicitly requesting that a non-default
+set of updates be applied to this cluster. The default channel will be
+contain stable updates that are appropriate for production clusters.
|
-podCIDR
+pullSecret
-string
+
+Kubernetes core/v1.LocalObjectReference
+
|
-(Optional)
- deprecated
-use networking.ClusterNetwork
|
-machineCIDR
+issuerURL
string
|
-(Optional)
- deprecated
-use networking.MachineNetwork
+IssuerURL is an OIDC issuer URL which is used as the issuer in all
+ServiceAccount tokens generated by the control plane API server. The
+default value is kubernetes.default.svc, which only works for in-cluster
+validation.
|
-networkType
+networking
-
-NetworkType
+
+ClusterNetworking
|
(Optional)
- deprecated
-use networking.NetworkType
-NetworkType specifies the SDN provider used for cluster networking.
-
-Value must be one of:
-"Calico",
-"OVNKubernetes",
-"OpenShiftSDN",
-"Other"
-
+Networking specifies network configuration for the cluster.
+Temporarily optional for backward compatibility, required in future releases.
|
@@ -3730,7 +3597,7 @@ string
platform
-
+
PlatformSpec
@@ -3742,7 +3609,7 @@ PlatformSpec
|
dns
-
+
DNSSpec
@@ -3769,56 +3636,9 @@ be generated automatically for the cluster.
|
-apiPort
-
-int32
-
- |
-
-(Optional)
- deprecated
-use networking.apiServer.APIPort
-APIPort is the port at which the APIServer listens inside a worker
- |
-
-
-
-apiAdvertiseAddress
-
-string
-
- |
-
-(Optional)
- deprecated
-use networking.apiServer.AdvertiseAddress
-APIAdvertiseAddress is the address at which the APIServer listens
-inside a worker.
- |
-
-
-
-apiAllowedCIDRBlocks
-
-
-[]CIDRBlock
-
-
- |
-
-(Optional)
- deprecated
-use networking.apiServer.APIAllowedCIDRBlocks
-APIAllowedCIDRBlocks is an allow list of CIDR blocks that can access the APIServer
-If not specified, traffic is allowed from all addresses.
-This depends on underlying support by the cloud provider for Service LoadBalancerSourceRanges
- |
-
-
-
controllerAvailabilityPolicy
-
+
AvailabilityPolicy
@@ -3827,18 +3647,13 @@ AvailabilityPolicy
(Optional)
ControllerAvailabilityPolicy specifies the availability policy applied to
critical control plane components. The default value is SingleReplica.
-
-Value must be one of:
-"HighlyAvailable",
-"SingleReplica"
-
|
infrastructureAvailabilityPolicy
-
+
AvailabilityPolicy
@@ -3848,11 +3663,6 @@ AvailabilityPolicy
InfrastructureAvailabilityPolicy specifies the availability policy applied
to infrastructure services which run on cluster nodes. The default value is
SingleReplica.
-
-Value must be one of:
-"HighlyAvailable",
-"SingleReplica"
-
|
@@ -3871,7 +3681,7 @@ bool
kubeconfig
-
+
KubeconfigSecretRef
@@ -3885,7 +3695,7 @@ KubeconfigSecretRef
|
services
-
+
[]ServicePublishingStrategyMapping
@@ -3918,7 +3728,7 @@ in the secret with a secret key name that corresponds to the constant AuditWebho
|
etcd
-
+
EtcdSpec
@@ -3932,7 +3742,7 @@ use to store data.
|
configuration
-
+
ClusterConfiguration
@@ -3946,7 +3756,7 @@ ClusterConfiguration
|
imageContentSources
-
+
[]ImageContentSource
@@ -3974,7 +3784,7 @@ Kubernetes core/v1.LocalObjectReference
|
secretEncryption
-
+
SecretEncryptionSpec
@@ -4004,7 +3814,7 @@ provided: reconciliation is paused on the resource until the field is removed.
|
olmCatalogPlacement
-
+
OLMCatalogPlacement
@@ -4015,18 +3825,13 @@ OLMCatalogPlacement
this is set to management and OLM catalog components are deployed onto the management
cluster. If set to guest, the OLM catalog components will be deployed onto the guest
cluster.
-
-Value must be one of:
-"guest",
-"management"
-
|
autoscaling
-
+
ClusterAutoscaling
@@ -4051,7 +3856,7 @@ map[string]string
|
-###HostedControlPlaneStatus { #hypershift.openshift.io/v1alpha1.HostedControlPlaneStatus }
+###HostedControlPlaneStatus { #hypershift.openshift.io/v1beta1.HostedControlPlaneStatus }
HostedControlPlaneStatus defines the observed state of HostedControlPlane
@@ -4108,7 +3913,7 @@ is managed by an external service.
controlPlaneEndpoint
-
+
APIEndpoint
@@ -4135,6 +3940,21 @@ This is populated after the infrastructure is ready.
|
+versionStatus
+
+
+ClusterVersionStatus
+
+
+ |
+
+(Optional)
+ versionStatus is the status of the release version applied by the
+hosted control plane operator.
+ |
+
+
+
version
string
@@ -4143,6 +3963,7 @@ string
Version is the semantic version of the release applied by
the hosted control plane operator
+Deprecated: Use versionStatus.desired.version instead.
|
|
@@ -4153,7 +3974,9 @@ string
|
+(Optional)
ReleaseImage is the release image applied to the hosted control plane.
+Deprecated: Use versionStatus.desired.image instead.
|
@@ -4168,13 +3991,14 @@ Kubernetes meta/v1.Time
|
lastReleaseImageTransitionTime is the time of the last update to the current
releaseImage property.
+Deprecated: Use versionStatus.history[0].startedTime instead.
|
kubeConfig
-
+
KubeconfigSecretRef
@@ -4209,16 +4033,17 @@ for the guest cluster.
|
+(Optional)
Condition contains details for one aspect of the current state of the HostedControlPlane.
Current condition types are: “Available”
|
-###IBMCloudKMSAuthSpec { #hypershift.openshift.io/v1alpha1.IBMCloudKMSAuthSpec }
+###IBMCloudKMSAuthSpec { #hypershift.openshift.io/v1beta1.IBMCloudKMSAuthSpec }
(Appears on:
-IBMCloudKMSSpec)
+IBMCloudKMSSpec)
IBMCloudKMSAuthSpec defines metadata for how authentication is done with IBM Cloud KMS
@@ -4235,25 +4060,20 @@ Current condition types are: “Available”
type
-
+
IBMCloudKMSAuthType
|
Type defines the IBM Cloud KMS authentication strategy
-
-Value must be one of:
-"Managed",
-"Unmanaged"
-
|
unmanaged
-
+
IBMCloudKMSUnmanagedAuthSpec
@@ -4267,7 +4087,7 @@ IBMCloudKMSUnmanagedAuthSpec
|
managed
-
+
IBMCloudKMSManagedAuthSpec
@@ -4280,10 +4100,10 @@ KMS system (all provider managed).
|
-###IBMCloudKMSAuthType { #hypershift.openshift.io/v1alpha1.IBMCloudKMSAuthType }
+###IBMCloudKMSAuthType { #hypershift.openshift.io/v1beta1.IBMCloudKMSAuthType }
(Appears on:
-IBMCloudKMSAuthSpec)
+IBMCloudKMSAuthSpec)
IBMCloudKMSAuthType defines the IBM Cloud KMS authentication strategy
@@ -4305,10 +4125,10 @@ authentication to interact with IBM Cloud KMS APIs
-###IBMCloudKMSKeyEntry { #hypershift.openshift.io/v1alpha1.IBMCloudKMSKeyEntry }
+###IBMCloudKMSKeyEntry { #hypershift.openshift.io/v1beta1.IBMCloudKMSKeyEntry }
(Appears on:
-IBMCloudKMSSpec)
+IBMCloudKMSSpec)
IBMCloudKMSKeyEntry defines metadata for an IBM Cloud KMS encryption key
@@ -4379,19 +4199,19 @@ key is enabled for data encryption.
-###IBMCloudKMSManagedAuthSpec { #hypershift.openshift.io/v1alpha1.IBMCloudKMSManagedAuthSpec }
+###IBMCloudKMSManagedAuthSpec { #hypershift.openshift.io/v1beta1.IBMCloudKMSManagedAuthSpec }
(Appears on:
-IBMCloudKMSAuthSpec)
+IBMCloudKMSAuthSpec)
IBMCloudKMSManagedAuthSpec defines metadata around the service to service authentication strategy for the IBM Cloud
KMS system (all provider managed).
-###IBMCloudKMSSpec { #hypershift.openshift.io/v1alpha1.IBMCloudKMSSpec }
+###IBMCloudKMSSpec { #hypershift.openshift.io/v1beta1.IBMCloudKMSSpec }
(Appears on:
-KMSSpec)
+KMSSpec)
IBMCloudKMSSpec defines metadata for the IBM Cloud KMS encryption strategy
@@ -4419,7 +4239,7 @@ string
auth
-
+
IBMCloudKMSAuthSpec
@@ -4432,7 +4252,7 @@ IBMCloudKMSAuthSpec
|
keyList
-
+
[]IBMCloudKMSKeyEntry
@@ -4443,10 +4263,10 @@ IBMCloudKMSAuthSpec
-###IBMCloudKMSUnmanagedAuthSpec { #hypershift.openshift.io/v1alpha1.IBMCloudKMSUnmanagedAuthSpec }
+###IBMCloudKMSUnmanagedAuthSpec { #hypershift.openshift.io/v1beta1.IBMCloudKMSUnmanagedAuthSpec }
(Appears on:
-IBMCloudKMSAuthSpec)
+IBMCloudKMSAuthSpec)
IBMCloudKMSUnmanagedAuthSpec defines the auth metadata the customer provides to interact with IBM Cloud KMS
@@ -4475,11 +4295,11 @@ call IBM Cloud KMS APIs
-###IBMCloudPlatformSpec { #hypershift.openshift.io/v1alpha1.IBMCloudPlatformSpec }
+###IBMCloudPlatformSpec { #hypershift.openshift.io/v1beta1.IBMCloudPlatformSpec }
(Appears on:
-NodePoolPlatform,
-PlatformSpec)
+NodePoolPlatform,
+PlatformSpec)
IBMCloudPlatformSpec defines IBMCloud specific settings for components
@@ -4507,11 +4327,11 @@ github.com/openshift/api/config/v1.IBMCloudProviderType
-###ImageContentSource { #hypershift.openshift.io/v1alpha1.ImageContentSource }
+###ImageContentSource { #hypershift.openshift.io/v1beta1.ImageContentSource }
(Appears on:
-HostedClusterSpec,
-HostedControlPlaneSpec)
+HostedClusterSpec,
+HostedControlPlaneSpec)
ImageContentSource specifies image mirrors that can be used by cluster nodes
@@ -4553,19 +4373,53 @@ specifications.
-###InPlaceUpgrade { #hypershift.openshift.io/v1alpha1.InPlaceUpgrade }
+###InPlaceUpgrade { #hypershift.openshift.io/v1beta1.InPlaceUpgrade }
(Appears on:
-NodePoolManagement)
+NodePoolManagement)
InPlaceUpgrade specifies an upgrade strategy which upgrades nodes in-place
without any new nodes being created or any old nodes being deleted.
-###KMSProvider { #hypershift.openshift.io/v1alpha1.KMSProvider }
+
+
+
+| Field |
+Description |
+
+
+
+
+
+maxUnavailable
+
+
+k8s.io/apimachinery/pkg/util/intstr.IntOrString
+
+
+ |
+
+(Optional)
+ MaxUnavailable is the maximum number of nodes that can be unavailable
+during the update.
+Value can be an absolute number (ex: 5) or a percentage of desired nodes
+(ex: 10%).
+Absolute number is calculated from percentage by rounding down.
+Defaults to 1.
+Example: when this is set to 30%, a max of 30% of the nodes can be made
+unschedulable/unavailable immediately when the update starts. Once a set
+of nodes is updated, more nodes can be made unschedulable for update,
+ensuring that the total number of nodes schedulable at all times during
+the update is at least 70% of desired nodes.
+ |
+
+
+
+###KMSProvider { #hypershift.openshift.io/v1beta1.KMSProvider }
(Appears on:
-KMSSpec)
+KMSSpec)
KMSProvider defines the supported KMS providers
@@ -4583,10 +4437,10 @@ without any new nodes being created or any old nodes being deleted.
| |
-###KMSSpec { #hypershift.openshift.io/v1alpha1.KMSSpec }
+###KMSSpec { #hypershift.openshift.io/v1beta1.KMSSpec }
(Appears on:
-SecretEncryptionSpec)
+SecretEncryptionSpec)
KMSSpec defines metadata about the kms secret encryption strategy
@@ -4603,25 +4457,20 @@ without any new nodes being created or any old nodes being deleted.
provider
-
+
KMSProvider
|
Provider defines the KMS provider
-
-Value must be one of:
-"AWS",
-"IBMCloud"
-
|
ibmcloud
-
+
IBMCloudKMSSpec
@@ -4635,7 +4484,7 @@ IBMCloudKMSSpec
|
aws
-
+
AWSKMSSpec
@@ -4647,10 +4496,10 @@ AWSKMSSpec
|
-###KubevirtCompute { #hypershift.openshift.io/v1alpha1.KubevirtCompute }
+###KubevirtCompute { #hypershift.openshift.io/v1beta1.KubevirtCompute }
(Appears on:
-KubevirtNodePoolPlatform)
+KubevirtNodePoolPlatform)
KubevirtCompute contains values associated with the virtual compute hardware requested for the VM.
@@ -4691,10 +4540,10 @@ uint32
-###KubevirtDiskImage { #hypershift.openshift.io/v1alpha1.KubevirtDiskImage }
+###KubevirtDiskImage { #hypershift.openshift.io/v1beta1.KubevirtDiskImage }
(Appears on:
-KubevirtRootVolume)
+KubevirtRootVolume)
KubevirtDiskImage contains values representing where the rhcos image is located
@@ -4721,10 +4570,10 @@ string
-###KubevirtNodePoolPlatform { #hypershift.openshift.io/v1alpha1.KubevirtNodePoolPlatform }
+###KubevirtNodePoolPlatform { #hypershift.openshift.io/v1beta1.KubevirtNodePoolPlatform }
(Appears on:
-NodePoolPlatform)
+NodePoolPlatform)
KubevirtNodePoolPlatform specifies the configuration of a NodePool when operating
@@ -4742,7 +4591,7 @@ on KubeVirt platform.
rootVolume
-
+
KubevirtRootVolume
@@ -4755,7 +4604,7 @@ KubevirtRootVolume
|
compute
-
+
KubevirtCompute
@@ -4767,10 +4616,10 @@ KubevirtCompute
-###KubevirtPersistentVolume { #hypershift.openshift.io/v1alpha1.KubevirtPersistentVolume }
+###KubevirtPersistentVolume { #hypershift.openshift.io/v1beta1.KubevirtPersistentVolume }
(Appears on:
-KubevirtVolume)
+KubevirtVolume)
KubevirtPersistentVolume contains the values involved with provisioning persistent storage for a KubeVirt VM.
@@ -4813,7 +4662,7 @@ string
|
accessModes
-
+
[]PersistentVolumeAccessMode
@@ -4826,10 +4675,10 @@ More info: KubevirtNodePoolPlatform)
+KubevirtNodePoolPlatform)
KubevirtRootVolume represents the volume that the rhcos disk will be stored and run from.
@@ -4846,7 +4695,7 @@ More info:
+
KubevirtDiskImage
@@ -4860,7 +4709,7 @@ KubevirtDiskImage
|
KubevirtVolume
-
+
KubevirtVolume
@@ -4874,10 +4723,10 @@ KubevirtVolume
-###KubevirtVolume { #hypershift.openshift.io/v1alpha1.KubevirtVolume }
+###KubevirtVolume { #hypershift.openshift.io/v1beta1.KubevirtVolume }
(Appears on:
-KubevirtRootVolume)
+KubevirtRootVolume)
KubevirtVolume represents what kind of storage to use for a KubeVirt VM volume
@@ -4894,7 +4743,7 @@ KubevirtVolume
|
type
-
+
KubevirtVolumeType
@@ -4902,17 +4751,13 @@ KubevirtVolumeType
|
(Optional)
Type represents the type of storage to associate with the kubevirt VMs.
-
-Value must be one of:
-"Persistent"
-
|
persistent
-
+
KubevirtPersistentVolume
@@ -4926,10 +4771,10 @@ This is the default type used when no storage type is defined.
|
-###KubevirtVolumeType { #hypershift.openshift.io/v1alpha1.KubevirtVolumeType }
+###KubevirtVolumeType { #hypershift.openshift.io/v1beta1.KubevirtVolumeType }
(Appears on:
-KubevirtVolume)
+KubevirtVolume)
KubevirtVolumeType is a specific supported KubeVirt volumes
@@ -4946,10 +4791,10 @@ This is the default type used when no storage type is defined.
-###LoadBalancerPublishingStrategy { #hypershift.openshift.io/v1alpha1.LoadBalancerPublishingStrategy }
+###LoadBalancerPublishingStrategy { #hypershift.openshift.io/v1beta1.LoadBalancerPublishingStrategy }
(Appears on:
-ServicePublishingStrategy)
+ServicePublishingStrategy)
LoadBalancerPublishingStrategy specifies setting used to expose a service as a LoadBalancer.
@@ -4976,10 +4821,10 @@ string
-###MachineNetworkEntry { #hypershift.openshift.io/v1alpha1.MachineNetworkEntry }
+###MachineNetworkEntry { #hypershift.openshift.io/v1beta1.MachineNetworkEntry }
(Appears on:
-ClusterNetworking)
+ClusterNetworking)
MachineNetworkEntry is a single IP address block for node IP blocks.
@@ -5007,10 +4852,10 @@ github.com/openshift/hypershift/api/util/ipnet.IPNet
-###ManagedEtcdSpec { #hypershift.openshift.io/v1alpha1.ManagedEtcdSpec }
+###ManagedEtcdSpec { #hypershift.openshift.io/v1beta1.ManagedEtcdSpec }
(Appears on:
-EtcdSpec)
+EtcdSpec)
ManagedEtcdSpec specifies the behavior of an etcd cluster managed by
@@ -5028,7 +4873,7 @@ HyperShift.
storage
-
+
ManagedEtcdStorageSpec
@@ -5039,10 +4884,10 @@ ManagedEtcdStorageSpec
-###ManagedEtcdStorageSpec { #hypershift.openshift.io/v1alpha1.ManagedEtcdStorageSpec }
+###ManagedEtcdStorageSpec { #hypershift.openshift.io/v1beta1.ManagedEtcdStorageSpec }
(Appears on:
-ManagedEtcdSpec)
+ManagedEtcdSpec)
ManagedEtcdStorageSpec describes the storage configuration for etcd data.
@@ -5059,24 +4904,20 @@ ManagedEtcdStorageSpec
|
type
-
+
ManagedEtcdStorageType
|
Type is the kind of persistent storage implementation to use for etcd.
-
-Value must be one of:
-"PersistentVolume"
-
|
persistentVolume
-
+
PersistentVolumeEtcdStorageSpec
@@ -5107,10 +4948,10 @@ is empty.
|
-###ManagedEtcdStorageType { #hypershift.openshift.io/v1alpha1.ManagedEtcdStorageType }
+###ManagedEtcdStorageType { #hypershift.openshift.io/v1beta1.ManagedEtcdStorageType }
(Appears on:
-ManagedEtcdStorageSpec)
+ManagedEtcdStorageSpec)
ManagedEtcdStorageType is a storage type for an etcd cluster.
@@ -5127,11 +4968,10 @@ is empty.
-###NetworkType { #hypershift.openshift.io/v1alpha1.NetworkType }
+###NetworkType { #hypershift.openshift.io/v1beta1.NetworkType }
(Appears on:
-ClusterNetworking,
-HostedControlPlaneSpec)
+ClusterNetworking)
NetworkType specifies the SDN provider used for cluster networking.
@@ -5157,10 +4997,10 @@ is empty.
-###NodePoolAutoScaling { #hypershift.openshift.io/v1alpha1.NodePoolAutoScaling }
+###NodePoolAutoScaling { #hypershift.openshift.io/v1beta1.NodePoolAutoScaling }
(Appears on:
-NodePoolSpec)
+NodePoolSpec)
NodePoolAutoScaling specifies auto-scaling behavior for a NodePool.
@@ -5197,10 +5037,10 @@ int32
-###NodePoolCondition { #hypershift.openshift.io/v1alpha1.NodePoolCondition }
+###NodePoolCondition { #hypershift.openshift.io/v1beta1.NodePoolCondition }
(Appears on:
-NodePoolStatus)
+NodePoolStatus)
We define our own condition type since metav1.Condition has validation
@@ -5309,10 +5149,10 @@ int64
-###NodePoolManagement { #hypershift.openshift.io/v1alpha1.NodePoolManagement }
+###NodePoolManagement { #hypershift.openshift.io/v1beta1.NodePoolManagement }
(Appears on:
-NodePoolSpec)
+NodePoolSpec)
NodePoolManagement specifies behavior for managing nodes in a NodePool, such
@@ -5330,25 +5170,20 @@ as upgrade strategies and auto-repair behaviors.
upgradeType
-
+
UpgradeType
|
UpgradeType specifies the type of strategy for handling upgrades.
-
-Value must be one of:
-"InPlace",
-"Replace"
-
|
replace
-
+
ReplaceUpgrade
@@ -5361,7 +5196,7 @@ ReplaceUpgrade
|
inPlace
-
+
InPlaceUpgrade
@@ -5385,10 +5220,10 @@ in the NodePool. The default is false.
|
-###NodePoolPlatform { #hypershift.openshift.io/v1alpha1.NodePoolPlatform }
+###NodePoolPlatform { #hypershift.openshift.io/v1beta1.NodePoolPlatform }
(Appears on:
-NodePoolSpec)
+NodePoolSpec)
NodePoolPlatform specifies the underlying infrastructure provider for the
@@ -5406,30 +5241,20 @@ NodePool and is used to configure platform specific behavior.
type
-
+
PlatformType
|
Type specifies the platform name.
-
-Value must be one of:
-"AWS",
-"Agent",
-"Azure",
-"IBMCloud",
-"KubeVirt",
-"None",
-"PowerVS"
-
|
aws
-
+
AWSNodePoolPlatform
@@ -5443,7 +5268,7 @@ AWSNodePoolPlatform
|
ibmcloud
-
+
IBMCloudPlatformSpec
@@ -5456,7 +5281,7 @@ IBMCloudPlatformSpec
|
kubevirt
-
+
KubevirtNodePoolPlatform
@@ -5470,7 +5295,7 @@ KubevirtNodePoolPlatform
|
agent
-
+
AgentNodePoolPlatform
@@ -5484,7 +5309,7 @@ AgentNodePoolPlatform
|
azure
-
+
AzureNodePoolPlatform
@@ -5496,7 +5321,7 @@ AzureNodePoolPlatform
|
powervs
-
+
PowerVSNodePoolPlatform
@@ -5508,10 +5333,10 @@ PowerVSNodePoolPlatform
|
-###NodePoolSpec { #hypershift.openshift.io/v1alpha1.NodePoolSpec }
+###NodePoolSpec { #hypershift.openshift.io/v1beta1.NodePoolSpec }
(Appears on:
-NodePool)
+NodePool)
NodePoolSpec is the desired behavior of a NodePool.
@@ -5540,7 +5365,7 @@ string
release
-
+
Release
@@ -5555,7 +5380,7 @@ machine properties (e.g. an AMI on the AWS platform).
|
platform
-
+
NodePoolPlatform
@@ -5567,19 +5392,6 @@ and is used to configure platform specific behavior.
|
-nodeCount
-
-int32
-
- |
-
-(Optional)
- Deprecated: Use Replicas instead. NodeCount will be dropped in the next
-api release.
- |
-
-
-
replicas
int32
@@ -5595,7 +5407,7 @@ unset, the default value is 0.
management
-
+
NodePoolManagement
@@ -5609,7 +5421,7 @@ upgrade strategies and auto-repair behaviors.
|
autoScaling
-
+
NodePoolAutoScaling
@@ -5656,12 +5468,45 @@ the purpose of the change. In future we plan to propagate this field in-place.
https://github.com/kubernetes-sigs/cluster-api/issues/5880
|
|
+
+
+pausedUntil
+
+string
+
+ |
+
+(Optional)
+ PausedUntil is a field that can be used to pause reconciliation on a resource.
+Either a date can be provided in RFC3339 format or a boolean. If a date is
+provided: reconciliation is paused on the resource until that date. If the boolean true is
+provided: reconciliation is paused on the resource until the field is removed.
+ |
+
+
+
+tuningConfig
+
+
+[]Kubernetes core/v1.LocalObjectReference
+
+
+ |
+
+ TuningConfig is a list of references to ConfigMaps containing serialized
+Tuned resources to define the tuning configuration to be applied to
+nodes in the NodePool. The Tuned API is defined here:
+https://github.com/openshift/cluster-node-tuning-operator/blob/2c76314fb3cc8f12aef4a0dcd67ddc3677d5b54f/pkg/apis/tuned/v1/tuned_types.go
+Each ConfigMap must have a single key named “tuned” whose value is the
+JSON or YAML of a serialized Tuned.
+ |
+
-###NodePoolStatus { #hypershift.openshift.io/v1alpha1.NodePoolStatus }
+###NodePoolStatus { #hypershift.openshift.io/v1beta1.NodePoolStatus }
(Appears on:
-NodePool)
+NodePool)
NodePoolStatus is the latest observed status of a NodePool.
@@ -5702,22 +5547,23 @@ the NodePool.
conditions
-
+
[]NodePoolCondition
|
+(Optional)
Conditions represents the latest available observations of the node pool’s
current state.
|
-###NodePortPublishingStrategy { #hypershift.openshift.io/v1alpha1.NodePortPublishingStrategy }
+###NodePortPublishingStrategy { #hypershift.openshift.io/v1beta1.NodePortPublishingStrategy }
(Appears on:
-ServicePublishingStrategy)
+ServicePublishingStrategy)
NodePortPublishingStrategy specifies a NodePort used to expose a service.
@@ -5755,11 +5601,11 @@ assigned when the service is created.
-###OLMCatalogPlacement { #hypershift.openshift.io/v1alpha1.OLMCatalogPlacement }
+###OLMCatalogPlacement { #hypershift.openshift.io/v1beta1.OLMCatalogPlacement }
(Appears on:
-HostedClusterSpec,
-HostedControlPlaneSpec)
+HostedClusterSpec,
+HostedControlPlaneSpec)
OLMCatalogPlacement is an enum specifying the placement of OLM catalog components.
@@ -5781,17 +5627,17 @@ the management cluster.
-###PersistentVolumeAccessMode { #hypershift.openshift.io/v1alpha1.PersistentVolumeAccessMode }
+###PersistentVolumeAccessMode { #hypershift.openshift.io/v1beta1.PersistentVolumeAccessMode }
(Appears on:
-KubevirtPersistentVolume)
+KubevirtPersistentVolume)
-###PersistentVolumeEtcdStorageSpec { #hypershift.openshift.io/v1alpha1.PersistentVolumeEtcdStorageSpec }
+###PersistentVolumeEtcdStorageSpec { #hypershift.openshift.io/v1beta1.PersistentVolumeEtcdStorageSpec }
(Appears on:
-ManagedEtcdStorageSpec)
+ManagedEtcdStorageSpec)
PersistentVolumeEtcdStorageSpec is the configuration for PersistentVolume
@@ -5834,11 +5680,11 @@ k8s.io/apimachinery/pkg/api/resource.Quantity
-###PlatformSpec { #hypershift.openshift.io/v1alpha1.PlatformSpec }
+###PlatformSpec { #hypershift.openshift.io/v1beta1.PlatformSpec }
(Appears on:
-HostedClusterSpec,
-HostedControlPlaneSpec)
+HostedClusterSpec,
+HostedControlPlaneSpec)
PlatformSpec specifies the underlying infrastructure provider for the cluster
@@ -5856,30 +5702,20 @@ and is used to configure platform specific behavior.
type
-
+
PlatformType
|
Type is the type of infrastructure provider for the cluster.
-
-Value must be one of:
-"AWS",
-"Agent",
-"Azure",
-"IBMCloud",
-"KubeVirt",
-"None",
-"PowerVS"
-
|
aws
-
+
AWSPlatformSpec
@@ -5893,7 +5729,7 @@ AWSPlatformSpec
|
agent
-
+
AgentPlatformSpec
@@ -5907,7 +5743,7 @@ AgentPlatformSpec
|
ibmcloud
-
+
IBMCloudPlatformSpec
@@ -5920,7 +5756,7 @@ IBMCloudPlatformSpec
|
azure
-
+
AzurePlatformSpec
@@ -5933,7 +5769,7 @@ AzurePlatformSpec
|
powervs
-
+
PowerVSPlatformSpec
@@ -5946,11 +5782,11 @@ This field is immutable. Once set, It can’t be changed.
|
-###PlatformType { #hypershift.openshift.io/v1alpha1.PlatformType }
+###PlatformType { #hypershift.openshift.io/v1beta1.PlatformType }
(Appears on:
-NodePoolPlatform,
-PlatformSpec)
+NodePoolPlatform,
+PlatformSpec)
PlatformType is a specific supported infrastructure provider.
@@ -5985,10 +5821,18 @@ This field is immutable. Once set, It can’t be changed.
-###PowerVSNodePoolPlatform { #hypershift.openshift.io/v1alpha1.PowerVSNodePoolPlatform }
+###PowerVSNodePoolImageDeletePolicy { #hypershift.openshift.io/v1beta1.PowerVSNodePoolImageDeletePolicy }
(Appears on:
-NodePoolPlatform)
+PowerVSNodePoolPlatform)
+
+
+
PowerVSNodePoolImageDeletePolicy defines image delete policy to be used for PowerVSNodePoolPlatform
+
+###PowerVSNodePoolPlatform { #hypershift.openshift.io/v1beta1.PowerVSNodePoolPlatform }
+
+(Appears on:
+NodePoolPlatform)
PowerVSNodePoolPlatform specifies the configuration of a NodePool when operating
@@ -6024,7 +5868,9 @@ reasonable default. The current default is s922 which is generally available.
processorType
-string
+
+PowerVSNodePoolProcType
+
@@ -6087,7 +5933,7 @@ default. The current default is 32.
|
image
-
+
PowerVSResourceReference
@@ -6102,7 +5948,9 @@ is chosen based on the NodePool release payload image.
|
storageType
-string
+
+PowerVSNodePoolStorageType
+
|
@@ -6118,7 +5966,9 @@ Although, the exact numbers might change over time, the Tier 3 storage is curren
|
imageDeletePolicy
-string
+
+PowerVSNodePoolImageDeletePolicy
+
|
@@ -6131,10 +5981,44 @@ retain: delete the image from the openshift but retain in the infrastructure.
-###PowerVSPlatformSpec { #hypershift.openshift.io/v1alpha1.PowerVSPlatformSpec }
+###PowerVSNodePoolProcType { #hypershift.openshift.io/v1beta1.PowerVSNodePoolProcType }
+
+(Appears on:
+PowerVSNodePoolPlatform)
+
+
+ PowerVSNodePoolProcType defines processor type to be used for PowerVSNodePoolPlatform
+
+
+
+
+| Value |
+Description |
+
+
+"capped" |
+PowerVSNodePoolCappedProcType defines capped processor type
+ |
+ "dedicated" |
+PowerVSNodePoolDedicatedProcType defines dedicated processor type
+ |
+ "shared" |
+PowerVSNodePoolSharedProcType defines shared processor type
+ |
+
+
+###PowerVSNodePoolStorageType { #hypershift.openshift.io/v1beta1.PowerVSNodePoolStorageType }
(Appears on:
-PlatformSpec)
+PowerVSNodePoolPlatform)
+
+
+ PowerVSNodePoolStorageType defines storage type to be used for PowerVSNodePoolPlatform
+
+###PowerVSPlatformSpec { #hypershift.openshift.io/v1beta1.PowerVSPlatformSpec }
+
+(Appears on:
+PlatformSpec)
PowerVSPlatformSpec defines IBMCloud PowerVS specific settings for components
@@ -6214,7 +6098,7 @@ This field is immutable. Once set, It can’t be changed.
|
subnet
-
+
PowerVSResourceReference
@@ -6245,7 +6129,7 @@ ServiceInstanceID is the unique identifier that can be obtained from IBM Cloud U
|
vpc
-
+
PowerVSVPC
@@ -6290,7 +6174,7 @@ This field is immutable. Once set, It can’t be changed.
|
-controlPlaneOperatorCreds
+ingressOperatorCloudCreds
Kubernetes core/v1.LocalObjectReference
@@ -6298,15 +6182,13 @@ Kubernetes core/v1.LocalObjectReference
|
- ControlPlaneOperatorCreds is a reference to a secret containing cloud
-credentials with permissions matching the control-plane-operator policy.
-This field is immutable. Once set, It can’t be changed.
-TODO(dan): document the “control plane operator policy”
+IngressOperatorCloudCreds is a reference to a secret containing ibm cloud
+credentials for ingress operator to get authenticated with ibm cloud.
|
-ingressOperatorCloudCreds
+storageOperatorCloudCreds
Kubernetes core/v1.LocalObjectReference
@@ -6314,17 +6196,17 @@ Kubernetes core/v1.LocalObjectReference
|
- IngressOperatorCloudCreds is a reference to a secret containing ibm cloud
-credentials for ingress operator to get authenticated with ibm cloud.
+StorageOperatorCloudCreds is a reference to a secret containing ibm cloud
+credentials for storage operator to get authenticated with ibm cloud.
|
-###PowerVSResourceReference { #hypershift.openshift.io/v1alpha1.PowerVSResourceReference }
+###PowerVSResourceReference { #hypershift.openshift.io/v1beta1.PowerVSResourceReference }
(Appears on:
-PowerVSNodePoolPlatform,
-PowerVSPlatformSpec)
+PowerVSNodePoolPlatform,
+PowerVSPlatformSpec)
PowerVSResourceReference is a reference to a specific IBMCloud PowerVS resource by ID, or Name.
@@ -6365,10 +6247,10 @@ string
-###PowerVSVPC { #hypershift.openshift.io/v1alpha1.PowerVSVPC }
+###PowerVSVPC { #hypershift.openshift.io/v1beta1.PowerVSVPC }
(Appears on:
-PowerVSPlatformSpec)
+PowerVSPlatformSpec)
PowerVSVPC specifies IBM Cloud PowerVS LoadBalancer configuration for the control
@@ -6436,20 +6318,19 @@ This field is immutable. Once set, It can’t be changed.
-###PublishingStrategyType { #hypershift.openshift.io/v1alpha1.PublishingStrategyType }
+###PublishingStrategyType { #hypershift.openshift.io/v1beta1.PublishingStrategyType }
(Appears on:
-ServicePublishingStrategy)
+ServicePublishingStrategy)
PublishingStrategyType defines publishing strategies for services.
-###Release { #hypershift.openshift.io/v1alpha1.Release }
+###Release { #hypershift.openshift.io/v1beta1.Release }
(Appears on:
-ClusterVersionStatus,
-HostedClusterSpec,
-NodePoolSpec)
+HostedClusterSpec,
+NodePoolSpec)
Release represents the metadata for an OCP release payload image.
@@ -6475,10 +6356,10 @@ string
-###ReplaceUpgrade { #hypershift.openshift.io/v1alpha1.ReplaceUpgrade }
+###ReplaceUpgrade { #hypershift.openshift.io/v1beta1.ReplaceUpgrade }
(Appears on:
-NodePoolManagement)
+NodePoolManagement)
ReplaceUpgrade specifies upgrade behavior that replaces existing nodes
@@ -6496,25 +6377,20 @@ according to a given strategy.
strategy
-
+
UpgradeStrategy
|
Strategy is the node replacement strategy for nodes in the pool.
-
-Value must be one of:
-"OnDelete",
-"RollingUpdate"
-
|
rollingUpdate
-
+
RollingUpdate
@@ -6526,10 +6402,10 @@ creating new nodes and deleting the old ones.
|
-###RollingUpdate { #hypershift.openshift.io/v1alpha1.RollingUpdate }
+###RollingUpdate { #hypershift.openshift.io/v1beta1.RollingUpdate }
(Appears on:
-ReplaceUpgrade)
+ReplaceUpgrade)
RollingUpdate specifies a rolling update strategy which upgrades nodes by
@@ -6595,10 +6471,10 @@ running at any time during the update is at most 130% of desired nodes.
-###RoutePublishingStrategy { #hypershift.openshift.io/v1alpha1.RoutePublishingStrategy }
+###RoutePublishingStrategy { #hypershift.openshift.io/v1beta1.RoutePublishingStrategy }
(Appears on:
-ServicePublishingStrategy)
+ServicePublishingStrategy)
RoutePublishingStrategy specifies options for exposing a service as a Route.
@@ -6625,11 +6501,11 @@ string
-###SecretEncryptionSpec { #hypershift.openshift.io/v1alpha1.SecretEncryptionSpec }
+###SecretEncryptionSpec { #hypershift.openshift.io/v1beta1.SecretEncryptionSpec }
(Appears on:
-HostedClusterSpec,
-HostedControlPlaneSpec)
+HostedClusterSpec,
+HostedControlPlaneSpec)
SecretEncryptionSpec contains metadata about the kubernetes secret encryption strategy being used for the
@@ -6647,25 +6523,20 @@ cluster when applicable.
type
-
+
SecretEncryptionType
|
Type defines the type of kube secret encryption being used
-
-Value must be one of:
-"aescbc",
-"kms"
-
|
kms
-
+
KMSSpec
@@ -6679,7 +6550,7 @@ KMSSpec
|
aescbc
-
+
AESCBCSpec
@@ -6691,10 +6562,10 @@ AESCBCSpec
|
-###SecretEncryptionType { #hypershift.openshift.io/v1alpha1.SecretEncryptionType }
+###SecretEncryptionType { #hypershift.openshift.io/v1beta1.SecretEncryptionType }
(Appears on:
-SecretEncryptionSpec)
+SecretEncryptionSpec)
SecretEncryptionType defines the type of kube secret encryption being used.
@@ -6714,10 +6585,10 @@ AESCBCSpec
-###ServiceNetworkEntry { #hypershift.openshift.io/v1alpha1.ServiceNetworkEntry }
+###ServiceNetworkEntry { #hypershift.openshift.io/v1beta1.ServiceNetworkEntry }
(Appears on:
-ClusterNetworking)
+ClusterNetworking)
ServiceNetworkEntry is a single IP address block for the service network.
@@ -6745,10 +6616,10 @@ github.com/openshift/hypershift/api/util/ipnet.IPNet
-###ServicePublishingStrategy { #hypershift.openshift.io/v1alpha1.ServicePublishingStrategy }
+###ServicePublishingStrategy { #hypershift.openshift.io/v1beta1.ServicePublishingStrategy }
(Appears on:
-ServicePublishingStrategyMapping)
+ServicePublishingStrategyMapping)
ServicePublishingStrategy specfies how to publish a ServiceType.
@@ -6765,7 +6636,7 @@ github.com/openshift/hypershift/api/util/ipnet.IPNet
type
-
+
PublishingStrategyType
@@ -6778,7 +6649,7 @@ PublishingStrategyType
|
nodePort
-
+
NodePortPublishingStrategy
@@ -6791,7 +6662,7 @@ NodePortPublishingStrategy
|
loadBalancer
-
+
LoadBalancerPublishingStrategy
@@ -6804,7 +6675,7 @@ LoadBalancerPublishingStrategy
|
route
-
+
RoutePublishingStrategy
@@ -6815,11 +6686,11 @@ RoutePublishingStrategy
-###ServicePublishingStrategyMapping { #hypershift.openshift.io/v1alpha1.ServicePublishingStrategyMapping }
+###ServicePublishingStrategyMapping { #hypershift.openshift.io/v1beta1.ServicePublishingStrategyMapping }
(Appears on:
-HostedClusterSpec,
-HostedControlPlaneSpec)
+HostedClusterSpec,
+HostedControlPlaneSpec)
ServicePublishingStrategyMapping specifies how individual control plane
@@ -6837,7 +6708,7 @@ services are published from the hosting cluster of a control plane.
|
service
-
+
ServiceType
@@ -6850,7 +6721,7 @@ ServiceType
|
servicePublishingStrategy
-
+
ServicePublishingStrategy
@@ -6861,19 +6732,19 @@ ServicePublishingStrategy
-###ServiceType { #hypershift.openshift.io/v1alpha1.ServiceType }
+###ServiceType { #hypershift.openshift.io/v1beta1.ServiceType }
(Appears on:
-ServicePublishingStrategyMapping)
+ServicePublishingStrategyMapping)
ServiceType defines what control plane services can be exposed from the
management control plane.
-###UnmanagedEtcdSpec { #hypershift.openshift.io/v1alpha1.UnmanagedEtcdSpec }
+###UnmanagedEtcdSpec { #hypershift.openshift.io/v1beta1.UnmanagedEtcdSpec }
(Appears on:
-EtcdSpec)
+EtcdSpec)
UnmanagedEtcdSpec specifies configuration which enables the control plane to
@@ -6905,7 +6776,7 @@ string
|
tls
-
+
EtcdTLSConfig
@@ -6916,10 +6787,10 @@ EtcdTLSConfig
-###UpgradeStrategy { #hypershift.openshift.io/v1alpha1.UpgradeStrategy }
+###UpgradeStrategy { #hypershift.openshift.io/v1beta1.UpgradeStrategy }
(Appears on:
-ReplaceUpgrade)
+ReplaceUpgrade)
UpgradeStrategy is a specific strategy for upgrading nodes in a NodePool.
@@ -6940,10 +6811,10 @@ associated node instances are completed.
|
-###UpgradeType { #hypershift.openshift.io/v1alpha1.UpgradeType }
+###UpgradeType { #hypershift.openshift.io/v1beta1.UpgradeType }
(Appears on:
-NodePoolManagement)
+NodePoolManagement)
UpgradeType is a type of high-level upgrade behavior nodes in a NodePool.
@@ -6965,10 +6836,10 @@ capacity.
-###Volume { #hypershift.openshift.io/v1alpha1.Volume }
+###Volume { #hypershift.openshift.io/v1beta1.Volume }
(Appears on:
-AWSNodePoolPlatform)
+AWSNodePoolPlatform)
Volume specifies the configuration options for node instance storage devices.
diff --git a/go.mod b/go.mod
index acb466ad619..3f3cb03acfc 100644
--- a/go.mod
+++ b/go.mod
@@ -53,6 +53,7 @@ require (
k8s.io/component-base v0.24.2
k8s.io/kube-aggregator v0.20.2
k8s.io/kube-scheduler v0.23.1
+ k8s.io/kubectl v0.23.0-alpha.4
k8s.io/pod-security-admission v0.23.5
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9
kubevirt.io/api v0.0.0-20211117075245-c94ce62baf5a
diff --git a/go.sum b/go.sum
index c05b90fe3ec..d0dda4e395d 100644
--- a/go.sum
+++ b/go.sum
@@ -1493,6 +1493,7 @@ golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@@ -1640,6 +1641,7 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -2000,6 +2002,7 @@ k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg=
k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY=
k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8=
k8s.io/api v0.22.3/go.mod h1:azgiXFiXqiWyLCfI62/eYBOu19rj2LKmIhFPP4+33fs=
+k8s.io/api v0.23.0-alpha.4/go.mod h1:C2RqQ86jH9nM0YFGjLhKlfldBYLnBEb5sn+x50lF2zg=
k8s.io/api v0.23.1/go.mod h1:WfXnOnwSqNtG62Y1CdjoMxh7r7u9QXGCkA1u0na2jgo=
k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8=
k8s.io/api v0.24.0/go.mod h1:5Jl90IUrJHUJYEMANRURMiVvJ0g7Ax7r3R1bqO8zx8I=
@@ -2022,6 +2025,7 @@ k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCF
k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
k8s.io/apimachinery v0.22.3/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
+k8s.io/apimachinery v0.23.0-alpha.4/go.mod h1:oyH3LcOKLLooQH1NlpHlilzkWxqsiHWETyHgssntcXg=
k8s.io/apimachinery v0.23.1/go.mod h1:SADt2Kl8/sttJ62RRsi9MIV4o8f5S3coArm0Iu3fBno=
k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
k8s.io/apimachinery v0.24.0/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
@@ -2038,6 +2042,7 @@ k8s.io/apiserver v0.23.5/go.mod h1:7wvMtGJ42VRxzgVI7jkbKvMbuCbVbgsWFT7RyXiRNTw=
k8s.io/apiserver v0.24.2 h1:orxipm5elPJSkkFNlwH9ClqaKEDJJA3yR2cAAlCnyj4=
k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI=
k8s.io/cli-runtime v0.22.2/go.mod h1:tkm2YeORFpbgQHEK/igqttvPTRIHFRz5kATlw53zlMI=
+k8s.io/cli-runtime v0.23.0-alpha.4/go.mod h1:SZl5LCIkb5GE/MrBNkg9hliYyhWW5/3etfPxVkgm5SU=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
k8s.io/client-go v0.20.2/go.mod h1:kH5brqWqp7HDxUFKoEgiI4v8G1xzbe9giaCenUWJzgE=
k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
@@ -2046,6 +2051,7 @@ k8s.io/client-go v0.20.10/go.mod h1:fFg+aLoasv/R+xiVaWjxeqGFYltzgQcOQzkFaSRfnJ0=
k8s.io/client-go v0.21.3/go.mod h1:+VPhCgTsaFmGILxR/7E1N0S+ryO010QBeNCv5JwRGYU=
k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk=
k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U=
+k8s.io/client-go v0.23.0-alpha.4/go.mod h1:OBGvnY60bm0zXmY4unHcYUHmffR6Smg2AqJ3pzORKYk=
k8s.io/client-go v0.23.1/go.mod h1:6QSI8fEuqD4zgFK0xbdwfB/PthBsIxCJMa3s17WlcO0=
k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4=
k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA=
@@ -2058,6 +2064,7 @@ k8s.io/code-generator v0.20.2/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbW
k8s.io/code-generator v0.21.3/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo=
k8s.io/code-generator v0.22.1/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o=
k8s.io/code-generator v0.22.2/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o=
+k8s.io/code-generator v0.23.0-alpha.4/go.mod h1:alK4pz5+y/zKXOPBnND3TvXOC/iF2oYTBDynHO1+qlI=
k8s.io/code-generator v0.24.0/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w=
k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w=
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
@@ -2068,11 +2075,13 @@ k8s.io/component-base v0.20.10/go.mod h1:ZKOEin1xu68aJzxgzl5DZSp5J1IrjAOPlPN90/t
k8s.io/component-base v0.21.3/go.mod h1:kkuhtfEHeZM6LkX0saqSK8PbdO7A0HigUngmhhrwfGQ=
k8s.io/component-base v0.22.1/go.mod h1:0D+Bl8rrnsPN9v0dyYvkqFfBeAd4u7n77ze+p8CMiPo=
k8s.io/component-base v0.22.2/go.mod h1:5Br2QhI9OTe79p+TzPe9JKNQYvEKbq9rTJDWllunGug=
+k8s.io/component-base v0.23.0-alpha.4/go.mod h1:CD9PHLOKNi/x4tJLxoLaLA2EPkCeiT/1m/8PpPxwp80=
k8s.io/component-base v0.23.1/go.mod h1:6llmap8QtJIXGDd4uIWJhAq0Op8AtQo6bDW2RrNMTeo=
k8s.io/component-base v0.23.5/go.mod h1:c5Nq44KZyt1aLl0IpHX82fhsn84Sb0jjzwjpcA42bY0=
k8s.io/component-base v0.24.2 h1:kwpQdoSfbcH+8MPN4tALtajLDfSfYxBDYlXobNWI6OU=
k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM=
k8s.io/component-helpers v0.22.2/go.mod h1:+N61JAR9aKYSWbnLA88YcFr9K/6ISYvRNybX7QW7Rs8=
+k8s.io/component-helpers v0.23.0-alpha.4/go.mod h1:9JY0wSpT+OAoaW1b975MYpDfj/qM59+FZ4NiNodaj9s=
k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
@@ -2099,14 +2108,18 @@ k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4y
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kube-openapi v0.0.0-20210817084001-7fbd8d59e5b8/go.mod h1:foAE7XkrXQ1Qo2eWsW/iWksptrVdbl6t+vscSdmmGjk=
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU=
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk=
k8s.io/kube-scheduler v0.23.1 h1:YdGM/eE/gp1uUnpK+w2J8PBiOX7xOB5qu857BeWyrFM=
k8s.io/kube-scheduler v0.23.1/go.mod h1:SFPvXnt7KlxTZILrtjH8VNwGDzXcdKKHrv4TkeZdYro=
k8s.io/kubectl v0.22.2/go.mod h1:BApg2j0edxLArCOfO0ievI27EeTQqBDMNU9VQH734iQ=
+k8s.io/kubectl v0.23.0-alpha.4 h1:K10xASq7FwIfOVb8dViiBkVTD1nlQ7G/6gGT5RsFP50=
+k8s.io/kubectl v0.23.0-alpha.4/go.mod h1:jnIoKE99+oHasLCksVCyqdzSaDAQlNhKPMpUVNfkjLI=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/metrics v0.22.2/go.mod h1:GUcsBtpsqQD1tKFS/2wCKu4ZBowwRncLOJH1rgWs3uw=
+k8s.io/metrics v0.23.0-alpha.4/go.mod h1:xR8qXb5BggcPuBgmZUli9cBgIqofIw+Gbi/D3ZdNBic=
k8s.io/pod-security-admission v0.23.5 h1:60MTMOK+/hPDgYzZ2C6zFLGaNzYDiMyy6TuZ/rv9Db8=
k8s.io/pod-security-admission v0.23.5/go.mod h1:aSyWfjev8Zil5DaZBZ+ICAObZmZlRqhnAZHxA9r71UI=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
diff --git a/hack/app-sre/saas_template.yaml b/hack/app-sre/saas_template.yaml
index 0e5123808d2..d5651746131 100644
--- a/hack/app-sre/saas_template.yaml
+++ b/hack/app-sre/saas_template.yaml
@@ -309,6 +309,7 @@ objects:
- --enable-ocp-cluster-monitoring=false
- --enable-ci-debug-output=false
- --private-platform=AWS
+ - --cert-dir=/var/run/secrets/serving-cert
- --oidc-storage-provider-s3-bucket-name=${OIDC_S3_NAME}
- --oidc-storage-provider-s3-region=${OIDC_S3_REGION}
- --oidc-storage-provider-s3-credentials=/etc/oidc-storage-provider-s3-creds/${OIDC_S3_CREDS_SECRET_KEY}
@@ -368,6 +369,8 @@ objects:
securityContext:
runAsUser: 1000
volumeMounts:
+ - mountPath: /var/run/secrets/serving-cert
+ name: serving-cert
- mountPath: /etc/oidc-storage-provider-s3-creds
name: oidc-storage-provider-s3-creds
- mountPath: /etc/provider
@@ -377,6 +380,9 @@ objects:
priorityClassName: hypershift-operator
serviceAccountName: operator
volumes:
+ - name: serving-cert
+ secret:
+ secretName: manager-serving-cert
- name: oidc-storage-provider-s3-creds
secret:
secretName: ${OIDC_S3_CREDS_SECRET}
@@ -19854,10 +19860,22 @@ objects:
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.9.2
+ service.beta.openshift.io/inject-cabundle: "true"
creationTimestamp: null
name: awsendpointservices.hypershift.openshift.io
spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ name: operator
+ namespace: ${NAMESPACE}
+ path: /convert
+ port: 443
+ conversionReviewVersions:
+ - v1beta1
+ - v1alpha1
group: hypershift.openshift.io
names:
kind: AWSEndpointService
@@ -20021,8 +20039,169 @@ objects:
description: EndpointServiceName is the name of the Endpoint Service
created in the management VPC
type: string
+ type: object
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: AWSEndpointService specifies a request for an Endpoint Service
+ in AWS
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource
+ this object represents. Servers may infer this from the endpoint the
+ client submits requests to. Cannot be updated. In CamelCase. More
+ info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AWSEndpointServiceSpec defines the desired state of AWSEndpointService
+ properties:
+ networkLoadBalancerName:
+ description: The name of the NLB for which an Endpoint Service should
+ be configured
+ type: string
+ resourceTags:
+ description: Tags to apply to the EndpointService
+ items:
+ description: AWSResourceTag is a tag to apply to AWS resources
+ created for the cluster.
+ properties:
+ key:
+ description: Key is the key of the tag.
+ maxLength: 128
+ minLength: 1
+ pattern: ^[0-9A-Za-z_.:/=+-@]+$
+ type: string
+ value:
+ description: "Value is the value of the tag. \n Some AWS service
+ do not support empty values. Since tags are added to resources
+ in many services, the length of the tag value must meet
+ the requirements of all services."
+ maxLength: 256
+ minLength: 1
+ pattern: ^[0-9A-Za-z_.:/=+-@]+$
+ type: string
+ required:
+ - key
+ - value
+ type: object
+ type: array
+ subnetIDs:
+ description: SubnetIDs is the list of subnet IDs to which guest
+ nodes can attach
+ items:
+ type: string
+ type: array
required:
- - conditions
+ - networkLoadBalancerName
+ type: object
+ status:
+ description: AWSEndpointServiceStatus defines the observed state of
+ AWSEndpointService
+ properties:
+ conditions:
+ description: "Conditions contains details for the current state
+ of the Endpoint Service request If there is an error processing
+ the request e.g. the NLB doesn't exist, then the Available condition
+ will be false, reason AWSErrorReason, and the error reported in
+ the message. \n Current condition types are: \"Available\""
+ items:
+ description: "Condition contains details for one aspect of the
+ current state of this API Resource. --- This struct is intended
+ for direct use as an array at the field path .status.conditions.
+ \ For example, type FooStatus struct{ // Represents the observations
+ of a foo's current state. // Known .status.conditions.type are:
+ \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
+ // +patchStrategy=merge // +listType=map // +listMapKey=type
+ Conditions []metav1.Condition `json:\"conditions,omitempty\"
+ patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
+ \n // other fields }"
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the last time the condition
+ transitioned from one status to another. This should be
+ when the underlying condition changed. If that is not known,
+ then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: message is a human readable message indicating
+ details about the transition. This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: observedGeneration represents the .metadata.generation
+ that the condition was set based upon. For instance, if
+ .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration
+ is 9, the condition is out of date with respect to the current
+ state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: reason contains a programmatic identifier indicating
+ the reason for the condition's last transition. Producers
+ of specific condition types may define expected values and
+ meanings for this field, and whether the values are considered
+ a guaranteed API. The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False,
+ Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ --- Many .condition.type values are consistent across resources
+ like Available, but because arbitrary conditions can be
+ useful (see .node.status.conditions), the ability to deconflict
+ is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ dnsNames:
+ description: DNSName are the names for the records created in the
+ hypershift private zone
+ items:
+ type: string
+ type: array
+ dnsZoneID:
+ description: DNSZoneID is ID for the hypershift private zone
+ type: string
+ endpointID:
+ description: EndpointID is the ID of the Endpoint created in the
+ guest VPC
+ type: string
+ endpointServiceName:
+ description: EndpointServiceName is the name of the Endpoint Service
+ created in the management VPC
+ type: string
type: object
type: object
served: true
@@ -20039,10 +20218,22 @@ objects:
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.9.2
+ service.beta.openshift.io/inject-cabundle: "true"
creationTimestamp: null
name: hostedclusters.hypershift.openshift.io
spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ name: operator
+ namespace: ${NAMESPACE}
+ path: /convert
+ port: 443
+ conversionReviewVersions:
+ - v1beta1
+ - v1alpha1
group: hypershift.openshift.io
names:
kind: HostedCluster
@@ -20079,6 +20270,8 @@ objects:
jsonPath: .status.conditions[?(@.type=="Available")].message
name: Message
type: string
+ deprecated: true
+ deprecationWarning: v1alpha1 is a deprecated version for HostedCluster
name: v1alpha1
schema:
openAPIV3Schema:
@@ -20164,6 +20357,12 @@ objects:
format: int32
type: integer
type: object
+ channel:
+ description: channel is an identifier for explicitly requesting
+ that a non-default set of updates be applied to this cluster.
+ The default channel will be contain stable updates that are appropriate
+ for production clusters.
+ type: string
clusterID:
description: ClusterID uniquely identifies this cluster. This is
expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
@@ -23255,19 +23454,300 @@ objects:
description: Version is the status of the release version applied
to the HostedCluster.
properties:
+ availableUpdates:
+ description: availableUpdates contains updates recommended for
+ this cluster. Updates which appear in conditionalUpdates but
+ not in availableUpdates may expose this cluster to known issues.
+ This list may be empty if no updates are recommended, if the
+ update service is unavailable, or if an invalid channel has
+ been specified.
+ items:
+ description: Release represents an OpenShift release image
+ and associated metadata.
+ properties:
+ channels:
+ description: channels is the set of Cincinnati channels
+ to which the release currently belongs.
+ items:
+ type: string
+ type: array
+ image:
+ description: image is a container image location that
+ contains the update. When this field is part of spec,
+ image is optional if version is specified and the availableUpdates
+ field contains a matching version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on a
+ release or the metadata returned by the update API and
+ should be displayed as a link in user interfaces. The
+ URL field may not be set for test or nightly releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec,
+ version is optional if image is specified.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ conditionalUpdates:
+ description: conditionalUpdates contains the list of updates
+ that may be recommended for this cluster if it meets specific
+ required conditions. Consumers interested in the set of updates
+ that are actually recommended for this cluster should use
+ availableUpdates. This list may be empty if no updates are
+ recommended, if the update service is unavailable, or if an
+ empty or invalid channel has been specified.
+ items:
+ description: ConditionalUpdate represents an update which
+ is recommended to some clusters on the version the current
+ cluster is reconciling, but which may not be recommended
+ for the current cluster.
+ properties:
+ conditions:
+ description: 'conditions represents the observations of
+ the conditional update''s current status. Known types
+ are: * Evaluating, for whether the cluster-version operator
+ will attempt to evaluate any risks[].matchingRules.
+ * Recommended, for whether the update is recommended
+ for the current cluster.'
+ items:
+ description: "Condition contains details for one aspect
+ of the current state of this API Resource. --- This
+ struct is intended for direct use as an array at the
+ field path .status.conditions. For example, type
+ FooStatus struct{ // Represents the observations of
+ a foo's current state. // Known .status.conditions.type
+ are: \"Available\", \"Progressing\", and \"Degraded\"
+ // +patchMergeKey=type // +patchStrategy=merge //
+ +listType=map // +listMapKey=type Conditions []metav1.Condition
+ `json:\"conditions,omitempty\" patchStrategy:\"merge\"
+ patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
+ \n // other fields }"
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the last time
+ the condition transitioned from one status to
+ another. This should be when the underlying condition
+ changed. If that is not known, then using the
+ time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: message is a human readable message
+ indicating details about the transition. This
+ may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: observedGeneration represents the .metadata.generation
+ that the condition was set based upon. For instance,
+ if .metadata.generation is currently 12, but the
+ .status.conditions[x].observedGeneration is 9,
+ the condition is out of date with respect to the
+ current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: reason contains a programmatic identifier
+ indicating the reason for the condition's last
+ transition. Producers of specific condition types
+ may define expected values and meanings for this
+ field, and whether the values are considered a
+ guaranteed API. The value should be a CamelCase
+ string. This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True,
+ False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in
+ foo.example.com/CamelCase. --- Many .condition.type
+ values are consistent across resources like Available,
+ but because arbitrary conditions can be useful
+ (see .node.status.conditions), the ability to
+ deconflict is important. The regex it matches
+ is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ release:
+ description: release is the target of the update.
+ properties:
+ channels:
+ description: channels is the set of Cincinnati channels
+ to which the release currently belongs.
+ items:
+ type: string
+ type: array
+ image:
+ description: image is a container image location that
+ contains the update. When this field is part of
+ spec, image is optional if version is specified
+ and the availableUpdates field contains a matching
+ version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on
+ a release or the metadata returned by the update
+ API and should be displayed as a link in user interfaces.
+ The URL field may not be set for test or nightly
+ releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec,
+ version is optional if image is specified.
+ type: string
+ type: object
+ risks:
+ description: risks represents the range of issues associated
+ with updating to the target release. The cluster-version
+ operator will evaluate all entries, and only recommend
+ the update if there is at least one entry and all entries
+ recommend the update.
+ items:
+ description: ConditionalUpdateRisk represents a reason
+ and cluster-state for not recommending a conditional
+ update.
+ properties:
+ matchingRules:
+ description: matchingRules is a slice of conditions
+ for deciding which clusters match the risk and
+ which do not. The slice is ordered by decreasing
+ precedence. The cluster-version operator will
+ walk the slice in order, and stop after the first
+ it can successfully evaluate. If no condition
+ can be successfully evaluated, the update will
+ not be recommended.
+ items:
+ description: ClusterCondition is a union of typed
+ cluster conditions. The 'type' property determines
+ which of the type-specific properties are relevant.
+ When evaluated on a cluster, the condition may
+ match, not match, or fail to evaluate.
+ properties:
+ promql:
+ description: promQL represents a cluster condition
+ based on PromQL.
+ properties:
+ promql:
+ description: PromQL is a PromQL query
+ classifying clusters. This query query
+ should return a 1 in the match case
+ and a 0 in the does-not-match case.
+ Queries which return no time series,
+ or which return values besides 0 or
+ 1, are evaluation failures.
+ type: string
+ required:
+ - promql
+ type: object
+ type:
+ description: type represents the cluster-condition
+ type. This defines the members and semantics
+ of any additional properties.
+ enum:
+ - Always
+ - PromQL
+ type: string
+ required:
+ - type
+ type: object
+ minItems: 1
+ type: array
+ x-kubernetes-list-type: atomic
+ message:
+ description: message provides additional information
+ about the risk of updating, in the event that
+ matchingRules match the cluster state. This is
+ only to be consumed by humans. It may contain
+ Line Feed characters (U+000A), which should be
+ rendered as new lines.
+ minLength: 1
+ type: string
+ name:
+ description: name is the CamelCase reason for not
+ recommending a conditional update, in the event
+ that matchingRules match the cluster state.
+ minLength: 1
+ type: string
+ url:
+ description: url contains information about this
+ risk.
+ format: uri
+ minLength: 1
+ type: string
+ required:
+ - matchingRules
+ - message
+ - name
+ - url
+ type: object
+ minItems: 1
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - release
+ - risks
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
desired:
description: desired is the version that the cluster is reconciling
towards. If the cluster is not yet fully initialized desired
will be set with the information available, which may be an
image or a tag.
properties:
+ channels:
+ description: channels is the set of Cincinnati channels
+ to which the release currently belongs.
+ items:
+ type: string
+ type: array
image:
- description: Image is the image pullspec of an OCP release
- payload image.
- pattern: ^(\w+\S+)$
+ description: image is a container image location that contains
+ the update. When this field is part of spec, image is
+ optional if version is specified and the availableUpdates
+ field contains a matching version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on a release
+ or the metadata returned by the update API and should
+ be displayed as a link in user interfaces. The URL field
+ may not be set for test or nightly releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec, version
+ is optional if image is specified.
type: string
- required:
- - image
type: object
history:
description: history contains a list of the most recent versions
@@ -23345,50 +23825,49 @@ objects:
format: int64
type: integer
required:
+ - availableUpdates
- desired
- observedGeneration
type: object
- required:
- - conditions
type: object
type: object
served: true
- storage: true
+ storage: false
subresources:
status: {}
- status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: null
- storedVersions: null
-- apiVersion: apiextensions.k8s.io/v1
- kind: CustomResourceDefinition
- metadata:
- annotations:
- controller-gen.kubebuilder.io/version: v0.9.2
- creationTimestamp: null
- labels:
- cluster.x-k8s.io/v1beta1: v1alpha1
- name: hostedcontrolplanes.hypershift.openshift.io
- spec:
- group: hypershift.openshift.io
- names:
- categories:
- - cluster-api
- kind: HostedControlPlane
- listKind: HostedControlPlaneList
- plural: hostedcontrolplanes
- shortNames:
- - hcp
- - hcps
- singular: hostedcontrolplane
- scope: Namespaced
- versions:
- - name: v1alpha1
+ - additionalPrinterColumns:
+ - description: Version
+ jsonPath: .status.version.history[?(@.state=="Completed")].version
+ name: Version
+ type: string
+ - description: KubeConfig Secret
+ jsonPath: .status.kubeconfig.name
+ name: KubeConfig
+ type: string
+ - description: Progress
+ jsonPath: .status.version.history[?(@.state!="")].state
+ name: Progress
+ type: string
+ - description: Available
+ jsonPath: .status.conditions[?(@.type=="Available")].status
+ name: Available
+ type: string
+ - description: Progressing
+ jsonPath: .status.conditions[?(@.type=="Progressing")].status
+ name: Progressing
+ type: string
+ - description: Message
+ jsonPath: .status.conditions[?(@.type=="Available")].message
+ name: Message
+ type: string
+ name: v1beta1
schema:
openAPIV3Schema:
- description: HostedControlPlane defines the desired state of HostedControlPlane
+ description: HostedCluster is the primary representation of a HyperShift
+ cluster and encapsulates the control plane and common data plane configuration.
+ Creating a HostedCluster results in a fully functional OpenShift control
+ plane with no attached nodes. To support workloads (e.g. pods), a HostedCluster
+ may have one or more associated NodePool resources.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
@@ -23404,11 +23883,12 @@ objects:
metadata:
type: object
spec:
- description: HostedControlPlaneSpec defines the desired state of HostedControlPlane
+ description: Spec is the desired behavior of the HostedCluster.
properties:
additionalTrustBundle:
- description: AdditionalTrustBundle references a ConfigMap containing
- a PEM-encoded X.509 certificate bundle
+ description: AdditionalTrustBundle is a reference to a ConfigMap
+ containing a PEM-encoded X.509 certificate bundle that will be
+ added to the hosted controlplane and nodes
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
@@ -23416,35 +23896,15 @@ objects:
type: string
type: object
x-kubernetes-map-type: atomic
- apiAdvertiseAddress:
- description: deprecated use networking.apiServer.AdvertiseAddress
- APIAdvertiseAddress is the address at which the APIServer listens
- inside a worker.
- type: string
- apiAllowedCIDRBlocks:
- description: deprecated use networking.apiServer.APIAllowedCIDRBlocks
- APIAllowedCIDRBlocks is an allow list of CIDR blocks that can
- access the APIServer If not specified, traffic is allowed from
- all addresses. This depends on underlying support by the cloud
- provider for Service LoadBalancerSourceRanges
- items:
- pattern: ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$
- type: string
- type: array
- apiPort:
- description: deprecated use networking.apiServer.APIPort APIPort
- is the port at which the APIServer listens inside a worker
- format: int32
- type: integer
auditWebhook:
- description: AuditWebhook contains metadata for configuring an audit
- webhook endpoint for a cluster to process cluster audit events.
- It references a secret that contains the webhook information for
- the audit webhook endpoint. It is a secret because if the endpoint
- has MTLS the kubeconfig will contain client keys. This is currently
- only supported in IBM Cloud. The kubeconfig needs to be stored
- in the secret with a secret key name that corresponds to the constant
- AuditWebhookKubeconfigKey.
+ description: "AuditWebhook contains metadata for configuring an
+ audit webhook endpoint for a cluster to process cluster audit
+ events. It references a secret that contains the webhook information
+ for the audit webhook endpoint. It is a secret because if the
+ endpoint has mTLS the kubeconfig will contain client keys. The
+ kubeconfig needs to be stored in the secret with a secret key
+ name that corresponds to the constant AuditWebhookKubeconfigKey.
+ \n This field is currently only supported on the IBMCloud platform."
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
@@ -23485,15 +23945,26 @@ objects:
format: int32
type: integer
type: object
+ channel:
+ description: channel is an identifier for explicitly requesting
+ that a non-default set of updates be applied to this cluster.
+ The default channel will be contain stable updates that are appropriate
+ for production clusters.
+ type: string
clusterID:
- description: ClusterID is the unique id that identifies the cluster
- externally. Making it optional here allows us to keep compatibility
- with previous versions of the control-plane-operator that have
- no knowledge of this field.
+ description: ClusterID uniquely identifies this cluster. This is
+ expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ in hexadecimal values). As with a Kubernetes metadata.uid, this
+ ID uniquely identifies this cluster in space and time. This value
+ identifies the cluster in metrics pushed to telemetry and metrics
+ produced by the control plane operators. If a value is not specified,
+ an ID is generated. After initial creation, the value is immutable.
+ pattern: '[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}'
type: string
configuration:
- description: 'Configuration embeds resources that correspond to
- the openshift configuration API: https://docs.openshift.com/container-platform/4.7/rest_api/config_apis/config-apis-index.html'
+ description: Configuration specifies configuration for individual
+ OCP components in the cluster, represented as embedded resources
+ that correspond to the openshift configuration API.
properties:
apiServer:
description: APIServer holds configuration (like serving certificates,
@@ -23885,23 +24356,6 @@ objects:
type: object
type: array
type: object
- configMapRefs:
- description: "ConfigMapRefs holds references to any configmaps
- referenced by configuration entries. Entries can reference
- the configmaps using local object references. \n Deprecated
- This field is deprecated and will be removed in a future release"
- items:
- description: LocalObjectReference contains enough information
- to let you locate the referenced object inside the same
- namespace.
- properties:
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind, uid?'
- type: string
- type: object
- x-kubernetes-map-type: atomic
- type: array
featureGate:
description: FeatureGate holds cluster-wide information about
feature gates.
@@ -24288,14 +24742,6 @@ objects:
type: object
type: array
type: object
- items:
- description: "Items embeds the serialized configuration resources.
- \n Deprecated This field is deprecated and will be removed
- in a future release"
- items:
- type: object
- type: array
- x-kubernetes-preserve-unknown-fields: true
network:
description: 'Network holds cluster-wide information about the
network. It is used to configure the desired network configuration,
@@ -25213,23 +25659,6 @@ objects:
- NoScoring
type: string
type: object
- secretRefs:
- description: "SecretRefs holds references to any secrets referenced
- by configuration entries. Entries can reference the secrets
- using local object references. \n Deprecated This field is
- deprecated and will be removed in a future release"
- items:
- description: LocalObjectReference contains enough information
- to let you locate the referenced object inside the same
- namespace.
- properties:
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind, uid?'
- type: string
- type: object
- x-kubernetes-map-type: atomic
- type: array
type: object
controllerAvailabilityPolicy:
default: SingleReplica
@@ -25238,7 +25667,7 @@ objects:
value is SingleReplica.
type: string
dns:
- description: DNSSpec specifies the DNS configuration in the cluster.
+ description: DNS specifies DNS configuration for the cluster.
properties:
baseDomain:
description: BaseDomain is the base domain of the cluster.
@@ -25256,8 +25685,16 @@ objects:
- baseDomain
type: object
etcd:
- description: Etcd contains metadata about the etcd cluster the hypershift
- managed Openshift control plane components use to store data.
+ default:
+ managed:
+ storage:
+ persistentVolume:
+ size: 4Gi
+ type: PersistentVolume
+ managementType: Managed
+ description: Etcd specifies configuration for the control plane
+ etcd cluster. The default ManagementType is Managed. Once set,
+ the ManagementType cannot be changed.
properties:
managed:
description: Managed specifies the behavior of an etcd cluster
@@ -25358,12 +25795,14 @@ objects:
- managementType
type: object
fips:
- description: FIPS specifies if the nodes for the cluster will be
- running in FIPS mode
+ description: FIPS indicates whether this cluster's nodes will be
+ running in FIPS mode. If set to true, the control plane's ignition
+ server will be configured to expect that nodes joining the cluster
+ will be FIPS-enabled.
type: boolean
imageContentSources:
- description: ImageContentSources lists sources/repositories for
- the release-image content.
+ description: ImageContentSources specifies image mirrors that can
+ be used by cluster nodes to pull content.
items:
description: ImageContentSource specifies image mirrors that can
be used by cluster nodes to pull content. For cluster workloads,
@@ -25386,6 +25825,9 @@ objects:
type: object
type: array
infraID:
+ description: InfraID is a globally unique identifier for the cluster.
+ This identifier will be used to associate various cloud resources
+ with the HostedCluster and its associated NodePools.
type: string
infrastructureAvailabilityPolicy:
default: SingleReplica
@@ -25394,35 +25836,16 @@ objects:
nodes. The default value is SingleReplica.
type: string
issuerURL:
- type: string
- kubeconfig:
- description: KubeConfig specifies the name and key for the kubeconfig
- secret
- properties:
- key:
- type: string
- name:
- type: string
- required:
- - key
- - name
- type: object
- machineCIDR:
- description: deprecated use networking.MachineNetwork
- type: string
- networkType:
- description: deprecated use networking.NetworkType NetworkType specifies
- the SDN provider used for cluster networking.
- enum:
- - OpenShiftSDN
- - Calico
- - OVNKubernetes
- - Other
+ default: https://kubernetes.default.svc
+ description: IssuerURL is an OIDC issuer URL which is used as the
+ issuer in all ServiceAccount tokens generated by the control plane
+ API server. The default value is kubernetes.default.svc, which
+ only works for in-cluster validation.
+ format: uri
type: string
networking:
description: Networking specifies network configuration for the
- cluster. Temporarily optional for backward compatibility, required
- in future releases.
+ cluster.
properties:
apiServer:
description: APIServer contains advanced network settings for
@@ -25453,9 +25876,8 @@ objects:
type: integer
type: object
clusterNetwork:
- description: 'ClusterNetwork is the list of IP address pools
- for pods. TODO: make this required in the next version of
- the API'
+ description: ClusterNetwork is the list of IP address pools
+ for pods.
items:
description: ClusterNetworkEntry is a single IP address block
for pod IP blocks. IP blocks are allocated with size 2^HostSubnetLength.
@@ -25474,14 +25896,9 @@ objects:
- cidr
type: object
type: array
- machineCIDR:
- description: Deprecated This field will be removed in the next
- API release. Use MachineNetwork instead
- type: string
machineNetwork:
- description: 'MachineNetwork is the list of IP address pools
- for machines. TODO: make this required in the next version
- of the API'
+ description: MachineNetwork is the list of IP address pools
+ for machines.
items:
description: MachineNetworkEntry is a single IP address block
for node IP blocks.
@@ -25504,18 +25921,9 @@ objects:
- OVNKubernetes
- Other
type: string
- podCIDR:
- description: Deprecated This field will be removed in the next
- API release. Use ClusterNetwork instead
- type: string
- serviceCIDR:
- description: Deprecated This field will be removed in the next
- API release. Use ServiceNetwork instead
- type: string
serviceNetwork:
description: 'ServiceNetwork is the list of IP address pools
- for services. NOTE: currently only one entry is supported.
- TODO: make this required in the next version of the API'
+ for services. NOTE: currently only one entry is supported.'
items:
description: ServiceNetworkEntry is a single IP address block
for the service network.
@@ -25529,6 +25937,7 @@ objects:
type: object
type: array
required:
+ - clusterNetwork
- networkType
type: object
nodeSelector:
@@ -25556,9 +25965,8 @@ objects:
reconciliation is paused on the resource until the field is removed.'
type: string
platform:
- description: PlatformSpec specifies the underlying infrastructure
- provider for the cluster and is used to configure platform specific
- behavior.
+ description: Platform specifies the underlying infrastructure provider
+ for the cluster and is used to configure platform specific behavior.
properties:
agent:
description: Agent specifies configuration for agent-based installations.
@@ -25625,16 +26033,6 @@ objects:
required:
- vpc
type: object
- controlPlaneOperatorCreds:
- description: Deprecated This field will be removed in the
- next API release. Use RolesRef instead.
- properties:
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind, uid?'
- type: string
- type: object
- x-kubernetes-map-type: atomic
endpointAccess:
default: Public
description: EndpointAccess specifies the publishing scope
@@ -25644,26 +26042,6 @@ objects:
- PublicAndPrivate
- Private
type: string
- kubeCloudControllerCreds:
- description: Deprecated This field will be removed in the
- next API release. Use RolesRef instead.
- properties:
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind, uid?'
- type: string
- type: object
- x-kubernetes-map-type: atomic
- nodePoolManagementCreds:
- description: Deprecated This field will be removed in the
- next API release. Use RolesRef instead.
- properties:
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind, uid?'
- type: string
- type: object
- x-kubernetes-map-type: atomic
region:
description: Region is the AWS region in which the cluster
resides. This configures the OCP control plane cloud integrations,
@@ -25703,23 +26081,6 @@ objects:
type: object
maxItems: 25
type: array
- roles:
- description: Deprecated This field will be removed in the
- next API release. Use RolesRef instead.
- items:
- properties:
- arn:
- type: string
- name:
- type: string
- namespace:
- type: string
- required:
- - arn
- - name
- - namespace
- type: object
- type: array
rolesRef:
description: RolesRef contains references to various AWS
IAM roles required to enable integrations such as OIDC.
@@ -25906,9 +26267,6 @@ objects:
type: object
type: array
required:
- - controlPlaneOperatorCreds
- - kubeCloudControllerCreds
- - nodePoolManagementCreds
- region
- rolesRef
type: object
@@ -25977,19 +26335,6 @@ objects:
Once set, It can't be changed.
pattern: '^crn:'
type: string
- controlPlaneOperatorCreds:
- description: "ControlPlaneOperatorCreds is a reference to
- a secret containing cloud credentials with permissions
- matching the control-plane-operator policy. This field
- is immutable. Once set, It can't be changed. \n TODO(dan):
- document the \"control plane operator policy\""
- properties:
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind, uid?'
- type: string
- type: object
- x-kubernetes-map-type: atomic
ingressOperatorCloudCreds:
description: IngressOperatorCloudCreds is a reference to
a secret containing ibm cloud credentials for ingress
@@ -26050,6 +26395,17 @@ objects:
VS service instance. https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server
\n This field is immutable. Once set, It can't be changed."
type: string
+ storageOperatorCloudCreds:
+ description: StorageOperatorCloudCreds is a reference to
+ a secret containing ibm cloud credentials for storage
+ operator to get authenticated with ibm cloud.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
subnet:
description: Subnet is the subnet to use for control plane
cloud resources. This field is immutable. Once set, It
@@ -26099,13 +26455,13 @@ objects:
required:
- accountID
- cisInstanceCRN
- - controlPlaneOperatorCreds
- ingressOperatorCloudCreds
- kubeCloudControllerCreds
- nodePoolManagementCreds
- region
- resourceGroup
- serviceInstanceID
+ - storageOperatorCloudCreds
- subnet
- vpc
- zone
@@ -26125,12 +26481,11 @@ objects:
required:
- type
type: object
- podCIDR:
- description: deprecated use networking.ClusterNetwork
- type: string
pullSecret:
- description: LocalObjectReference contains enough information to
- let you locate the referenced object inside the same namespace.
+ description: PullSecret references a pull secret to be injected
+ into the container runtime of all cluster nodes. The secret must
+ have a key named ".dockerconfigjson" whose value is the pull secret
+ JSON.
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
@@ -26138,11 +26493,23 @@ objects:
type: string
type: object
x-kubernetes-map-type: atomic
- releaseImage:
- type: string
+ release:
+ description: "Release specifies the desired OCP release payload
+ for the hosted cluster. \n Updating this field will trigger a
+ rollout of the control plane. The behavior of the rollout will
+ be driven by the ControllerAvailabilityPolicy and InfrastructureAvailabilityPolicy."
+ properties:
+ image:
+ description: Image is the image pullspec of an OCP release payload
+ image.
+ pattern: ^(\w+\S+)$
+ type: string
+ required:
+ - image
+ type: object
secretEncryption:
- description: SecretEncryption contains metadata about the kubernetes
- secret encryption strategy being used for the cluster when applicable.
+ description: SecretEncryption specifies a Kubernetes secret encryption
+ strategy for the control plane.
properties:
aescbc:
description: AESCBC defines metadata about the AESCBC secret
@@ -26351,7 +26718,8 @@ objects:
containing the private key used by the service account token issuer.
The secret is expected to contain a single key named "key". If
not specified, a service account signing key will be generated
- automatically for the cluster.
+ automatically for the cluster. When specifying a service account
+ signing key, an IssuerURL must also be specified.
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
@@ -26359,12 +26727,11 @@ objects:
type: string
type: object
x-kubernetes-map-type: atomic
- serviceCIDR:
- description: deprecated use networking.ServiceNetwork
- type: string
services:
- description: Services defines metadata about how control plane services
- are published in the management cluster.
+ description: "Services specifies how individual control plane services
+ are published from the hosting cluster of the control plane. \n
+ If a given service is not present in this list, it will be exposed
+ publicly by default."
items:
description: ServicePublishingStrategyMapping specifies how individual
control plane services are published from the hosting cluster
@@ -26428,7 +26795,6 @@ objects:
- NodePort
- Route
- None
- - S3
type: string
required:
- type
@@ -26439,8 +26805,9 @@ objects:
type: object
type: array
sshKey:
- description: LocalObjectReference contains enough information to
- let you locate the referenced object inside the same namespace.
+ description: SSHKey references an SSH key to be injected into all
+ cluster node sshd servers. The secret must have a single key "id_rsa.pub"
+ whose value is the public part of an SSH key.
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
@@ -26449,24 +26816,19 @@ objects:
type: object
x-kubernetes-map-type: atomic
required:
- - dns
- - etcd
- - infraID
- - issuerURL
+ - networking
- platform
- pullSecret
- - releaseImage
+ - release
- services
- sshKey
type: object
status:
- description: HostedControlPlaneStatus defines the observed state of
- HostedControlPlane
+ description: Status is the latest observed status of the HostedCluster.
properties:
conditions:
- description: 'Condition contains details for one aspect of the current
- state of the HostedControlPlane. Current condition types are:
- "Available"'
+ description: Conditions represents the latest available observations
+ of a control plane's current state.
items:
description: "Condition contains details for one aspect of the
current state of this API Resource. --- This struct is intended
@@ -26538,8 +26900,8 @@ objects:
type: array
controlPlaneEndpoint:
description: ControlPlaneEndpoint contains the endpoint information
- by which external clients can access the control plane. This
- is populated after the infrastructure is ready.
+ by which external clients can access the control plane. This is
+ populated after the infrastructure is ready.
properties:
host:
description: Host is the hostname on which the API server is
@@ -26553,34 +26915,24 @@ objects:
- host
- port
type: object
- externalManagedControlPlane:
- default: true
- description: ExternalManagedControlPlane indicates to cluster-api
- that the control plane is managed by an external service. https://github.com/kubernetes-sigs/cluster-api/blob/65e5385bffd71bf4aad3cf34a537f11b217c7fab/controllers/machine_controller.go#L468
- type: boolean
- initialized:
- default: false
- description: Initialized denotes whether or not the control plane
- has provided a kubeadm-config. Once this condition is marked true,
- its value is never changed. See the Ready condition for an indication
- of the current readiness of the cluster's control plane. This
- satisfies CAPI contract https://github.com/kubernetes-sigs/cluster-api/blob/cd3a694deac89d5ebeb888307deaa61487207aa0/controllers/cluster_controller_phases.go#L238-L252
- type: boolean
- kubeConfig:
- description: KubeConfig is a reference to the secret containing
- the default kubeconfig for this control plane.
+ ignitionEndpoint:
+ description: IgnitionEndpoint is the endpoint injected in the ign
+ config userdata. It exposes the config for instances to become
+ kubernetes nodes.
+ type: string
+ kubeadminPassword:
+ description: KubeadminPassword is a reference to the secret that
+ contains the initial kubeadmin user password for the guest cluster.
properties:
- key:
- type: string
name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
- required:
- - key
- - name
type: object
- kubeadminPassword:
- description: KubeadminPassword is a reference to the secret containing
- the initial kubeadmin password for the guest cluster.
+ x-kubernetes-map-type: atomic
+ kubeconfig:
+ description: KubeConfig is a reference to the secret containing
+ the default kubeconfig for the cluster.
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
@@ -26588,11 +26940,6 @@ objects:
type: string
type: object
x-kubernetes-map-type: atomic
- lastReleaseImageTransitionTime:
- description: lastReleaseImageTransitionTime is the time of the last
- update to the current releaseImage property.
- format: date-time
- type: string
oauthCallbackURLTemplate:
description: OAuthCallbackURLTemplate contains a template for the
URL to use as a callback for identity providers. The [identity-provider-name]
@@ -26600,109 +26947,8396 @@ objects:
defined on the HostedCluster. This is populated after the infrastructure
is ready.
type: string
- ready:
- default: false
- description: Ready denotes that the HostedControlPlane API Server
- is ready to receive requests This satisfies CAPI contract https://github.com/kubernetes-sigs/cluster-api/blob/cd3a694deac89d5ebeb888307deaa61487207aa0/controllers/cluster_controller_phases.go#L226-L230
- type: boolean
- releaseImage:
- description: ReleaseImage is the release image applied to the hosted
- control plane.
- type: string
version:
- description: Version is the semantic version of the release applied
- by the hosted control plane operator
- type: string
- required:
- - conditions
- - initialized
- - ready
- type: object
- type: object
- served: true
- storage: true
- subresources:
- status: {}
- status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: null
- storedVersions: null
-- apiVersion: apiextensions.k8s.io/v1
- kind: CustomResourceDefinition
- metadata:
- annotations:
- controller-gen.kubebuilder.io/version: v0.9.2
- creationTimestamp: null
- name: nodepools.hypershift.openshift.io
- spec:
- group: hypershift.openshift.io
- names:
- kind: NodePool
- listKind: NodePoolList
- plural: nodepools
- shortNames:
- - np
- - nps
- singular: nodepool
- scope: Namespaced
- versions:
- - additionalPrinterColumns:
- - description: Cluster
- jsonPath: .spec.clusterName
- name: Cluster
- type: string
- - description: Desired Nodes
- jsonPath: .spec.replicas
- name: Desired Nodes
- type: integer
- - description: Available Nodes
- jsonPath: .status.replicas
- name: Current Nodes
- type: integer
- - description: Autoscaling Enabled
- jsonPath: .status.conditions[?(@.type=="AutoscalingEnabled")].status
- name: Autoscaling
- type: string
- - description: Node Autorepair Enabled
- jsonPath: .status.conditions[?(@.type=="AutorepairEnabled")].status
- name: Autorepair
- type: string
- - description: Current version
- jsonPath: .status.version
- name: Version
- type: string
- - description: UpdatingVersion in progress
- jsonPath: .status.conditions[?(@.type=="UpdatingVersion")].status
- name: UpdatingVersion
- type: string
- - description: UpdatingConfig in progress
- jsonPath: .status.conditions[?(@.type=="UpdatingConfig")].status
- name: UpdatingConfig
- type: string
- - description: Message
- jsonPath: .status.conditions[?(@.type=="Ready")].message
- name: Message
- type: string
- name: v1alpha1
- schema:
- openAPIV3Schema:
- description: NodePool is a scalable set of worker nodes attached to a HostedCluster.
- NodePool machine architectures are uniform within a given pool, and are
- independent of the control plane’s underlying machine architecture.
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource
- this object represents. Servers may infer this from the endpoint the
- client submits requests to. Cannot be updated. In CamelCase. More
- info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
+ description: Version is the status of the release version applied
+ to the HostedCluster.
+ properties:
+ availableUpdates:
+ description: availableUpdates contains updates recommended for
+ this cluster. Updates which appear in conditionalUpdates but
+ not in availableUpdates may expose this cluster to known issues.
+ This list may be empty if no updates are recommended, if the
+ update service is unavailable, or if an invalid channel has
+ been specified.
+ items:
+ description: Release represents an OpenShift release image
+ and associated metadata.
+ properties:
+ channels:
+ description: channels is the set of Cincinnati channels
+ to which the release currently belongs.
+ items:
+ type: string
+ type: array
+ image:
+ description: image is a container image location that
+ contains the update. When this field is part of spec,
+ image is optional if version is specified and the availableUpdates
+ field contains a matching version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on a
+ release or the metadata returned by the update API and
+ should be displayed as a link in user interfaces. The
+ URL field may not be set for test or nightly releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec,
+ version is optional if image is specified.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ conditionalUpdates:
+ description: conditionalUpdates contains the list of updates
+ that may be recommended for this cluster if it meets specific
+ required conditions. Consumers interested in the set of updates
+ that are actually recommended for this cluster should use
+ availableUpdates. This list may be empty if no updates are
+ recommended, if the update service is unavailable, or if an
+ empty or invalid channel has been specified.
+ items:
+ description: ConditionalUpdate represents an update which
+ is recommended to some clusters on the version the current
+ cluster is reconciling, but which may not be recommended
+ for the current cluster.
+ properties:
+ conditions:
+ description: 'conditions represents the observations of
+ the conditional update''s current status. Known types
+ are: * Evaluating, for whether the cluster-version operator
+ will attempt to evaluate any risks[].matchingRules.
+ * Recommended, for whether the update is recommended
+ for the current cluster.'
+ items:
+ description: "Condition contains details for one aspect
+ of the current state of this API Resource. --- This
+ struct is intended for direct use as an array at the
+ field path .status.conditions. For example, type
+ FooStatus struct{ // Represents the observations of
+ a foo's current state. // Known .status.conditions.type
+ are: \"Available\", \"Progressing\", and \"Degraded\"
+ // +patchMergeKey=type // +patchStrategy=merge //
+ +listType=map // +listMapKey=type Conditions []metav1.Condition
+ `json:\"conditions,omitempty\" patchStrategy:\"merge\"
+ patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
+ \n // other fields }"
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the last time
+ the condition transitioned from one status to
+ another. This should be when the underlying condition
+ changed. If that is not known, then using the
+ time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: message is a human readable message
+ indicating details about the transition. This
+ may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: observedGeneration represents the .metadata.generation
+ that the condition was set based upon. For instance,
+ if .metadata.generation is currently 12, but the
+ .status.conditions[x].observedGeneration is 9,
+ the condition is out of date with respect to the
+ current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: reason contains a programmatic identifier
+ indicating the reason for the condition's last
+ transition. Producers of specific condition types
+ may define expected values and meanings for this
+ field, and whether the values are considered a
+ guaranteed API. The value should be a CamelCase
+ string. This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True,
+ False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in
+ foo.example.com/CamelCase. --- Many .condition.type
+ values are consistent across resources like Available,
+ but because arbitrary conditions can be useful
+ (see .node.status.conditions), the ability to
+ deconflict is important. The regex it matches
+ is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ release:
+ description: release is the target of the update.
+ properties:
+ channels:
+ description: channels is the set of Cincinnati channels
+ to which the release currently belongs.
+ items:
+ type: string
+ type: array
+ image:
+ description: image is a container image location that
+ contains the update. When this field is part of
+ spec, image is optional if version is specified
+ and the availableUpdates field contains a matching
+ version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on
+ a release or the metadata returned by the update
+ API and should be displayed as a link in user interfaces.
+ The URL field may not be set for test or nightly
+ releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec,
+ version is optional if image is specified.
+ type: string
+ type: object
+ risks:
+ description: risks represents the range of issues associated
+ with updating to the target release. The cluster-version
+ operator will evaluate all entries, and only recommend
+ the update if there is at least one entry and all entries
+ recommend the update.
+ items:
+ description: ConditionalUpdateRisk represents a reason
+ and cluster-state for not recommending a conditional
+ update.
+ properties:
+ matchingRules:
+ description: matchingRules is a slice of conditions
+ for deciding which clusters match the risk and
+ which do not. The slice is ordered by decreasing
+ precedence. The cluster-version operator will
+ walk the slice in order, and stop after the first
+ it can successfully evaluate. If no condition
+ can be successfully evaluated, the update will
+ not be recommended.
+ items:
+ description: ClusterCondition is a union of typed
+ cluster conditions. The 'type' property determines
+ which of the type-specific properties are relevant.
+ When evaluated on a cluster, the condition may
+ match, not match, or fail to evaluate.
+ properties:
+ promql:
+ description: promQL represents a cluster condition
+ based on PromQL.
+ properties:
+ promql:
+ description: PromQL is a PromQL query
+ classifying clusters. This query query
+ should return a 1 in the match case
+ and a 0 in the does-not-match case.
+ Queries which return no time series,
+ or which return values besides 0 or
+ 1, are evaluation failures.
+ type: string
+ required:
+ - promql
+ type: object
+ type:
+ description: type represents the cluster-condition
+ type. This defines the members and semantics
+ of any additional properties.
+ enum:
+ - Always
+ - PromQL
+ type: string
+ required:
+ - type
+ type: object
+ minItems: 1
+ type: array
+ x-kubernetes-list-type: atomic
+ message:
+ description: message provides additional information
+ about the risk of updating, in the event that
+ matchingRules match the cluster state. This is
+ only to be consumed by humans. It may contain
+ Line Feed characters (U+000A), which should be
+ rendered as new lines.
+ minLength: 1
+ type: string
+ name:
+ description: name is the CamelCase reason for not
+ recommending a conditional update, in the event
+ that matchingRules match the cluster state.
+ minLength: 1
+ type: string
+ url:
+ description: url contains information about this
+ risk.
+ format: uri
+ minLength: 1
+ type: string
+ required:
+ - matchingRules
+ - message
+ - name
+ - url
+ type: object
+ minItems: 1
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - release
+ - risks
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ desired:
+ description: desired is the version that the cluster is reconciling
+ towards. If the cluster is not yet fully initialized desired
+ will be set with the information available, which may be an
+ image or a tag.
+ properties:
+ channels:
+ description: channels is the set of Cincinnati channels
+ to which the release currently belongs.
+ items:
+ type: string
+ type: array
+ image:
+ description: image is a container image location that contains
+ the update. When this field is part of spec, image is
+ optional if version is specified and the availableUpdates
+ field contains a matching version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on a release
+ or the metadata returned by the update API and should
+ be displayed as a link in user interfaces. The URL field
+ may not be set for test or nightly releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec, version
+ is optional if image is specified.
+ type: string
+ type: object
+ history:
+ description: history contains a list of the most recent versions
+ applied to the cluster. This value may be empty during cluster
+ startup, and then will be updated when a new update is being
+ applied. The newest update is first in the list and it is
+ ordered by recency. Updates in the history have state Completed
+ if the rollout completed - if an update was failing or halfway
+ applied the state will be Partial. Only a limited amount of
+ update history is preserved.
+ items:
+ description: UpdateHistory is a single attempted update to
+ the cluster.
+ properties:
+ acceptedRisks:
+ description: acceptedRisks records risks which were accepted
+ to initiate the update. For example, it may mention
+ an Upgradeable=False or missing signature that was overridden
+ via desiredUpdate.force, or an update that was initiated
+ despite not being in the availableUpdates set of recommended
+ update targets.
+ type: string
+ completionTime:
+ description: completionTime, if set, is when the update
+ was fully applied. The update that is currently being
+ applied will have a null completion time. Completion
+ time will always be set for entries that are not the
+ current update (usually to the started time of the next
+ update).
+ format: date-time
+ nullable: true
+ type: string
+ image:
+ description: image is a container image location that
+ contains the update. This value is always populated.
+ type: string
+ startedTime:
+ description: startedTime is the time at which the update
+ was started.
+ format: date-time
+ type: string
+ state:
+ description: state reflects whether the update was fully
+ applied. The Partial state indicates the update is not
+ fully applied, while the Completed state indicates the
+ update was successfully rolled out at least once (all
+ parts of the update successfully applied).
+ type: string
+ verified:
+ description: verified indicates whether the provided update
+ was properly verified before it was installed. If this
+ is false the cluster may not be trusted. Verified does
+ not cover upgradeable checks that depend on the cluster
+ state at the time when the update target was accepted.
+ type: boolean
+ version:
+ description: version is a semantic versioning identifying
+ the update version. If the requested image does not
+ define a version, or if a failure occurs retrieving
+ the image, this value may be empty.
+ type: string
+ required:
+ - completionTime
+ - image
+ - startedTime
+ - state
+ - verified
+ type: object
+ type: array
+ observedGeneration:
+ description: observedGeneration reports which version of the
+ spec is being synced. If this value is not equal to metadata.generation,
+ then the desired and conditions fields may represent a previous
+ version.
+ format: int64
+ type: integer
+ required:
+ - availableUpdates
+ - desired
+ - observedGeneration
+ type: object
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: null
+ storedVersions: null
+- apiVersion: apiextensions.k8s.io/v1
+ kind: CustomResourceDefinition
+ metadata:
+ annotations:
+ service.beta.openshift.io/inject-cabundle: "true"
+ creationTimestamp: null
+ labels:
+ cluster.x-k8s.io/v1beta1: v1beta1
+ name: hostedcontrolplanes.hypershift.openshift.io
+ spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ name: operator
+ namespace: ${NAMESPACE}
+ path: /convert
+ port: 443
+ conversionReviewVersions:
+ - v1beta1
+ - v1alpha1
+ group: hypershift.openshift.io
+ names:
+ categories:
+ - cluster-api
+ kind: HostedControlPlane
+ listKind: HostedControlPlaneList
+ plural: hostedcontrolplanes
+ shortNames:
+ - hcp
+ - hcps
+ singular: hostedcontrolplane
+ scope: Namespaced
+ versions:
+ - name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: HostedControlPlane defines the desired state of HostedControlPlane
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource
+ this object represents. Servers may infer this from the endpoint the
+ client submits requests to. Cannot be updated. In CamelCase. More
+ info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: HostedControlPlaneSpec defines the desired state of HostedControlPlane
+ properties:
+ additionalTrustBundle:
+ description: AdditionalTrustBundle references a ConfigMap containing
+ a PEM-encoded X.509 certificate bundle
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ apiAdvertiseAddress:
+ description: deprecated use networking.apiServer.AdvertiseAddress
+ APIAdvertiseAddress is the address at which the APIServer listens
+ inside a worker.
+ type: string
+ apiAllowedCIDRBlocks:
+ description: deprecated use networking.apiServer.APIAllowedCIDRBlocks
+ APIAllowedCIDRBlocks is an allow list of CIDR blocks that can
+ access the APIServer If not specified, traffic is allowed from
+ all addresses. This depends on underlying support by the cloud
+ provider for Service LoadBalancerSourceRanges
+ items:
+ pattern: ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$
+ type: string
+ type: array
+ apiPort:
+ description: deprecated use networking.apiServer.APIPort APIPort
+ is the port at which the APIServer listens inside a worker
+ format: int32
+ type: integer
+ auditWebhook:
+ description: AuditWebhook contains metadata for configuring an audit
+ webhook endpoint for a cluster to process cluster audit events.
+ It references a secret that contains the webhook information for
+ the audit webhook endpoint. It is a secret because if the endpoint
+ has MTLS the kubeconfig will contain client keys. This is currently
+ only supported in IBM Cloud. The kubeconfig needs to be stored
+ in the secret with a secret key name that corresponds to the constant
+ AuditWebhookKubeconfigKey.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ autoscaling:
+ description: Autoscaling specifies auto-scaling behavior that applies
+ to all NodePools associated with the control plane.
+ properties:
+ maxNodeProvisionTime:
+ description: MaxNodeProvisionTime is the maximum time to wait
+ for node provisioning before considering the provisioning
+ to be unsuccessful, expressed as a Go duration string. The
+ default is 15 minutes.
+ pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$
+ type: string
+ maxNodesTotal:
+ description: MaxNodesTotal is the maximum allowable number of
+ nodes across all NodePools for a HostedCluster. The autoscaler
+ will not grow the cluster beyond this number.
+ format: int32
+ minimum: 0
+ type: integer
+ maxPodGracePeriod:
+ description: MaxPodGracePeriod is the maximum seconds to wait
+ for graceful pod termination before scaling down a NodePool.
+ The default is 600 seconds.
+ format: int32
+ minimum: 0
+ type: integer
+ podPriorityThreshold:
+ description: "PodPriorityThreshold enables users to schedule
+ \"best-effort\" pods, which shouldn't trigger autoscaler actions,
+ but only run when there are spare resources available. The
+ default is -10. \n See the following for more details: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-does-cluster-autoscaler-work-with-pod-priority-and-preemption"
+ format: int32
+ type: integer
+ type: object
+ channel:
+ description: channel is an identifier for explicitly requesting
+ that a non-default set of updates be applied to this cluster.
+ The default channel will contain stable updates that are appropriate
+ for production clusters.
+ type: string
+ clusterID:
+ description: ClusterID is the unique id that identifies the cluster
+ externally. Making it optional here allows us to keep compatibility
+ with previous versions of the control-plane-operator that have
+ no knowledge of this field.
+ type: string
+ configuration:
+ description: 'Configuration embeds resources that correspond to
+ the openshift configuration API: https://docs.openshift.com/container-platform/4.7/rest_api/config_apis/config-apis-index.html'
+ properties:
+ apiServer:
+ description: APIServer holds configuration (like serving certificates,
+ client CA and CORS domains) shared by all API servers in the
+ system, among them especially kube-apiserver and openshift-apiserver.
+ properties:
+ additionalCORSAllowedOrigins:
+ description: additionalCORSAllowedOrigins lists additional,
+ user-defined regular expressions describing hosts for
+ which the API server allows access using the CORS headers.
+ This may be needed to access the API and the integrated
+ OAuth server from JavaScript applications. The values
+ are regular expressions that correspond to the Golang
+ regular expression language.
+ items:
+ type: string
+ type: array
+ audit:
+ default:
+ profile: Default
+ description: audit specifies the settings for audit configuration
+ to be applied to all OpenShift-provided API servers in
+ the cluster.
+ properties:
+ customRules:
+ description: customRules specify profiles per group.
+ These profiles take precedence over the top-level profile
+ field if they apply. They are evaluated from top
+ to bottom and the first one that matches, applies.
+ items:
+ description: AuditCustomRule describes a custom rule
+ for an audit profile that takes precedence over
+ the top-level profile.
+ properties:
+ group:
+ description: group is the name of a group that a request
+ user must be a member of in order for this profile
+ to apply.
+ minLength: 1
+ type: string
+ profile:
+ description: "profile specifies the name of the
+ desired audit policy configuration to be deployed
+ to all OpenShift-provided API servers in the
+ cluster. \n The following profiles are provided:
+ - Default: the existing default policy. - WriteRequestBodies:
+ like 'Default', but logs request and response
+ HTTP payloads for write requests (create, update,
+ patch). - AllRequestBodies: like 'WriteRequestBodies',
+ but also logs request and response HTTP payloads
+ for read requests (get, list). - None: no requests
+ are logged at all, not even oauthaccesstokens
+ and oauthauthorizetokens. \n If unset, the 'Default'
+ profile is used as the default."
+ enum:
+ - Default
+ - WriteRequestBodies
+ - AllRequestBodies
+ - None
+ type: string
+ required:
+ - group
+ - profile
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - group
+ x-kubernetes-list-type: map
+ profile:
+ default: Default
+ description: "profile specifies the name of the desired
+ top-level audit profile to be applied to all requests
+ sent to any of the OpenShift-provided API servers
+ in the cluster (kube-apiserver, openshift-apiserver
+ and oauth-apiserver), with the exception of those
+ requests that match one or more of the customRules.
+ \n The following profiles are provided: - Default:
+ default policy which means MetaData level logging
+ with the exception of events (not logged at all),
+ oauthaccesstokens and oauthauthorizetokens (both logged
+ at RequestBody level). - WriteRequestBodies: like
+ 'Default', but logs request and response HTTP payloads
+ for write requests (create, update, patch). - AllRequestBodies:
+ like 'WriteRequestBodies', but also logs request and
+ response HTTP payloads for read requests (get, list).
+ - None: no requests are logged at all, not even oauthaccesstokens
+ and oauthauthorizetokens. \n Warning: It is not recommended
+ to disable audit logging by using the `None` profile
+ unless you are fully aware of the risks of not logging
+ data that can be beneficial when troubleshooting issues.
+ If you disable audit logging and a support situation
+ arises, you might need to enable audit logging and
+ reproduce the issue in order to troubleshoot properly.
+ \n If unset, the 'Default' profile is used as the
+ default."
+ enum:
+ - Default
+ - WriteRequestBodies
+ - AllRequestBodies
+ - None
+ type: string
+ type: object
+ clientCA:
+ description: 'clientCA references a ConfigMap containing
+ a certificate bundle for the signers that will be recognized
+ for incoming client certificates in addition to the operator
+ managed signers. If this is empty, then only operator
+ managed signers are valid. You usually only have to set
+ this if you have your own PKI you wish to honor client
+ certificates from. The ConfigMap must exist in the openshift-config
+ namespace and contain the following required fields: -
+ ConfigMap.Data["ca-bundle.crt"] - CA bundle.'
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ required:
+ - name
+ type: object
+ encryption:
+ description: encryption allows the configuration of encryption
+ of resources at the datastore layer.
+ properties:
+ type:
+ description: "type defines what encryption type should
+ be used to encrypt resources at the datastore layer.
+ When this field is unset (i.e. when it is set to the
+ empty string), identity is implied. The behavior of
+ unset can and will change over time. Even if encryption
+ is enabled by default, the meaning of unset may change
+ to a different encryption type based on changes in
+ best practices. \n When encryption is enabled, all
+ sensitive resources shipped with the platform are
+ encrypted. This list of sensitive resources can and
+ will change over time. The current authoritative
+ list is: \n 1. secrets 2. configmaps 3. routes.route.openshift.io
+ 4. oauthaccesstokens.oauth.openshift.io 5. oauthauthorizetokens.oauth.openshift.io"
+ enum:
+ - ""
+ - identity
+ - aescbc
+ type: string
+ type: object
+ servingCerts:
+ description: servingCert is the TLS cert info for serving
+ secure traffic. If not specified, operator managed certificates
+ will be used for serving secure traffic.
+ properties:
+ namedCertificates:
+ description: namedCertificates references secrets containing
+ the TLS cert info for serving secure traffic to specific
+ hostnames. If no named certificates are provided,
+ or no named certificates match the server name as
+ understood by a client, the defaultServingCertificate
+ will be used.
+ items:
+ description: APIServerNamedServingCert maps a server
+ DNS name, as understood by a client, to a certificate.
+ properties:
+ names:
+ description: names is an optional list of explicit
+ DNS names (leading wildcards allowed) that should
+ use this certificate to serve secure traffic.
+ If no names are provided, the implicit names
+ will be extracted from the certificates. Exact
+ names trump over wildcard names. Explicit names
+ defined here trump over extracted implicit names.
+ items:
+ type: string
+ type: array
+ servingCertificate:
+ description: 'servingCertificate references a
+ kubernetes.io/tls type secret containing the
+ TLS cert info for serving secure traffic. The
+ secret must exist in the openshift-config namespace
+ and contain the following required fields: -
+ Secret.Data["tls.key"] - TLS private key. -
+ Secret.Data["tls.crt"] - TLS certificate.'
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ type: object
+ type: array
+ type: object
+ tlsSecurityProfile:
+ description: "tlsSecurityProfile specifies settings for
+ TLS connections for externally exposed servers. \n If
+ unset, a default (which may change between releases) is
+ chosen. Note that only Old, Intermediate and Custom profiles
+ are currently supported, and the maximum available MinTLSVersions
+ is VersionTLS12."
+ properties:
+ custom:
+ description: "custom is a user-defined TLS security
+ profile. Be extremely careful using a custom profile
+ as invalid configurations can be catastrophic. An
+ example custom profile looks like this: \n ciphers:
+ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305
+ - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256
+ minTLSVersion: TLSv1.1"
+ nullable: true
+ properties:
+ ciphers:
+ description: "ciphers is used to specify the cipher
+ algorithms that are negotiated during the TLS
+ handshake. Operators may remove entries their
+ operands do not support. For example, to use
+ DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA"
+ items:
+ type: string
+ type: array
+ minTLSVersion:
+ description: "minTLSVersion is used to specify the
+ minimal version of the TLS protocol that is negotiated
+ during the TLS handshake. For example, to use
+ TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion:
+ TLSv1.1 \n NOTE: currently the highest minTLSVersion
+ allowed is VersionTLS12"
+ enum:
+ - VersionTLS10
+ - VersionTLS11
+ - VersionTLS12
+ - VersionTLS13
+ type: string
+ type: object
+ intermediate:
+ description: "intermediate is a TLS security profile
+ based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29
+ \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
+ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
+ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256
+ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384
+ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305
+ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384
+ minTLSVersion: TLSv1.2"
+ nullable: true
+ type: object
+ modern:
+ description: "modern is a TLS security profile based
+ on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+ \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
+ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
+ minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported."
+ nullable: true
+ type: object
+ old:
+ description: "old is a TLS security profile based on:
+ \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
+ \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
+ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
+ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256
+ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384
+ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305
+ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384
+ - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256
+ - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA
+ - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384
+ - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA
+ - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256
+ - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256
+ - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA
+ minTLSVersion: TLSv1.0"
+ nullable: true
+ type: object
+ type:
+ description: "type is one of Old, Intermediate, Modern
+ or Custom. Custom provides the ability to specify
+ individual TLS security profile parameters. Old, Intermediate
+ and Modern are TLS security profiles based on: \n
+ https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations
+ \n The profiles are intent based, so they may change
+ over time as new ciphers are developed and existing
+ ciphers are found to be insecure. Depending on precisely
+ which ciphers are available to a process, the list
+ may be reduced. \n Note that the Modern profile is
+ currently not supported because it is not yet well
+ adopted by common software libraries."
+ enum:
+ - Old
+ - Intermediate
+ - Modern
+ - Custom
+ type: string
+ type: object
+ type: object
+ authentication:
+ description: Authentication specifies cluster-wide settings
+ for authentication (like OAuth and webhook token authenticators).
+ properties:
+ oauthMetadata:
+ description: 'oauthMetadata contains the discovery endpoint
+ data for OAuth 2.0 Authorization Server Metadata for an
+ external OAuth server. This discovery document can be
+ viewed from its served location: oc get --raw ''/.well-known/oauth-authorization-server''
+ For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
+ If oauthMetadata.name is non-empty, this value has precedence
+ over any metadata reference stored in status. The key
+ "oauthMetadata" is used to locate the data. If specified
+ and the config map or expected key is not found, no metadata
+ is served. If the specified metadata is not valid, no
+ metadata is served. The namespace for this config map
+ is openshift-config.'
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ required:
+ - name
+ type: object
+ serviceAccountIssuer:
+ description: 'serviceAccountIssuer is the identifier of
+ the bound service account token issuer. The default is
+ https://kubernetes.default.svc WARNING: Updating this
+ field will result in the invalidation of all bound tokens
+ with the previous issuer value. Unless the holder of a
+ bound token has explicit support for a change in issuer,
+ they will not request a new bound token until pod restart
+ or until their existing token exceeds 80% of its duration.'
+ type: string
+ type:
+ description: type identifies the cluster managed, user facing
+ authentication mode in use. Specifically, it manages the
+ component that responds to login attempts. The default
+ is IntegratedOAuth.
+ type: string
+ webhookTokenAuthenticator:
+ description: webhookTokenAuthenticator configures a remote
+ token reviewer. These remote authentication webhooks can
+ be used to verify bearer tokens via the tokenreviews.authentication.k8s.io
+ REST API. This is required to honor bearer tokens that
+ are provisioned by an external authentication service.
+ properties:
+ kubeConfig:
+ description: "kubeConfig references a secret that contains
+ kube config file data which describes how to access
+ the remote webhook service. The namespace for the
+ referenced secret is openshift-config. \n For further
+ details, see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
+ \n The key \"kubeConfig\" is used to locate the data.
+ If the secret or expected key is not found, the webhook
+ is not honored. If the specified kube config data
+ is not valid, the webhook is not honored."
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ required:
+ - name
+ type: object
+ required:
+ - kubeConfig
+ type: object
+ webhookTokenAuthenticators:
+ description: webhookTokenAuthenticators is DEPRECATED, setting
+ it has no effect.
+ items:
+ description: deprecatedWebhookTokenAuthenticator holds
+ the necessary configuration options for a remote token
+ authenticator. It's the same as WebhookTokenAuthenticator
+ but it's missing the 'required' validation on KubeConfig
+ field.
+ properties:
+ kubeConfig:
+ description: 'kubeConfig contains kube config file
+ data which describes how to access the remote webhook
+ service. For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
+ The key "kubeConfig" is used to locate the data.
+ If the secret or expected key is not found, the
+ webhook is not honored. If the specified kube config
+ data is not valid, the webhook is not honored. The
+ namespace for this secret is determined by the point
+ of use.'
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ type: object
+ type: array
+ type: object
+ configMapRefs:
+ description: "ConfigMapRefs holds references to any configmaps
+ referenced by configuration entries. Entries can reference
+ the configmaps using local object references. \n Deprecated
+ This field is deprecated and will be removed in a future release"
+ items:
+ description: LocalObjectReference contains enough information
+ to let you locate the referenced object inside the same
+ namespace.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ featureGate:
+ description: FeatureGate holds cluster-wide information about
+ feature gates.
+ properties:
+ customNoUpgrade:
+ description: customNoUpgrade allows the enabling or disabling
+ of any feature. Turning this feature set on IS NOT SUPPORTED,
+ CANNOT BE UNDONE, and PREVENTS UPGRADES. Because of its
+ nature, this setting cannot be validated. If you have
+ any typos or accidentally apply invalid combinations, your
+ cluster may fail in an unrecoverable way. featureSet
+ must equal "CustomNoUpgrade" to use this field.
+ nullable: true
+ properties:
+ disabled:
+ description: disabled is a list of all feature gates
+ that you want to force off
+ items:
+ type: string
+ type: array
+ enabled:
+ description: enabled is a list of all feature gates
+ that you want to force on
+ items:
+ type: string
+ type: array
+ type: object
+ featureSet:
+ description: featureSet changes the list of features in
+ the cluster. The default is empty. Be very careful adjusting
+ this setting. Turning on or off features may cause irreversible
+ changes in your cluster which cannot be undone.
+ type: string
+ type: object
+ image:
+ description: Image governs policies related to imagestream imports
+ and runtime configuration for external registries. It allows
+ cluster admins to configure which registries OpenShift is
+ allowed to import images from, extra CA trust bundles for
+ external registries, and policies to block or allow registry
+ hostnames. When exposing OpenShift's image registry to the
+ public, this also lets cluster admins specify the external
+ hostname.
+ properties:
+ additionalTrustedCA:
+ description: additionalTrustedCA is a reference to a ConfigMap
+ containing additional CAs that should be trusted during
+ imagestream import, pod image pull, build image pull,
+ and imageregistry pullthrough. The namespace for this
+ config map is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ required:
+ - name
+ type: object
+ allowedRegistriesForImport:
+ description: allowedRegistriesForImport limits the container
+ image registries that normal users may import images from.
+ Set this list to the registries that you trust to contain
+ valid Docker images and that you want applications to
+ be able to import from. Users with permission to create
+ Images or ImageStreamMappings via the API are not affected
+ by this policy - typically only administrators or system
+ integrations will have those permissions.
+ items:
+ description: RegistryLocation contains a location of the
+ registry specified by the registry domain name. The
+ domain name might include wildcards, like '*' or '??'.
+ properties:
+ domainName:
+ description: domainName specifies a domain name for
+ the registry. In case the registry uses a non-standard
+ (80 or 443) port, the port should be included in
+ the domain name as well.
+ type: string
+ insecure:
+ description: insecure indicates whether the registry
+ is secure (https) or insecure (http). By default
+ (if not specified) the registry is assumed as secure.
+ type: boolean
+ type: object
+ type: array
+ externalRegistryHostnames:
+ description: externalRegistryHostnames provides the hostnames
+ for the default external image registry. The external
+ hostname should be set only when the image registry is
+ exposed externally. The first value is used in 'publicDockerImageRepository'
+ field in ImageStreams. The value must be in "hostname[:port]"
+ format.
+ items:
+ type: string
+ type: array
+ registrySources:
+ description: registrySources contains configuration that
+ determines how the container runtime should treat individual
+ registries when accessing images for builds+pods. (e.g.
+ whether or not to allow insecure access). It does not
+ contain configuration for the internal cluster registry.
+ properties:
+ allowedRegistries:
+ description: "allowedRegistries are the only registries
+ permitted for image pull and push actions. All other
+ registries are denied. \n Only one of BlockedRegistries
+ or AllowedRegistries may be set."
+ items:
+ type: string
+ type: array
+ blockedRegistries:
+ description: "blockedRegistries cannot be used for image
+ pull and push actions. All other registries are permitted.
+ \n Only one of BlockedRegistries or AllowedRegistries
+ may be set."
+ items:
+ type: string
+ type: array
+ containerRuntimeSearchRegistries:
+ description: 'containerRuntimeSearchRegistries are registries
+ that will be searched when pulling images that do
+ not have fully qualified domains in their pull specs.
+ Registries will be searched in the order provided
+ in the list. Note: this search list only works with
+ the container runtime, i.e CRI-O. Will NOT work with
+ builds or imagestream imports.'
+ format: hostname
+ items:
+ type: string
+ minItems: 1
+ type: array
+ x-kubernetes-list-type: set
+ insecureRegistries:
+ description: insecureRegistries are registries which
+ do not have valid TLS certificates or only support
+ HTTP connections.
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ ingress:
+ description: Ingress holds cluster-wide information about ingress,
+ including the default ingress domain used for routes.
+ properties:
+ appsDomain:
+ description: appsDomain is an optional domain to use instead
+ of the one specified in the domain field when a Route
+ is created without specifying an explicit host. If appsDomain
+ is nonempty, this value is used to generate default host
+ values for Route. Unlike domain, appsDomain may be modified
+ after installation. This assumes a new ingresscontroller
+ has been set up with a wildcard certificate.
+ type: string
+ componentRoutes:
+ description: "componentRoutes is an optional list of routes
+ that are managed by OpenShift components that a cluster-admin
+ is able to configure the hostname and serving certificate
+ for. The namespace and name of each route in this list
+ should match an existing entry in the status.componentRoutes
+ list. \n To determine the set of configurable Routes,
+ look at namespace and name of entries in the .status.componentRoutes
+ list, where participating operators write the status of
+ configurable routes."
+ items:
+ description: ComponentRouteSpec allows for configuration
+ of a route's hostname and serving certificate.
+ properties:
+ hostname:
+ description: hostname is the hostname that should
+ be used by the route.
+ pattern: ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$
+ type: string
+ name:
+ description: "name is the logical name of the route
+ to customize. \n The namespace and name of this
+ componentRoute must match a corresponding entry
+ in the list of status.componentRoutes if the route
+ is to be customized."
+ maxLength: 256
+ minLength: 1
+ type: string
+ namespace:
+ description: "namespace is the namespace of the route
+ to customize. \n The namespace and name of this
+ componentRoute must match a corresponding entry
+ in the list of status.componentRoutes if the route
+ is to be customized."
+ maxLength: 63
+ minLength: 1
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
+ type: string
+ servingCertKeyPairSecret:
+ description: servingCertKeyPairSecret is a reference
+ to a secret of type `kubernetes.io/tls` in the openshift-config
+ namespace. The serving cert/key pair must match
+ and will be used by the operator to fulfill the
+ intent of serving with this name. If the custom
+ hostname uses the default routing suffix of the
+ cluster, the Secret specification for a serving
+ certificate will not be needed.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ required:
+ - hostname
+ - name
+ - namespace
+ type: object
+ type: array
+ domain:
+ description: "domain is used to generate a default host
+ name for a route when the route's host name is empty.
+ The generated host name will follow this pattern: \"<route-name>.<route-namespace>.<domain>\".
+ \n It is also used as the default wildcard domain suffix
+ for ingress. The default ingresscontroller domain will
+ follow this pattern: \"*.\". \n Once set, changing
+ domain is not currently supported."
+ type: string
+ requiredHSTSPolicies:
+ description: "requiredHSTSPolicies specifies HSTS policies
+ that are required to be set on newly created or updated
+ routes matching the domainPattern/s and namespaceSelector/s
+ that are specified in the policy. Each requiredHSTSPolicy
+ must have at least a domainPattern and a maxAge to validate
+ a route HSTS Policy route annotation, and affect route
+ admission. \n A candidate route is checked for HSTS Policies
+ if it has the HSTS Policy route annotation: \"haproxy.router.openshift.io/hsts_header\"
+ E.g. haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains
+ \n - For each candidate route, if it matches a requiredHSTSPolicy
+ domainPattern and optional namespaceSelector, then the
+ maxAge, preloadPolicy, and includeSubdomainsPolicy must
+ be valid to be admitted. Otherwise, the route is rejected.
+ - The first match, by domainPattern and optional namespaceSelector,
+ in the ordering of the RequiredHSTSPolicies determines
+ the route's admission status. - If the candidate route
+ doesn't match any requiredHSTSPolicy domainPattern and
+ optional namespaceSelector, then it may use any HSTS Policy
+ annotation. \n The HSTS policy configuration may be changed
+ after routes have already been created. An update to a
+ previously admitted route may then fail if the updated
+ route does not conform to the updated HSTS policy configuration.
+ However, changing the HSTS policy configuration will not
+ cause a route that is already admitted to stop working.
+ \n Note that if there are no RequiredHSTSPolicies, any
+ HSTS Policy annotation on the route is valid."
+ items:
+ properties:
+ domainPatterns:
+ description: "domainPatterns is a list of domains
+ for which the desired HSTS annotations are required.
+ If domainPatterns is specified and a route is created
+ with a spec.host matching one of the domains, the
+ route must specify the HSTS Policy components described
+ in the matching RequiredHSTSPolicy. \n The use of
+ wildcards is allowed like this: *.foo.com matches
+ everything under foo.com. foo.com only matches foo.com,
+ so to cover foo.com and everything under it, you
+ must specify *both*."
+ items:
+ type: string
+ minItems: 1
+ type: array
+ includeSubDomainsPolicy:
+ description: 'includeSubDomainsPolicy means the HSTS
+ Policy should apply to any subdomains of the host''s
+ domain name. Thus, for the host bar.foo.com, if
+ includeSubDomainsPolicy was set to RequireIncludeSubDomains:
+ - the host app.bar.foo.com would inherit the HSTS
+ Policy of bar.foo.com - the host bar.foo.com would
+ inherit the HSTS Policy of bar.foo.com - the host
+ foo.com would NOT inherit the HSTS Policy of bar.foo.com
+ - the host def.foo.com would NOT inherit the HSTS
+ Policy of bar.foo.com'
+ enum:
+ - RequireIncludeSubDomains
+ - RequireNoIncludeSubDomains
+ - NoOpinion
+ type: string
+ maxAge:
+ description: maxAge is the delta time range in seconds
+ during which hosts are regarded as HSTS hosts. If
+ set to 0, it negates the effect, and hosts are removed
+ as HSTS hosts. If set to 0 and includeSubdomains
+ is specified, all subdomains of the host are also
+ removed as HSTS hosts. maxAge is a time-to-live
+ value, and if this policy is not refreshed on a
+ client, the HSTS policy will eventually expire on
+ that client.
+ properties:
+ largestMaxAge:
+ description: The largest allowed value (in seconds)
+ of the RequiredHSTSPolicy max-age. This value
+ can be left unspecified, in which case no upper
+ limit is enforced.
+ format: int32
+ maximum: 2147483647
+ minimum: 0
+ type: integer
+ smallestMaxAge:
+ description: The smallest allowed value (in seconds)
+ of the RequiredHSTSPolicy max-age. Setting max-age=0
+ allows the deletion of an existing HSTS header
+ from a host. This is a necessary tool for administrators
+ to quickly correct mistakes. This value can
+ be left unspecified, in which case no lower
+ limit is enforced.
+ format: int32
+ maximum: 2147483647
+ minimum: 0
+ type: integer
+ type: object
+ namespaceSelector:
+ description: namespaceSelector specifies a label selector
+ such that the policy applies only to those routes
+ that are in namespaces with labels that match the
+ selector, and are in one of the DomainPatterns.
+ Defaults to the empty LabelSelector, which matches
+ everything.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: A label selector requirement is
+ a selector that contains values, a key, and
+ an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty. If
+ the operator is Exists or DoesNotExist,
+ the values array must be empty. This array
+ is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is "In",
+ and the values array contains only "value".
+ The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ preloadPolicy:
+ description: preloadPolicy directs the client to include
+ hosts in its host preload list so that it never
+ needs to do an initial load to get the HSTS header
+ (note that this is not defined in RFC 6797 and is
+ therefore client implementation-dependent).
+ enum:
+ - RequirePreload
+ - RequireNoPreload
+ - NoOpinion
+ type: string
+ required:
+ - domainPatterns
+ type: object
+ type: array
+ type: object
+ items:
+ description: "Items embeds the serialized configuration resources.
+ \n Deprecated This field is deprecated and will be removed
+ in a future release"
+ items:
+ type: object
+ type: array
+ x-kubernetes-preserve-unknown-fields: true
+ network:
+ description: 'Network holds cluster-wide information about the
+ network. It is used to configure the desired network configuration,
+ such as: IP address pools for services/pod IPs, network plugin,
+ etc. Please view network.spec for an explanation on what applies
+ when configuring this resource. TODO (csrwng): Add validation
+ here to exclude changes that conflict with networking settings
+ in the HostedCluster.Spec.Networking field.'
+ properties:
+ clusterNetwork:
+ description: IP address pool to use for pod IPs. This field
+ is immutable after installation.
+ items:
+ description: ClusterNetworkEntry is a contiguous block
+ of IP addresses from which pod IPs are allocated.
+ properties:
+ cidr:
+ description: The complete block for pod IPs.
+ type: string
+ hostPrefix:
+ description: The size (prefix) of block to allocate
+ to each node. If this field is not used by the plugin,
+ it can be left unset.
+ format: int32
+ minimum: 0
+ type: integer
+ type: object
+ type: array
+ externalIP:
+ description: externalIP defines configuration for controllers
+ that affect Service.ExternalIP. If nil, then ExternalIP
+ is not allowed to be set.
+ properties:
+ autoAssignCIDRs:
+ description: autoAssignCIDRs is a list of CIDRs from
+ which to automatically assign Service.ExternalIP.
+ These are assigned when the service is of type LoadBalancer.
+ In general, this is only useful for bare-metal clusters.
+ In Openshift 3.x, this was misleadingly called "IngressIPs".
+ Automatically assigned External IPs are not affected
+ by any ExternalIPPolicy rules. Currently, only one
+ entry may be provided.
+ items:
+ type: string
+ type: array
+ policy:
+ description: policy is a set of restrictions applied
+ to the ExternalIP field. If nil or empty, then ExternalIP
+ is not allowed to be set.
+ properties:
+ allowedCIDRs:
+ description: allowedCIDRs is the list of allowed
+ CIDRs.
+ items:
+ type: string
+ type: array
+ rejectedCIDRs:
+ description: rejectedCIDRs is the list of disallowed
+ CIDRs. These take precedence over allowedCIDRs.
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ networkType:
+ description: 'NetworkType is the plugin that is to be deployed
+ (e.g. OpenShiftSDN). This should match a value that the
+ cluster-network-operator understands, or else no networking
+ will be installed. Currently supported values are: - OpenShiftSDN
+ This field is immutable after installation.'
+ type: string
+ serviceNetwork:
+ description: IP address pool for services. Currently, we
+ only support a single entry here. This field is immutable
+ after installation.
+ items:
+ type: string
+ type: array
+ serviceNodePortRange:
+ description: The port range allowed for Services of type
+ NodePort. If not specified, the default of 30000-32767
+ will be used. Such Services without a NodePort specified
+ will have one automatically allocated from this range.
+ This parameter can be updated after the cluster is installed.
+ pattern: ^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: string
+ type: object
+ oauth:
+ description: OAuth holds cluster-wide information about OAuth.
+ It is used to configure the integrated OAuth server. This
+ configuration is only honored when the top level Authentication
+ config has type set to IntegratedOAuth.
+ properties:
+ identityProviders:
+ description: identityProviders is an ordered list of ways
+ for a user to identify themselves. When this list is empty,
+ no identities are provisioned for users.
+ items:
+ description: IdentityProvider provides identities for
+ users authenticating using credentials
+ properties:
+ basicAuth:
+ description: basicAuth contains configuration options
+ for the BasicAuth IdP
+ properties:
+ ca:
+ description: ca is an optional reference to a
+ config map by name containing the PEM-encoded
+ CA bundle. It is used as a trust anchor to validate
+ the TLS certificate presented by the remote
+ server. The key "ca.crt" is used to locate the
+ data. If specified and the config map or expected
+ key is not found, the identity provider is not
+ honored. If the specified ca data is not valid,
+ the identity provider is not honored. If empty,
+ the default system roots are used. The namespace
+ for this config map is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ tlsClientCert:
+ description: tlsClientCert is an optional reference
+ to a secret by name that contains the PEM-encoded
+ TLS client certificate to present when connecting
+ to the server. The key "tls.crt" is used to
+ locate the data. If specified and the secret
+ or expected key is not found, the identity provider
+ is not honored. If the specified certificate
+ data is not valid, the identity provider is
+ not honored. The namespace for this secret is
+ openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ tlsClientKey:
+ description: tlsClientKey is an optional reference
+ to a secret by name that contains the PEM-encoded
+ TLS private key for the client certificate referenced
+ in tlsClientCert. The key "tls.key" is used
+ to locate the data. If specified and the secret
+ or expected key is not found, the identity provider
+ is not honored. If the specified certificate
+ data is not valid, the identity provider is
+ not honored. The namespace for this secret is
+ openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ url:
+ description: url is the remote URL to connect
+ to
+ type: string
+ type: object
+ github:
+ description: github enables user authentication using
+ GitHub credentials
+ properties:
+ ca:
+ description: ca is an optional reference to a
+ config map by name containing the PEM-encoded
+ CA bundle. It is used as a trust anchor to validate
+ the TLS certificate presented by the remote
+ server. The key "ca.crt" is used to locate the
+ data. If specified and the config map or expected
+ key is not found, the identity provider is not
+ honored. If the specified ca data is not valid,
+ the identity provider is not honored. If empty,
+ the default system roots are used. This can
+ only be configured when hostname is set to a
+ non-empty value. The namespace for this config
+ map is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference
+ to the secret by name containing the oauth client
+ secret. The key "clientSecret" is used to locate
+ the data. If the secret or expected key is not
+ found, the identity provider is not honored.
+ The namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ hostname:
+ description: hostname is the optional domain (e.g.
+ "mycompany.com") for use with a hosted instance
+ of GitHub Enterprise. It must match the GitHub
+ Enterprise settings value configured at /setup/settings#hostname.
+ type: string
+ organizations:
+ description: organizations optionally restricts
+ which organizations are allowed to log in
+ items:
+ type: string
+ type: array
+ teams:
+ description: teams optionally restricts which
+ teams are allowed to log in. Format is <org>/<team>.
+ items:
+ type: string
+ type: array
+ type: object
+ gitlab:
+ description: gitlab enables user authentication using
+ GitLab credentials
+ properties:
+ ca:
+ description: ca is an optional reference to a
+ config map by name containing the PEM-encoded
+ CA bundle. It is used as a trust anchor to validate
+ the TLS certificate presented by the remote
+ server. The key "ca.crt" is used to locate the
+ data. If specified and the config map or expected
+ key is not found, the identity provider is not
+ honored. If the specified ca data is not valid,
+ the identity provider is not honored. If empty,
+ the default system roots are used. The namespace
+ for this config map is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference
+ to the secret by name containing the oauth client
+ secret. The key "clientSecret" is used to locate
+ the data. If the secret or expected key is not
+ found, the identity provider is not honored.
+ The namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ url:
+ description: url is the oauth server base URL
+ type: string
+ type: object
+ google:
+ description: google enables user authentication using
+ Google credentials
+ properties:
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference
+ to the secret by name containing the oauth client
+ secret. The key "clientSecret" is used to locate
+ the data. If the secret or expected key is not
+ found, the identity provider is not honored.
+ The namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ hostedDomain:
+ description: hostedDomain is the optional Google
+ App domain (e.g. "mycompany.com") to restrict
+ logins to
+ type: string
+ type: object
+ htpasswd:
+ description: htpasswd enables user authentication
+ using an HTPasswd file to validate credentials
+ properties:
+ fileData:
+ description: fileData is a required reference
+ to a secret by name containing the data to use
+ as the htpasswd file. The key "htpasswd" is
+ used to locate the data. If the secret or expected
+ key is not found, the identity provider is not
+ honored. If the specified htpasswd data is not
+ valid, the identity provider is not honored.
+ The namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ type: object
+ keystone:
+ description: keystone enables user authentication
+ using keystone password credentials
+ properties:
+ ca:
+ description: ca is an optional reference to a
+ config map by name containing the PEM-encoded
+ CA bundle. It is used as a trust anchor to validate
+ the TLS certificate presented by the remote
+ server. The key "ca.crt" is used to locate the
+ data. If specified and the config map or expected
+ key is not found, the identity provider is not
+ honored. If the specified ca data is not valid,
+ the identity provider is not honored. If empty,
+ the default system roots are used. The namespace
+ for this config map is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ domainName:
+ description: domainName is required for keystone
+ v3
+ type: string
+ tlsClientCert:
+ description: tlsClientCert is an optional reference
+ to a secret by name that contains the PEM-encoded
+ TLS client certificate to present when connecting
+ to the server. The key "tls.crt" is used to
+ locate the data. If specified and the secret
+ or expected key is not found, the identity provider
+ is not honored. If the specified certificate
+ data is not valid, the identity provider is
+ not honored. The namespace for this secret is
+ openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ tlsClientKey:
+ description: tlsClientKey is an optional reference
+ to a secret by name that contains the PEM-encoded
+ TLS private key for the client certificate referenced
+ in tlsClientCert. The key "tls.key" is used
+ to locate the data. If specified and the secret
+ or expected key is not found, the identity provider
+ is not honored. If the specified certificate
+ data is not valid, the identity provider is
+ not honored. The namespace for this secret is
+ openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ url:
+ description: url is the remote URL to connect
+ to
+ type: string
+ type: object
+ ldap:
+ description: ldap enables user authentication using
+ LDAP credentials
+ properties:
+ attributes:
+ description: attributes maps LDAP attributes to
+ identities
+ properties:
+ email:
+ description: email is the list of attributes
+ whose values should be used as the email
+ address. Optional. If unspecified, no email
+ is set for the identity
+ items:
+ type: string
+ type: array
+ id:
+ description: id is the list of attributes
+ whose values should be used as the user
+ ID. Required. First non-empty attribute
+ is used. At least one attribute is required.
+ If none of the listed attribute have a value,
+ authentication fails. LDAP standard identity
+ attribute is "dn"
+ items:
+ type: string
+ type: array
+ name:
+ description: name is the list of attributes
+ whose values should be used as the display
+ name. Optional. If unspecified, no display
+ name is set for the identity LDAP standard
+ display name attribute is "cn"
+ items:
+ type: string
+ type: array
+ preferredUsername:
+ description: preferredUsername is the list
+ of attributes whose values should be used
+ as the preferred username. LDAP standard
+ login attribute is "uid"
+ items:
+ type: string
+ type: array
+ type: object
+ bindDN:
+ description: bindDN is an optional DN to bind
+ with during the search phase.
+ type: string
+ bindPassword:
+ description: bindPassword is an optional reference
+ to a secret by name containing a password to
+ bind with during the search phase. The key "bindPassword"
+ is used to locate the data. If specified and
+ the secret or expected key is not found, the
+ identity provider is not honored. The namespace
+ for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ ca:
+ description: ca is an optional reference to a
+ config map by name containing the PEM-encoded
+ CA bundle. It is used as a trust anchor to validate
+ the TLS certificate presented by the remote
+ server. The key "ca.crt" is used to locate the
+ data. If specified and the config map or expected
+ key is not found, the identity provider is not
+ honored. If the specified ca data is not valid,
+ the identity provider is not honored. If empty,
+ the default system roots are used. The namespace
+ for this config map is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ insecure:
+ description: 'insecure, if true, indicates the
+ connection should not use TLS WARNING: Should
+ not be set to `true` with the URL scheme "ldaps://"
+ as "ldaps://" URLs always attempt to connect
+ using TLS, even when `insecure` is set to `true`
+ When `true`, "ldap://" URLS connect insecurely.
+ When `false`, "ldap://" URLs are upgraded to
+ a TLS connection using StartTLS as specified
+ in https://tools.ietf.org/html/rfc2830.'
+ type: boolean
+ url:
+ description: 'url is an RFC 2255 URL which specifies
+ the LDAP search parameters to use. The syntax
+ of the URL is: ldap://host:port/basedn?attribute?scope?filter'
+ type: string
+ type: object
+ mappingMethod:
+ description: mappingMethod determines how identities
+ from this provider are mapped to users. Defaults
+ to "claim"
+ type: string
+ name:
+ description: 'name is used to qualify the identities
+ returned by this provider. - It MUST be unique and
+ not shared by any other identity provider used -
+ It MUST be a valid path segment: name cannot equal
+ "." or ".." or contain "/" or "%" or ":" Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName'
+ type: string
+ openID:
+ description: openID enables user authentication using
+ OpenID credentials
+ properties:
+ ca:
+ description: ca is an optional reference to a
+ config map by name containing the PEM-encoded
+ CA bundle. It is used as a trust anchor to validate
+ the TLS certificate presented by the remote
+ server. The key "ca.crt" is used to locate the
+ data. If specified and the config map or expected
+ key is not found, the identity provider is not
+ honored. If the specified ca data is not valid,
+ the identity provider is not honored. If empty,
+ the default system roots are used. The namespace
+ for this config map is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ claims:
+ description: claims mappings
+ properties:
+ email:
+ description: email is the list of claims whose
+ values should be used as the email address.
+ Optional. If unspecified, no email is set
+ for the identity
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ groups:
+ description: groups is the list of claims
+ value of which should be used to synchronize
+ groups from the OIDC provider to OpenShift
+ for the user. If multiple claims are specified,
+ the first one with a non-empty value is
+ used.
+ items:
+ description: OpenIDClaim represents a claim
+ retrieved from an OpenID provider's tokens
+ or userInfo responses
+ minLength: 1
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ description: name is the list of claims whose
+ values should be used as the display name.
+ Optional. If unspecified, no display name
+ is set for the identity
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ preferredUsername:
+ description: preferredUsername is the list
+ of claims whose values should be used as
+ the preferred username. If unspecified,
+ the preferred username is determined from
+ the value of the sub claim
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference
+ to the secret by name containing the oauth client
+ secret. The key "clientSecret" is used to locate
+ the data. If the secret or expected key is not
+ found, the identity provider is not honored.
+ The namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ extraAuthorizeParameters:
+ additionalProperties:
+ type: string
+ description: extraAuthorizeParameters are any
+ custom parameters to add to the authorize request.
+ type: object
+ extraScopes:
+ description: extraScopes are any scopes to request
+ in addition to the standard "openid" scope.
+ items:
+ type: string
+ type: array
+ issuer:
+ description: issuer is the URL that the OpenID
+ Provider asserts as its Issuer Identifier. It
+ must use the https scheme with no query or fragment
+ component.
+ type: string
+ type: object
+ requestHeader:
+ description: requestHeader enables user authentication
+ using request header credentials
+ properties:
+ ca:
+ description: ca is a required reference to a config
+ map by name containing the PEM-encoded CA bundle.
+ It is used as a trust anchor to validate the
+ TLS certificate presented by the remote server.
+ Specifically, it allows verification of incoming
+ requests to prevent header spoofing. The key
+ "ca.crt" is used to locate the data. If the
+ config map or expected key is not found, the
+ identity provider is not honored. If the specified
+ ca data is not valid, the identity provider
+ is not honored. The namespace for this config
+ map is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ challengeURL:
+ description: challengeURL is a URL to redirect
+ unauthenticated /authorize requests to. Unauthenticated
+ requests from OAuth clients which expect WWW-Authenticate
+ challenges will be redirected here. ${url} is
+ replaced with the current URL, escaped to be
+ safe in a query parameter https://www.example.com/sso-login?then=${url}
+ ${query} is replaced with the current query
+ string https://www.example.com/auth-proxy/oauth/authorize?${query}
+ Required when challenge is set to true.
+ type: string
+ clientCommonNames:
+ description: clientCommonNames is an optional
+ list of common names to require a match from.
+ If empty, any client certificate validated against
+ the clientCA bundle is considered authoritative.
+ items:
+ type: string
+ type: array
+ emailHeaders:
+ description: emailHeaders is the set of headers
+ to check for the email address
+ items:
+ type: string
+ type: array
+ headers:
+ description: headers is the set of headers to
+ check for identity information
+ items:
+ type: string
+ type: array
+ loginURL:
+ description: loginURL is a URL to redirect unauthenticated
+ /authorize requests to. Unauthenticated requests
+ from OAuth clients which expect interactive
+ logins will be redirected here ${url} is replaced
+ with the current URL, escaped to be safe in
+ a query parameter https://www.example.com/sso-login?then=${url}
+ ${query} is replaced with the current query
+ string https://www.example.com/auth-proxy/oauth/authorize?${query}
+ Required when login is set to true.
+ type: string
+ nameHeaders:
+ description: nameHeaders is the set of headers
+ to check for the display name
+ items:
+ type: string
+ type: array
+ preferredUsernameHeaders:
+ description: preferredUsernameHeaders is the set
+ of headers to check for the preferred username
+ items:
+ type: string
+ type: array
+ type: object
+ type:
+ description: type identifies the identity provider
+ type for this entry.
+ type: string
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ templates:
+ description: templates allow you to customize pages like
+ the login page.
+ properties:
+ error:
+ description: error is the name of a secret that specifies
+ a go template to use to render error pages during
+ the authentication or grant flow. The key "errors.html"
+ is used to locate the template data. If specified
+ and the secret or expected key is not found, the default
+ error page is used. If the specified template is not
+ valid, the default error page is used. If unspecified,
+ the default error page is used. The namespace for
+ this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ required:
+ - name
+ type: object
+ login:
+ description: login is the name of a secret that specifies
+ a go template to use to render the login page. The
+ key "login.html" is used to locate the template data.
+ If specified and the secret or expected key is not
+ found, the default login page is used. If the specified
+ template is not valid, the default login page is used.
+ If unspecified, the default login page is used. The
+ namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ required:
+ - name
+ type: object
+ providerSelection:
+ description: providerSelection is the name of a secret
+ that specifies a go template to use to render the
+ provider selection page. The key "providers.html"
+ is used to locate the template data. If specified
+ and the secret or expected key is not found, the default
+ provider selection page is used. If the specified
+ template is not valid, the default provider selection
+ page is used. If unspecified, the default provider
+ selection page is used. The namespace for this secret
+ is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ required:
+ - name
+ type: object
+ type: object
+ tokenConfig:
+ description: tokenConfig contains options for authorization
+ and access tokens
+ properties:
+ accessTokenInactivityTimeout:
+ description: "accessTokenInactivityTimeout defines the
+ token inactivity timeout for tokens granted by any
+ client. The value represents the maximum amount of
+ time that can occur between consecutive uses of the
+ token. Tokens become invalid if they are not used
+ within this temporal window. The user will need to
+ acquire a new token to regain access once a token
+ times out. Takes valid time duration string such as
+ \"5m\", \"1.5h\" or \"2h45m\". The minimum allowed
+ value for duration is 300s (5 minutes). If the timeout
+ is configured per client, then that value takes precedence.
+ If the timeout value is not specified and the client
+ does not override the value, then tokens are valid
+ until their lifetime. \n WARNING: existing tokens'
+ timeout will not be affected (lowered) by changing
+ this value"
+ type: string
+ accessTokenInactivityTimeoutSeconds:
+ description: 'accessTokenInactivityTimeoutSeconds -
+ DEPRECATED: setting this field has no effect.'
+ format: int32
+ type: integer
+ accessTokenMaxAgeSeconds:
+ description: accessTokenMaxAgeSeconds defines the maximum
+ age of access tokens
+ format: int32
+ type: integer
+ type: object
+ type: object
+ proxy:
+ description: Proxy holds cluster-wide information on how to
+ configure default proxies for the cluster.
+ properties:
+ httpProxy:
+ description: httpProxy is the URL of the proxy for HTTP
+ requests. Empty means unset and will not result in an
+ env var.
+ type: string
+ httpsProxy:
+ description: httpsProxy is the URL of the proxy for HTTPS
+ requests. Empty means unset and will not result in an
+ env var.
+ type: string
+ noProxy:
+ description: noProxy is a comma-separated list of hostnames
+ and/or CIDRs and/or IPs for which the proxy should not
+ be used. Empty means unset and will not result in an env
+ var.
+ type: string
+ readinessEndpoints:
+ description: readinessEndpoints is a list of endpoints used
+ to verify readiness of the proxy.
+ items:
+ type: string
+ type: array
+ trustedCA:
+ description: "trustedCA is a reference to a ConfigMap containing
+ a CA certificate bundle. The trustedCA field should only
+ be consumed by a proxy validator. The validator is responsible
+ for reading the certificate bundle from the required key
+ \"ca-bundle.crt\", merging it with the system default
+ trust bundle, and writing the merged trust bundle to a
+ ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\"
+ namespace. Clients that expect to make proxy connections
+ must use the trusted-ca-bundle for all HTTPS requests
+ to the proxy, and may use the trusted-ca-bundle for non-proxy
+ HTTPS requests as well. \n The namespace for the ConfigMap
+ referenced by trustedCA is \"openshift-config\". Here
+ is an example ConfigMap (in yaml): \n apiVersion: v1 kind:
+ ConfigMap metadata: name: user-ca-bundle namespace: openshift-config
+ data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom
+ CA certificate bundle. -----END CERTIFICATE-----"
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ required:
+ - name
+ type: object
+ type: object
+ scheduler:
+ description: Scheduler holds cluster-wide config information
+ to run the Kubernetes Scheduler and influence its placement
+ decisions. The canonical name for this config is `cluster`.
+ properties:
+ defaultNodeSelector:
+ description: 'defaultNodeSelector helps set the cluster-wide
+ default node selector to restrict pod placement to specific
+ nodes. This is applied to the pods created in all namespaces
+ and creates an intersection with any existing nodeSelectors
+ already set on a pod, additionally constraining that pod''s
+ selector. For example, defaultNodeSelector: "type=user-node,region=east"
+ would set nodeSelector field in pod spec to "type=user-node,region=east"
+ to all pods created in all namespaces. Namespaces having
+ project-wide node selectors won''t be impacted even if
+ this field is set. This adds an annotation section to
+ the namespace. For example, if a new namespace is created
+ with node-selector=''type=user-node,region=east'', the
+ annotation openshift.io/node-selector: type=user-node,region=east
+ gets added to the project. When the openshift.io/node-selector
+ annotation is set on the project the value is used in
+ preference to the value we are setting for defaultNodeSelector
+ field. For instance, openshift.io/node-selector: "type=user-node,region=west"
+ means that the default of "type=user-node,region=east"
+ set in defaultNodeSelector would not be applied.'
+ type: string
+ mastersSchedulable:
+ description: 'MastersSchedulable allows masters nodes to
+ be schedulable. When this flag is turned on, all the master
+ nodes in the cluster will be made schedulable, so that
+ workload pods can run on them. The default value for this
+ field is false, meaning none of the master nodes are schedulable.
+ Important Note: Once the workload pods start running on
+ the master nodes, extreme care must be taken to ensure
+ that cluster-critical control plane components are not
+ impacted. Please turn on this field after doing due diligence.'
+ type: boolean
+ policy:
+ description: 'DEPRECATED: the scheduler Policy API has been
+ deprecated and will be removed in a future release. policy
+ is a reference to a ConfigMap containing scheduler policy
+ which has user specified predicates and priorities. If
+ this ConfigMap is not available scheduler will default
+ to use DefaultAlgorithmProvider. The namespace for this
+ configmap is openshift-config.'
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ required:
+ - name
+ type: object
+ profile:
+ description: "profile sets which scheduling profile should
+ be set in order to configure scheduling decisions for
+ new pods. \n Valid values are \"LowNodeUtilization\",
+ \"HighNodeUtilization\", \"NoScoring\" Defaults to \"LowNodeUtilization\""
+ enum:
+ - ""
+ - LowNodeUtilization
+ - HighNodeUtilization
+ - NoScoring
+ type: string
+ type: object
+ secretRefs:
+ description: "SecretRefs holds references to any secrets referenced
+ by configuration entries. Entries can reference the secrets
+ using local object references. \n Deprecated This field is
+ deprecated and will be removed in a future release"
+ items:
+ description: LocalObjectReference contains enough information
+ to let you locate the referenced object inside the same
+ namespace.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ type: object
+ controllerAvailabilityPolicy:
+ default: SingleReplica
+ description: ControllerAvailabilityPolicy specifies the availability
+ policy applied to critical control plane components. The default
+ value is SingleReplica.
+ type: string
+ dns:
+ description: DNSSpec specifies the DNS configuration in the cluster.
+ properties:
+ baseDomain:
+ description: BaseDomain is the base domain of the cluster.
+ type: string
+ privateZoneID:
+ description: PrivateZoneID is the Hosted Zone ID where all the
+ DNS records that are only available internally to the cluster
+ exist.
+ type: string
+ publicZoneID:
+ description: PublicZoneID is the Hosted Zone ID where all the
+ DNS records that are publicly accessible to the internet exist.
+ type: string
+ required:
+ - baseDomain
+ type: object
+ etcd:
+ description: Etcd contains metadata about the etcd cluster the hypershift
+ managed Openshift control plane components use to store data.
+ properties:
+ managed:
+ description: Managed specifies the behavior of an etcd cluster
+ managed by HyperShift.
+ properties:
+ storage:
+ description: Storage specifies how etcd data is persisted.
+ properties:
+ persistentVolume:
+ description: PersistentVolume is the configuration for
+ PersistentVolume etcd storage. With this implementation,
+ a PersistentVolume will be allocated for every etcd
+ member (either 1 or 3 depending on the HostedCluster
+ control plane availability configuration).
+ properties:
+ size:
+ anyOf:
+ - type: integer
+ - type: string
+ default: 4Gi
+ description: Size is the minimum size of the data
+ volume for each etcd member.
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ storageClassName:
+ description: "StorageClassName is the StorageClass
+ of the data volume for each etcd member. \n See
+ https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1."
+ type: string
+ type: object
+ restoreSnapshotURL:
+ description: RestoreSnapshotURL allows an optional list
+ of URLs to be provided where an etcd snapshot can
+ be downloaded, for example a pre-signed URL referencing
+ a storage service, one URL per replica. This snapshot
+ will be restored on initial startup, only when the
+ etcd PV is empty.
+ items:
+ type: string
+ type: array
+ type:
+ description: Type is the kind of persistent storage
+ implementation to use for etcd.
+ enum:
+ - PersistentVolume
+ type: string
+ required:
+ - type
+ type: object
+ required:
+ - storage
+ type: object
+ managementType:
+ description: ManagementType defines how the etcd cluster is
+ managed.
+ enum:
+ - Managed
+ - Unmanaged
+ type: string
+ unmanaged:
+ description: Unmanaged specifies configuration which enables
+ the control plane to integrate with an externally managed etcd
+ cluster.
+ properties:
+ endpoint:
+ description: "Endpoint is the full etcd cluster client endpoint
+ URL. For example: \n https://etcd-client:2379 \n If the
+ URL uses an HTTPS scheme, the TLS field is required."
+ pattern: ^https://
+ type: string
+ tls:
+ description: TLS specifies TLS configuration for HTTPS etcd
+ client endpoints.
+ properties:
+ clientSecret:
+ description: "ClientSecret refers to a secret for client
+ mTLS authentication with the etcd cluster. It may
+ have the following key/value pairs: \n etcd-client-ca.crt:
+ Certificate Authority value etcd-client.crt: Client
+ certificate value etcd-client.key: Client certificate
+ key value"
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - clientSecret
+ type: object
+ required:
+ - endpoint
+ - tls
+ type: object
+ required:
+ - managementType
+ type: object
+ fips:
+ description: FIPS specifies if the nodes for the cluster will be
+ running in FIPS mode
+ type: boolean
+ imageContentSources:
+ description: ImageContentSources lists sources/repositories for
+ the release-image content.
+ items:
+ description: ImageContentSource specifies image mirrors that can
+ be used by cluster nodes to pull content. For cluster workloads,
+ if a container image registry host of the pullspec matches Source
+ then one of the Mirrors are substituted as hosts in the pullspec
+ and tried in order to fetch the image.
+ properties:
+ mirrors:
+ description: Mirrors are one or more repositories that may
+ also contain the same images.
+ items:
+ type: string
+ type: array
+ source:
+ description: Source is the repository that users refer to,
+ e.g. in image pull specifications.
+ type: string
+ required:
+ - source
+ type: object
+ type: array
+ infraID:
+ type: string
+ infrastructureAvailabilityPolicy:
+ default: SingleReplica
+ description: InfrastructureAvailabilityPolicy specifies the availability
+ policy applied to infrastructure services which run on cluster
+ nodes. The default value is SingleReplica.
+ type: string
+ issuerURL:
+ description: IssuerURL is an OIDC issuer URL which is used as the
+ issuer in all ServiceAccount tokens generated by the control plane
+ API server. The default value is kubernetes.default.svc, which
+ only works for in-cluster validation.
+ type: string
+ kubeconfig:
+ description: KubeConfig specifies the name and key for the kubeconfig
+ secret
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ machineCIDR:
+ description: deprecated use networking.MachineNetwork
+ type: string
+ networkType:
+ description: deprecated use networking.NetworkType NetworkType specifies
+ the SDN provider used for cluster networking.
+ enum:
+ - OpenShiftSDN
+ - Calico
+ - OVNKubernetes
+ - Other
+ type: string
+ networking:
+ description: Networking specifies network configuration for the
+ cluster. Temporarily optional for backward compatibility, required
+ in future releases.
+ properties:
+ apiServer:
+ description: APIServer contains advanced network settings for
+ the API server that affect how the APIServer is exposed inside
+ a cluster node.
+ properties:
+ advertiseAddress:
+ description: AdvertiseAddress is the address that nodes
+ will use to talk to the API server. This is an address
+ associated with the loopback adapter of each node. If
+ not specified, 172.20.0.1 is used.
+ type: string
+ allowedCIDRBlocks:
+ description: AllowedCIDRBlocks is an allow list of CIDR
+ blocks that can access the APIServer If not specified,
+ traffic is allowed from all addresses. This depends on
+ underlying support by the cloud provider for Service LoadBalancerSourceRanges
+ items:
+ pattern: ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$
+ type: string
+ type: array
+ port:
+ description: Port is the port at which the APIServer is
+ exposed inside a node. Other pods using host networking
+ cannot listen on this port. If not specified, 6443 is
+ used.
+ format: int32
+ type: integer
+ type: object
+ clusterNetwork:
+ description: 'ClusterNetwork is the list of IP address pools
+ for pods. TODO: make this required in the next version of
+ the API'
+ items:
+ description: ClusterNetworkEntry is a single IP address block
+ for pod IP blocks. IP blocks are allocated with size 2^HostSubnetLength.
+ properties:
+ cidr:
+ description: CIDR is the IP block address pool.
+ type: string
+ hostPrefix:
+ description: HostPrefix is the prefix size to allocate
+ to each node from the CIDR. For example, 24 would allocate
+ 2^8=256 addresses to each node. If this field is not
+ used by the plugin, it can be left unset.
+ format: int32
+ type: integer
+ required:
+ - cidr
+ type: object
+ type: array
+ machineCIDR:
+ description: Deprecated This field will be removed in the next
+ API release. Use MachineNetwork instead
+ type: string
+ machineNetwork:
+ description: 'MachineNetwork is the list of IP address pools
+ for machines. TODO: make this required in the next version
+ of the API'
+ items:
+ description: MachineNetworkEntry is a single IP address block
+ for node IP blocks.
+ properties:
+ cidr:
+ description: CIDR is the IP block address pool for machines
+ within the cluster.
+ type: string
+ required:
+ - cidr
+ type: object
+ type: array
+ networkType:
+ default: OVNKubernetes
+ description: NetworkType specifies the SDN provider used for
+ cluster networking.
+ enum:
+ - OpenShiftSDN
+ - Calico
+ - OVNKubernetes
+ - Other
+ type: string
+ podCIDR:
+ description: Deprecated This field will be removed in the next
+ API release. Use ClusterNetwork instead
+ type: string
+ serviceCIDR:
+ description: Deprecated This field will be removed in the next
+ API release. Use ServiceNetwork instead
+ type: string
+ serviceNetwork:
+ description: 'ServiceNetwork is the list of IP address pools
+ for services. NOTE: currently only one entry is supported.
+ TODO: make this required in the next version of the API'
+ items:
+ description: ServiceNetworkEntry is a single IP address block
+ for the service network.
+ properties:
+ cidr:
+ description: CIDR is the IP block address pool for services
+ within the cluster.
+ type: string
+ required:
+ - cidr
+ type: object
+ type: array
+ required:
+ - networkType
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector when specified, must be true for the pods
+ managed by the HostedCluster to be scheduled.
+ type: object
+ olmCatalogPlacement:
+ default: management
+ description: OLMCatalogPlacement specifies the placement of OLM
+ catalog components. By default, this is set to management and
+ OLM catalog components are deployed onto the management cluster.
+ If set to guest, the OLM catalog components will be deployed onto
+ the guest cluster.
+ enum:
+ - management
+ - guest
+ type: string
+ pausedUntil:
+ description: 'PausedUntil is a field that can be used to pause reconciliation
+ on a resource. Either a date can be provided in RFC3339 format
+ or a boolean. If a date is provided: reconciliation is paused
+ on the resource until that date. If the boolean true is provided:
+ reconciliation is paused on the resource until the field is removed.'
+ type: string
+ platform:
+ description: PlatformSpec specifies the underlying infrastructure
+ provider for the cluster and is used to configure platform specific
+ behavior.
+ properties:
+ agent:
+ description: Agent specifies configuration for agent-based installations.
+ properties:
+ agentNamespace:
+ description: AgentNamespace is the namespace where to search
+ for Agents for this cluster
+ type: string
+ required:
+ - agentNamespace
+ type: object
+ aws:
+ description: AWS specifies configuration for clusters running
+ on Amazon Web Services.
+ properties:
+ cloudProviderConfig:
+ description: 'CloudProviderConfig specifies AWS networking
+ configuration for the control plane. This is mainly used
+ for cloud provider controller config: https://github.com/kubernetes/kubernetes/blob/f5be5052e3d0808abb904aebd3218fe4a5c2dd82/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go#L1347-L1364
+ TODO(dan): should this be named AWSNetworkConfig?'
+ properties:
+ subnet:
+ description: Subnet is the subnet to use for control
+ plane cloud resources.
+ properties:
+ arn:
+ description: ARN of resource
+ type: string
+ filters:
+ description: 'Filters is a set of key/value pairs
+ used to identify a resource They are applied according
+ to the rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ items:
+ description: Filter is a filter used to identify
+ an AWS resource
+ properties:
+ name:
+ description: Name of the filter. Filter names
+ are case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter
+ values. Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ vpc:
+ description: VPC is the VPC to use for control plane
+ cloud resources.
+ type: string
+ zone:
+ description: Zone is the availability zone where control
+ plane cloud resources are created.
+ type: string
+ required:
+ - vpc
+ type: object
+ controlPlaneOperatorCreds:
+ description: Deprecated This field will be removed in the
+ next API release. Use RolesRef instead.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ endpointAccess:
+ default: Public
+ description: EndpointAccess specifies the publishing scope
+ of cluster endpoints. The default is Public.
+ enum:
+ - Public
+ - PublicAndPrivate
+ - Private
+ type: string
+ kubeCloudControllerCreds:
+ description: Deprecated This field will be removed in the
+ next API release. Use RolesRef instead.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ nodePoolManagementCreds:
+ description: Deprecated This field will be removed in the
+ next API release. Use RolesRef instead.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ region:
+ description: Region is the AWS region in which the cluster
+ resides. This configures the OCP control plane cloud integrations,
+ and is used by NodePool to resolve the correct boot AMI
+ for a given release.
+ type: string
+ resourceTags:
+ description: ResourceTags is a list of additional tags to
+ apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html
+ for information on tagging AWS resources. AWS supports
+ a maximum of 50 tags per resource. OpenShift reserves
+ 25 tags for its use, leaving 25 tags available for the
+ user.
+ items:
+ description: AWSResourceTag is a tag to apply to AWS resources
+ created for the cluster.
+ properties:
+ key:
+ description: Key is the key of the tag.
+ maxLength: 128
+ minLength: 1
+ pattern: ^[0-9A-Za-z_.:/=+-@]+$
+ type: string
+ value:
+ description: "Value is the value of the tag. \n Some
+ AWS services do not support empty values. Since tags
+ are added to resources in many services, the length
+ of the tag value must meet the requirements of all
+ services."
+ maxLength: 256
+ minLength: 1
+ pattern: ^[0-9A-Za-z_.:/=+-@]+$
+ type: string
+ required:
+ - key
+ - value
+ type: object
+ maxItems: 25
+ type: array
+ roles:
+ description: Deprecated This field will be removed in the
+ next API release. Use RolesRef instead.
+ items:
+ properties:
+ arn:
+ type: string
+ name:
+ type: string
+ namespace:
+ type: string
+ required:
+ - arn
+ - name
+ - namespace
+ type: object
+ type: array
+ rolesRef:
+ description: RolesRef contains references to various AWS
+ IAM roles required to enable integrations such as OIDC.
+ properties:
+ controlPlaneOperatorARN:
+ description: "ControlPlaneOperatorARN is an ARN value
+ referencing a role appropriate for the Control Plane
+ Operator. \n The following is an example of a valid
+ policy document: \n { \"Version\": \"2012-10-17\",
+ \"Statement\": [ { \"Effect\": \"Allow\", \"Action\":
+ [ \"ec2:CreateVpcEndpoint\", \"ec2:DescribeVpcEndpoints\",
+ \"ec2:ModifyVpcEndpoint\", \"ec2:DeleteVpcEndpoints\",
+ \"ec2:CreateTags\", \"route53:ListHostedZones\" ],
+ \"Resource\": \"*\" }, { \"Effect\": \"Allow\", \"Action\":
+ [ \"route53:ChangeResourceRecordSets\", \"route53:ListResourceRecordSets\"
+ ], \"Resource\": \"arn:aws:route53:::%s\" } ] }"
+ type: string
+ imageRegistryARN:
+ description: "ImageRegistryARN is an ARN value referencing
+ a role appropriate for the Image Registry Operator.
+ \n The following is an example of a valid policy document:
+ \n { \"Version\": \"2012-10-17\", \"Statement\": [
+ { \"Effect\": \"Allow\", \"Action\": [ \"s3:CreateBucket\",
+ \"s3:DeleteBucket\", \"s3:PutBucketTagging\", \"s3:GetBucketTagging\",
+ \"s3:PutBucketPublicAccessBlock\", \"s3:GetBucketPublicAccessBlock\",
+ \"s3:PutEncryptionConfiguration\", \"s3:GetEncryptionConfiguration\",
+ \"s3:PutLifecycleConfiguration\", \"s3:GetLifecycleConfiguration\",
+ \"s3:GetBucketLocation\", \"s3:ListBucket\", \"s3:GetObject\",
+ \"s3:PutObject\", \"s3:DeleteObject\", \"s3:ListBucketMultipartUploads\",
+ \"s3:AbortMultipartUpload\", \"s3:ListMultipartUploadParts\"
+ ], \"Resource\": \"*\" } ] }"
+ type: string
+ ingressARN:
+ description: "The referenced role must have a trust
+ relationship that allows it to be assumed via web
+ identity. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html.
+ Example: { \"Version\": \"2012-10-17\", \"Statement\":
+ [ { \"Effect\": \"Allow\", \"Principal\": { \"Federated\":
+ \"{{ .ProviderARN }}\" }, \"Action\": \"sts:AssumeRoleWithWebIdentity\",
+ \"Condition\": { \"StringEquals\": { \"{{ .ProviderName
+ }}:sub\": {{ .ServiceAccounts }} } } } ] } \n IngressARN
+ is an ARN value referencing a role appropriate for
+ the Ingress Operator. \n The following is an example
+ of a valid policy document: \n { \"Version\": \"2012-10-17\",
+ \"Statement\": [ { \"Effect\": \"Allow\", \"Action\":
+ [ \"elasticloadbalancing:DescribeLoadBalancers\",
+ \"tag:GetResources\", \"route53:ListHostedZones\"
+ ], \"Resource\": \"*\" }, { \"Effect\": \"Allow\",
+ \"Action\": [ \"route53:ChangeResourceRecordSets\"
+ ], \"Resource\": [ \"arn:aws:route53:::PUBLIC_ZONE_ID\",
+ \"arn:aws:route53:::PRIVATE_ZONE_ID\" ] } ] }"
+ type: string
+ kubeCloudControllerARN:
+ description: "KubeCloudControllerARN is an ARN value
+ referencing a role appropriate for the KCM/KCC. \n
+ The following is an example of a valid policy document:
+ \n { \"Version\": \"2012-10-17\", \"Statement\": [
+ { \"Action\": [ \"ec2:DescribeInstances\", \"ec2:DescribeImages\",
+ \"ec2:DescribeRegions\", \"ec2:DescribeRouteTables\",
+ \"ec2:DescribeSecurityGroups\", \"ec2:DescribeSubnets\",
+ \"ec2:DescribeVolumes\", \"ec2:CreateSecurityGroup\",
+ \"ec2:CreateTags\", \"ec2:CreateVolume\", \"ec2:ModifyInstanceAttribute\",
+ \"ec2:ModifyVolume\", \"ec2:AttachVolume\", \"ec2:AuthorizeSecurityGroupIngress\",
+ \"ec2:CreateRoute\", \"ec2:DeleteRoute\", \"ec2:DeleteSecurityGroup\",
+ \"ec2:DeleteVolume\", \"ec2:DetachVolume\", \"ec2:RevokeSecurityGroupIngress\",
+ \"ec2:DescribeVpcs\", \"elasticloadbalancing:AddTags\",
+ \"elasticloadbalancing:AttachLoadBalancerToSubnets\",
+ \"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer\",
+ \"elasticloadbalancing:CreateLoadBalancer\", \"elasticloadbalancing:CreateLoadBalancerPolicy\",
+ \"elasticloadbalancing:CreateLoadBalancerListeners\",
+ \"elasticloadbalancing:ConfigureHealthCheck\", \"elasticloadbalancing:DeleteLoadBalancer\",
+ \"elasticloadbalancing:DeleteLoadBalancerListeners\",
+ \"elasticloadbalancing:DescribeLoadBalancers\", \"elasticloadbalancing:DescribeLoadBalancerAttributes\",
+ \"elasticloadbalancing:DetachLoadBalancerFromSubnets\",
+ \"elasticloadbalancing:DeregisterInstancesFromLoadBalancer\",
+ \"elasticloadbalancing:ModifyLoadBalancerAttributes\",
+ \"elasticloadbalancing:RegisterInstancesWithLoadBalancer\",
+ \"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer\",
+ \"elasticloadbalancing:AddTags\", \"elasticloadbalancing:CreateListener\",
+ \"elasticloadbalancing:CreateTargetGroup\", \"elasticloadbalancing:DeleteListener\",
+ \"elasticloadbalancing:DeleteTargetGroup\", \"elasticloadbalancing:DescribeListeners\",
+ \"elasticloadbalancing:DescribeLoadBalancerPolicies\",
+ \"elasticloadbalancing:DescribeTargetGroups\", \"elasticloadbalancing:DescribeTargetHealth\",
+ \"elasticloadbalancing:ModifyListener\", \"elasticloadbalancing:ModifyTargetGroup\",
+ \"elasticloadbalancing:RegisterTargets\", \"elasticloadbalancing:SetLoadBalancerPoliciesOfListener\",
+ \"iam:CreateServiceLinkedRole\", \"kms:DescribeKey\"
+ ], \"Resource\": [ \"*\" ], \"Effect\": \"Allow\"
+ } ] }"
+ type: string
+ networkARN:
+ description: "NetworkARN is an ARN value referencing
+ a role appropriate for the Network Operator. \n The
+ following is an example of a valid policy document:
+ \n { \"Version\": \"2012-10-17\", \"Statement\": [
+ { \"Effect\": \"Allow\", \"Action\": [ \"ec2:DescribeInstances\",
+ \"ec2:DescribeInstanceStatus\", \"ec2:DescribeInstanceTypes\",
+ \"ec2:UnassignPrivateIpAddresses\", \"ec2:AssignPrivateIpAddresses\",
+ \"ec2:UnassignIpv6Addresses\", \"ec2:AssignIpv6Addresses\",
+ \"ec2:DescribeSubnets\", \"ec2:DescribeNetworkInterfaces\"
+ ], \"Resource\": \"*\" } ] }"
+ type: string
+ nodePoolManagementARN:
+ description: "NodePoolManagementARN is an ARN value
+ referencing a role appropriate for the CAPI Controller.
+ \n The following is an example of a valid policy document:
+ \n { \"Version\": \"2012-10-17\", \"Statement\": [
+ { \"Action\": [ \"ec2:AllocateAddress\", \"ec2:AssociateRouteTable\",
+ \"ec2:AttachInternetGateway\", \"ec2:AuthorizeSecurityGroupIngress\",
+ \"ec2:CreateInternetGateway\", \"ec2:CreateNatGateway\",
+ \"ec2:CreateRoute\", \"ec2:CreateRouteTable\", \"ec2:CreateSecurityGroup\",
+ \"ec2:CreateSubnet\", \"ec2:CreateTags\", \"ec2:DeleteInternetGateway\",
+ \"ec2:DeleteNatGateway\", \"ec2:DeleteRouteTable\",
+ \"ec2:DeleteSecurityGroup\", \"ec2:DeleteSubnet\",
+ \"ec2:DeleteTags\", \"ec2:DescribeAccountAttributes\",
+ \"ec2:DescribeAddresses\", \"ec2:DescribeAvailabilityZones\",
+ \"ec2:DescribeImages\", \"ec2:DescribeInstances\",
+ \"ec2:DescribeInternetGateways\", \"ec2:DescribeNatGateways\",
+ \"ec2:DescribeNetworkInterfaces\", \"ec2:DescribeNetworkInterfaceAttribute\",
+ \"ec2:DescribeRouteTables\", \"ec2:DescribeSecurityGroups\",
+ \"ec2:DescribeSubnets\", \"ec2:DescribeVpcs\", \"ec2:DescribeVpcAttribute\",
+ \"ec2:DescribeVolumes\", \"ec2:DetachInternetGateway\",
+ \"ec2:DisassociateRouteTable\", \"ec2:DisassociateAddress\",
+ \"ec2:ModifyInstanceAttribute\", \"ec2:ModifyNetworkInterfaceAttribute\",
+ \"ec2:ModifySubnetAttribute\", \"ec2:ReleaseAddress\",
+ \"ec2:RevokeSecurityGroupIngress\", \"ec2:RunInstances\",
+ \"ec2:TerminateInstances\", \"tag:GetResources\",
+ \"ec2:CreateLaunchTemplate\", \"ec2:CreateLaunchTemplateVersion\",
+ \"ec2:DescribeLaunchTemplates\", \"ec2:DescribeLaunchTemplateVersions\",
+ \"ec2:DeleteLaunchTemplate\", \"ec2:DeleteLaunchTemplateVersions\"
+ ], \"Resource\": [ \"*\" ], \"Effect\": \"Allow\"
+ }, { \"Condition\": { \"StringLike\": { \"iam:AWSServiceName\":
+ \"elasticloadbalancing.amazonaws.com\" } }, \"Action\":
+ [ \"iam:CreateServiceLinkedRole\" ], \"Resource\":
+ [ \"arn:*:iam::*:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing\"
+ ], \"Effect\": \"Allow\" }, { \"Action\": [ \"iam:PassRole\"
+ ], \"Resource\": [ \"arn:*:iam::*:role/*-worker-role\"
+ ], \"Effect\": \"Allow\" } ] }"
+ type: string
+ storageARN:
+ description: "StorageARN is an ARN value referencing
+ a role appropriate for the Storage Operator. \n The
+ following is an example of a valid policy document:
+ \n { \"Version\": \"2012-10-17\", \"Statement\": [
+ { \"Effect\": \"Allow\", \"Action\": [ \"ec2:AttachVolume\",
+ \"ec2:CreateSnapshot\", \"ec2:CreateTags\", \"ec2:CreateVolume\",
+ \"ec2:DeleteSnapshot\", \"ec2:DeleteTags\", \"ec2:DeleteVolume\",
+ \"ec2:DescribeInstances\", \"ec2:DescribeSnapshots\",
+ \"ec2:DescribeTags\", \"ec2:DescribeVolumes\", \"ec2:DescribeVolumesModifications\",
+ \"ec2:DetachVolume\", \"ec2:ModifyVolume\" ], \"Resource\":
+ \"*\" } ] }"
+ type: string
+ required:
+ - controlPlaneOperatorARN
+ - imageRegistryARN
+ - ingressARN
+ - kubeCloudControllerARN
+ - networkARN
+ - nodePoolManagementARN
+ - storageARN
+ type: object
+ serviceEndpoints:
+ description: "ServiceEndpoints specifies optional custom
+ endpoints which will override the default service endpoint
+ of specific AWS Services. \n There must be only one ServiceEndpoint
+ for a given service name."
+ items:
+ description: AWSServiceEndpoint stores the configuration
+ for services to override existing defaults of AWS Services.
+ properties:
+ name:
+ description: Name is the name of the AWS service.
+ This must be provided and cannot be empty.
+ type: string
+ url:
+ description: URL is fully qualified URI with scheme
+ https, that overrides the default generated endpoint
+ for a client. This must be provided and cannot be
+ empty.
+ pattern: ^https://
+ type: string
+ required:
+ - name
+ - url
+ type: object
+ type: array
+ required:
+ - controlPlaneOperatorCreds
+ - kubeCloudControllerCreds
+ - nodePoolManagementCreds
+ - region
+ - rolesRef
+ type: object
+ azure:
+ description: Azure defines azure specific settings
+ properties:
+ credentials:
+ description: LocalObjectReference contains enough information
+ to let you locate the referenced object inside the same
+ namespace.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ location:
+ type: string
+ machineIdentityID:
+ type: string
+ resourceGroup:
+ type: string
+ securityGroupName:
+ type: string
+ subnetName:
+ type: string
+ subscriptionID:
+ type: string
+ vnetID:
+ type: string
+ vnetName:
+ type: string
+ required:
+ - credentials
+ - location
+ - machineIdentityID
+ - resourceGroup
+ - securityGroupName
+ - subnetName
+ - subscriptionID
+ - vnetID
+ - vnetName
+ type: object
+ ibmcloud:
+ description: IBMCloud defines IBMCloud specific settings for
+ components
+ properties:
+ providerType:
+ description: ProviderType is a specific supported infrastructure
+ provider within IBM Cloud.
+ type: string
+ type: object
+ powervs:
+ description: PowerVS specifies configuration for clusters running
+ on IBMCloud Power VS Service. This field is immutable. Once
+ set, It can't be changed.
+ properties:
+ accountID:
+ description: AccountID is the IBMCloud account id. This
+ field is immutable. Once set, It can't be changed.
+ type: string
+ cisInstanceCRN:
+ description: CISInstanceCRN is the IBMCloud CIS Service
+ Instance's Cloud Resource Name This field is immutable.
+ Once set, It can't be changed.
+ pattern: '^crn:'
+ type: string
+ controlPlaneOperatorCreds:
+ description: "ControlPlaneOperatorCreds is a reference to
+ a secret containing cloud credentials with permissions
+ matching the control-plane-operator policy. This field
+ is immutable. Once set, It can't be changed. \n TODO(dan):
+ document the \"control plane operator policy\""
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ ingressOperatorCloudCreds:
+ description: IngressOperatorCloudCreds is a reference to
+ a secret containing ibm cloud credentials for ingress
+ operator to get authenticated with ibm cloud.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ kubeCloudControllerCreds:
+ description: "KubeCloudControllerCreds is a reference to
+ a secret containing cloud credentials with permissions
+ matching the cloud controller policy. This field is immutable.
+ Once set, It can't be changed. \n TODO(dan): document
+ the \"cloud controller policy\""
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ nodePoolManagementCreds:
+ description: "NodePoolManagementCreds is a reference to
+ a secret containing cloud credentials with permissions
+ matching the node pool management policy. This field is
+ immutable. Once set, It can't be changed. \n TODO(dan):
+ document the \"node pool management policy\""
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ region:
+ description: Region is the IBMCloud region in which the
+ cluster resides. This configures the OCP control plane
+ cloud integrations, and is used by NodePool to resolve
+ the correct boot image for a given release. This field
+ is immutable. Once set, It can't be changed.
+ type: string
+ resourceGroup:
+ description: ResourceGroup is the IBMCloud Resource Group
+ in which the cluster resides. This field is immutable.
+ Once set, It can't be changed.
+ type: string
+ serviceInstanceID:
+ description: "ServiceInstance is the reference to the Power
+ VS service on which the server instance(VM) will be created.
+ Power VS service is a container for all Power VS instances
+ at a specific geographic region. serviceInstance can be
+ created via IBM Cloud catalog or CLI. ServiceInstanceID
+ is the unique identifier that can be obtained from IBM
+ Cloud UI or IBM Cloud cli. \n More detail about Power
+ VS service instance. https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server
+ \n This field is immutable. Once set, It can't be changed."
+ type: string
+ subnet:
+ description: Subnet is the subnet to use for control plane
+ cloud resources. This field is immutable. Once set, It
+ can't be changed.
+ properties:
+ id:
+ description: ID of resource
+ type: string
+ name:
+ description: Name of resource
+ type: string
+ type: object
+ vpc:
+ description: VPC specifies IBM Cloud PowerVS Load Balancing
+ configuration for the control plane. This field is immutable.
+ Once set, It can't be changed.
+ properties:
+ name:
+                                description: Name for VPC to be used for all the
+                                  service load balancers. This field is immutable.
+                                  Once set, it can't be changed.
+ type: string
+ region:
+ description: Region is the IBMCloud region in which
+ VPC gets created, this VPC used for all the ingress
+ traffic into the OCP cluster. This field is immutable.
+ Once set, It can't be changed.
+ type: string
+ subnet:
+ description: Subnet is the subnet to use for load balancer.
+ This field is immutable. Once set, It can't be changed.
+ type: string
+ zone:
+ description: Zone is the availability zone where load
+ balancer cloud resources are created. This field is
+ immutable. Once set, It can't be changed.
+ type: string
+ required:
+ - name
+ - region
+ type: object
+ zone:
+ description: Zone is the availability zone where control
+ plane cloud resources are created. This field is immutable.
+ Once set, It can't be changed.
+ type: string
+ required:
+ - accountID
+ - cisInstanceCRN
+ - controlPlaneOperatorCreds
+ - ingressOperatorCloudCreds
+ - kubeCloudControllerCreds
+ - nodePoolManagementCreds
+ - region
+ - resourceGroup
+ - serviceInstanceID
+ - subnet
+ - vpc
+ - zone
+ type: object
+ type:
+ description: Type is the type of infrastructure provider for
+ the cluster.
+ enum:
+ - AWS
+ - None
+ - IBMCloud
+ - Agent
+ - KubeVirt
+ - Azure
+ - PowerVS
+ type: string
+ required:
+ - type
+ type: object
+ podCIDR:
+ description: deprecated use networking.ClusterNetwork
+ type: string
+ pullSecret:
+ description: LocalObjectReference contains enough information to
+ let you locate the referenced object inside the same namespace.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ releaseImage:
+ description: ReleaseImage is the release image applied to the hosted
+ control plane.
+ type: string
+ secretEncryption:
+ description: SecretEncryption contains metadata about the kubernetes
+ secret encryption strategy being used for the cluster when applicable.
+ properties:
+ aescbc:
+ description: AESCBC defines metadata about the AESCBC secret
+ encryption strategy
+ properties:
+ activeKey:
+ description: ActiveKey defines the active key used to encrypt
+ new secrets
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ backupKey:
+ description: BackupKey defines the old key during the rotation
+ process so previously created secrets can continue to
+ be decrypted until they are all re-encrypted with the
+ active key.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - activeKey
+ type: object
+ kms:
+ description: KMS defines metadata about the kms secret encryption
+ strategy
+ properties:
+ aws:
+ description: AWS defines metadata about the configuration
+ of the AWS KMS Secret Encryption provider
+ properties:
+ activeKey:
+ description: ActiveKey defines the active key used to
+ encrypt new secrets
+ properties:
+ arn:
+ description: ARN is the Amazon Resource Name for
+ the encryption key
+ pattern: '^arn:'
+ type: string
+ required:
+ - arn
+ type: object
+ auth:
+ description: Auth defines metadata about the management
+ of credentials used to interact with AWS KMS
+ properties:
+ credentials:
+ description: Credentials contains the name of the
+ secret that holds the aws credentials that can
+ be used to make the necessary KMS calls. It should
+ at key AWSCredentialsFileSecretKey contain the
+ aws credentials file that can be used to configure
+ AWS SDKs
+ properties:
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - credentials
+ type: object
+ backupKey:
+ description: BackupKey defines the old key during the
+ rotation process so previously created secrets can
+ continue to be decrypted until they are all re-encrypted
+ with the active key.
+ properties:
+ arn:
+ description: ARN is the Amazon Resource Name for
+ the encryption key
+ pattern: '^arn:'
+ type: string
+ required:
+ - arn
+ type: object
+ region:
+ description: Region contains the AWS region
+ type: string
+ required:
+ - activeKey
+ - auth
+ - region
+ type: object
+ ibmcloud:
+ description: IBMCloud defines metadata for the IBM Cloud
+ KMS encryption strategy
+ properties:
+ auth:
+ description: Auth defines metadata for how authentication
+ is done with IBM Cloud KMS
+ properties:
+ managed:
+ description: Managed defines metadata around the
+ service to service authentication strategy for
+ the IBM Cloud KMS system (all provider managed).
+ type: object
+ type:
+ description: Type defines the IBM Cloud KMS authentication
+ strategy
+ enum:
+ - Managed
+ - Unmanaged
+ type: string
+ unmanaged:
+ description: Unmanaged defines the auth metadata
+ the customer provides to interact with IBM Cloud
+ KMS
+ properties:
+ credentials:
+ description: Credentials should reference a
+ secret with a key field of IBMCloudIAMAPIKeySecretKey
+ that contains a apikey to call IBM Cloud KMS
+ APIs
+ properties:
+ name:
+ description: 'Name of the referent. More
+ info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - credentials
+ type: object
+ required:
+ - type
+ type: object
+ keyList:
+ description: KeyList defines the list of keys used for
+ data encryption
+ items:
+ description: IBMCloudKMSKeyEntry defines metadata
+ for an IBM Cloud KMS encryption key
+ properties:
+ correlationID:
+ description: CorrelationID is an identifier used
+ to track all api call usage from hypershift
+ type: string
+ crkID:
+                              description: CRKID is the customer root key id
+ type: string
+ instanceID:
+ description: InstanceID is the id for the key
+ protect instance
+ type: string
+ keyVersion:
+ description: KeyVersion is a unique number associated
+ with the key. The number increments whenever
+ a new key is enabled for data encryption.
+ type: integer
+ url:
+ description: URL is the url to call key protect
+ apis over
+ pattern: ^https://
+ type: string
+ required:
+ - correlationID
+ - crkID
+ - instanceID
+ - keyVersion
+ - url
+ type: object
+ type: array
+ region:
+ description: Region is the IBM Cloud region
+ type: string
+ required:
+ - auth
+ - keyList
+ - region
+ type: object
+ provider:
+ description: Provider defines the KMS provider
+ enum:
+ - IBMCloud
+ - AWS
+ type: string
+ required:
+ - provider
+ type: object
+ type:
+ description: Type defines the type of kube secret encryption
+ being used
+ enum:
+ - kms
+ - aescbc
+ type: string
+ required:
+ - type
+ type: object
+ serviceAccountSigningKey:
+ description: ServiceAccountSigningKey is a reference to a secret
+ containing the private key used by the service account token issuer.
+ The secret is expected to contain a single key named "key". If
+ not specified, a service account signing key will be generated
+ automatically for the cluster.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceCIDR:
+ description: deprecated use networking.ServiceNetwork
+ type: string
+ services:
+ description: Services defines metadata about how control plane services
+ are published in the management cluster.
+ items:
+ description: ServicePublishingStrategyMapping specifies how individual
+ control plane services are published from the hosting cluster
+ of a control plane.
+ properties:
+ service:
+ description: Service identifies the type of service being
+ published.
+ enum:
+ - APIServer
+ - OAuthServer
+ - OIDC
+ - Konnectivity
+ - Ignition
+ - OVNSbDb
+ type: string
+ servicePublishingStrategy:
+ description: ServicePublishingStrategy specifies how to publish
+ Service.
+ properties:
+ loadBalancer:
+ description: LoadBalancer configures exposing a service
+ using a LoadBalancer.
+ properties:
+ hostname:
+ description: Hostname is the name of the DNS record
+ that will be created pointing to the LoadBalancer.
+ type: string
+ type: object
+ nodePort:
+ description: NodePort configures exposing a service using
+ a NodePort.
+ properties:
+ address:
+ description: Address is the host/ip that the NodePort
+ service is exposed over.
+ type: string
+ port:
+ description: Port is the port of the NodePort service.
+ If <=0, the port is dynamically assigned when the
+ service is created.
+ format: int32
+ type: integer
+ required:
+ - address
+ type: object
+ route:
+ description: Route configures exposing a service using
+ a Route.
+ properties:
+ hostname:
+ description: Hostname is the name of the DNS record
+ that will be created pointing to the Route.
+ type: string
+ type: object
+ type:
+ description: Type is the publishing strategy used for
+ the service.
+ enum:
+ - LoadBalancer
+ - NodePort
+ - Route
+ - None
+ - S3
+ type: string
+ required:
+ - type
+ type: object
+ required:
+ - service
+ - servicePublishingStrategy
+ type: object
+ type: array
+ sshKey:
+ description: LocalObjectReference contains enough information to
+ let you locate the referenced object inside the same namespace.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - dns
+ - etcd
+ - infraID
+ - issuerURL
+ - platform
+ - pullSecret
+ - releaseImage
+ - services
+ - sshKey
+ type: object
+ status:
+ description: HostedControlPlaneStatus defines the observed state of
+ HostedControlPlane
+ properties:
+ conditions:
+ description: 'Condition contains details for one aspect of the current
+ state of the HostedControlPlane. Current condition types are:
+ "Available"'
+ items:
+ description: "Condition contains details for one aspect of the
+ current state of this API Resource. --- This struct is intended
+ for direct use as an array at the field path .status.conditions.
+ \ For example, type FooStatus struct{ // Represents the observations
+ of a foo's current state. // Known .status.conditions.type are:
+ \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
+ // +patchStrategy=merge // +listType=map // +listMapKey=type
+ Conditions []metav1.Condition `json:\"conditions,omitempty\"
+ patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
+ \n // other fields }"
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the last time the condition
+ transitioned from one status to another. This should be
+ when the underlying condition changed. If that is not known,
+ then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: message is a human readable message indicating
+ details about the transition. This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: observedGeneration represents the .metadata.generation
+ that the condition was set based upon. For instance, if
+ .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration
+ is 9, the condition is out of date with respect to the current
+ state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: reason contains a programmatic identifier indicating
+ the reason for the condition's last transition. Producers
+ of specific condition types may define expected values and
+ meanings for this field, and whether the values are considered
+ a guaranteed API. The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False,
+ Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ --- Many .condition.type values are consistent across resources
+ like Available, but because arbitrary conditions can be
+ useful (see .node.status.conditions), the ability to deconflict
+ is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ controlPlaneEndpoint:
+ description: ControlPlaneEndpoint contains the endpoint information
+ by which external clients can access the control plane. This
+ is populated after the infrastructure is ready.
+ properties:
+ host:
+ description: Host is the hostname on which the API server is
+ serving.
+ type: string
+ port:
+ description: Port is the port on which the API server is serving.
+ format: int32
+ type: integer
+ required:
+ - host
+ - port
+ type: object
+ externalManagedControlPlane:
+ default: true
+ description: ExternalManagedControlPlane indicates to cluster-api
+ that the control plane is managed by an external service. https://github.com/kubernetes-sigs/cluster-api/blob/65e5385bffd71bf4aad3cf34a537f11b217c7fab/controllers/machine_controller.go#L468
+ type: boolean
+ initialized:
+ default: false
+ description: Initialized denotes whether or not the control plane
+ has provided a kubeadm-config. Once this condition is marked true,
+ its value is never changed. See the Ready condition for an indication
+ of the current readiness of the cluster's control plane. This
+ satisfies CAPI contract https://github.com/kubernetes-sigs/cluster-api/blob/cd3a694deac89d5ebeb888307deaa61487207aa0/controllers/cluster_controller_phases.go#L238-L252
+ type: boolean
+ kubeConfig:
+ description: KubeConfig is a reference to the secret containing
+ the default kubeconfig for this control plane.
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ kubeadminPassword:
+ description: KubeadminPassword is a reference to the secret containing
+ the initial kubeadmin password for the guest cluster.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ lastReleaseImageTransitionTime:
+ description: "lastReleaseImageTransitionTime is the time of the
+ last update to the current releaseImage property. \n Deprecated:
+ Use versionStatus.history[0].startedTime instead."
+ format: date-time
+ type: string
+ oauthCallbackURLTemplate:
+ description: OAuthCallbackURLTemplate contains a template for the
+ URL to use as a callback for identity providers. The [identity-provider-name]
+ placeholder must be replaced with the name of an identity provider
+ defined on the HostedCluster. This is populated after the infrastructure
+ is ready.
+ type: string
+ ready:
+ default: false
+ description: Ready denotes that the HostedControlPlane API Server
+ is ready to receive requests This satisfies CAPI contract https://github.com/kubernetes-sigs/cluster-api/blob/cd3a694deac89d5ebeb888307deaa61487207aa0/controllers/cluster_controller_phases.go#L226-L230
+ type: boolean
+ releaseImage:
+ description: "ReleaseImage is the release image applied to the hosted
+ control plane. \n Deprecated: Use versionStatus.desired.image
+ instead."
+ type: string
+ version:
+ description: "Version is the semantic version of the release applied
+ by the hosted control plane operator \n Deprecated: Use versionStatus.desired.version
+ instead."
+ type: string
+ versionStatus:
+ description: versionStatus is the status of the release version
+ applied by the hosted control plane operator.
+ properties:
+ availableUpdates:
+ description: availableUpdates contains updates recommended for
+ this cluster. Updates which appear in conditionalUpdates but
+ not in availableUpdates may expose this cluster to known issues.
+ This list may be empty if no updates are recommended, if the
+ update service is unavailable, or if an invalid channel has
+ been specified.
+ items:
+ description: Release represents an OpenShift release image
+ and associated metadata.
+ properties:
+ channels:
+ description: channels is the set of Cincinnati channels
+ to which the release currently belongs.
+ items:
+ type: string
+ type: array
+ image:
+ description: image is a container image location that
+ contains the update. When this field is part of spec,
+ image is optional if version is specified and the availableUpdates
+ field contains a matching version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on a
+ release or the metadata returned by the update API and
+ should be displayed as a link in user interfaces. The
+ URL field may not be set for test or nightly releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec,
+ version is optional if image is specified.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ conditionalUpdates:
+ description: conditionalUpdates contains the list of updates
+ that may be recommended for this cluster if it meets specific
+ required conditions. Consumers interested in the set of updates
+ that are actually recommended for this cluster should use
+ availableUpdates. This list may be empty if no updates are
+ recommended, if the update service is unavailable, or if an
+ empty or invalid channel has been specified.
+ items:
+ description: ConditionalUpdate represents an update which
+ is recommended to some clusters on the version the current
+ cluster is reconciling, but which may not be recommended
+ for the current cluster.
+ properties:
+ conditions:
+ description: 'conditions represents the observations of
+ the conditional update''s current status. Known types
+ are: * Evaluating, for whether the cluster-version operator
+ will attempt to evaluate any risks[].matchingRules.
+ * Recommended, for whether the update is recommended
+ for the current cluster.'
+ items:
+ description: "Condition contains details for one aspect
+ of the current state of this API Resource. --- This
+ struct is intended for direct use as an array at the
+ field path .status.conditions. For example, type
+ FooStatus struct{ // Represents the observations of
+ a foo's current state. // Known .status.conditions.type
+ are: \"Available\", \"Progressing\", and \"Degraded\"
+ // +patchMergeKey=type // +patchStrategy=merge //
+ +listType=map // +listMapKey=type Conditions []metav1.Condition
+ `json:\"conditions,omitempty\" patchStrategy:\"merge\"
+ patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
+ \n // other fields }"
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the last time
+ the condition transitioned from one status to
+ another. This should be when the underlying condition
+ changed. If that is not known, then using the
+ time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: message is a human readable message
+ indicating details about the transition. This
+ may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: observedGeneration represents the .metadata.generation
+ that the condition was set based upon. For instance,
+ if .metadata.generation is currently 12, but the
+ .status.conditions[x].observedGeneration is 9,
+ the condition is out of date with respect to the
+ current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: reason contains a programmatic identifier
+ indicating the reason for the condition's last
+ transition. Producers of specific condition types
+ may define expected values and meanings for this
+ field, and whether the values are considered a
+ guaranteed API. The value should be a CamelCase
+ string. This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True,
+ False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in
+ foo.example.com/CamelCase. --- Many .condition.type
+ values are consistent across resources like Available,
+ but because arbitrary conditions can be useful
+ (see .node.status.conditions), the ability to
+ deconflict is important. The regex it matches
+ is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ release:
+ description: release is the target of the update.
+ properties:
+ channels:
+ description: channels is the set of Cincinnati channels
+ to which the release currently belongs.
+ items:
+ type: string
+ type: array
+ image:
+ description: image is a container image location that
+ contains the update. When this field is part of
+ spec, image is optional if version is specified
+ and the availableUpdates field contains a matching
+ version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on
+ a release or the metadata returned by the update
+ API and should be displayed as a link in user interfaces.
+ The URL field may not be set for test or nightly
+ releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec,
+ version is optional if image is specified.
+ type: string
+ type: object
+ risks:
+ description: risks represents the range of issues associated
+ with updating to the target release. The cluster-version
+ operator will evaluate all entries, and only recommend
+ the update if there is at least one entry and all entries
+ recommend the update.
+ items:
+ description: ConditionalUpdateRisk represents a reason
+ and cluster-state for not recommending a conditional
+ update.
+ properties:
+ matchingRules:
+ description: matchingRules is a slice of conditions
+ for deciding which clusters match the risk and
+ which do not. The slice is ordered by decreasing
+ precedence. The cluster-version operator will
+ walk the slice in order, and stop after the first
+ it can successfully evaluate. If no condition
+ can be successfully evaluated, the update will
+ not be recommended.
+ items:
+ description: ClusterCondition is a union of typed
+ cluster conditions. The 'type' property determines
+ which of the type-specific properties are relevant.
+ When evaluated on a cluster, the condition may
+ match, not match, or fail to evaluate.
+ properties:
+ promql:
+ description: promQL represents a cluster condition
+ based on PromQL.
+ properties:
+ promql:
+ description: PromQL is a PromQL query
+                                    classifying clusters. This query
+ should return a 1 in the match case
+ and a 0 in the does-not-match case.
+ Queries which return no time series,
+ or which return values besides 0 or
+ 1, are evaluation failures.
+ type: string
+ required:
+ - promql
+ type: object
+ type:
+ description: type represents the cluster-condition
+ type. This defines the members and semantics
+ of any additional properties.
+ enum:
+ - Always
+ - PromQL
+ type: string
+ required:
+ - type
+ type: object
+ minItems: 1
+ type: array
+ x-kubernetes-list-type: atomic
+ message:
+ description: message provides additional information
+ about the risk of updating, in the event that
+ matchingRules match the cluster state. This is
+ only to be consumed by humans. It may contain
+ Line Feed characters (U+000A), which should be
+ rendered as new lines.
+ minLength: 1
+ type: string
+ name:
+ description: name is the CamelCase reason for not
+ recommending a conditional update, in the event
+ that matchingRules match the cluster state.
+ minLength: 1
+ type: string
+ url:
+ description: url contains information about this
+ risk.
+ format: uri
+ minLength: 1
+ type: string
+ required:
+ - matchingRules
+ - message
+ - name
+ - url
+ type: object
+ minItems: 1
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - release
+ - risks
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ desired:
+ description: desired is the version that the cluster is reconciling
+ towards. If the cluster is not yet fully initialized desired
+ will be set with the information available, which may be an
+ image or a tag.
+ properties:
+ channels:
+ description: channels is the set of Cincinnati channels
+ to which the release currently belongs.
+ items:
+ type: string
+ type: array
+ image:
+ description: image is a container image location that contains
+ the update. When this field is part of spec, image is
+ optional if version is specified and the availableUpdates
+ field contains a matching version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on a release
+ or the metadata returned by the update API and should
+ be displayed as a link in user interfaces. The URL field
+ may not be set for test or nightly releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec, version
+ is optional if image is specified.
+ type: string
+ type: object
+ history:
+ description: history contains a list of the most recent versions
+ applied to the cluster. This value may be empty during cluster
+ startup, and then will be updated when a new update is being
+ applied. The newest update is first in the list and it is
+ ordered by recency. Updates in the history have state Completed
+ if the rollout completed - if an update was failing or halfway
+ applied the state will be Partial. Only a limited amount of
+ update history is preserved.
+ items:
+ description: UpdateHistory is a single attempted update to
+ the cluster.
+ properties:
+ acceptedRisks:
+ description: acceptedRisks records risks which were accepted
+                          to initiate the update. For example, it may mention
+                          an Upgradeable=False or missing signature that was overridden
+ via desiredUpdate.force, or an update that was initiated
+ despite not being in the availableUpdates set of recommended
+ update targets.
+ type: string
+ completionTime:
+ description: completionTime, if set, is when the update
+ was fully applied. The update that is currently being
+ applied will have a null completion time. Completion
+ time will always be set for entries that are not the
+ current update (usually to the started time of the next
+ update).
+ format: date-time
+ nullable: true
+ type: string
+ image:
+ description: image is a container image location that
+ contains the update. This value is always populated.
+ type: string
+ startedTime:
+ description: startedTime is the time at which the update
+ was started.
+ format: date-time
+ type: string
+ state:
+ description: state reflects whether the update was fully
+ applied. The Partial state indicates the update is not
+ fully applied, while the Completed state indicates the
+ update was successfully rolled out at least once (all
+ parts of the update successfully applied).
+ type: string
+ verified:
+ description: verified indicates whether the provided update
+ was properly verified before it was installed. If this
+ is false the cluster may not be trusted. Verified does
+ not cover upgradeable checks that depend on the cluster
+ state at the time when the update target was accepted.
+ type: boolean
+ version:
+ description: version is a semantic versioning identifying
+ the update version. If the requested image does not
+ define a version, or if a failure occurs retrieving
+ the image, this value may be empty.
+ type: string
+ required:
+ - completionTime
+ - image
+ - startedTime
+ - state
+ - verified
+ type: object
+ type: array
+ observedGeneration:
+ description: observedGeneration reports which version of the
+ spec is being synced. If this value is not equal to metadata.generation,
+ then the desired and conditions fields may represent a previous
+ version.
+ format: int64
+ type: integer
+ required:
+ - availableUpdates
+ - desired
+ - observedGeneration
+ type: object
+ required:
+ - initialized
+ - ready
+ type: object
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: HostedControlPlane defines the desired state of HostedControlPlane
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource
+ this object represents. Servers may infer this from the endpoint the
+ client submits requests to. Cannot be updated. In CamelCase. More
+ info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: HostedControlPlaneSpec defines the desired state of HostedControlPlane
+ properties:
+ additionalTrustBundle:
+ description: AdditionalTrustBundle references a ConfigMap containing
+ a PEM-encoded X.509 certificate bundle
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ auditWebhook:
+ description: AuditWebhook contains metadata for configuring an audit
+ webhook endpoint for a cluster to process cluster audit events.
+ It references a secret that contains the webhook information for
+ the audit webhook endpoint. It is a secret because if the endpoint
+ has MTLS the kubeconfig will contain client keys. This is currently
+ only supported in IBM Cloud. The kubeconfig needs to be stored
+ in the secret with a secret key name that corresponds to the constant
+ AuditWebhookKubeconfigKey.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ autoscaling:
+ description: Autoscaling specifies auto-scaling behavior that applies
+ to all NodePools associated with the control plane.
+ properties:
+ maxNodeProvisionTime:
+ description: MaxNodeProvisionTime is the maximum time to wait
+ for node provisioning before considering the provisioning
+ to be unsuccessful, expressed as a Go duration string. The
+ default is 15 minutes.
+ pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$
+ type: string
+ maxNodesTotal:
+ description: MaxNodesTotal is the maximum allowable number of
+ nodes across all NodePools for a HostedCluster. The autoscaler
+ will not grow the cluster beyond this number.
+ format: int32
+ minimum: 0
+ type: integer
+ maxPodGracePeriod:
+ description: MaxPodGracePeriod is the maximum seconds to wait
+ for graceful pod termination before scaling down a NodePool.
+ The default is 600 seconds.
+ format: int32
+ minimum: 0
+ type: integer
+ podPriorityThreshold:
+ description: "PodPriorityThreshold enables users to schedule
+ \"best-effort\" pods, which shouldn't trigger autoscaler actions,
+ but only run when there are spare resources available. The
+ default is -10. \n See the following for more details: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-does-cluster-autoscaler-work-with-pod-priority-and-preemption"
+ format: int32
+ type: integer
+ type: object
+ channel:
+ description: channel is an identifier for explicitly requesting
+ that a non-default set of updates be applied to this cluster.
+                  The default channel will contain stable updates that are appropriate
+ for production clusters.
+ type: string
+ clusterID:
+ description: ClusterID is the unique id that identifies the cluster
+ externally. Making it optional here allows us to keep compatibility
+ with previous versions of the control-plane-operator that have
+ no knowledge of this field.
+ type: string
+ configuration:
+ description: 'Configuration embeds resources that correspond to
+ the openshift configuration API: https://docs.openshift.com/container-platform/4.7/rest_api/config_apis/config-apis-index.html'
+ properties:
+ apiServer:
+ description: APIServer holds configuration (like serving certificates,
+ client CA and CORS domains) shared by all API servers in the
+ system, among them especially kube-apiserver and openshift-apiserver.
+ properties:
+ additionalCORSAllowedOrigins:
+ description: additionalCORSAllowedOrigins lists additional,
+ user-defined regular expressions describing hosts for
+ which the API server allows access using the CORS headers.
+ This may be needed to access the API and the integrated
+ OAuth server from JavaScript applications. The values
+ are regular expressions that correspond to the Golang
+ regular expression language.
+ items:
+ type: string
+ type: array
+ audit:
+ default:
+ profile: Default
+ description: audit specifies the settings for audit configuration
+ to be applied to all OpenShift-provided API servers in
+ the cluster.
+ properties:
+ customRules:
+ description: customRules specify profiles per group.
+                              These profiles take precedence over the top-level profile
+                              field if they apply. They are evaluated from top
+                              to bottom and the first one that matches, applies.
+ items:
+ description: AuditCustomRule describes a custom rule
+ for an audit profile that takes precedence over
+ the top-level profile.
+ properties:
+ group:
+                                  description: group is a name of a group a request
+                                    user must be a member of in order for this profile
+                                    to apply.
+ minLength: 1
+ type: string
+ profile:
+ description: "profile specifies the name of the
+ desired audit policy configuration to be deployed
+ to all OpenShift-provided API servers in the
+ cluster. \n The following profiles are provided:
+ - Default: the existing default policy. - WriteRequestBodies:
+ like 'Default', but logs request and response
+ HTTP payloads for write requests (create, update,
+ patch). - AllRequestBodies: like 'WriteRequestBodies',
+ but also logs request and response HTTP payloads
+ for read requests (get, list). - None: no requests
+ are logged at all, not even oauthaccesstokens
+ and oauthauthorizetokens. \n If unset, the 'Default'
+ profile is used as the default."
+ enum:
+ - Default
+ - WriteRequestBodies
+ - AllRequestBodies
+ - None
+ type: string
+ required:
+ - group
+ - profile
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - group
+ x-kubernetes-list-type: map
+ profile:
+ default: Default
+ description: "profile specifies the name of the desired
+ top-level audit profile to be applied to all requests
+ sent to any of the OpenShift-provided API servers
+ in the cluster (kube-apiserver, openshift-apiserver
+ and oauth-apiserver), with the exception of those
+ requests that match one or more of the customRules.
+ \n The following profiles are provided: - Default:
+ default policy which means MetaData level logging
+ with the exception of events (not logged at all),
+ oauthaccesstokens and oauthauthorizetokens (both logged
+ at RequestBody level). - WriteRequestBodies: like
+ 'Default', but logs request and response HTTP payloads
+ for write requests (create, update, patch). - AllRequestBodies:
+ like 'WriteRequestBodies', but also logs request and
+ response HTTP payloads for read requests (get, list).
+ - None: no requests are logged at all, not even oauthaccesstokens
+ and oauthauthorizetokens. \n Warning: It is not recommended
+ to disable audit logging by using the `None` profile
+ unless you are fully aware of the risks of not logging
+ data that can be beneficial when troubleshooting issues.
+ If you disable audit logging and a support situation
+ arises, you might need to enable audit logging and
+ reproduce the issue in order to troubleshoot properly.
+ \n If unset, the 'Default' profile is used as the
+ default."
+ enum:
+ - Default
+ - WriteRequestBodies
+ - AllRequestBodies
+ - None
+ type: string
+ type: object
+ clientCA:
+ description: 'clientCA references a ConfigMap containing
+ a certificate bundle for the signers that will be recognized
+ for incoming client certificates in addition to the operator
+ managed signers. If this is empty, then only operator
+ managed signers are valid. You usually only have to set
+ this if you have your own PKI you wish to honor client
+ certificates from. The ConfigMap must exist in the openshift-config
+ namespace and contain the following required fields: -
+ ConfigMap.Data["ca-bundle.crt"] - CA bundle.'
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ required:
+ - name
+ type: object
+ encryption:
+ description: encryption allows the configuration of encryption
+ of resources at the datastore layer.
+ properties:
+ type:
+ description: "type defines what encryption type should
+ be used to encrypt resources at the datastore layer.
+ When this field is unset (i.e. when it is set to the
+ empty string), identity is implied. The behavior of
+ unset can and will change over time. Even if encryption
+ is enabled by default, the meaning of unset may change
+ to a different encryption type based on changes in
+ best practices. \n When encryption is enabled, all
+ sensitive resources shipped with the platform are
+ encrypted. This list of sensitive resources can and
+ will change over time. The current authoritative
+ list is: \n 1. secrets 2. configmaps 3. routes.route.openshift.io
+ 4. oauthaccesstokens.oauth.openshift.io 5. oauthauthorizetokens.oauth.openshift.io"
+ enum:
+ - ""
+ - identity
+ - aescbc
+ type: string
+ type: object
+ servingCerts:
+ description: servingCert is the TLS cert info for serving
+ secure traffic. If not specified, operator managed certificates
+ will be used for serving secure traffic.
+ properties:
+ namedCertificates:
+ description: namedCertificates references secrets containing
+ the TLS cert info for serving secure traffic to specific
+ hostnames. If no named certificates are provided,
+ or no named certificates match the server name as
+ understood by a client, the defaultServingCertificate
+ will be used.
+ items:
+ description: APIServerNamedServingCert maps a server
+ DNS name, as understood by a client, to a certificate.
+ properties:
+ names:
+                                  description: names is an optional list of explicit
+ DNS names (leading wildcards allowed) that should
+ use this certificate to serve secure traffic.
+ If no names are provided, the implicit names
+ will be extracted from the certificates. Exact
+ names trump over wildcard names. Explicit names
+ defined here trump over extracted implicit names.
+ items:
+ type: string
+ type: array
+ servingCertificate:
+ description: 'servingCertificate references a
+ kubernetes.io/tls type secret containing the
+ TLS cert info for serving secure traffic. The
+ secret must exist in the openshift-config namespace
+ and contain the following required fields: -
+ Secret.Data["tls.key"] - TLS private key. -
+ Secret.Data["tls.crt"] - TLS certificate.'
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ type: object
+ type: array
+ type: object
+ tlsSecurityProfile:
+ description: "tlsSecurityProfile specifies settings for
+ TLS connections for externally exposed servers. \n If
+ unset, a default (which may change between releases) is
+ chosen. Note that only Old, Intermediate and Custom profiles
+ are currently supported, and the maximum available MinTLSVersions
+ is VersionTLS12."
+ properties:
+ custom:
+ description: "custom is a user-defined TLS security
+ profile. Be extremely careful using a custom profile
+ as invalid configurations can be catastrophic. An
+ example custom profile looks like this: \n ciphers:
+ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305
+ - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256
+ minTLSVersion: TLSv1.1"
+ nullable: true
+ properties:
+ ciphers:
+ description: "ciphers is used to specify the cipher
+ algorithms that are negotiated during the TLS
+ handshake. Operators may remove entries their
+ operands do not support. For example, to use
+ DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA"
+ items:
+ type: string
+ type: array
+ minTLSVersion:
+ description: "minTLSVersion is used to specify the
+ minimal version of the TLS protocol that is negotiated
+ during the TLS handshake. For example, to use
+ TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion:
+ TLSv1.1 \n NOTE: currently the highest minTLSVersion
+ allowed is VersionTLS12"
+ enum:
+ - VersionTLS10
+ - VersionTLS11
+ - VersionTLS12
+ - VersionTLS13
+ type: string
+ type: object
+ intermediate:
+ description: "intermediate is a TLS security profile
+ based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29
+ \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
+ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
+ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256
+ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384
+ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305
+ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384
+ minTLSVersion: TLSv1.2"
+ nullable: true
+ type: object
+ modern:
+ description: "modern is a TLS security profile based
+ on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+ \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
+ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
+ minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported."
+ nullable: true
+ type: object
+ old:
+ description: "old is a TLS security profile based on:
+ \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
+ \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
+ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
+ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256
+ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384
+ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305
+ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384
+ - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256
+ - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA
+ - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384
+ - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA
+ - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256
+ - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256
+ - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA
+ minTLSVersion: TLSv1.0"
+ nullable: true
+ type: object
+ type:
+ description: "type is one of Old, Intermediate, Modern
+ or Custom. Custom provides the ability to specify
+ individual TLS security profile parameters. Old, Intermediate
+ and Modern are TLS security profiles based on: \n
+ https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations
+ \n The profiles are intent based, so they may change
+ over time as new ciphers are developed and existing
+ ciphers are found to be insecure. Depending on precisely
+ which ciphers are available to a process, the list
+ may be reduced. \n Note that the Modern profile is
+ currently not supported because it is not yet well
+ adopted by common software libraries."
+ enum:
+ - Old
+ - Intermediate
+ - Modern
+ - Custom
+ type: string
+ type: object
+ type: object
+ authentication:
+ description: Authentication specifies cluster-wide settings
+ for authentication (like OAuth and webhook token authenticators).
+ properties:
+ oauthMetadata:
+ description: 'oauthMetadata contains the discovery endpoint
+ data for OAuth 2.0 Authorization Server Metadata for an
+ external OAuth server. This discovery document can be
+ viewed from its served location: oc get --raw ''/.well-known/oauth-authorization-server''
+ For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
+ If oauthMetadata.name is non-empty, this value has precedence
+ over any metadata reference stored in status. The key
+ "oauthMetadata" is used to locate the data. If specified
+ and the config map or expected key is not found, no metadata
+ is served. If the specified metadata is not valid, no
+ metadata is served. The namespace for this config map
+ is openshift-config.'
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ required:
+ - name
+ type: object
+ serviceAccountIssuer:
+ description: 'serviceAccountIssuer is the identifier of
+ the bound service account token issuer. The default is
+ https://kubernetes.default.svc WARNING: Updating this
+ field will result in the invalidation of all bound tokens
+ with the previous issuer value. Unless the holder of a
+ bound token has explicit support for a change in issuer,
+ they will not request a new bound token until pod restart
+ or until their existing token exceeds 80% of its duration.'
+ type: string
+ type:
+ description: type identifies the cluster managed, user facing
+ authentication mode in use. Specifically, it manages the
+ component that responds to login attempts. The default
+ is IntegratedOAuth.
+ type: string
+ webhookTokenAuthenticator:
+ description: webhookTokenAuthenticator configures a remote
+ token reviewer. These remote authentication webhooks can
+ be used to verify bearer tokens via the tokenreviews.authentication.k8s.io
+ REST API. This is required to honor bearer tokens that
+ are provisioned by an external authentication service.
+ properties:
+ kubeConfig:
+ description: "kubeConfig references a secret that contains
+ kube config file data which describes how to access
+ the remote webhook service. The namespace for the
+ referenced secret is openshift-config. \n For further
+ details, see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
+ \n The key \"kubeConfig\" is used to locate the data.
+ If the secret or expected key is not found, the webhook
+ is not honored. If the specified kube config data
+ is not valid, the webhook is not honored."
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ required:
+ - name
+ type: object
+ required:
+ - kubeConfig
+ type: object
+ webhookTokenAuthenticators:
+ description: webhookTokenAuthenticators is DEPRECATED, setting
+ it has no effect.
+ items:
+ description: deprecatedWebhookTokenAuthenticator holds
+ the necessary configuration options for a remote token
+ authenticator. It's the same as WebhookTokenAuthenticator
+ but it's missing the 'required' validation on KubeConfig
+ field.
+ properties:
+ kubeConfig:
+ description: 'kubeConfig contains kube config file
+ data which describes how to access the remote webhook
+ service. For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
+ The key "kubeConfig" is used to locate the data.
+ If the secret or expected key is not found, the
+ webhook is not honored. If the specified kube config
+ data is not valid, the webhook is not honored. The
+ namespace for this secret is determined by the point
+ of use.'
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ type: object
+ type: array
+ type: object
+ featureGate:
+ description: FeatureGate holds cluster-wide information about
+ feature gates.
+ properties:
+ customNoUpgrade:
+ description: customNoUpgrade allows the enabling or disabling
+ of any feature. Turning this feature set on IS NOT SUPPORTED,
+ CANNOT BE UNDONE, and PREVENTS UPGRADES. Because of its
+ nature, this setting cannot be validated. If you have
+ any typos or accidentally apply invalid combinations your
+ cluster may fail in an unrecoverable way. featureSet
+ must equal "CustomNoUpgrade" must be set to use this field.
+ nullable: true
+ properties:
+ disabled:
+ description: disabled is a list of all feature gates
+ that you want to force off
+ items:
+ type: string
+ type: array
+ enabled:
+ description: enabled is a list of all feature gates
+ that you want to force on
+ items:
+ type: string
+ type: array
+ type: object
+ featureSet:
+ description: featureSet changes the list of features in
+ the cluster. The default is empty. Be very careful adjusting
+ this setting. Turning on or off features may cause irreversible
+ changes in your cluster which cannot be undone.
+ type: string
+ type: object
+ image:
+ description: Image governs policies related to imagestream imports
+ and runtime configuration for external registries. It allows
+ cluster admins to configure which registries OpenShift is
+ allowed to import images from, extra CA trust bundles for
+ external registries, and policies to block or allow registry
+ hostnames. When exposing OpenShift's image registry to the
+ public, this also lets cluster admins specify the external
+ hostname.
+ properties:
+ additionalTrustedCA:
+ description: additionalTrustedCA is a reference to a ConfigMap
+ containing additional CAs that should be trusted during
+ imagestream import, pod image pull, build image pull,
+ and imageregistry pullthrough. The namespace for this
+ config map is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ required:
+ - name
+ type: object
+ allowedRegistriesForImport:
+ description: allowedRegistriesForImport limits the container
+ image registries that normal users may import images from.
+ Set this list to the registries that you trust to contain
+ valid Docker images and that you want applications to
+ be able to import from. Users with permission to create
+ Images or ImageStreamMappings via the API are not affected
+ by this policy - typically only administrators or system
+ integrations will have those permissions.
+ items:
+ description: RegistryLocation contains a location of the
+ registry specified by the registry domain name. The
+ domain name might include wildcards, like '*' or '??'.
+ properties:
+ domainName:
+ description: domainName specifies a domain name for
+                                the registry. In case the registry uses a non-standard
+ (80 or 443) port, the port should be included in
+ the domain name as well.
+ type: string
+ insecure:
+ description: insecure indicates whether the registry
+ is secure (https) or insecure (http) By default
+ (if not specified) the registry is assumed as secure.
+ type: boolean
+ type: object
+ type: array
+ externalRegistryHostnames:
+ description: externalRegistryHostnames provides the hostnames
+ for the default external image registry. The external
+ hostname should be set only when the image registry is
+ exposed externally. The first value is used in 'publicDockerImageRepository'
+ field in ImageStreams. The value must be in "hostname[:port]"
+ format.
+ items:
+ type: string
+ type: array
+ registrySources:
+ description: registrySources contains configuration that
+ determines how the container runtime should treat individual
+ registries when accessing images for builds+pods. (e.g.
+ whether or not to allow insecure access). It does not
+ contain configuration for the internal cluster registry.
+ properties:
+ allowedRegistries:
+ description: "allowedRegistries are the only registries
+ permitted for image pull and push actions. All other
+ registries are denied. \n Only one of BlockedRegistries
+ or AllowedRegistries may be set."
+ items:
+ type: string
+ type: array
+ blockedRegistries:
+ description: "blockedRegistries cannot be used for image
+ pull and push actions. All other registries are permitted.
+ \n Only one of BlockedRegistries or AllowedRegistries
+ may be set."
+ items:
+ type: string
+ type: array
+ containerRuntimeSearchRegistries:
+ description: 'containerRuntimeSearchRegistries are registries
+ that will be searched when pulling images that do
+ not have fully qualified domains in their pull specs.
+ Registries will be searched in the order provided
+ in the list. Note: this search list only works with
+ the container runtime, i.e CRI-O. Will NOT work with
+ builds or imagestream imports.'
+ format: hostname
+ items:
+ type: string
+ minItems: 1
+ type: array
+ x-kubernetes-list-type: set
+ insecureRegistries:
+ description: insecureRegistries are registries which
+                              do not have valid TLS certificates or only support
+ HTTP connections.
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ ingress:
+ description: Ingress holds cluster-wide information about ingress,
+ including the default ingress domain used for routes.
+ properties:
+ appsDomain:
+ description: appsDomain is an optional domain to use instead
+ of the one specified in the domain field when a Route
+ is created without specifying an explicit host. If appsDomain
+ is nonempty, this value is used to generate default host
+ values for Route. Unlike domain, appsDomain may be modified
+ after installation. This assumes a new ingresscontroller
+                          has been set up with a wildcard certificate.
+ type: string
+ componentRoutes:
+ description: "componentRoutes is an optional list of routes
+ that are managed by OpenShift components that a cluster-admin
+ is able to configure the hostname and serving certificate
+ for. The namespace and name of each route in this list
+ should match an existing entry in the status.componentRoutes
+ list. \n To determine the set of configurable Routes,
+ look at namespace and name of entries in the .status.componentRoutes
+ list, where participating operators write the status of
+ configurable routes."
+ items:
+ description: ComponentRouteSpec allows for configuration
+ of a route's hostname and serving certificate.
+ properties:
+ hostname:
+ description: hostname is the hostname that should
+ be used by the route.
+ pattern: ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$
+ type: string
+ name:
+ description: "name is the logical name of the route
+ to customize. \n The namespace and name of this
+ componentRoute must match a corresponding entry
+ in the list of status.componentRoutes if the route
+ is to be customized."
+ maxLength: 256
+ minLength: 1
+ type: string
+ namespace:
+ description: "namespace is the namespace of the route
+ to customize. \n The namespace and name of this
+ componentRoute must match a corresponding entry
+ in the list of status.componentRoutes if the route
+ is to be customized."
+ maxLength: 63
+ minLength: 1
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
+ type: string
+ servingCertKeyPairSecret:
+ description: servingCertKeyPairSecret is a reference
+ to a secret of type `kubernetes.io/tls` in the openshift-config
+ namespace. The serving cert/key pair must match
+ and will be used by the operator to fulfill the
+ intent of serving with this name. If the custom
+ hostname uses the default routing suffix of the
+ cluster, the Secret specification for a serving
+ certificate will not be needed.
+ properties:
+ name:
+ description: name is the metadata.name of the
+ referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ required:
+ - hostname
+ - name
+ - namespace
+ type: object
+ type: array
+ domain:
+ description: "domain is used to generate a default host
+ name for a route when the route's host name is empty.
+ The generated host name will follow this pattern: \"..\".
+ \n It is also used as the default wildcard domain suffix
+ for ingress. The default ingresscontroller domain will
+ follow this pattern: \"*.\". \n Once set, changing
+ domain is not currently supported."
+ type: string
+ requiredHSTSPolicies:
+ description: "requiredHSTSPolicies specifies HSTS policies
+ that are required to be set on newly created or updated
+ routes matching the domainPattern/s and namespaceSelector/s
+ that are specified in the policy. Each requiredHSTSPolicy
+ must have at least a domainPattern and a maxAge to validate
+ a route HSTS Policy route annotation, and affect route
+ admission. \n A candidate route is checked for HSTS Policies
+ if it has the HSTS Policy route annotation: \"haproxy.router.openshift.io/hsts_header\"
+ E.g. haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains
+ \n - For each candidate route, if it matches a requiredHSTSPolicy
+ domainPattern and optional namespaceSelector, then the
+ maxAge, preloadPolicy, and includeSubdomainsPolicy must
+ be valid to be admitted. Otherwise, the route is rejected.
+ - The first match, by domainPattern and optional namespaceSelector,
+ in the ordering of the RequiredHSTSPolicies determines
+ the route's admission status. - If the candidate route
+ doesn't match any requiredHSTSPolicy domainPattern and
+ optional namespaceSelector, then it may use any HSTS Policy
+ annotation. \n The HSTS policy configuration may be changed
+ after routes have already been created. An update to a
+ previously admitted route may then fail if the updated
+ route does not conform to the updated HSTS policy configuration.
+ However, changing the HSTS policy configuration will not
+ cause a route that is already admitted to stop working.
+ \n Note that if there are no RequiredHSTSPolicies, any
+ HSTS Policy annotation on the route is valid."
+ items:
+ properties:
+ domainPatterns:
+ description: "domainPatterns is a list of domains
+ for which the desired HSTS annotations are required.
+ If domainPatterns is specified and a route is created
+ with a spec.host matching one of the domains, the
+ route must specify the HSTS Policy components described
+ in the matching RequiredHSTSPolicy. \n The use of
+ wildcards is allowed like this: *.foo.com matches
+ everything under foo.com. foo.com only matches foo.com,
+ so to cover foo.com and everything under it, you
+ must specify *both*."
+ items:
+ type: string
+ minItems: 1
+ type: array
+ includeSubDomainsPolicy:
+ description: 'includeSubDomainsPolicy means the HSTS
+ Policy should apply to any subdomains of the host''s
+ domain name. Thus, for the host bar.foo.com, if
+ includeSubDomainsPolicy was set to RequireIncludeSubDomains:
+ - the host app.bar.foo.com would inherit the HSTS
+ Policy of bar.foo.com - the host bar.foo.com would
+ inherit the HSTS Policy of bar.foo.com - the host
+ foo.com would NOT inherit the HSTS Policy of bar.foo.com
+ - the host def.foo.com would NOT inherit the HSTS
+ Policy of bar.foo.com'
+ enum:
+ - RequireIncludeSubDomains
+ - RequireNoIncludeSubDomains
+ - NoOpinion
+ type: string
+ maxAge:
+ description: maxAge is the delta time range in seconds
+ during which hosts are regarded as HSTS hosts. If
+ set to 0, it negates the effect, and hosts are removed
+ as HSTS hosts. If set to 0 and includeSubdomains
+ is specified, all subdomains of the host are also
+ removed as HSTS hosts. maxAge is a time-to-live
+ value, and if this policy is not refreshed on a
+ client, the HSTS policy will eventually expire on
+ that client.
+ properties:
+ largestMaxAge:
+ description: The largest allowed value (in seconds)
+ of the RequiredHSTSPolicy max-age This value
+ can be left unspecified, in which case no upper
+ limit is enforced.
+ format: int32
+ maximum: 2147483647
+ minimum: 0
+ type: integer
+ smallestMaxAge:
+ description: The smallest allowed value (in seconds)
+ of the RequiredHSTSPolicy max-age Setting max-age=0
+ allows the deletion of an existing HSTS header
+ from a host. This is a necessary tool for administrators
+ to quickly correct mistakes. This value can
+ be left unspecified, in which case no lower
+ limit is enforced.
+ format: int32
+ maximum: 2147483647
+ minimum: 0
+ type: integer
+ type: object
+ namespaceSelector:
+ description: namespaceSelector specifies a label selector
+ such that the policy applies only to those routes
+ that are in namespaces with labels that match the
+ selector, and are in one of the DomainPatterns.
+ Defaults to the empty LabelSelector, which matches
+ everything.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: A label selector requirement is
+ a selector that contains values, a key, and
+ an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty. If
+ the operator is Exists or DoesNotExist,
+ the values array must be empty. This array
+ is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is "In",
+ and the values array contains only "value".
+ The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ preloadPolicy:
+ description: preloadPolicy directs the client to include
+ hosts in its host preload list so that it never
+ needs to do an initial load to get the HSTS header
+ (note that this is not defined in RFC 6797 and is
+ therefore client implementation-dependent).
+ enum:
+ - RequirePreload
+ - RequireNoPreload
+ - NoOpinion
+ type: string
+ required:
+ - domainPatterns
+ type: object
+ type: array
+ type: object
+ network:
+ description: 'Network holds cluster-wide information about the
+ network. It is used to configure the desired network configuration,
+ such as: IP address pools for services/pod IPs, network plugin,
+ etc. Please view network.spec for an explanation on what applies
+ when configuring this resource. TODO (csrwng): Add validation
+ here to exclude changes that conflict with networking settings
+ in the HostedCluster.Spec.Networking field.'
+ properties:
+ clusterNetwork:
+ description: IP address pool to use for pod IPs. This field
+ is immutable after installation.
+ items:
+ description: ClusterNetworkEntry is a contiguous block
+ of IP addresses from which pod IPs are allocated.
+ properties:
+ cidr:
+ description: The complete block for pod IPs.
+ type: string
+ hostPrefix:
+ description: The size (prefix) of block to allocate
+ to each node. If this field is not used by the plugin,
+ it can be left unset.
+ format: int32
+ minimum: 0
+ type: integer
+ type: object
+ type: array
+ externalIP:
+ description: externalIP defines configuration for controllers
+ that affect Service.ExternalIP. If nil, then ExternalIP
+ is not allowed to be set.
+ properties:
+ autoAssignCIDRs:
+ description: autoAssignCIDRs is a list of CIDRs from
+ which to automatically assign Service.ExternalIP.
+ These are assigned when the service is of type LoadBalancer.
+ In general, this is only useful for bare-metal clusters.
+ In Openshift 3.x, this was misleadingly called "IngressIPs".
+ Automatically assigned External IPs are not affected
+ by any ExternalIPPolicy rules. Currently, only one
+ entry may be provided.
+ items:
+ type: string
+ type: array
+ policy:
+ description: policy is a set of restrictions applied
+ to the ExternalIP field. If nil or empty, then ExternalIP
+ is not allowed to be set.
+ properties:
+ allowedCIDRs:
+ description: allowedCIDRs is the list of allowed
+ CIDRs.
+ items:
+ type: string
+ type: array
+ rejectedCIDRs:
+ description: rejectedCIDRs is the list of disallowed
+ CIDRs. These take precedence over allowedCIDRs.
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ networkType:
+ description: 'NetworkType is the plugin that is to be deployed
+ (e.g. OpenShiftSDN). This should match a value that the
+ cluster-network-operator understands, or else no networking
+ will be installed. Currently supported values are: - OpenShiftSDN
+ This field is immutable after installation.'
+ type: string
+ serviceNetwork:
+ description: IP address pool for services. Currently, we
+ only support a single entry here. This field is immutable
+ after installation.
+ items:
+ type: string
+ type: array
+ serviceNodePortRange:
+ description: The port range allowed for Services of type
+ NodePort. If not specified, the default of 30000-32767
+ will be used. Such Services without a NodePort specified
+ will have one automatically allocated from this range.
+ This parameter can be updated after the cluster is installed.
+ pattern: ^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: string
+ type: object
+ oauth:
+ description: OAuth holds cluster-wide information about OAuth.
+ It is used to configure the integrated OAuth server. This
+ configuration is only honored when the top level Authentication
+ config has type set to IntegratedOAuth.
+ properties:
+ identityProviders:
+ description: identityProviders is an ordered list of ways
+ for a user to identify themselves. When this list is empty,
+ no identities are provisioned for users.
+ items:
+ description: IdentityProvider provides identities for
+ users authenticating using credentials
+ properties:
+ basicAuth:
+ description: basicAuth contains configuration options
+ for the BasicAuth IdP
+ properties:
+ ca:
+ description: ca is an optional reference to a
+ config map by name containing the PEM-encoded
+ CA bundle. It is used as a trust anchor to validate
+ the TLS certificate presented by the remote
+ server. The key "ca.crt" is used to locate the
+ data. If specified and the config map or expected
+ key is not found, the identity provider is not
+ honored. If the specified ca data is not valid,
+ the identity provider is not honored. If empty,
+ the default system roots are used. The namespace
+ for this config map is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ tlsClientCert:
+ description: tlsClientCert is an optional reference
+ to a secret by name that contains the PEM-encoded
+ TLS client certificate to present when connecting
+ to the server. The key "tls.crt" is used to
+ locate the data. If specified and the secret
+ or expected key is not found, the identity provider
+ is not honored. If the specified certificate
+ data is not valid, the identity provider is
+ not honored. The namespace for this secret is
+ openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ tlsClientKey:
+ description: tlsClientKey is an optional reference
+ to a secret by name that contains the PEM-encoded
+ TLS private key for the client certificate referenced
+ in tlsClientCert. The key "tls.key" is used
+ to locate the data. If specified and the secret
+ or expected key is not found, the identity provider
+ is not honored. If the specified certificate
+ data is not valid, the identity provider is
+ not honored. The namespace for this secret is
+ openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ url:
+ description: url is the remote URL to connect
+ to
+ type: string
+ type: object
+ github:
+ description: github enables user authentication using
+ GitHub credentials
+ properties:
+ ca:
+ description: ca is an optional reference to a
+ config map by name containing the PEM-encoded
+ CA bundle. It is used as a trust anchor to validate
+ the TLS certificate presented by the remote
+ server. The key "ca.crt" is used to locate the
+ data. If specified and the config map or expected
+ key is not found, the identity provider is not
+ honored. If the specified ca data is not valid,
+ the identity provider is not honored. If empty,
+ the default system roots are used. This can
+ only be configured when hostname is set to a
+ non-empty value. The namespace for this config
+ map is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference
+ to the secret by name containing the oauth client
+ secret. The key "clientSecret" is used to locate
+ the data. If the secret or expected key is not
+ found, the identity provider is not honored.
+ The namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ hostname:
+ description: hostname is the optional domain (e.g.
+ "mycompany.com") for use with a hosted instance
+ of GitHub Enterprise. It must match the GitHub
+ Enterprise settings value configured at /setup/settings#hostname.
+ type: string
+ organizations:
+ description: organizations optionally restricts
+ which organizations are allowed to log in
+ items:
+ type: string
+ type: array
+ teams:
+ description: teams optionally restricts which
+                            teams are allowed to log in. Format is <org>/<team>.
+ items:
+ type: string
+ type: array
+ type: object
+ gitlab:
+ description: gitlab enables user authentication using
+ GitLab credentials
+ properties:
+ ca:
+ description: ca is an optional reference to a
+ config map by name containing the PEM-encoded
+ CA bundle. It is used as a trust anchor to validate
+ the TLS certificate presented by the remote
+ server. The key "ca.crt" is used to locate the
+ data. If specified and the config map or expected
+ key is not found, the identity provider is not
+ honored. If the specified ca data is not valid,
+ the identity provider is not honored. If empty,
+ the default system roots are used. The namespace
+ for this config map is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference
+ to the secret by name containing the oauth client
+ secret. The key "clientSecret" is used to locate
+ the data. If the secret or expected key is not
+ found, the identity provider is not honored.
+ The namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ url:
+ description: url is the oauth server base URL
+ type: string
+ type: object
+ google:
+ description: google enables user authentication using
+ Google credentials
+ properties:
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference
+ to the secret by name containing the oauth client
+ secret. The key "clientSecret" is used to locate
+ the data. If the secret or expected key is not
+ found, the identity provider is not honored.
+ The namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ hostedDomain:
+ description: hostedDomain is the optional Google
+ App domain (e.g. "mycompany.com") to restrict
+ logins to
+ type: string
+ type: object
+ htpasswd:
+ description: htpasswd enables user authentication
+ using an HTPasswd file to validate credentials
+ properties:
+ fileData:
+ description: fileData is a required reference
+ to a secret by name containing the data to use
+ as the htpasswd file. The key "htpasswd" is
+ used to locate the data. If the secret or expected
+ key is not found, the identity provider is not
+ honored. If the specified htpasswd data is not
+ valid, the identity provider is not honored.
+ The namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ type: object
+ keystone:
+ description: keystone enables user authentication
+ using keystone password credentials
+ properties:
+ ca:
+ description: ca is an optional reference to a
+ config map by name containing the PEM-encoded
+ CA bundle. It is used as a trust anchor to validate
+ the TLS certificate presented by the remote
+ server. The key "ca.crt" is used to locate the
+ data. If specified and the config map or expected
+ key is not found, the identity provider is not
+ honored. If the specified ca data is not valid,
+ the identity provider is not honored. If empty,
+ the default system roots are used. The namespace
+ for this config map is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ domainName:
+ description: domainName is required for keystone
+ v3
+ type: string
+ tlsClientCert:
+ description: tlsClientCert is an optional reference
+ to a secret by name that contains the PEM-encoded
+ TLS client certificate to present when connecting
+ to the server. The key "tls.crt" is used to
+ locate the data. If specified and the secret
+ or expected key is not found, the identity provider
+ is not honored. If the specified certificate
+ data is not valid, the identity provider is
+ not honored. The namespace for this secret is
+ openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ tlsClientKey:
+ description: tlsClientKey is an optional reference
+ to a secret by name that contains the PEM-encoded
+ TLS private key for the client certificate referenced
+ in tlsClientCert. The key "tls.key" is used
+ to locate the data. If specified and the secret
+ or expected key is not found, the identity provider
+ is not honored. If the specified certificate
+ data is not valid, the identity provider is
+ not honored. The namespace for this secret is
+ openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ url:
+ description: url is the remote URL to connect
+ to
+ type: string
+ type: object
+ ldap:
+ description: ldap enables user authentication using
+ LDAP credentials
+ properties:
+ attributes:
+ description: attributes maps LDAP attributes to
+ identities
+ properties:
+ email:
+ description: email is the list of attributes
+ whose values should be used as the email
+ address. Optional. If unspecified, no email
+ is set for the identity
+ items:
+ type: string
+ type: array
+ id:
+ description: id is the list of attributes
+ whose values should be used as the user
+ ID. Required. First non-empty attribute
+ is used. At least one attribute is required.
+ If none of the listed attribute have a value,
+ authentication fails. LDAP standard identity
+ attribute is "dn"
+ items:
+ type: string
+ type: array
+ name:
+ description: name is the list of attributes
+ whose values should be used as the display
+ name. Optional. If unspecified, no display
+ name is set for the identity LDAP standard
+ display name attribute is "cn"
+ items:
+ type: string
+ type: array
+ preferredUsername:
+ description: preferredUsername is the list
+ of attributes whose values should be used
+ as the preferred username. LDAP standard
+ login attribute is "uid"
+ items:
+ type: string
+ type: array
+ type: object
+ bindDN:
+ description: bindDN is an optional DN to bind
+ with during the search phase.
+ type: string
+ bindPassword:
+ description: bindPassword is an optional reference
+ to a secret by name containing a password to
+ bind with during the search phase. The key "bindPassword"
+ is used to locate the data. If specified and
+ the secret or expected key is not found, the
+ identity provider is not honored. The namespace
+ for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ ca:
+ description: ca is an optional reference to a
+ config map by name containing the PEM-encoded
+ CA bundle. It is used as a trust anchor to validate
+ the TLS certificate presented by the remote
+ server. The key "ca.crt" is used to locate the
+ data. If specified and the config map or expected
+ key is not found, the identity provider is not
+ honored. If the specified ca data is not valid,
+ the identity provider is not honored. If empty,
+ the default system roots are used. The namespace
+ for this config map is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ insecure:
+ description: 'insecure, if true, indicates the
+ connection should not use TLS WARNING: Should
+ not be set to `true` with the URL scheme "ldaps://"
+ as "ldaps://" URLs always attempt to connect
+ using TLS, even when `insecure` is set to `true`
+ When `true`, "ldap://" URLS connect insecurely.
+ When `false`, "ldap://" URLs are upgraded to
+ a TLS connection using StartTLS as specified
+ in https://tools.ietf.org/html/rfc2830.'
+ type: boolean
+ url:
+ description: 'url is an RFC 2255 URL which specifies
+ the LDAP search parameters to use. The syntax
+ of the URL is: ldap://host:port/basedn?attribute?scope?filter'
+ type: string
+ type: object
+ mappingMethod:
+ description: mappingMethod determines how identities
+ from this provider are mapped to users Defaults
+ to "claim"
+ type: string
+ name:
+ description: 'name is used to qualify the identities
+ returned by this provider. - It MUST be unique and
+ not shared by any other identity provider used -
+ It MUST be a valid path segment: name cannot equal
+ "." or ".." or contain "/" or "%" or ":" Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName'
+ type: string
+ openID:
+ description: openID enables user authentication using
+ OpenID credentials
+ properties:
+ ca:
+ description: ca is an optional reference to a
+ config map by name containing the PEM-encoded
+ CA bundle. It is used as a trust anchor to validate
+ the TLS certificate presented by the remote
+ server. The key "ca.crt" is used to locate the
+ data. If specified and the config map or expected
+ key is not found, the identity provider is not
+ honored. If the specified ca data is not valid,
+ the identity provider is not honored. If empty,
+ the default system roots are used. The namespace
+ for this config map is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ claims:
+ description: claims mappings
+ properties:
+ email:
+ description: email is the list of claims whose
+ values should be used as the email address.
+ Optional. If unspecified, no email is set
+ for the identity
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ groups:
+ description: groups is the list of claims
+ value of which should be used to synchronize
+ groups from the OIDC provider to OpenShift
+ for the user. If multiple claims are specified,
+ the first one with a non-empty value is
+ used.
+ items:
+ description: OpenIDClaim represents a claim
+ retrieved from an OpenID provider's tokens
+ or userInfo responses
+ minLength: 1
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ description: name is the list of claims whose
+ values should be used as the display name.
+ Optional. If unspecified, no display name
+ is set for the identity
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ preferredUsername:
+ description: preferredUsername is the list
+ of claims whose values should be used as
+ the preferred username. If unspecified,
+ the preferred username is determined from
+ the value of the sub claim
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference
+ to the secret by name containing the oauth client
+ secret. The key "clientSecret" is used to locate
+ the data. If the secret or expected key is not
+ found, the identity provider is not honored.
+ The namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced secret
+ type: string
+ required:
+ - name
+ type: object
+ extraAuthorizeParameters:
+ additionalProperties:
+ type: string
+ description: extraAuthorizeParameters are any
+ custom parameters to add to the authorize request.
+ type: object
+ extraScopes:
+ description: extraScopes are any scopes to request
+ in addition to the standard "openid" scope.
+ items:
+ type: string
+ type: array
+ issuer:
+ description: issuer is the URL that the OpenID
+ Provider asserts as its Issuer Identifier. It
+ must use the https scheme with no query or fragment
+ component.
+ type: string
+ type: object
+ requestHeader:
+ description: requestHeader enables user authentication
+ using request header credentials
+ properties:
+ ca:
+ description: ca is a required reference to a config
+ map by name containing the PEM-encoded CA bundle.
+ It is used as a trust anchor to validate the
+ TLS certificate presented by the remote server.
+ Specifically, it allows verification of incoming
+ requests to prevent header spoofing. The key
+ "ca.crt" is used to locate the data. If the
+ config map or expected key is not found, the
+ identity provider is not honored. If the specified
+ ca data is not valid, the identity provider
+ is not honored. The namespace for this config
+ map is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of
+ the referenced config map
+ type: string
+ required:
+ - name
+ type: object
+ challengeURL:
+ description: challengeURL is a URL to redirect
+ unauthenticated /authorize requests to Unauthenticated
+ requests from OAuth clients which expect WWW-Authenticate
+ challenges will be redirected here. ${url} is
+ replaced with the current URL, escaped to be
+ safe in a query parameter https://www.example.com/sso-login?then=${url}
+ ${query} is replaced with the current query
+ string https://www.example.com/auth-proxy/oauth/authorize?${query}
+ Required when challenge is set to true.
+ type: string
+ clientCommonNames:
+ description: clientCommonNames is an optional
+ list of common names to require a match from.
+ If empty, any client certificate validated against
+ the clientCA bundle is considered authoritative.
+ items:
+ type: string
+ type: array
+ emailHeaders:
+ description: emailHeaders is the set of headers
+ to check for the email address
+ items:
+ type: string
+ type: array
+ headers:
+ description: headers is the set of headers to
+ check for identity information
+ items:
+ type: string
+ type: array
+ loginURL:
+ description: loginURL is a URL to redirect unauthenticated
+ /authorize requests to Unauthenticated requests
+ from OAuth clients which expect interactive
+ logins will be redirected here ${url} is replaced
+ with the current URL, escaped to be safe in
+ a query parameter https://www.example.com/sso-login?then=${url}
+ ${query} is replaced with the current query
+ string https://www.example.com/auth-proxy/oauth/authorize?${query}
+ Required when login is set to true.
+ type: string
+ nameHeaders:
+ description: nameHeaders is the set of headers
+ to check for the display name
+ items:
+ type: string
+ type: array
+ preferredUsernameHeaders:
+ description: preferredUsernameHeaders is the set
+ of headers to check for the preferred username
+ items:
+ type: string
+ type: array
+ type: object
+ type:
+ description: type identifies the identity provider
+ type for this entry.
+ type: string
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ templates:
+ description: templates allow you to customize pages like
+ the login page.
+ properties:
+ error:
+ description: error is the name of a secret that specifies
+ a go template to use to render error pages during
+ the authentication or grant flow. The key "errors.html"
+ is used to locate the template data. If specified
+ and the secret or expected key is not found, the default
+ error page is used. If the specified template is not
+ valid, the default error page is used. If unspecified,
+ the default error page is used. The namespace for
+ this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ required:
+ - name
+ type: object
+ login:
+ description: login is the name of a secret that specifies
+ a go template to use to render the login page. The
+ key "login.html" is used to locate the template data.
+ If specified and the secret or expected key is not
+ found, the default login page is used. If the specified
+ template is not valid, the default login page is used.
+ If unspecified, the default login page is used. The
+ namespace for this secret is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ required:
+ - name
+ type: object
+ providerSelection:
+ description: providerSelection is the name of a secret
+ that specifies a go template to use to render the
+ provider selection page. The key "providers.html"
+ is used to locate the template data. If specified
+ and the secret or expected key is not found, the default
+ provider selection page is used. If the specified
+ template is not valid, the default provider selection
+ page is used. If unspecified, the default provider
+ selection page is used. The namespace for this secret
+ is openshift-config.
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ required:
+ - name
+ type: object
+ type: object
+ tokenConfig:
+ description: tokenConfig contains options for authorization
+ and access tokens
+ properties:
+ accessTokenInactivityTimeout:
+ description: "accessTokenInactivityTimeout defines the
+ token inactivity timeout for tokens granted by any
+ client. The value represents the maximum amount of
+ time that can occur between consecutive uses of the
+ token. Tokens become invalid if they are not used
+ within this temporal window. The user will need to
+ acquire a new token to regain access once a token
+ times out. Takes valid time duration string such as
+ \"5m\", \"1.5h\" or \"2h45m\". The minimum allowed
+ value for duration is 300s (5 minutes). If the timeout
+ is configured per client, then that value takes precedence.
+ If the timeout value is not specified and the client
+ does not override the value, then tokens are valid
+ until their lifetime. \n WARNING: existing tokens'
+ timeout will not be affected (lowered) by changing
+ this value"
+ type: string
+ accessTokenInactivityTimeoutSeconds:
+ description: 'accessTokenInactivityTimeoutSeconds -
+ DEPRECATED: setting this field has no effect.'
+ format: int32
+ type: integer
+ accessTokenMaxAgeSeconds:
+ description: accessTokenMaxAgeSeconds defines the maximum
+ age of access tokens
+ format: int32
+ type: integer
+ type: object
+ type: object
+ proxy:
+ description: Proxy holds cluster-wide information on how to
+ configure default proxies for the cluster.
+ properties:
+ httpProxy:
+ description: httpProxy is the URL of the proxy for HTTP
+ requests. Empty means unset and will not result in an
+ env var.
+ type: string
+ httpsProxy:
+ description: httpsProxy is the URL of the proxy for HTTPS
+ requests. Empty means unset and will not result in an
+ env var.
+ type: string
+ noProxy:
+ description: noProxy is a comma-separated list of hostnames
+ and/or CIDRs and/or IPs for which the proxy should not
+ be used. Empty means unset and will not result in an env
+ var.
+ type: string
+ readinessEndpoints:
+ description: readinessEndpoints is a list of endpoints used
+ to verify readiness of the proxy.
+ items:
+ type: string
+ type: array
+ trustedCA:
+ description: "trustedCA is a reference to a ConfigMap containing
+ a CA certificate bundle. The trustedCA field should only
+ be consumed by a proxy validator. The validator is responsible
+ for reading the certificate bundle from the required key
+ \"ca-bundle.crt\", merging it with the system default
+ trust bundle, and writing the merged trust bundle to a
+ ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\"
+ namespace. Clients that expect to make proxy connections
+ must use the trusted-ca-bundle for all HTTPS requests
+ to the proxy, and may use the trusted-ca-bundle for non-proxy
+ HTTPS requests as well. \n The namespace for the ConfigMap
+ referenced by trustedCA is \"openshift-config\". Here
+ is an example ConfigMap (in yaml): \n apiVersion: v1 kind:
+ ConfigMap metadata: name: user-ca-bundle namespace: openshift-config
+ data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom
+ CA certificate bundle. -----END CERTIFICATE-----"
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ required:
+ - name
+ type: object
+ type: object
+ scheduler:
+ description: Scheduler holds cluster-wide config information
+ to run the Kubernetes Scheduler and influence its placement
+ decisions. The canonical name for this config is `cluster`.
+ properties:
+ defaultNodeSelector:
+ description: 'defaultNodeSelector helps set the cluster-wide
+ default node selector to restrict pod placement to specific
+ nodes. This is applied to the pods created in all namespaces
+ and creates an intersection with any existing nodeSelectors
+ already set on a pod, additionally constraining that pod''s
+ selector. For example, defaultNodeSelector: "type=user-node,region=east"
+ would set nodeSelector field in pod spec to "type=user-node,region=east"
+ to all pods created in all namespaces. Namespaces having
+ project-wide node selectors won''t be impacted even if
+ this field is set. This adds an annotation section to
+ the namespace. For example, if a new namespace is created
+ with node-selector=''type=user-node,region=east'', the
+ annotation openshift.io/node-selector: type=user-node,region=east
+ gets added to the project. When the openshift.io/node-selector
+ annotation is set on the project the value is used in
+ preference to the value we are setting for defaultNodeSelector
+ field. For instance, openshift.io/node-selector: "type=user-node,region=west"
+ means that the default of "type=user-node,region=east"
+ set in defaultNodeSelector would not be applied.'
+ type: string
+ mastersSchedulable:
+ description: 'MastersSchedulable allows masters nodes to
+ be schedulable. When this flag is turned on, all the master
+ nodes in the cluster will be made schedulable, so that
+ workload pods can run on them. The default value for this
+ field is false, meaning none of the master nodes are schedulable.
+ Important Note: Once the workload pods start running on
+ the master nodes, extreme care must be taken to ensure
+ that cluster-critical control plane components are not
+ impacted. Please turn on this field after doing due diligence.'
+ type: boolean
+ policy:
+ description: 'DEPRECATED: the scheduler Policy API has been
+ deprecated and will be removed in a future release. policy
+ is a reference to a ConfigMap containing scheduler policy
+ which has user specified predicates and priorities. If
+ this ConfigMap is not available scheduler will default
+ to use DefaultAlgorithmProvider. The namespace for this
+ configmap is openshift-config.'
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ required:
+ - name
+ type: object
+ profile:
+ description: "profile sets which scheduling profile should
+ be set in order to configure scheduling decisions for
+ new pods. \n Valid values are \"LowNodeUtilization\",
+ \"HighNodeUtilization\", \"NoScoring\" Defaults to \"LowNodeUtilization\""
+ enum:
+ - ""
+ - LowNodeUtilization
+ - HighNodeUtilization
+ - NoScoring
+ type: string
+ type: object
+ type: object
+ controllerAvailabilityPolicy:
+ default: SingleReplica
+ description: ControllerAvailabilityPolicy specifies the availability
+ policy applied to critical control plane components. The default
+ value is SingleReplica.
+ type: string
+ dns:
+ description: DNSSpec specifies the DNS configuration in the cluster.
+ properties:
+ baseDomain:
+ description: BaseDomain is the base domain of the cluster.
+ type: string
+ privateZoneID:
+ description: PrivateZoneID is the Hosted Zone ID where all the
+ DNS records that are only available internally to the cluster
+ exist.
+ type: string
+ publicZoneID:
+ description: PublicZoneID is the Hosted Zone ID where all the
+ DNS records that are publicly accessible to the internet exist.
+ type: string
+ required:
+ - baseDomain
+ type: object
+ etcd:
+ description: Etcd contains metadata about the etcd cluster the hypershift
+ managed Openshift control plane components use to store data.
+ properties:
+ managed:
+ description: Managed specifies the behavior of an etcd cluster
+ managed by HyperShift.
+ properties:
+ storage:
+ description: Storage specifies how etcd data is persisted.
+ properties:
+ persistentVolume:
+ description: PersistentVolume is the configuration for
+ PersistentVolume etcd storage. With this implementation,
+ a PersistentVolume will be allocated for every etcd
+ member (either 1 or 3 depending on the HostedCluster
+ control plane availability configuration).
+ properties:
+ size:
+ anyOf:
+ - type: integer
+ - type: string
+ default: 4Gi
+ description: Size is the minimum size of the data
+ volume for each etcd member.
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ storageClassName:
+ description: "StorageClassName is the StorageClass
+ of the data volume for each etcd member. \n See
+ https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1."
+ type: string
+ type: object
+ restoreSnapshotURL:
+ description: RestoreSnapshotURL allows an optional list
+ of URLs to be provided where an etcd snapshot can
+ be downloaded, for example a pre-signed URL referencing
+ a storage service, one URL per replica. This snapshot
+ will be restored on initial startup, only when the
+ etcd PV is empty.
+ items:
+ type: string
+ type: array
+ type:
+ description: Type is the kind of persistent storage
+ implementation to use for etcd.
+ enum:
+ - PersistentVolume
+ type: string
+ required:
+ - type
+ type: object
+ required:
+ - storage
+ type: object
+ managementType:
+ description: ManagementType defines how the etcd cluster is
+ managed.
+ enum:
+ - Managed
+ - Unmanaged
+ type: string
+ unmanaged:
+ description: Unmanaged specifies configuration which enables
+                  the control plane to integrate with an externally managed etcd
+ cluster.
+ properties:
+ endpoint:
+ description: "Endpoint is the full etcd cluster client endpoint
+ URL. For example: \n https://etcd-client:2379 \n If the
+ URL uses an HTTPS scheme, the TLS field is required."
+ pattern: ^https://
+ type: string
+ tls:
+ description: TLS specifies TLS configuration for HTTPS etcd
+ client endpoints.
+ properties:
+ clientSecret:
+ description: "ClientSecret refers to a secret for client
+ mTLS authentication with the etcd cluster. It may
+ have the following key/value pairs: \n etcd-client-ca.crt:
+ Certificate Authority value etcd-client.crt: Client
+ certificate value etcd-client.key: Client certificate
+ key value"
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - clientSecret
+ type: object
+ required:
+ - endpoint
+ - tls
+ type: object
+ required:
+ - managementType
+ type: object
+ fips:
+ description: FIPS specifies if the nodes for the cluster will be
+ running in FIPS mode
+ type: boolean
+ imageContentSources:
+ description: ImageContentSources lists sources/repositories for
+ the release-image content.
+ items:
+ description: ImageContentSource specifies image mirrors that can
+ be used by cluster nodes to pull content. For cluster workloads,
+ if a container image registry host of the pullspec matches Source
+ then one of the Mirrors are substituted as hosts in the pullspec
+ and tried in order to fetch the image.
+ properties:
+ mirrors:
+ description: Mirrors are one or more repositories that may
+ also contain the same images.
+ items:
+ type: string
+ type: array
+ source:
+ description: Source is the repository that users refer to,
+ e.g. in image pull specifications.
+ type: string
+ required:
+ - source
+ type: object
+ type: array
+ infraID:
+ type: string
+ infrastructureAvailabilityPolicy:
+ default: SingleReplica
+ description: InfrastructureAvailabilityPolicy specifies the availability
+ policy applied to infrastructure services which run on cluster
+ nodes. The default value is SingleReplica.
+ type: string
+ issuerURL:
+ description: IssuerURL is an OIDC issuer URL which is used as the
+ issuer in all ServiceAccount tokens generated by the control plane
+ API server. The default value is kubernetes.default.svc, which
+ only works for in-cluster validation.
+ type: string
+ kubeconfig:
+ description: KubeConfig specifies the name and key for the kubeconfig
+ secret
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ networking:
+ description: Networking specifies network configuration for the
+ cluster. Temporarily optional for backward compatibility, required
+ in future releases.
+ properties:
+ apiServer:
+ description: APIServer contains advanced network settings for
+ the API server that affect how the APIServer is exposed inside
+ a cluster node.
+ properties:
+ advertiseAddress:
+ description: AdvertiseAddress is the address that nodes
+ will use to talk to the API server. This is an address
+ associated with the loopback adapter of each node. If
+ not specified, 172.20.0.1 is used.
+ type: string
+ allowedCIDRBlocks:
+ description: AllowedCIDRBlocks is an allow list of CIDR
+ blocks that can access the APIServer If not specified,
+ traffic is allowed from all addresses. This depends on
+ underlying support by the cloud provider for Service LoadBalancerSourceRanges
+ items:
+ pattern: ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$
+ type: string
+ type: array
+ port:
+ description: Port is the port at which the APIServer is
+ exposed inside a node. Other pods using host networking
+ cannot listen on this port. If not specified, 6443 is
+ used.
+ format: int32
+ type: integer
+ type: object
+ clusterNetwork:
+ description: ClusterNetwork is the list of IP address pools
+ for pods.
+ items:
+ description: ClusterNetworkEntry is a single IP address block
+ for pod IP blocks. IP blocks are allocated with size 2^HostSubnetLength.
+ properties:
+ cidr:
+ description: CIDR is the IP block address pool.
+ type: string
+ hostPrefix:
+ description: HostPrefix is the prefix size to allocate
+ to each node from the CIDR. For example, 24 would allocate
+                      2^8=256 addresses to each node. If this field is not
+ used by the plugin, it can be left unset.
+ format: int32
+ type: integer
+ required:
+ - cidr
+ type: object
+ type: array
+ machineNetwork:
+ description: MachineNetwork is the list of IP address pools
+ for machines.
+ items:
+ description: MachineNetworkEntry is a single IP address block
+ for node IP blocks.
+ properties:
+ cidr:
+ description: CIDR is the IP block address pool for machines
+ within the cluster.
+ type: string
+ required:
+ - cidr
+ type: object
+ type: array
+ networkType:
+ default: OVNKubernetes
+ description: NetworkType specifies the SDN provider used for
+ cluster networking.
+ enum:
+ - OpenShiftSDN
+ - Calico
+ - OVNKubernetes
+ - Other
+ type: string
+ serviceNetwork:
+ description: 'ServiceNetwork is the list of IP address pools
+ for services. NOTE: currently only one entry is supported.'
+ items:
+ description: ServiceNetworkEntry is a single IP address block
+ for the service network.
+ properties:
+ cidr:
+ description: CIDR is the IP block address pool for services
+ within the cluster.
+ type: string
+ required:
+ - cidr
+ type: object
+ type: array
+ required:
+ - clusterNetwork
+ - networkType
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector when specified, must be true for the pods
+ managed by the HostedCluster to be scheduled.
+ type: object
+ olmCatalogPlacement:
+ default: management
+ description: OLMCatalogPlacement specifies the placement of OLM
+ catalog components. By default, this is set to management and
+ OLM catalog components are deployed onto the management cluster.
+ If set to guest, the OLM catalog components will be deployed onto
+ the guest cluster.
+ enum:
+ - management
+ - guest
+ type: string
+ pausedUntil:
+ description: 'PausedUntil is a field that can be used to pause reconciliation
+ on a resource. Either a date can be provided in RFC3339 format
+ or a boolean. If a date is provided: reconciliation is paused
+ on the resource until that date. If the boolean true is provided:
+ reconciliation is paused on the resource until the field is removed.'
+ type: string
+ platform:
+ description: PlatformSpec specifies the underlying infrastructure
+ provider for the cluster and is used to configure platform specific
+ behavior.
+ properties:
+ agent:
+ description: Agent specifies configuration for agent-based installations.
+ properties:
+ agentNamespace:
+ description: AgentNamespace is the namespace where to search
+ for Agents for this cluster
+ type: string
+ required:
+ - agentNamespace
+ type: object
+ aws:
+ description: AWS specifies configuration for clusters running
+ on Amazon Web Services.
+ properties:
+ cloudProviderConfig:
+ description: 'CloudProviderConfig specifies AWS networking
+ configuration for the control plane. This is mainly used
+ for cloud provider controller config: https://github.com/kubernetes/kubernetes/blob/f5be5052e3d0808abb904aebd3218fe4a5c2dd82/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go#L1347-L1364
+ TODO(dan): should this be named AWSNetworkConfig?'
+ properties:
+ subnet:
+ description: Subnet is the subnet to use for control
+ plane cloud resources.
+ properties:
+ arn:
+ description: ARN of resource
+ type: string
+ filters:
+ description: 'Filters is a set of key/value pairs
+ used to identify a resource They are applied according
+ to the rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ items:
+ description: Filter is a filter used to identify
+ an AWS resource
+ properties:
+ name:
+ description: Name of the filter. Filter names
+ are case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter
+ values. Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ vpc:
+ description: VPC is the VPC to use for control plane
+ cloud resources.
+ type: string
+ zone:
+ description: Zone is the availability zone where control
+ plane cloud resources are created.
+ type: string
+ required:
+ - vpc
+ type: object
+ endpointAccess:
+ default: Public
+ description: EndpointAccess specifies the publishing scope
+ of cluster endpoints. The default is Public.
+ enum:
+ - Public
+ - PublicAndPrivate
+ - Private
+ type: string
+ region:
+ description: Region is the AWS region in which the cluster
+ resides. This configures the OCP control plane cloud integrations,
+ and is used by NodePool to resolve the correct boot AMI
+ for a given release.
+ type: string
+ resourceTags:
+ description: ResourceTags is a list of additional tags to
+ apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html
+ for information on tagging AWS resources. AWS supports
+ a maximum of 50 tags per resource. OpenShift reserves
+ 25 tags for its use, leaving 25 tags available for the
+ user.
+ items:
+ description: AWSResourceTag is a tag to apply to AWS resources
+ created for the cluster.
+ properties:
+ key:
+ description: Key is the key of the tag.
+ maxLength: 128
+ minLength: 1
+ pattern: ^[0-9A-Za-z_.:/=+-@]+$
+ type: string
+ value:
+ description: "Value is the value of the tag. \n Some
+                    AWS services do not support empty values. Since tags
+ are added to resources in many services, the length
+ of the tag value must meet the requirements of all
+ services."
+ maxLength: 256
+ minLength: 1
+ pattern: ^[0-9A-Za-z_.:/=+-@]+$
+ type: string
+ required:
+ - key
+ - value
+ type: object
+ maxItems: 25
+ type: array
+ rolesRef:
+ description: RolesRef contains references to various AWS
+ IAM roles required to enable integrations such as OIDC.
+ properties:
+ controlPlaneOperatorARN:
+ description: "ControlPlaneOperatorARN is an ARN value
+ referencing a role appropriate for the Control Plane
+ Operator. \n The following is an example of a valid
+ policy document: \n { \"Version\": \"2012-10-17\",
+ \"Statement\": [ { \"Effect\": \"Allow\", \"Action\":
+ [ \"ec2:CreateVpcEndpoint\", \"ec2:DescribeVpcEndpoints\",
+ \"ec2:ModifyVpcEndpoint\", \"ec2:DeleteVpcEndpoints\",
+ \"ec2:CreateTags\", \"route53:ListHostedZones\" ],
+ \"Resource\": \"*\" }, { \"Effect\": \"Allow\", \"Action\":
+ [ \"route53:ChangeResourceRecordSets\", \"route53:ListResourceRecordSets\"
+ ], \"Resource\": \"arn:aws:route53:::%s\" } ] }"
+ type: string
+ imageRegistryARN:
+ description: "ImageRegistryARN is an ARN value referencing
+ a role appropriate for the Image Registry Operator.
+ \n The following is an example of a valid policy document:
+ \n { \"Version\": \"2012-10-17\", \"Statement\": [
+ { \"Effect\": \"Allow\", \"Action\": [ \"s3:CreateBucket\",
+ \"s3:DeleteBucket\", \"s3:PutBucketTagging\", \"s3:GetBucketTagging\",
+ \"s3:PutBucketPublicAccessBlock\", \"s3:GetBucketPublicAccessBlock\",
+ \"s3:PutEncryptionConfiguration\", \"s3:GetEncryptionConfiguration\",
+ \"s3:PutLifecycleConfiguration\", \"s3:GetLifecycleConfiguration\",
+ \"s3:GetBucketLocation\", \"s3:ListBucket\", \"s3:GetObject\",
+ \"s3:PutObject\", \"s3:DeleteObject\", \"s3:ListBucketMultipartUploads\",
+ \"s3:AbortMultipartUpload\", \"s3:ListMultipartUploadParts\"
+ ], \"Resource\": \"*\" } ] }"
+ type: string
+ ingressARN:
+ description: "The referenced role must have a trust
+ relationship that allows it to be assumed via web
+ identity. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html.
+ Example: { \"Version\": \"2012-10-17\", \"Statement\":
+ [ { \"Effect\": \"Allow\", \"Principal\": { \"Federated\":
+ \"{{ .ProviderARN }}\" }, \"Action\": \"sts:AssumeRoleWithWebIdentity\",
+ \"Condition\": { \"StringEquals\": { \"{{ .ProviderName
+ }}:sub\": {{ .ServiceAccounts }} } } } ] } \n IngressARN
+ is an ARN value referencing a role appropriate for
+ the Ingress Operator. \n The following is an example
+ of a valid policy document: \n { \"Version\": \"2012-10-17\",
+ \"Statement\": [ { \"Effect\": \"Allow\", \"Action\":
+ [ \"elasticloadbalancing:DescribeLoadBalancers\",
+ \"tag:GetResources\", \"route53:ListHostedZones\"
+ ], \"Resource\": \"*\" }, { \"Effect\": \"Allow\",
+ \"Action\": [ \"route53:ChangeResourceRecordSets\"
+ ], \"Resource\": [ \"arn:aws:route53:::PUBLIC_ZONE_ID\",
+ \"arn:aws:route53:::PRIVATE_ZONE_ID\" ] } ] }"
+ type: string
+ kubeCloudControllerARN:
+ description: "KubeCloudControllerARN is an ARN value
+ referencing a role appropriate for the KCM/KCC. \n
+ The following is an example of a valid policy document:
+ \n { \"Version\": \"2012-10-17\", \"Statement\": [
+ { \"Action\": [ \"ec2:DescribeInstances\", \"ec2:DescribeImages\",
+ \"ec2:DescribeRegions\", \"ec2:DescribeRouteTables\",
+ \"ec2:DescribeSecurityGroups\", \"ec2:DescribeSubnets\",
+ \"ec2:DescribeVolumes\", \"ec2:CreateSecurityGroup\",
+ \"ec2:CreateTags\", \"ec2:CreateVolume\", \"ec2:ModifyInstanceAttribute\",
+ \"ec2:ModifyVolume\", \"ec2:AttachVolume\", \"ec2:AuthorizeSecurityGroupIngress\",
+ \"ec2:CreateRoute\", \"ec2:DeleteRoute\", \"ec2:DeleteSecurityGroup\",
+ \"ec2:DeleteVolume\", \"ec2:DetachVolume\", \"ec2:RevokeSecurityGroupIngress\",
+ \"ec2:DescribeVpcs\", \"elasticloadbalancing:AddTags\",
+ \"elasticloadbalancing:AttachLoadBalancerToSubnets\",
+ \"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer\",
+ \"elasticloadbalancing:CreateLoadBalancer\", \"elasticloadbalancing:CreateLoadBalancerPolicy\",
+ \"elasticloadbalancing:CreateLoadBalancerListeners\",
+ \"elasticloadbalancing:ConfigureHealthCheck\", \"elasticloadbalancing:DeleteLoadBalancer\",
+ \"elasticloadbalancing:DeleteLoadBalancerListeners\",
+ \"elasticloadbalancing:DescribeLoadBalancers\", \"elasticloadbalancing:DescribeLoadBalancerAttributes\",
+ \"elasticloadbalancing:DetachLoadBalancerFromSubnets\",
+ \"elasticloadbalancing:DeregisterInstancesFromLoadBalancer\",
+ \"elasticloadbalancing:ModifyLoadBalancerAttributes\",
+ \"elasticloadbalancing:RegisterInstancesWithLoadBalancer\",
+ \"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer\",
+ \"elasticloadbalancing:AddTags\", \"elasticloadbalancing:CreateListener\",
+ \"elasticloadbalancing:CreateTargetGroup\", \"elasticloadbalancing:DeleteListener\",
+ \"elasticloadbalancing:DeleteTargetGroup\", \"elasticloadbalancing:DescribeListeners\",
+ \"elasticloadbalancing:DescribeLoadBalancerPolicies\",
+ \"elasticloadbalancing:DescribeTargetGroups\", \"elasticloadbalancing:DescribeTargetHealth\",
+ \"elasticloadbalancing:ModifyListener\", \"elasticloadbalancing:ModifyTargetGroup\",
+ \"elasticloadbalancing:RegisterTargets\", \"elasticloadbalancing:SetLoadBalancerPoliciesOfListener\",
+ \"iam:CreateServiceLinkedRole\", \"kms:DescribeKey\"
+ ], \"Resource\": [ \"*\" ], \"Effect\": \"Allow\"
+ } ] }"
+ type: string
+ networkARN:
+ description: "NetworkARN is an ARN value referencing
+ a role appropriate for the Network Operator. \n The
+ following is an example of a valid policy document:
+ \n { \"Version\": \"2012-10-17\", \"Statement\": [
+ { \"Effect\": \"Allow\", \"Action\": [ \"ec2:DescribeInstances\",
+ \"ec2:DescribeInstanceStatus\", \"ec2:DescribeInstanceTypes\",
+ \"ec2:UnassignPrivateIpAddresses\", \"ec2:AssignPrivateIpAddresses\",
+ \"ec2:UnassignIpv6Addresses\", \"ec2:AssignIpv6Addresses\",
+ \"ec2:DescribeSubnets\", \"ec2:DescribeNetworkInterfaces\"
+ ], \"Resource\": \"*\" } ] }"
+ type: string
+ nodePoolManagementARN:
+ description: "NodePoolManagementARN is an ARN value
+ referencing a role appropriate for the CAPI Controller.
+ \n The following is an example of a valid policy document:
+ \n { \"Version\": \"2012-10-17\", \"Statement\": [
+ { \"Action\": [ \"ec2:AllocateAddress\", \"ec2:AssociateRouteTable\",
+ \"ec2:AttachInternetGateway\", \"ec2:AuthorizeSecurityGroupIngress\",
+ \"ec2:CreateInternetGateway\", \"ec2:CreateNatGateway\",
+ \"ec2:CreateRoute\", \"ec2:CreateRouteTable\", \"ec2:CreateSecurityGroup\",
+ \"ec2:CreateSubnet\", \"ec2:CreateTags\", \"ec2:DeleteInternetGateway\",
+ \"ec2:DeleteNatGateway\", \"ec2:DeleteRouteTable\",
+ \"ec2:DeleteSecurityGroup\", \"ec2:DeleteSubnet\",
+ \"ec2:DeleteTags\", \"ec2:DescribeAccountAttributes\",
+ \"ec2:DescribeAddresses\", \"ec2:DescribeAvailabilityZones\",
+ \"ec2:DescribeImages\", \"ec2:DescribeInstances\",
+ \"ec2:DescribeInternetGateways\", \"ec2:DescribeNatGateways\",
+ \"ec2:DescribeNetworkInterfaces\", \"ec2:DescribeNetworkInterfaceAttribute\",
+ \"ec2:DescribeRouteTables\", \"ec2:DescribeSecurityGroups\",
+ \"ec2:DescribeSubnets\", \"ec2:DescribeVpcs\", \"ec2:DescribeVpcAttribute\",
+ \"ec2:DescribeVolumes\", \"ec2:DetachInternetGateway\",
+ \"ec2:DisassociateRouteTable\", \"ec2:DisassociateAddress\",
+ \"ec2:ModifyInstanceAttribute\", \"ec2:ModifyNetworkInterfaceAttribute\",
+ \"ec2:ModifySubnetAttribute\", \"ec2:ReleaseAddress\",
+ \"ec2:RevokeSecurityGroupIngress\", \"ec2:RunInstances\",
+ \"ec2:TerminateInstances\", \"tag:GetResources\",
+ \"ec2:CreateLaunchTemplate\", \"ec2:CreateLaunchTemplateVersion\",
+ \"ec2:DescribeLaunchTemplates\", \"ec2:DescribeLaunchTemplateVersions\",
+ \"ec2:DeleteLaunchTemplate\", \"ec2:DeleteLaunchTemplateVersions\"
+ ], \"Resource\": [ \"*\" ], \"Effect\": \"Allow\"
+ }, { \"Condition\": { \"StringLike\": { \"iam:AWSServiceName\":
+ \"elasticloadbalancing.amazonaws.com\" } }, \"Action\":
+ [ \"iam:CreateServiceLinkedRole\" ], \"Resource\":
+ [ \"arn:*:iam::*:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing\"
+ ], \"Effect\": \"Allow\" }, { \"Action\": [ \"iam:PassRole\"
+ ], \"Resource\": [ \"arn:*:iam::*:role/*-worker-role\"
+ ], \"Effect\": \"Allow\" } ] }"
+ type: string
+ storageARN:
+ description: "StorageARN is an ARN value referencing
+ a role appropriate for the Storage Operator. \n The
+ following is an example of a valid policy document:
+ \n { \"Version\": \"2012-10-17\", \"Statement\": [
+ { \"Effect\": \"Allow\", \"Action\": [ \"ec2:AttachVolume\",
+ \"ec2:CreateSnapshot\", \"ec2:CreateTags\", \"ec2:CreateVolume\",
+ \"ec2:DeleteSnapshot\", \"ec2:DeleteTags\", \"ec2:DeleteVolume\",
+ \"ec2:DescribeInstances\", \"ec2:DescribeSnapshots\",
+ \"ec2:DescribeTags\", \"ec2:DescribeVolumes\", \"ec2:DescribeVolumesModifications\",
+ \"ec2:DetachVolume\", \"ec2:ModifyVolume\" ], \"Resource\":
+ \"*\" } ] }"
+ type: string
+ required:
+ - controlPlaneOperatorARN
+ - imageRegistryARN
+ - ingressARN
+ - kubeCloudControllerARN
+ - networkARN
+ - nodePoolManagementARN
+ - storageARN
+ type: object
+ serviceEndpoints:
+ description: "ServiceEndpoints specifies optional custom
+ endpoints which will override the default service endpoint
+ of specific AWS Services. \n There must be only one ServiceEndpoint
+ for a given service name."
+ items:
+ description: AWSServiceEndpoint stores the configuration
+ for services to override existing defaults of AWS Services.
+ properties:
+ name:
+ description: Name is the name of the AWS service.
+ This must be provided and cannot be empty.
+ type: string
+ url:
+ description: URL is fully qualified URI with scheme
+ https, that overrides the default generated endpoint
+ for a client. This must be provided and cannot be
+ empty.
+ pattern: ^https://
+ type: string
+ required:
+ - name
+ - url
+ type: object
+ type: array
+ required:
+ - region
+ - rolesRef
+ type: object
+ azure:
+ description: Azure defines azure specific settings
+ properties:
+ credentials:
+ description: LocalObjectReference contains enough information
+ to let you locate the referenced object inside the same
+ namespace.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ location:
+ type: string
+ machineIdentityID:
+ type: string
+ resourceGroup:
+ type: string
+ securityGroupName:
+ type: string
+ subnetName:
+ type: string
+ subscriptionID:
+ type: string
+ vnetID:
+ type: string
+ vnetName:
+ type: string
+ required:
+ - credentials
+ - location
+ - machineIdentityID
+ - resourceGroup
+ - securityGroupName
+ - subnetName
+ - subscriptionID
+ - vnetID
+ - vnetName
+ type: object
+ ibmcloud:
+ description: IBMCloud defines IBMCloud specific settings for
+ components
+ properties:
+ providerType:
+ description: ProviderType is a specific supported infrastructure
+ provider within IBM Cloud.
+ type: string
+ type: object
+ powervs:
+ description: PowerVS specifies configuration for clusters running
+ on IBMCloud Power VS Service. This field is immutable. Once
+ set, It can't be changed.
+ properties:
+ accountID:
+ description: AccountID is the IBMCloud account id. This
+ field is immutable. Once set, It can't be changed.
+ type: string
+ cisInstanceCRN:
+ description: CISInstanceCRN is the IBMCloud CIS Service
+ Instance's Cloud Resource Name This field is immutable.
+ Once set, It can't be changed.
+ pattern: '^crn:'
+ type: string
+ ingressOperatorCloudCreds:
+ description: IngressOperatorCloudCreds is a reference to
+ a secret containing ibm cloud credentials for ingress
+ operator to get authenticated with ibm cloud.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ kubeCloudControllerCreds:
+ description: "KubeCloudControllerCreds is a reference to
+ a secret containing cloud credentials with permissions
+ matching the cloud controller policy. This field is immutable.
+ Once set, It can't be changed. \n TODO(dan): document
+ the \"cloud controller policy\""
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ nodePoolManagementCreds:
+ description: "NodePoolManagementCreds is a reference to
+ a secret containing cloud credentials with permissions
+ matching the node pool management policy. This field is
+ immutable. Once set, It can't be changed. \n TODO(dan):
+ document the \"node pool management policy\""
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ region:
+ description: Region is the IBMCloud region in which the
+ cluster resides. This configures the OCP control plane
+ cloud integrations, and is used by NodePool to resolve
+ the correct boot image for a given release. This field
+ is immutable. Once set, It can't be changed.
+ type: string
+ resourceGroup:
+ description: ResourceGroup is the IBMCloud Resource Group
+ in which the cluster resides. This field is immutable.
+ Once set, It can't be changed.
+ type: string
+ serviceInstanceID:
+ description: "ServiceInstance is the reference to the Power
+ VS service on which the server instance(VM) will be created.
+ Power VS service is a container for all Power VS instances
+ at a specific geographic region. serviceInstance can be
+ created via IBM Cloud catalog or CLI. ServiceInstanceID
+ is the unique identifier that can be obtained from IBM
+ Cloud UI or IBM Cloud cli. \n More detail about Power
+ VS service instance. https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server
+ \n This field is immutable. Once set, It can't be changed."
+ type: string
+ storageOperatorCloudCreds:
+ description: StorageOperatorCloudCreds is a reference to
+ a secret containing ibm cloud credentials for storage
+ operator to get authenticated with ibm cloud.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ subnet:
+ description: Subnet is the subnet to use for control plane
+ cloud resources. This field is immutable. Once set, It
+ can't be changed.
+ properties:
+ id:
+ description: ID of resource
+ type: string
+ name:
+ description: Name of resource
+ type: string
+ type: object
+ vpc:
+ description: VPC specifies IBM Cloud PowerVS Load Balancing
+ configuration for the control plane. This field is immutable.
+ Once set, It can't be changed.
+ properties:
+ name:
+                  description: Name for VPC to be used for all the service
+ load balancer. This field is immutable. Once set,
+ It can't be changed.
+ type: string
+ region:
+ description: Region is the IBMCloud region in which
+                      VPC gets created, this VPC is used for all the ingress
+ traffic into the OCP cluster. This field is immutable.
+ Once set, It can't be changed.
+ type: string
+ subnet:
+ description: Subnet is the subnet to use for load balancer.
+ This field is immutable. Once set, It can't be changed.
+ type: string
+ zone:
+ description: Zone is the availability zone where load
+ balancer cloud resources are created. This field is
+ immutable. Once set, It can't be changed.
+ type: string
+ required:
+ - name
+ - region
+ type: object
+ zone:
+ description: Zone is the availability zone where control
+ plane cloud resources are created. This field is immutable.
+ Once set, It can't be changed.
+ type: string
+ required:
+ - accountID
+ - cisInstanceCRN
+ - ingressOperatorCloudCreds
+ - kubeCloudControllerCreds
+ - nodePoolManagementCreds
+ - region
+ - resourceGroup
+ - serviceInstanceID
+ - storageOperatorCloudCreds
+ - subnet
+ - vpc
+ - zone
+ type: object
+ type:
+ description: Type is the type of infrastructure provider for
+ the cluster.
+ enum:
+ - AWS
+ - None
+ - IBMCloud
+ - Agent
+ - KubeVirt
+ - Azure
+ - PowerVS
+ type: string
+ required:
+ - type
+ type: object
+ pullSecret:
+ description: LocalObjectReference contains enough information to
+ let you locate the referenced object inside the same namespace.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ releaseImage:
+ description: ReleaseImage is the release image applied to the hosted
+ control plane.
+ type: string
+ secretEncryption:
+ description: SecretEncryption contains metadata about the kubernetes
+ secret encryption strategy being used for the cluster when applicable.
+ properties:
+ aescbc:
+ description: AESCBC defines metadata about the AESCBC secret
+ encryption strategy
+ properties:
+ activeKey:
+ description: ActiveKey defines the active key used to encrypt
+ new secrets
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ backupKey:
+ description: BackupKey defines the old key during the rotation
+ process so previously created secrets can continue to
+ be decrypted until they are all re-encrypted with the
+ active key.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - activeKey
+ type: object
+ kms:
+ description: KMS defines metadata about the kms secret encryption
+ strategy
+ properties:
+ aws:
+ description: AWS defines metadata about the configuration
+ of the AWS KMS Secret Encryption provider
+ properties:
+ activeKey:
+ description: ActiveKey defines the active key used to
+ encrypt new secrets
+ properties:
+ arn:
+ description: ARN is the Amazon Resource Name for
+ the encryption key
+ pattern: '^arn:'
+ type: string
+ required:
+ - arn
+ type: object
+ auth:
+ description: Auth defines metadata about the management
+ of credentials used to interact with AWS KMS
+ properties:
+ credentials:
+ description: Credentials contains the name of the
+ secret that holds the aws credentials that can
+ be used to make the necessary KMS calls. It should
+ at key AWSCredentialsFileSecretKey contain the
+ aws credentials file that can be used to configure
+ AWS SDKs
+ properties:
+ name:
+ description: 'Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - credentials
+ type: object
+ backupKey:
+ description: BackupKey defines the old key during the
+ rotation process so previously created secrets can
+ continue to be decrypted until they are all re-encrypted
+ with the active key.
+ properties:
+ arn:
+ description: ARN is the Amazon Resource Name for
+ the encryption key
+ pattern: '^arn:'
+ type: string
+ required:
+ - arn
+ type: object
+ region:
+ description: Region contains the AWS region
+ type: string
+ required:
+ - activeKey
+ - auth
+ - region
+ type: object
+ ibmcloud:
+ description: IBMCloud defines metadata for the IBM Cloud
+ KMS encryption strategy
+ properties:
+ auth:
+ description: Auth defines metadata for how authentication
+ is done with IBM Cloud KMS
+ properties:
+ managed:
+ description: Managed defines metadata around the
+ service to service authentication strategy for
+ the IBM Cloud KMS system (all provider managed).
+ type: object
+ type:
+ description: Type defines the IBM Cloud KMS authentication
+ strategy
+ enum:
+ - Managed
+ - Unmanaged
+ type: string
+ unmanaged:
+ description: Unmanaged defines the auth metadata
+ the customer provides to interact with IBM Cloud
+ KMS
+ properties:
+ credentials:
+ description: Credentials should reference a
+ secret with a key field of IBMCloudIAMAPIKeySecretKey
+ that contains a apikey to call IBM Cloud KMS
+ APIs
+ properties:
+ name:
+ description: 'Name of the referent. More
+ info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
+ kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - credentials
+ type: object
+ required:
+ - type
+ type: object
+ keyList:
+ description: KeyList defines the list of keys used for
+ data encryption
+ items:
+ description: IBMCloudKMSKeyEntry defines metadata
+ for an IBM Cloud KMS encryption key
+ properties:
+ correlationID:
+ description: CorrelationID is an identifier used
+ to track all api call usage from hypershift
+ type: string
+ crkID:
+                                description: CRKID is the customer root key id
+ type: string
+ instanceID:
+ description: InstanceID is the id for the key
+ protect instance
+ type: string
+ keyVersion:
+ description: KeyVersion is a unique number associated
+ with the key. The number increments whenever
+ a new key is enabled for data encryption.
+ type: integer
+ url:
+ description: URL is the url to call key protect
+ apis over
+ pattern: ^https://
+ type: string
+ required:
+ - correlationID
+ - crkID
+ - instanceID
+ - keyVersion
+ - url
+ type: object
+ type: array
+ region:
+ description: Region is the IBM Cloud region
+ type: string
+ required:
+ - auth
+ - keyList
+ - region
+ type: object
+ provider:
+ description: Provider defines the KMS provider
+ enum:
+ - IBMCloud
+ - AWS
+ type: string
+ required:
+ - provider
+ type: object
+ type:
+ description: Type defines the type of kube secret encryption
+ being used
+ enum:
+ - kms
+ - aescbc
+ type: string
+ required:
+ - type
+ type: object
+ serviceAccountSigningKey:
+ description: ServiceAccountSigningKey is a reference to a secret
+ containing the private key used by the service account token issuer.
+ The secret is expected to contain a single key named "key". If
+ not specified, a service account signing key will be generated
+ automatically for the cluster.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ services:
+ description: Services defines metadata about how control plane services
+ are published in the management cluster.
+ items:
+ description: ServicePublishingStrategyMapping specifies how individual
+ control plane services are published from the hosting cluster
+ of a control plane.
+ properties:
+ service:
+ description: Service identifies the type of service being
+ published.
+ enum:
+ - APIServer
+ - OAuthServer
+ - OIDC
+ - Konnectivity
+ - Ignition
+ - OVNSbDb
+ type: string
+ servicePublishingStrategy:
+ description: ServicePublishingStrategy specifies how to publish
+ Service.
+ properties:
+ loadBalancer:
+ description: LoadBalancer configures exposing a service
+ using a LoadBalancer.
+ properties:
+ hostname:
+ description: Hostname is the name of the DNS record
+ that will be created pointing to the LoadBalancer.
+ type: string
+ type: object
+ nodePort:
+ description: NodePort configures exposing a service using
+ a NodePort.
+ properties:
+ address:
+ description: Address is the host/ip that the NodePort
+ service is exposed over.
+ type: string
+ port:
+ description: Port is the port of the NodePort service.
+ If <=0, the port is dynamically assigned when the
+ service is created.
+ format: int32
+ type: integer
+ required:
+ - address
+ type: object
+ route:
+ description: Route configures exposing a service using
+ a Route.
+ properties:
+ hostname:
+ description: Hostname is the name of the DNS record
+ that will be created pointing to the Route.
+ type: string
+ type: object
+ type:
+ description: Type is the publishing strategy used for
+ the service.
+ enum:
+ - LoadBalancer
+ - NodePort
+ - Route
+ - None
+ type: string
+ required:
+ - type
+ type: object
+ required:
+ - service
+ - servicePublishingStrategy
+ type: object
+ type: array
+ sshKey:
+ description: LocalObjectReference contains enough information to
+ let you locate the referenced object inside the same namespace.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - dns
+ - etcd
+ - infraID
+ - issuerURL
+ - platform
+ - pullSecret
+ - releaseImage
+ - services
+ - sshKey
+ type: object
+ status:
+ description: HostedControlPlaneStatus defines the observed state of
+ HostedControlPlane
+ properties:
+ conditions:
+ description: 'Condition contains details for one aspect of the current
+ state of the HostedControlPlane. Current condition types are:
+ "Available"'
+ items:
+ description: "Condition contains details for one aspect of the
+ current state of this API Resource. --- This struct is intended
+ for direct use as an array at the field path .status.conditions.
+ \ For example, type FooStatus struct{ // Represents the observations
+ of a foo's current state. // Known .status.conditions.type are:
+ \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
+ // +patchStrategy=merge // +listType=map // +listMapKey=type
+ Conditions []metav1.Condition `json:\"conditions,omitempty\"
+ patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
+ \n // other fields }"
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the last time the condition
+ transitioned from one status to another. This should be
+ when the underlying condition changed. If that is not known,
+ then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: message is a human readable message indicating
+ details about the transition. This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: observedGeneration represents the .metadata.generation
+ that the condition was set based upon. For instance, if
+ .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration
+ is 9, the condition is out of date with respect to the current
+ state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: reason contains a programmatic identifier indicating
+ the reason for the condition's last transition. Producers
+ of specific condition types may define expected values and
+ meanings for this field, and whether the values are considered
+ a guaranteed API. The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False,
+ Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ --- Many .condition.type values are consistent across resources
+ like Available, but because arbitrary conditions can be
+ useful (see .node.status.conditions), the ability to deconflict
+ is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ controlPlaneEndpoint:
+ description: ControlPlaneEndpoint contains the endpoint information
+ by which external clients can access the control plane. This
+ is populated after the infrastructure is ready.
+ properties:
+ host:
+ description: Host is the hostname on which the API server is
+ serving.
+ type: string
+ port:
+ description: Port is the port on which the API server is serving.
+ format: int32
+ type: integer
+ required:
+ - host
+ - port
+ type: object
+ externalManagedControlPlane:
+ default: true
+ description: ExternalManagedControlPlane indicates to cluster-api
+ that the control plane is managed by an external service. https://github.com/kubernetes-sigs/cluster-api/blob/65e5385bffd71bf4aad3cf34a537f11b217c7fab/controllers/machine_controller.go#L468
+ type: boolean
+ initialized:
+ default: false
+ description: Initialized denotes whether or not the control plane
+ has provided a kubeadm-config. Once this condition is marked true,
+ its value is never changed. See the Ready condition for an indication
+ of the current readiness of the cluster's control plane. This
+ satisfies CAPI contract https://github.com/kubernetes-sigs/cluster-api/blob/cd3a694deac89d5ebeb888307deaa61487207aa0/controllers/cluster_controller_phases.go#L238-L252
+ type: boolean
+ kubeConfig:
+ description: KubeConfig is a reference to the secret containing
+ the default kubeconfig for this control plane.
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ kubeadminPassword:
+ description: KubeadminPassword is a reference to the secret containing
+ the initial kubeadmin password for the guest cluster.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ lastReleaseImageTransitionTime:
+ description: "lastReleaseImageTransitionTime is the time of the
+ last update to the current releaseImage property. \n Deprecated:
+ Use versionStatus.history[0].startedTime instead."
+ format: date-time
+ type: string
+ oauthCallbackURLTemplate:
+ description: OAuthCallbackURLTemplate contains a template for the
+ URL to use as a callback for identity providers. The [identity-provider-name]
+ placeholder must be replaced with the name of an identity provider
+ defined on the HostedCluster. This is populated after the infrastructure
+ is ready.
+ type: string
+ ready:
+ default: false
+ description: Ready denotes that the HostedControlPlane API Server
+ is ready to receive requests This satisfies CAPI contract https://github.com/kubernetes-sigs/cluster-api/blob/cd3a694deac89d5ebeb888307deaa61487207aa0/controllers/cluster_controller_phases.go#L226-L230
+ type: boolean
+ releaseImage:
+ description: "ReleaseImage is the release image applied to the hosted
+ control plane. \n Deprecated: Use versionStatus.desired.image
+ instead."
+ type: string
+ version:
+ description: "Version is the semantic version of the release applied
+ by the hosted control plane operator \n Deprecated: Use versionStatus.desired.version
+ instead."
+ type: string
+ versionStatus:
+ description: versionStatus is the status of the release version
+ applied by the hosted control plane operator.
+ properties:
+ availableUpdates:
+ description: availableUpdates contains updates recommended for
+ this cluster. Updates which appear in conditionalUpdates but
+ not in availableUpdates may expose this cluster to known issues.
+ This list may be empty if no updates are recommended, if the
+ update service is unavailable, or if an invalid channel has
+ been specified.
+ items:
+ description: Release represents an OpenShift release image
+ and associated metadata.
+ properties:
+ channels:
+ description: channels is the set of Cincinnati channels
+ to which the release currently belongs.
+ items:
+ type: string
+ type: array
+ image:
+ description: image is a container image location that
+ contains the update. When this field is part of spec,
+ image is optional if version is specified and the availableUpdates
+ field contains a matching version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on a
+ release or the metadata returned by the update API and
+ should be displayed as a link in user interfaces. The
+ URL field may not be set for test or nightly releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec,
+ version is optional if image is specified.
+ type: string
+ type: object
+ nullable: true
+ type: array
+ conditionalUpdates:
+ description: conditionalUpdates contains the list of updates
+ that may be recommended for this cluster if it meets specific
+ required conditions. Consumers interested in the set of updates
+ that are actually recommended for this cluster should use
+ availableUpdates. This list may be empty if no updates are
+ recommended, if the update service is unavailable, or if an
+ empty or invalid channel has been specified.
+ items:
+ description: ConditionalUpdate represents an update which
+ is recommended to some clusters on the version the current
+ cluster is reconciling, but which may not be recommended
+ for the current cluster.
+ properties:
+ conditions:
+ description: 'conditions represents the observations of
+ the conditional update''s current status. Known types
+ are: * Evaluating, for whether the cluster-version operator
+ will attempt to evaluate any risks[].matchingRules.
+ * Recommended, for whether the update is recommended
+ for the current cluster.'
+ items:
+ description: "Condition contains details for one aspect
+ of the current state of this API Resource. --- This
+ struct is intended for direct use as an array at the
+ field path .status.conditions. For example, type
+ FooStatus struct{ // Represents the observations of
+ a foo's current state. // Known .status.conditions.type
+ are: \"Available\", \"Progressing\", and \"Degraded\"
+ // +patchMergeKey=type // +patchStrategy=merge //
+ +listType=map // +listMapKey=type Conditions []metav1.Condition
+ `json:\"conditions,omitempty\" patchStrategy:\"merge\"
+ patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
+ \n // other fields }"
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the last time
+ the condition transitioned from one status to
+ another. This should be when the underlying condition
+ changed. If that is not known, then using the
+ time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: message is a human readable message
+ indicating details about the transition. This
+ may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: observedGeneration represents the .metadata.generation
+ that the condition was set based upon. For instance,
+ if .metadata.generation is currently 12, but the
+ .status.conditions[x].observedGeneration is 9,
+ the condition is out of date with respect to the
+ current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: reason contains a programmatic identifier
+ indicating the reason for the condition's last
+ transition. Producers of specific condition types
+ may define expected values and meanings for this
+ field, and whether the values are considered a
+ guaranteed API. The value should be a CamelCase
+ string. This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True,
+ False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in
+ foo.example.com/CamelCase. --- Many .condition.type
+ values are consistent across resources like Available,
+ but because arbitrary conditions can be useful
+ (see .node.status.conditions), the ability to
+ deconflict is important. The regex it matches
+ is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ release:
+ description: release is the target of the update.
+ properties:
+ channels:
+ description: channels is the set of Cincinnati channels
+ to which the release currently belongs.
+ items:
+ type: string
+ type: array
+ image:
+ description: image is a container image location that
+ contains the update. When this field is part of
+ spec, image is optional if version is specified
+ and the availableUpdates field contains a matching
+ version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on
+ a release or the metadata returned by the update
+ API and should be displayed as a link in user interfaces.
+ The URL field may not be set for test or nightly
+ releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec,
+ version is optional if image is specified.
+ type: string
+ type: object
+ risks:
+ description: risks represents the range of issues associated
+ with updating to the target release. The cluster-version
+ operator will evaluate all entries, and only recommend
+ the update if there is at least one entry and all entries
+ recommend the update.
+ items:
+ description: ConditionalUpdateRisk represents a reason
+ and cluster-state for not recommending a conditional
+ update.
+ properties:
+ matchingRules:
+ description: matchingRules is a slice of conditions
+ for deciding which clusters match the risk and
+ which do not. The slice is ordered by decreasing
+ precedence. The cluster-version operator will
+ walk the slice in order, and stop after the first
+ it can successfully evaluate. If no condition
+ can be successfully evaluated, the update will
+ not be recommended.
+ items:
+ description: ClusterCondition is a union of typed
+ cluster conditions. The 'type' property determines
+ which of the type-specific properties are relevant.
+ When evaluated on a cluster, the condition may
+ match, not match, or fail to evaluate.
+ properties:
+ promql:
+ description: promQL represents a cluster condition
+ based on PromQL.
+ properties:
+ promql:
+ description: PromQL is a PromQL query
+                                      classifying clusters. This query
+ should return a 1 in the match case
+ and a 0 in the does-not-match case.
+ Queries which return no time series,
+ or which return values besides 0 or
+ 1, are evaluation failures.
+ type: string
+ required:
+ - promql
+ type: object
+ type:
+ description: type represents the cluster-condition
+ type. This defines the members and semantics
+ of any additional properties.
+ enum:
+ - Always
+ - PromQL
+ type: string
+ required:
+ - type
+ type: object
+ minItems: 1
+ type: array
+ x-kubernetes-list-type: atomic
+ message:
+ description: message provides additional information
+ about the risk of updating, in the event that
+ matchingRules match the cluster state. This is
+ only to be consumed by humans. It may contain
+ Line Feed characters (U+000A), which should be
+ rendered as new lines.
+ minLength: 1
+ type: string
+ name:
+ description: name is the CamelCase reason for not
+ recommending a conditional update, in the event
+ that matchingRules match the cluster state.
+ minLength: 1
+ type: string
+ url:
+ description: url contains information about this
+ risk.
+ format: uri
+ minLength: 1
+ type: string
+ required:
+ - matchingRules
+ - message
+ - name
+ - url
+ type: object
+ minItems: 1
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - release
+ - risks
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ desired:
+ description: desired is the version that the cluster is reconciling
+ towards. If the cluster is not yet fully initialized desired
+ will be set with the information available, which may be an
+ image or a tag.
+ properties:
+ channels:
+ description: channels is the set of Cincinnati channels
+ to which the release currently belongs.
+ items:
+ type: string
+ type: array
+ image:
+ description: image is a container image location that contains
+ the update. When this field is part of spec, image is
+ optional if version is specified and the availableUpdates
+ field contains a matching version.
+ type: string
+ url:
+ description: url contains information about this release.
+ This URL is set by the 'url' metadata property on a release
+ or the metadata returned by the update API and should
+ be displayed as a link in user interfaces. The URL field
+ may not be set for test or nightly releases.
+ type: string
+ version:
+ description: version is a semantic versioning identifying
+ the update version. When this field is part of spec, version
+ is optional if image is specified.
+ type: string
+ type: object
+ history:
+ description: history contains a list of the most recent versions
+ applied to the cluster. This value may be empty during cluster
+ startup, and then will be updated when a new update is being
+ applied. The newest update is first in the list and it is
+ ordered by recency. Updates in the history have state Completed
+ if the rollout completed - if an update was failing or halfway
+ applied the state will be Partial. Only a limited amount of
+ update history is preserved.
+ items:
+ description: UpdateHistory is a single attempted update to
+ the cluster.
+ properties:
+ acceptedRisks:
+ description: acceptedRisks records risks which were accepted
+                        to initiate the update. For example, it may mention
+                        an Upgradeable=False or missing signature that was overridden
+ via desiredUpdate.force, or an update that was initiated
+ despite not being in the availableUpdates set of recommended
+ update targets.
+ type: string
+ completionTime:
+ description: completionTime, if set, is when the update
+ was fully applied. The update that is currently being
+ applied will have a null completion time. Completion
+ time will always be set for entries that are not the
+ current update (usually to the started time of the next
+ update).
+ format: date-time
+ nullable: true
+ type: string
+ image:
+ description: image is a container image location that
+ contains the update. This value is always populated.
+ type: string
+ startedTime:
+ description: startedTime is the time at which the update
+ was started.
+ format: date-time
+ type: string
+ state:
+ description: state reflects whether the update was fully
+ applied. The Partial state indicates the update is not
+ fully applied, while the Completed state indicates the
+ update was successfully rolled out at least once (all
+ parts of the update successfully applied).
+ type: string
+ verified:
+ description: verified indicates whether the provided update
+ was properly verified before it was installed. If this
+ is false the cluster may not be trusted. Verified does
+ not cover upgradeable checks that depend on the cluster
+ state at the time when the update target was accepted.
+ type: boolean
+ version:
+ description: version is a semantic versioning identifying
+ the update version. If the requested image does not
+ define a version, or if a failure occurs retrieving
+ the image, this value may be empty.
+ type: string
+ required:
+ - completionTime
+ - image
+ - startedTime
+ - state
+ - verified
+ type: object
+ type: array
+ observedGeneration:
+ description: observedGeneration reports which version of the
+ spec is being synced. If this value is not equal to metadata.generation,
+ then the desired and conditions fields may represent a previous
+ version.
+ format: int64
+ type: integer
+ required:
+ - availableUpdates
+ - desired
+ - observedGeneration
+ type: object
+ required:
+ - initialized
+ - ready
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: null
+ storedVersions: null
+- apiVersion: apiextensions.k8s.io/v1
+ kind: CustomResourceDefinition
+ metadata:
+ annotations:
+ service.beta.openshift.io/inject-cabundle: "true"
+ creationTimestamp: null
+ name: nodepools.hypershift.openshift.io
+ spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ name: operator
+ namespace: ${NAMESPACE}
+ path: /convert
+ port: 443
+ conversionReviewVersions:
+ - v1beta1
+ - v1alpha1
+ group: hypershift.openshift.io
+ names:
+ kind: NodePool
+ listKind: NodePoolList
+ plural: nodepools
+ shortNames:
+ - np
+ - nps
+ singular: nodepool
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Cluster
+ jsonPath: .spec.clusterName
+ name: Cluster
+ type: string
+ - description: Desired Nodes
+ jsonPath: .spec.replicas
+ name: Desired Nodes
+ type: integer
+ - description: Available Nodes
+ jsonPath: .status.replicas
+ name: Current Nodes
+ type: integer
+ - description: Autoscaling Enabled
+ jsonPath: .status.conditions[?(@.type=="AutoscalingEnabled")].status
+ name: Autoscaling
+ type: string
+ - description: Node Autorepair Enabled
+ jsonPath: .status.conditions[?(@.type=="AutorepairEnabled")].status
+ name: Autorepair
+ type: string
+ - description: Current version
+ jsonPath: .status.version
+ name: Version
+ type: string
+ - description: UpdatingVersion in progress
+ jsonPath: .status.conditions[?(@.type=="UpdatingVersion")].status
+ name: UpdatingVersion
+ type: string
+ - description: UpdatingConfig in progress
+ jsonPath: .status.conditions[?(@.type=="UpdatingConfig")].status
+ name: UpdatingConfig
+ type: string
+ - description: Message
+ jsonPath: .status.conditions[?(@.type=="Ready")].message
+ name: Message
+ type: string
+ deprecated: true
+ deprecationWarning: v1alpha1 is a deprecated version for NodePool
+ name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: NodePool is a scalable set of worker nodes attached to a HostedCluster.
+ NodePool machine architectures are uniform within a given pool, and are
+ independent of the control plane’s underlying machine architecture.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource
+ this object represents. Servers may infer this from the endpoint the
+ client submits requests to. Cannot be updated. In CamelCase. More
+ info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Spec is the desired behavior of the NodePool.
+ properties:
+ autoScaling:
+ description: Autoscaling specifies auto-scaling behavior for the
+ NodePool.
+ properties:
+ max:
+ description: Max is the maximum number of nodes allowed in the
+ pool. Must be >= 1.
+ format: int32
+ minimum: 1
+ type: integer
+ min:
+ description: Min is the minimum number of nodes to maintain
+ in the pool. Must be >= 1.
+ format: int32
+ minimum: 1
+ type: integer
+ required:
+ - max
+ - min
+ type: object
+ clusterName:
+ description: "ClusterName is the name of the HostedCluster this
+ NodePool belongs to. \n TODO(dan): Should this be a LocalObjectReference?"
+ type: string
+ config:
+ description: "Config is a list of references to ConfigMaps containing
+ serialized MachineConfig resources to be injected into the ignition
+ configurations of nodes in the NodePool. The MachineConfig API
+ schema is defined here: \n https://github.com/openshift/machine-config-operator/blob/18963e4f8fe66e8c513ca4b131620760a414997f/pkg/apis/machineconfiguration.openshift.io/v1/types.go#L185
+ \n Each ConfigMap must have a single key named \"config\" whose
+ value is the JSON or YAML of a serialized MachineConfig."
+ items:
+ description: LocalObjectReference contains enough information
+ to let you locate the referenced object inside the same namespace.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ management:
+ description: Management specifies behavior for managing nodes in
+ the pool, such as upgrade strategies and auto-repair behaviors.
+ properties:
+ autoRepair:
+ description: AutoRepair specifies whether health checks should
+ be enabled for machines in the NodePool. The default is false.
+ type: boolean
+ inPlace:
+ description: InPlace is the configuration for in-place upgrades.
+ type: object
+ replace:
+ default:
+ rollingUpdate:
+ maxSurge: 1
+ maxUnavailable: 0
+ strategy: RollingUpdate
+ description: Replace is the configuration for rolling upgrades.
+ properties:
+ rollingUpdate:
+ description: RollingUpdate specifies a rolling update strategy
+ which upgrades nodes by creating new nodes and deleting
+ the old ones.
+ properties:
+ maxSurge:
+ anyOf:
+ - type: integer
+ - type: string
+ description: "MaxSurge is the maximum number of nodes
+ that can be provisioned above the desired number of
+ nodes. \n Value can be an absolute number (ex: 5)
+ or a percentage of desired nodes (ex: 10%). \n Absolute
+ number is calculated from percentage by rounding up.
+ \n This can not be 0 if MaxUnavailable is 0. \n Defaults
+ to 1. \n Example: when this is set to 30%, new nodes
+ can be provisioned immediately when the rolling update
+ starts, such that the total number of old and new
+ nodes do not exceed 130% of desired nodes. Once old
+ nodes have been deleted, new nodes can be provisioned,
+ ensuring that total number of nodes running at any
+ time during the update is at most 130% of desired
+ nodes."
+ x-kubernetes-int-or-string: true
+ maxUnavailable:
+ anyOf:
+ - type: integer
+ - type: string
+ description: "MaxUnavailable is the maximum number of
+ nodes that can be unavailable during the update. \n
+ Value can be an absolute number (ex: 5) or a percentage
+ of desired nodes (ex: 10%). \n Absolute number is
+ calculated from percentage by rounding down. \n This
+ can not be 0 if MaxSurge is 0. \n Defaults to 0. \n
+ Example: when this is set to 30%, old nodes can be
+ deleted down to 70% of desired nodes immediately when
+ the rolling update starts. Once new nodes are ready,
+                          more old nodes can be deleted, followed by provisioning
+ new nodes, ensuring that the total number of nodes
+ available at all times during the update is at least
+ 70% of desired nodes."
+ x-kubernetes-int-or-string: true
+ type: object
+ strategy:
+ description: Strategy is the node replacement strategy for
+ nodes in the pool.
+ enum:
+ - RollingUpdate
+ - OnDelete
+ type: string
+ type: object
+ upgradeType:
+ description: UpgradeType specifies the type of strategy for
+ handling upgrades.
+ enum:
+ - Replace
+ - InPlace
+ type: string
+ required:
+ - upgradeType
+ type: object
+ nodeCount:
+ description: 'Deprecated: Use Replicas instead. NodeCount will be
+ dropped in the next api release.'
+ format: int32
+ type: integer
+ nodeDrainTimeout:
+ description: 'NodeDrainTimeout is the total amount of time that
+ the controller will spend on draining a node. The default value
+ is 0, meaning that the node can be drained without any time limitations.
+ NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`
+ TODO (alberto): Today changing this field will trigger a recreate
+ rolling update, which kind of defeats the purpose of the change.
+ In future we plan to propagate this field in-place. https://github.com/kubernetes-sigs/cluster-api/issues/5880'
+ type: string
+ platform:
+ description: Platform specifies the underlying infrastructure provider
+ for the NodePool and is used to configure platform specific behavior.
+ properties:
+ agent:
+ description: Agent specifies the configuration used when using
+ Agent platform.
+ properties:
+ agentLabelSelector:
+ description: AgentLabelSelector contains labels that must
+ be set on an Agent in order to be selected for a Machine.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists
+ or DoesNotExist, the values array must be empty.
+ This array is replaced during a strategic merge
+ patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field
+ is "key", the operator is "In", and the values array
+ contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ aws:
+ description: AWS specifies the configuration used when operating
+ on AWS.
+ properties:
+ ami:
+ description: AMI is the image id to use for node instances.
+ If unspecified, the default is chosen based on the NodePool
+ release payload image.
+ type: string
+ instanceProfile:
+ description: InstanceProfile is the AWS EC2 instance profile,
+ which is a container for an IAM role that the EC2 instance
+ uses.
+ type: string
+ instanceType:
+ description: InstanceType is an ec2 instance type for node
+ instances (e.g. m5.large).
+ type: string
+ resourceTags:
+ description: "ResourceTags is an optional list of additional
+ tags to apply to AWS node instances. \n These will be
+ merged with HostedCluster scoped tags, and HostedCluster
+ tags take precedence in case of conflicts. \n See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html
+ for information on tagging AWS resources. AWS supports
+ a maximum of 50 tags per resource. OpenShift reserves
+ 25 tags for its use, leaving 25 tags available for the
+ user."
+ items:
+ description: AWSResourceTag is a tag to apply to AWS resources
+ created for the cluster.
+ properties:
+ key:
+ description: Key is the key of the tag.
+ maxLength: 128
+ minLength: 1
+ pattern: ^[0-9A-Za-z_.:/=+-@]+$
+ type: string
+ value:
+ description: "Value is the value of the tag. \n Some
+ AWS service do not support empty values. Since tags
+ are added to resources in many services, the length
+ of the tag value must meet the requirements of all
+ services."
+ maxLength: 256
+ minLength: 1
+ pattern: ^[0-9A-Za-z_.:/=+-@]+$
+ type: string
+ required:
+ - key
+ - value
+ type: object
+ maxItems: 25
+ type: array
+ rootVolume:
+ description: RootVolume specifies configuration for the
+ root volume of node instances.
+ properties:
+ iops:
+ description: IOPS is the number of IOPS requested for
+ the disk. This is only valid for type io1.
+ format: int64
+ type: integer
+ size:
+ description: "Size specifies size (in Gi) of the storage
+ device. \n Must be greater than the image snapshot
+ size or 8 (whichever is greater)."
+ format: int64
+ minimum: 8
+ type: integer
+ type:
+ description: Type is the type of the volume.
+ type: string
+ required:
+ - size
+ - type
+ type: object
+ securityGroups:
+ description: SecurityGroups is an optional set of security
+ groups to associate with node instances.
+ items:
+ description: AWSResourceReference is a reference to a
+ specific AWS resource by ID, ARN, or filters. Only one
+ of ID, ARN or Filters may be specified. Specifying more
+ than one will result in a validation error.
+ properties:
+ arn:
+ description: ARN of resource
+ type: string
+ filters:
+ description: 'Filters is a set of key/value pairs
+                          used to identify a resource. They are applied according
+ to the rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ items:
+ description: Filter is a filter used to identify
+ an AWS resource
+ properties:
+ name:
+ description: Name of the filter. Filter names
+ are case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter
+ values. Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ type: array
+ subnet:
+ description: Subnet is the subnet to use for node instances.
+ properties:
+ arn:
+ description: ARN of resource
+ type: string
+ filters:
+ description: 'Filters is a set of key/value pairs used
+                      to identify a resource. They are applied according
+ to the rules defined by the AWS API: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html'
+ items:
+ description: Filter is a filter used to identify an
+ AWS resource
+ properties:
+ name:
+ description: Name of the filter. Filter names
+ are case-sensitive.
+ type: string
+ values:
+ description: Values includes one or more filter
+ values. Filter values are case-sensitive.
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - values
+ type: object
+ type: array
+ id:
+ description: ID of resource
+ type: string
+ type: object
+ required:
+ - instanceType
+ type: object
+ azure:
+ properties:
+ availabilityZone:
+ description: AvailabilityZone of the nodepool. Must not
+ be specified for clusters in a location that does not
+ support AvailabilityZone.
+ type: string
+ diskSizeGB:
+ default: 120
+ format: int32
+ minimum: 16
+ type: integer
+ diskStorageAccountType:
+ default: Premium_LRS
+ description: "DiskStorageAccountType is the disk storage
+ account type to use. Valid values are: * Standard_LRS:
+ HDD * StandardSSD_LRS: Standard SSD * Premium_LRS: Premium
+ SDD * UltraSSD_LRS: Ultra SDD \n Defaults to Premium_LRS.
+ For more details, visit the Azure documentation: https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types#disk-type-comparison"
+ enum:
+ - Standard_LRS
+ - StandardSSD_LRS
+ - Premium_LRS
+ - UltraSSD_LRS
+ type: string
+ imageID:
+ description: 'ImageID is the id of the image to boot from.
+ If unset, the default image at the location below will
+ be used: subscription/$subscriptionID/resourceGroups/$resourceGroupName/providers/Microsoft.Compute/images/rhcos.x86_64.vhd'
+ type: string
+ vmsize:
+ type: string
+ required:
+ - vmsize
+ type: object
+ ibmcloud:
+ description: IBMCloud defines IBMCloud specific settings for
+ components
+ properties:
+ providerType:
+ description: ProviderType is a specific supported infrastructure
+ provider within IBM Cloud.
+ type: string
+ type: object
+ kubevirt:
+ description: Kubevirt specifies the configuration used when
+ operating on KubeVirt platform.
+ properties:
+ compute:
+ default:
+ cores: 2
+ memory: 4Gi
+ description: Compute contains values representing the virtual
+ hardware requested for the VM
+ properties:
+ cores:
+ default: 2
+ description: Cores represents how many cores the guest
+ VM should have
+ format: int32
+ type: integer
+ memory:
+ anyOf:
+ - type: integer
+ - type: string
+ default: 4Gi
+ description: Memory represents how much guest memory
+ the VM should have
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ rootVolume:
+ description: RootVolume represents values associated with
+ the VM volume that will host rhcos
+ properties:
+ diskImage:
+ description: Image represents what rhcos image to use
+ for the node pool
+ properties:
+ containerDiskImage:
+ description: ContainerDiskImage is a string representing
+ the container image that holds the root disk
+ type: string
+ type: object
+ persistent:
+ description: Persistent volume type means the VM's storage
+ is backed by a PVC VMs that use persistent volumes
+ can survive disruption events like restart and eviction
+ This is the default type used when no storage type
+ is defined.
+ properties:
+ accessModes:
+ description: 'AccessModes is an array that contains
+ the desired Access Modes the root volume should
+ have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes'
+ items:
+ enum:
+ - ReadWriteOnce
+ - ReadWriteMany
+ - ReadOnly
+ - ReadWriteOncePod
+ type: string
+ type: array
+ size:
+ anyOf:
+ - type: integer
+ - type: string
+ default: 16Gi
+ description: Size is the size of the persistent
+ storage volume
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ storageClass:
+ description: StorageClass is the storageClass used
+ for the underlying PVC that hosts the volume
+ type: string
+ type: object
+ type:
+ default: Persistent
+ description: Type represents the type of storage to
+ associate with the kubevirt VMs.
+ enum:
+ - Persistent
+ type: string
+ type: object
+ required:
+ - rootVolume
+ type: object
+ powervs:
+ description: PowerVS specifies the configuration used when using
+ IBMCloud PowerVS platform.
+ properties:
+ image:
+ description: Image used for deploying the nodes. If unspecified,
+ the default is chosen based on the NodePool release payload
+ image.
+ properties:
+ id:
+ description: ID of resource
+ type: string
+ name:
+ description: Name of resource
+ type: string
+ type: object
+ imageDeletePolicy:
+ default: delete
+ description: "ImageDeletePolicy is policy for the image
+ deletion. \n delete: delete the image from the infrastructure.
+ retain: delete the image from the openshift but retain
+ in the infrastructure. \n The default is delete"
+ enum:
+ - delete
+ - retain
+ type: string
+ memoryGiB:
+ default: 32
+ description: "MemoryGiB is the size of a virtual machine's
+ memory, in GiB. maximum value for the MemoryGiB depends
+ on the selected SystemType. when SystemType is set to
+ e880 maximum MemoryGiB value is 7463 GiB. when SystemType
+ is set to e980 maximum MemoryGiB value is 15307 GiB. when
+ SystemType is set to s922 maximum MemoryGiB value is 942
+ GiB. The minimum memory is 32 GiB. \n When omitted, this
+ means the user has no opinion and the platform is left
+ to choose a reasonable default. The current default is
+ 32."
+ format: int32
+ type: integer
+ processorType:
+ default: shared
+ description: "ProcessorType is the VM instance processor
+ type. It must be set to one of the following values: Dedicated,
+ Capped or Shared. \n Dedicated: resources are allocated
+ for a specific client, The hypervisor makes a 1:1 binding
+ of a partition’s processor to a physical processor core.
+ Shared: Shared among other clients. Capped: Shared, but
+ resources do not expand beyond those that are requested,
+ the amount of CPU time is Capped to the value specified
+ for the entitlement. \n if the processorType is selected
+ as Dedicated, then Processors value cannot be fractional.
+ When omitted, this means that the user has no opinion
+ and the platform is left to choose a reasonable default.
+ The current default is Shared."
+ enum:
+ - dedicated
+ - shared
+ - capped
+ type: string
+ processors:
+ anyOf:
+ - type: integer
+ - type: string
+ default: "0.5"
+ description: Processors is the number of virtual processors
+ in a virtual machine. when the processorType is selected
+ as Dedicated the processors value cannot be fractional.
+ maximum value for the Processors depends on the selected
+ SystemType. when SystemType is set to e880 or e980 maximum
+ Processors value is 143. when SystemType is set to s922
+ maximum Processors value is 15. minimum value for Processors
+ depends on the selected ProcessorType. when ProcessorType
+ is set as Shared or Capped, The minimum processors is
+ 0.5. when ProcessorType is set as Dedicated, The minimum
+ processors is 1. When omitted, this means that the user
+ has no opinion and the platform is left to choose a reasonable
+ default. The default is set based on the selected ProcessorType.
+ when ProcessorType selected as Dedicated, the default
+ is set to 1. when ProcessorType selected as Shared or
+ Capped, the default is set to 0.5.
+ x-kubernetes-int-or-string: true
+ storageType:
+ default: tier1
+ description: "StorageType for the image and nodes, this
+ will be ignored if Image is specified. The storage tiers
+ in PowerVS are based on I/O operations per second (IOPS).
+ It means that the performance of your storage volumes
+ is limited to the maximum number of IOPS based on volume
+ size and storage tier. Although, the exact numbers might
+ change over time, the Tier 3 storage is currently set
+ to 3 IOPS/GB, and the Tier 1 storage is currently set
+ to 10 IOPS/GB. \n The default is tier1"
+ enum:
+ - tier1
+ - tier3
+ type: string
+ systemType:
+ default: s922
+ description: SystemType is the System type used to host
+ the instance. systemType determines the number of cores
+ and memory that is available. Few of the supported SystemTypes
+ are s922,e880,e980. e880 systemType available only in
+ Dallas Datacenters. e980 systemType available in Datacenters
+ except Dallas and Washington. When omitted, this means
+ that the user has no opinion and the platform is left
+ to choose a reasonable default. The current default is
+ s922 which is generally available.
+ type: string
+ type: object
+ type:
+ description: Type specifies the platform name.
+ enum:
+ - AWS
+ - None
+ - IBMCloud
+ - Agent
+ - KubeVirt
+ - Azure
+ - PowerVS
+ type: string
+ required:
+ - type
+ type: object
+ release:
+ description: Release specifies the OCP release used for the NodePool.
+ This informs the ignition configuration for machines, as well
+ as other platform specific machine properties (e.g. an AMI on
+ the AWS platform).
+ properties:
+ image:
+ description: Image is the image pullspec of an OCP release payload
+ image.
+ pattern: ^(\w+\S+)$
+ type: string
+ required:
+ - image
+ type: object
+ replicas:
+ description: Replicas is the desired number of nodes the pool should
+ maintain. If unset, the default value is 0.
+ format: int32
+ type: integer
+ required:
+ - clusterName
+ - management
+ - platform
+ - release
+ type: object
+ status:
+ description: Status is the latest observed status of the NodePool.
+ properties:
+ conditions:
+ description: Conditions represents the latest available observations
+ of the node pool's current state.
+ items:
+ description: We define our own condition type since metav1.Condition
+ has validation for Reason that might be broken by what we bubble
+ up from CAPI. NodePoolCondition defines an observation of NodePool
+ resource operational state.
+ properties:
+ lastTransitionTime:
+ description: Last time the condition transitioned from one
+ status to another. This should be when the underlying condition
+ changed. If that is not known, then using the time when
+ the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: A human readable message indicating details about
+ the transition. This field may be empty.
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: The reason for the condition's last transition
+ in CamelCase. The specific API may choose whether or not
+ this field is considered a guaranteed API. This field may
+ not be empty.
+ type: string
+ severity:
+ description: Severity provides an explicit classification
+ of Reason code, so the users or machines can immediately
+ understand the current situation and act accordingly. The
+ Severity field MUST be set only when Status=False.
+ type: string
+ status:
+ description: Status of the condition, one of True, False,
+ Unknown.
+ type: string
+ type:
+ description: Type of condition in CamelCase or in foo.example.com/CamelCase.
+ Many .condition.type values are consistent across resources
+ like Available, but because arbitrary conditions can be
+ useful (see .node.status.conditions), the ability to deconflict
+ is important.
+ type: string
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ type: object
+ type: array
+ replicas:
+ description: Replicas is the latest observed number of nodes in
+ the pool.
+ format: int32
+ type: integer
+ version:
+ description: Version is the semantic version of the latest applied
+ release specified by the NodePool.
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: false
+ subresources:
+ scale:
+ specReplicasPath: .spec.replicas
+ statusReplicasPath: .status.replicas
+ status: {}
+ - additionalPrinterColumns:
+ - description: Cluster
+ jsonPath: .spec.clusterName
+ name: Cluster
+ type: string
+ - description: Desired Nodes
+ jsonPath: .spec.replicas
+ name: Desired Nodes
+ type: integer
+ - description: Available Nodes
+ jsonPath: .status.replicas
+ name: Current Nodes
+ type: integer
+ - description: Autoscaling Enabled
+ jsonPath: .status.conditions[?(@.type=="AutoscalingEnabled")].status
+ name: Autoscaling
+ type: string
+ - description: Node Autorepair Enabled
+ jsonPath: .status.conditions[?(@.type=="AutorepairEnabled")].status
+ name: Autorepair
+ type: string
+ - description: Current version
+ jsonPath: .status.version
+ name: Version
+ type: string
+ - description: UpdatingVersion in progress
+ jsonPath: .status.conditions[?(@.type=="UpdatingVersion")].status
+ name: UpdatingVersion
+ type: string
+ - description: UpdatingConfig in progress
+ jsonPath: .status.conditions[?(@.type=="UpdatingConfig")].status
+ name: UpdatingConfig
+ type: string
+ - description: Message
+ jsonPath: .status.conditions[?(@.type=="Ready")].message
+ name: Message
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: NodePool is a scalable set of worker nodes attached to a HostedCluster.
+ NodePool machine architectures are uniform within a given pool, and are
+ independent of the control plane’s underlying machine architecture.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource
+ this object represents. Servers may infer this from the endpoint the
+ client submits requests to. Cannot be updated. In CamelCase. More
+ info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
metadata:
type: object
spec:
@@ -26760,6 +35394,23 @@ objects:
type: boolean
inPlace:
description: InPlace is the configuration for in-place upgrades.
+ properties:
+ maxUnavailable:
+ anyOf:
+ - type: integer
+ - type: string
+ description: "MaxUnavailable is the maximum number of nodes
+ that can be unavailable during the update. \n Value can
+ be an absolute number (ex: 5) or a percentage of desired
+ nodes (ex: 10%). \n Absolute number is calculated from
+ percentage by rounding down. \n Defaults to 1. \n Example:
+ when this is set to 30%, a max of 30% of the nodes can
+ be made unschedulable/unavailable immediately when the
+ update starts. Once a set of nodes is updated, more nodes
+ can be made unschedulable for update, ensuring that the
+ total number of nodes schedulable at all times during
+ the update is at least 70% of desired nodes."
+ x-kubernetes-int-or-string: true
type: object
replace:
default:
@@ -26830,11 +35481,6 @@ objects:
required:
- upgradeType
type: object
- nodeCount:
- description: 'Deprecated: Use Replicas instead. NodeCount will be
- dropped in the next api release.'
- format: int32
- type: integer
nodeDrainTimeout:
description: 'NodeDrainTimeout is the total amount of time that
the controller will spend on draining a node. The default value
@@ -26844,6 +35490,13 @@ objects:
rolling update, which kind of defeats the purpose of the change.
In future we plan to propagate this field in-place. https://github.com/kubernetes-sigs/cluster-api/issues/5880'
type: string
+ pausedUntil:
+ description: 'PausedUntil is a field that can be used to pause reconciliation
+ on a resource. Either a date can be provided in RFC3339 format
+ or a boolean. If a date is provided: reconciliation is paused
+ on the resource until that date. If the boolean true is provided:
+ reconciliation is paused on the resource until the field is removed.'
+ type: string
platform:
description: Platform specifies the underlying infrastructure provider
for the NodePool and is used to configure platform specific behavior.
@@ -27323,6 +35976,24 @@ objects:
maintain. If unset, the default value is 0.
format: int32
type: integer
+ tuningConfig:
+ description: "TuningConfig is a list of references to ConfigMaps
+ containing serialized Tuned resources to define the tuning configuration
+ to be applied to nodes in the NodePool. The Tuned API is defined
+ here: \n https://github.com/openshift/cluster-node-tuning-operator/blob/2c76314fb3cc8f12aef4a0dcd67ddc3677d5b54f/pkg/apis/tuned/v1/tuned_types.go
+ \n Each ConfigMap must have a single key named \"tuned\" whose
+ value is the JSON or YAML of a serialized Tuned."
+ items:
+ description: LocalObjectReference contains enough information
+ to let you locate the referenced object inside the same namespace.
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
required:
- clusterName
- management
@@ -27394,8 +36065,6 @@ objects:
description: Version is the semantic version of the latest applied
release specified by the NodePool.
type: string
- required:
- - conditions
type: object
type: object
served: true
diff --git a/hack/gen-api-docs.sh b/hack/gen-api-docs.sh
index 4f76f66f788..e501292840d 100755
--- a/hack/gen-api-docs.sh
+++ b/hack/gen-api-docs.sh
@@ -24,5 +24,5 @@ export GO111MODULE="off"
${GEN_BIN} \
--config "${FAKE_REPOPATH}/docs/api-doc-gen/config.json" \
--template-dir "${FAKE_REPOPATH}/docs/api-doc-gen/templates" \
---api-dir ./api/v1alpha1 \
+--api-dir ./api/v1beta1 \
--out-file "${FAKE_REPOPATH}/docs/content/reference/api.md"
diff --git a/hypershift-operator/controllers/hostedcluster/hostedcluster_controller.go b/hypershift-operator/controllers/hostedcluster/hostedcluster_controller.go
index 0652d91851a..9a10806390f 100644
--- a/hypershift-operator/controllers/hostedcluster/hostedcluster_controller.go
+++ b/hypershift-operator/controllers/hostedcluster/hostedcluster_controller.go
@@ -42,8 +42,8 @@ import (
routev1 "github.com/openshift/api/route/v1"
agentv1 "github.com/openshift/cluster-api-provider-agent/api/v1alpha1"
"github.com/openshift/hypershift/api"
- "github.com/openshift/hypershift/api/util/ipnet"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ "github.com/openshift/hypershift/api/util/configrefs"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
ignitionserverreconciliation "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/ignitionserver"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/kas"
"github.com/openshift/hypershift/hypershift-operator/controllers/hostedcluster/internal/platform"
@@ -59,7 +59,6 @@ import (
"github.com/openshift/hypershift/support/capabilities"
"github.com/openshift/hypershift/support/certs"
"github.com/openshift/hypershift/support/config"
- "github.com/openshift/hypershift/support/globalconfig"
"github.com/openshift/hypershift/support/images"
"github.com/openshift/hypershift/support/infraid"
"github.com/openshift/hypershift/support/metrics"
@@ -80,9 +79,6 @@ import (
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
- "k8s.io/apimachinery/pkg/runtime"
- kjson "k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -140,7 +136,7 @@ type HostedClusterReconciler struct {
// 2) The OCP version being deployed is the latest version supported by Hypershift
HypershiftOperatorImage string
- // releaseProvider looks up the OCP version for the release images in HostedClusters
+ // ReleaseProvider looks up the OCP version for the release images in HostedClusters
ReleaseProvider releaseinfo.ProviderWithRegistryOverrides
// SetDefaultSecurityContext is used to configure Security Context for containers
@@ -342,29 +338,16 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques
return ctrl.Result{}, nil
}
- // Part zero: Handle deprecated fields.
- // This is done before anything else to prevent other update calls from failing if e.g. they are missing a new required field.
- // TODO (alberto): drop this as we cut beta and only support >= GA clusters.
+ // Part zero: fix up conversion
originalSpec := hcluster.Spec.DeepCopy()
- // Reconcile deprecated global configuration.
- if err := r.reconcileDeprecatedGlobalConfig(ctx, hcluster); err != nil {
- return ctrl.Result{}, err
- }
-
- // Reconcile deprecated AWS roles.
- switch hcluster.Spec.Platform.Type {
- case hyperv1.AWSPlatform:
- if err := r.reconcileDeprecatedAWSRoles(ctx, hcluster); err != nil {
+ // Reconcile converted AWS roles.
+ if hcluster.Spec.Platform.AWS != nil {
+ if err := r.dereferenceAWSRoles(ctx, &hcluster.Spec.Platform.AWS.RolesRef, hcluster.Namespace); err != nil {
return ctrl.Result{}, err
}
}
- // Reconcile deprecated network settings
- if err := r.reconcileDeprecatedNetworkSettings(hcluster); err != nil {
- return ctrl.Result{}, err
- }
-
// Update fields if required.
if !equality.Semantic.DeepEqual(&hcluster.Spec, originalSpec) {
log.Info("Updating deprecated fields for hosted cluster")
@@ -411,16 +394,14 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques
}
// Set version status
- {
- hcluster.Status.Version = computeClusterVersionStatus(r.Clock, hcluster, hcp)
- }
+ hcluster.Status.Version = computeClusterVersionStatus(r.Clock, hcluster, hcp)
// Set the ClusterVersionSucceeding based on the hostedcontrolplane
{
condition := metav1.Condition{
Type: string(hyperv1.ClusterVersionSucceeding),
Status: metav1.ConditionUnknown,
- Reason: hyperv1.ClusterVersionStatusUnknownReason,
+ Reason: hyperv1.StatusUnknownReason,
ObservedGeneration: hcluster.Generation,
}
if hcp != nil {
@@ -443,7 +424,7 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques
condition := &metav1.Condition{
Type: string(hyperv1.ClusterVersionUpgradeable),
Status: metav1.ConditionUnknown,
- Reason: hyperv1.ClusterVersionStatusUnknownReason,
+ Reason: hyperv1.StatusUnknownReason,
Message: "The hosted control plane is not found",
ObservedGeneration: hcluster.Generation,
}
@@ -464,7 +445,7 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques
condition := &metav1.Condition{
Type: string(hyperv1.HostedClusterDegraded),
Status: metav1.ConditionUnknown,
- Reason: hyperv1.ClusterVersionStatusUnknownReason,
+ Reason: hyperv1.StatusUnknownReason,
Message: "The hosted control plane is not found",
ObservedGeneration: hcluster.Generation,
}
@@ -526,7 +507,7 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques
} else {
condition.Status = metav1.ConditionTrue
condition.Message = "Configuration passes validation"
- condition.Reason = hyperv1.HostedClusterAsExpectedReason
+ condition.Reason = hyperv1.AsExpectedReason
}
meta.SetStatusCondition(&hcluster.Status.Conditions, condition)
}
@@ -544,7 +525,7 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques
} else {
condition.Status = metav1.ConditionTrue
condition.Message = "HostedCluster is supported by operator configuration"
- condition.Reason = hyperv1.HostedClusterAsExpectedReason
+ condition.Reason = hyperv1.AsExpectedReason
}
meta.SetStatusCondition(&hcluster.Status.Conditions, condition)
}
@@ -628,7 +609,7 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques
newCondition := metav1.Condition{
Type: string(hyperv1.IgnitionEndpointAvailable),
Status: metav1.ConditionUnknown,
- Reason: hyperv1.IgnitionServerDeploymentStatusUnknownReason,
+ Reason: hyperv1.StatusUnknownReason,
}
// Check to ensure the deployment exists and is available.
deployment := ignitionserver.Deployment(controlPlaneNamespace.Name)
@@ -637,7 +618,7 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques
newCondition = metav1.Condition{
Type: string(hyperv1.IgnitionEndpointAvailable),
Status: metav1.ConditionFalse,
- Reason: hyperv1.IgnitionServerDeploymentNotFoundReason,
+ Reason: hyperv1.NotFoundReason,
Message: "Ignition server deployment not found",
}
} else {
@@ -648,7 +629,7 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques
newCondition = metav1.Condition{
Type: string(hyperv1.IgnitionEndpointAvailable),
Status: metav1.ConditionFalse,
- Reason: hyperv1.IgnitionServerDeploymentUnavailableReason,
+ Reason: hyperv1.WaitingForAvailableReason,
Message: "Ignition server deployment is not yet available",
}
for _, cond := range deployment.Status.Conditions {
@@ -656,7 +637,7 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques
newCondition = metav1.Condition{
Type: string(hyperv1.IgnitionEndpointAvailable),
Status: metav1.ConditionTrue,
- Reason: hyperv1.IgnitionServerDeploymentAsExpectedReason,
+ Reason: hyperv1.AsExpectedReason,
Message: "Ignition server deployent is available",
}
break
@@ -831,6 +812,8 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques
}
_, ignitionServerHasHealthzHandler := util.ImageLabels(controlPlaneOperatorImageMetadata)[ignitionServerHealthzHandlerLabel]
_, controlplaneOperatorManagesIgnitionServer := util.ImageLabels(controlPlaneOperatorImageMetadata)[controlplaneOperatorManagesIgnitionServerLabel]
+ _, controlPlaneOperatorManagesMachineAutoscaler := util.ImageLabels(controlPlaneOperatorImageMetadata)[controlPlaneOperatorManagesMachineAutoscaler]
+ _, controlPlaneOperatorManagesMachineApprover := util.ImageLabels(controlPlaneOperatorImageMetadata)[controlPlaneOperatorManagesMachineApprover]
p, err := platform.GetPlatform(hcluster, utilitiesImage)
if err != nil {
@@ -1098,7 +1081,7 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques
// Reconcile global config related configmaps and secrets
{
if hcluster.Spec.Configuration != nil {
- configMapRefs := globalconfig.ConfigMapRefs(hcluster.Spec.Configuration)
+ configMapRefs := configrefs.ConfigMapRefs(hcluster.Spec.Configuration)
for _, configMapRef := range configMapRefs {
sourceCM := &corev1.ConfigMap{}
if err := r.Get(ctx, client.ObjectKey{Namespace: hcluster.Namespace, Name: configMapRef}, sourceCM); err != nil {
@@ -1118,7 +1101,7 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques
return ctrl.Result{}, fmt.Errorf("failed to reconcile referenced config map %s/%s: %w", destCM.Namespace, destCM.Name, err)
}
}
- secretRefs := globalconfig.SecretRefs(hcluster.Spec.Configuration)
+ secretRefs := configrefs.SecretRefs(hcluster.Spec.Configuration)
for _, secretRef := range secretRefs {
sourceSecret := &corev1.Secret{}
if err := r.Get(ctx, client.ObjectKey{Namespace: hcluster.Namespace, Name: secretRef}, sourceSecret); err != nil {
@@ -1272,19 +1255,32 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques
return ctrl.Result{}, fmt.Errorf("failed to reconcile capi provider: %w", err)
}
+ // Get release image version, if needed
+ var releaseImageVersion semver.Version
+ if !controlPlaneOperatorManagesMachineAutoscaler || !controlPlaneOperatorManagesMachineApprover || !controlplaneOperatorManagesIgnitionServer {
+ releaseInfo, err := r.ReleaseProvider.Lookup(ctx, hcluster.Spec.Release.Image, pullSecretBytes)
+ if err != nil {
+ return ctrl.Result{}, fmt.Errorf("failed to lookup release image: %w", err)
+ }
+ releaseImageVersion, err = semver.Parse(releaseInfo.Version())
+ if err != nil {
+ return ctrl.Result{}, fmt.Errorf("failed to parse release image version: %w", err)
+ }
+ }
+
// In >= 4.11 We want to move most of the components reconciliation down to the CPO https://issues.redhat.com/browse/HOSTEDCP-375.
// For IBM existing clusters < 4.11 we need to stay consistent and keep deploying existing pods to satisfy validations.
// TODO (alberto): drop this after dropping < 4.11 support.
- if _, hasLabel := util.ImageLabels(controlPlaneOperatorImageMetadata)[controlPlaneOperatorManagesMachineApprover]; !hasLabel {
- // Reconcile the autoscaler
- err = r.reconcileAutoscaler(ctx, createOrUpdate, hcluster, hcp, utilitiesImage)
+ if !controlPlaneOperatorManagesMachineAutoscaler {
+ // Reconcile the autoscaler.
+ err = r.reconcileAutoscaler(ctx, createOrUpdate, hcluster, hcp, utilitiesImage, pullSecretBytes, releaseImageVersion)
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to reconcile autoscaler: %w", err)
}
}
- if _, hasLabel := util.ImageLabels(controlPlaneOperatorImageMetadata)[controlPlaneOperatorManagesMachineAutoscaler]; !hasLabel {
- // Reconcile the machine approver
- if err = r.reconcileMachineApprover(ctx, createOrUpdate, hcluster, hcp, utilitiesImage); err != nil {
+ if !controlPlaneOperatorManagesMachineApprover {
+ // Reconcile the machine approver.
+ if err = r.reconcileMachineApprover(ctx, createOrUpdate, hcluster, hcp, utilitiesImage, pullSecretBytes, releaseImageVersion); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to reconcile machine approver: %w", err)
}
}
@@ -1311,6 +1307,7 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques
ignitionServerHasHealthzHandler,
r.ReleaseProvider.GetRegistryOverrides(),
r.ManagementClusterCapabilities.Has(capabilities.CapabilitySecurityContextConstraint),
+ config.MutatingOwnerRefFromHCP(hcp, releaseImageVersion),
); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to reconcile ignition server: %w", err)
}
@@ -1395,6 +1392,7 @@ func reconcileHostedControlPlane(hcp *hyperv1.HostedControlPlane, hcluster *hype
}
}
+ hcp.Spec.Channel = hcluster.Spec.Channel
hcp.Spec.ReleaseImage = hcluster.Spec.Release.Image
hcp.Spec.PullSecret = corev1.LocalObjectReference{Name: controlplaneoperator.PullSecret(hcp.Namespace).Name}
@@ -1410,8 +1408,6 @@ func reconcileHostedControlPlane(hcp *hyperv1.HostedControlPlane, hcluster *hype
hcp.Spec.ServiceAccountSigningKey = hcluster.Spec.ServiceAccountSigningKey
hcp.Spec.Networking = hcluster.Spec.Networking
- // Populate deprecated fields for compatibility with older control plane operators
- populateDeprecatedNetworkingFields(hcp, hcluster)
hcp.Spec.ClusterID = hcluster.Spec.ClusterID
hcp.Spec.InfraID = hcluster.Spec.InfraID
@@ -1449,75 +1445,9 @@ func reconcileHostedControlPlane(hcp *hyperv1.HostedControlPlane, hcluster *hype
hcp.Spec.Platform.Type = hyperv1.NonePlatform
}
- // Backward compatible conversions.
- // TODO (alberto): Drop this as we go GA in only support targeted release.
- switch hcluster.Spec.Platform.Type {
- case hyperv1.AWSPlatform:
- // For compatibility with versions of the CPO < 4.12, the HCP KubeCloudControllerCreds secret ref
- // and the roles need to be populated so the old CPO can operate.
- ensureHCPAWSRolesBackwardCompatibility(hcluster, hcp)
- }
-
- if hcluster.Spec.Configuration != nil {
- hcp.Spec.Configuration = hcluster.Spec.Configuration.DeepCopy()
- // for compatibility with previous versions of the CPO, the hcp configuration should be
- // populated with individual fields *AND* the previous raw extension resources.
- items, err := configurationFieldsToRawExtensions(hcluster.Spec.Configuration)
- if err != nil {
- return fmt.Errorf("failed to convert configuration fields to raw extension: %w", err)
- }
- // TODO: cannot remove until IBM's production fleet (4.9_openshift, 4.10_openshift) of control-plane-operators
- // are upgraded to versions that read validation information from new sections
- hcp.Spec.Configuration.Items = items
- secretRef := []corev1.LocalObjectReference{}
- configMapRef := []corev1.LocalObjectReference{}
- for _, secretName := range globalconfig.SecretRefs(hcluster.Spec.Configuration) {
- secretRef = append(secretRef, corev1.LocalObjectReference{
- Name: secretName,
- })
- }
- for _, configMapName := range globalconfig.ConfigMapRefs(hcluster.Spec.Configuration) {
- configMapRef = append(configMapRef, corev1.LocalObjectReference{
- Name: configMapName,
- })
- }
- hcp.Spec.Configuration.SecretRefs = secretRef
- hcp.Spec.Configuration.ConfigMapRefs = configMapRef
- } else {
- hcp.Spec.Configuration = nil
- }
-
return nil
}
-func ensureHCPAWSRolesBackwardCompatibility(hc *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane) {
- hcp.Spec.Platform.AWS.KubeCloudControllerCreds = corev1.LocalObjectReference{Name: platformaws.KubeCloudControllerCredsSecret("").Name}
- hcp.Spec.Platform.AWS.Roles = []hyperv1.AWSRoleCredentials{
- {
- ARN: hc.Spec.Platform.AWS.RolesRef.IngressARN,
- Namespace: "openshift-ingress-operator",
- Name: "cloud-credentials",
- },
- {
- ARN: hc.Spec.Platform.AWS.RolesRef.ImageRegistryARN,
- Namespace: "openshift-image-registry",
- Name: "installer-cloud-credentials",
- },
- {
- ARN: hc.Spec.Platform.AWS.RolesRef.StorageARN,
- Namespace: "openshift-cluster-csi-drivers",
- Name: "ebs-cloud-credentials",
- },
- {
- ARN: hc.Spec.Platform.AWS.RolesRef.NetworkARN,
- Namespace: "openshift-cloud-network-config-controller",
- Name: "cloud-credentials",
- },
- }
-
- hcp.Spec.Platform.AWS.RolesRef = hc.Spec.Platform.AWS.RolesRef
-}
-
// reconcileCAPIManager orchestrates orchestrates of all CAPI manager components.
func (r *HostedClusterReconciler) reconcileCAPIManager(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster) error {
controlPlaneNamespace := manifests.HostedControlPlaneNamespace(hcluster.Namespace, hcluster.Name)
@@ -1868,7 +1798,7 @@ func servicePublishingStrategyByType(hcp *hyperv1.HostedCluster, svcType hyperv1
// reconcileAutoscaler orchestrates reconciliation of autoscaler components using
// both the HostedCluster and the HostedControlPlane which the autoscaler takes
// inputs from.
-func (r *HostedClusterReconciler) reconcileAutoscaler(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, utilitiesImage string) error {
+func (r *HostedClusterReconciler) reconcileAutoscaler(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, utilitiesImage string, pullSecretBytes []byte, releaseVersion semver.Version) error {
controlPlaneNamespace := manifests.HostedControlPlaneNamespace(hcluster.Namespace, hcluster.Name)
err := r.Client.Get(ctx, client.ObjectKeyFromObject(controlPlaneNamespace), controlPlaneNamespace)
if err != nil {
@@ -1944,13 +1874,13 @@ func (r *HostedClusterReconciler) reconcileAutoscaler(ctx context.Context, creat
// image based on the following order of precedence (from most to least
// preferred):
//
-// 1. The image specified by the ControlPlaneOperatorImageAnnotation on the
-// HostedCluster resource itself
-// 2. The hypershift image specified in the release payload indicated by the
-// HostedCluster's release field
-// 3. The hypershift-operator's own image for release versions 4.9 and 4.10
-// 4. The registry.ci.openshift.org/hypershift/hypershift:4.8 image for release
-// version 4.8
+// 1. The image specified by the ControlPlaneOperatorImageAnnotation on the
+// HostedCluster resource itself
+// 2. The hypershift image specified in the release payload indicated by the
+// HostedCluster's release field
+// 3. The hypershift-operator's own image for release versions 4.9 and 4.10
+// 4. The registry.ci.openshift.org/hypershift/hypershift:4.8 image for release
+// version 4.8
//
// If no image can be found according to these rules, an error is returned.
func GetControlPlaneOperatorImage(ctx context.Context, hc *hyperv1.HostedCluster, releaseProvider releaseinfo.Provider, hypershiftOperatorImage string, pullSecret []byte) (string, error) {
@@ -2936,10 +2866,16 @@ func reconcileAutoScalerRoleBinding(binding *rbacv1.RoleBinding, role *rbacv1.Ro
// computeClusterVersionStatus determines the ClusterVersionStatus of the
// given HostedCluster and returns it.
func computeClusterVersionStatus(clock clock.WithTickerAndDelayedExecution, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane) *hyperv1.ClusterVersionStatus {
+ if hcp != nil && hcp.Status.VersionStatus != nil {
+ return hcp.Status.VersionStatus
+ }
+
// If there's no history, rebuild it from scratch.
if hcluster.Status.Version == nil || len(hcluster.Status.Version.History) == 0 {
return &hyperv1.ClusterVersionStatus{
- Desired: hcluster.Spec.Release,
+ Desired: configv1.Release{
+ Image: hcluster.Spec.Release.Image,
+ },
ObservedGeneration: hcluster.Generation,
History: []configv1.UpdateHistory{
{
@@ -2951,9 +2887,13 @@ func computeClusterVersionStatus(clock clock.WithTickerAndDelayedExecution, hclu
}
}
- // Reconcile the current version with the latest resource states.
+ // Assume the previous status is still current.
version := hcluster.Status.Version.DeepCopy()
+ // The following code is legacy support to preserve
+ // compatibility with older HostedControlPlane controllers, which
+ // may not be populating hcp.Status.VersionStatus.
+
// If the hosted control plane doesn't exist, there's no way to assess the
// rollout so return early.
if hcp == nil {
@@ -2965,6 +2905,7 @@ func computeClusterVersionStatus(clock clock.WithTickerAndDelayedExecution, hclu
// quite right because the intent here is to identify a terminal rollout
// state. For now it assumes when status.releaseImage matches, that rollout
// is definitely done.
+ //lint:ignore SA1019 consume the deprecated property until we can drop compatibility with HostedControlPlane controllers that do not populate hcp.Status.VersionStatus.
hcpRolloutComplete := (hcp.Spec.ReleaseImage == hcp.Status.ReleaseImage) && (version.Desired.Image == hcp.Status.ReleaseImage)
if !hcpRolloutComplete {
return version
@@ -2972,8 +2913,11 @@ func computeClusterVersionStatus(clock clock.WithTickerAndDelayedExecution, hclu
// The rollout is complete, so update the current history entry
version.History[0].State = configv1.CompletedUpdate
+ //lint:ignore SA1019 consume the deprecated property until we can drop compatibility with HostedControlPlane controllers that do not populate hcp.Status.VersionStatus.
version.History[0].Version = hcp.Status.Version
+ //lint:ignore SA1019 consume the deprecated property until we can drop compatibility with HostedControlPlane controllers that do not populate hcp.Status.VersionStatus.
if hcp.Status.LastReleaseImageTransitionTime != nil {
+ //lint:ignore SA1019 consume the deprecated property until we can drop compatibility with HostedControlPlane controllers that do not populate hcp.Status.VersionStatus.
version.History[0].CompletionTime = hcp.Status.LastReleaseImageTransitionTime.DeepCopy()
}
@@ -3002,7 +2946,7 @@ func computeHostedClusterAvailability(hcluster *hyperv1.HostedCluster, hcp *hype
// Determine whether the hosted control plane is available.
hcpAvailableStatus := metav1.ConditionFalse
hcpAvailableMessage := "Waiting for hosted control plane to be healthy"
- hcpAvailableReason := hyperv1.HostedClusterWaitingForAvailableReason
+ hcpAvailableReason := hyperv1.WaitingForAvailableReason
var hcpAvailableCondition *metav1.Condition
if hcp != nil {
hcpAvailableCondition = meta.FindStatusCondition(hcp.Status.Conditions, string(hyperv1.HostedControlPlaneAvailable))
@@ -3011,7 +2955,7 @@ func computeHostedClusterAvailability(hcluster *hyperv1.HostedCluster, hcp *hype
hcpAvailableStatus = hcpAvailableCondition.Status
hcpAvailableMessage = hcpAvailableCondition.Message
if hcpAvailableStatus == metav1.ConditionTrue {
- hcpAvailableReason = hyperv1.HostedClusterAsExpectedReason
+ hcpAvailableReason = hyperv1.AsExpectedReason
hcpAvailableMessage = "The hosted control plane is available"
}
}
@@ -3320,7 +3264,7 @@ func (r *HostedClusterReconciler) reconcileClusterPrometheusRBAC(ctx context.Con
return nil
}
-func (r *HostedClusterReconciler) reconcileMachineApprover(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, utilitiesImage string) error {
+func (r *HostedClusterReconciler) reconcileMachineApprover(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, utilitiesImage string, pullSecretBytes []byte, releaseVersion semver.Version) error {
controlPlaneNamespaceName := manifests.HostedControlPlaneNamespace(hcluster.Namespace, hcluster.Name).Name
// Reconcile machine-approver role
@@ -3487,14 +3431,6 @@ func (r *HostedClusterReconciler) validateConfigAndClusterCapabilities(ctx conte
errs = append(errs, err)
}
- // TODO: Drop when we no longer need to support versions < 4.11
- if hc.Spec.Configuration != nil {
- _, err := globalconfig.ParseGlobalConfig(ctx, hc.Spec.Configuration)
- if err != nil {
- errs = append(errs, fmt.Errorf("failed to parse cluster configuration: %w", err))
- }
- }
-
return utilerrors.NewAggregate(errs)
}
@@ -4482,164 +4418,6 @@ func (r *HostedClusterReconciler) serviceAccountSigningKeyBytes(ctx context.Cont
return privateKeyPEMBytes, publicKeyPEMBytes, nil
}
-// reconcileDeprecatedGlobalConfig converts previously specified configuration in RawExtension format to
-// the new configuration fields. It clears the previous, deprecated configuration.
-// TODO: drop when we no longer need to support versions < 4.11
-func (r *HostedClusterReconciler) reconcileDeprecatedGlobalConfig(ctx context.Context, hc *hyperv1.HostedCluster) error {
-
- // Skip if no deprecated configuration is set
- if hc.Spec.Configuration == nil || len(hc.Spec.Configuration.Items) == 0 {
- return nil
- }
-
- gconfig, err := globalconfig.ParseGlobalConfig(ctx, hc.Spec.Configuration)
- if err != nil {
- // This should never happen because at this point, the global configuration
- // should be valid
- return err
- }
-
- // Copy over config from the raw extension
- if gconfig.APIServer != nil {
- hc.Spec.Configuration.APIServer = &gconfig.APIServer.Spec
- }
- if gconfig.Authentication != nil {
- hc.Spec.Configuration.Authentication = &gconfig.Authentication.Spec
- }
- if gconfig.FeatureGate != nil {
- hc.Spec.Configuration.FeatureGate = &gconfig.FeatureGate.Spec
- }
- if gconfig.Image != nil {
- hc.Spec.Configuration.Image = &gconfig.Image.Spec
- }
- if gconfig.Ingress != nil {
- hc.Spec.Configuration.Ingress = &gconfig.Ingress.Spec
- }
- if gconfig.Network != nil {
- hc.Spec.Configuration.Network = &gconfig.Network.Spec
- }
- if gconfig.OAuth != nil {
- hc.Spec.Configuration.OAuth = &gconfig.OAuth.Spec
- }
- if gconfig.Scheduler != nil {
- hc.Spec.Configuration.Scheduler = &gconfig.Scheduler.Spec
- }
- if gconfig.Proxy != nil {
- hc.Spec.Configuration.Proxy = &gconfig.Proxy.Spec
- }
-
- return nil
-}
-
-// reconcileDeprecatedAWSRoles converts previously specified input in .aws.roles format to
-// the new the rolesRef field. It clears the previous, deprecated configuration.
-// TODO: drop when we no longer need to support versions < 4.12
-func (r *HostedClusterReconciler) reconcileDeprecatedAWSRoles(ctx context.Context, hc *hyperv1.HostedCluster) error {
- // Migrate ARNs from slice into typed fields.
- log := ctrl.LoggerFrom(ctx)
- for _, v := range hc.Spec.Platform.AWS.Roles {
- switch v.Namespace {
- case "openshift-image-registry":
- hc.Spec.Platform.AWS.RolesRef.ImageRegistryARN = v.ARN
- case "openshift-ingress-operator":
- hc.Spec.Platform.AWS.RolesRef.IngressARN = v.ARN
- case "openshift-cloud-network-config-controller":
- hc.Spec.Platform.AWS.RolesRef.NetworkARN = v.ARN
- case "openshift-cluster-csi-drivers":
- hc.Spec.Platform.AWS.RolesRef.StorageARN = v.ARN
- default:
- log.Info("Invalid namespace for deprecated role", "namespace", v.Namespace)
- }
- }
- hc.Spec.Platform.AWS.Roles = nil
-
- // Migrate ARNs from secrets into typed fields.
- if hc.Spec.Platform.AWS.NodePoolManagementCreds.Name != "" {
- nodePoolManagementARN, err := r.getARNFromSecret(ctx, hc.Spec.Platform.AWS.NodePoolManagementCreds.Name, hc.Namespace)
- if err != nil {
- return fmt.Errorf("failed to get ARN from secret: %w", err)
- }
- hc.Spec.Platform.AWS.RolesRef.NodePoolManagementARN = nodePoolManagementARN
- hc.Spec.Platform.AWS.NodePoolManagementCreds = corev1.LocalObjectReference{}
- }
-
- if hc.Spec.Platform.AWS.ControlPlaneOperatorCreds.Name != "" {
- controlPlaneOperatorARN, err := r.getARNFromSecret(ctx, hc.Spec.Platform.AWS.ControlPlaneOperatorCreds.Name, hc.Namespace)
- if err != nil {
- return fmt.Errorf("failed to get ARN from secret: %w", err)
- }
- hc.Spec.Platform.AWS.RolesRef.ControlPlaneOperatorARN = controlPlaneOperatorARN
- hc.Spec.Platform.AWS.ControlPlaneOperatorCreds = corev1.LocalObjectReference{}
- }
-
- if hc.Spec.Platform.AWS.KubeCloudControllerCreds.Name != "" {
- kubeCloudControllerARN, err := r.getARNFromSecret(ctx, hc.Spec.Platform.AWS.KubeCloudControllerCreds.Name, hc.Namespace)
- if err != nil {
- return fmt.Errorf("failed to get ARN from secret: %w", err)
- }
- hc.Spec.Platform.AWS.RolesRef.KubeCloudControllerARN = kubeCloudControllerARN
- hc.Spec.Platform.AWS.KubeCloudControllerCreds = corev1.LocalObjectReference{}
- }
-
- return nil
-}
-
-func (r *HostedClusterReconciler) reconcileDeprecatedNetworkSettings(hc *hyperv1.HostedCluster) error {
- if hc.Spec.Networking.MachineCIDR != "" {
- cidr, err := ipnet.ParseCIDR(hc.Spec.Networking.MachineCIDR)
- if err != nil {
- return fmt.Errorf("failed to parse machine CIDR: %w", err)
- }
- hc.Spec.Networking.MachineNetwork = []hyperv1.MachineNetworkEntry{
- {
- CIDR: *cidr,
- },
- }
- hc.Spec.Networking.MachineCIDR = ""
- }
- if hc.Spec.Networking.PodCIDR != "" {
- cidr, err := ipnet.ParseCIDR(hc.Spec.Networking.PodCIDR)
- if err != nil {
- return fmt.Errorf("failed to parse pod CIDR: %w", err)
- }
- hc.Spec.Networking.ClusterNetwork = []hyperv1.ClusterNetworkEntry{
- {
- CIDR: *cidr,
- },
- }
- hc.Spec.Networking.PodCIDR = ""
- }
- if hc.Spec.Networking.ServiceCIDR != "" {
- cidr, err := ipnet.ParseCIDR(hc.Spec.Networking.ServiceCIDR)
- if err != nil {
- return fmt.Errorf("failed to parse service CIDR: %w", err)
- }
- hc.Spec.Networking.ServiceNetwork = []hyperv1.ServiceNetworkEntry{
- {
- CIDR: *cidr,
- },
- }
- hc.Spec.Networking.ServiceCIDR = ""
- }
- return nil
-}
-
-func populateDeprecatedNetworkingFields(hcp *hyperv1.HostedControlPlane, hc *hyperv1.HostedCluster) {
- hcp.Spec.ServiceCIDR = util.FirstServiceCIDR(hc.Spec.Networking.ServiceNetwork)
- hcp.Spec.PodCIDR = util.FirstClusterCIDR(hc.Spec.Networking.ClusterNetwork)
- hcp.Spec.MachineCIDR = util.FirstMachineCIDR(hc.Spec.Networking.MachineNetwork)
- hcp.Spec.NetworkType = hc.Spec.Networking.NetworkType
- if hc.Spec.Networking.APIServer != nil {
- hcp.Spec.APIAdvertiseAddress = hc.Spec.Networking.APIServer.AdvertiseAddress
- hcp.Spec.APIPort = hc.Spec.Networking.APIServer.Port
- hcp.Spec.APIAllowedCIDRBlocks = hc.Spec.Networking.APIServer.AllowedCIDRBlocks
- } else {
- hcp.Spec.APIAdvertiseAddress = nil
- hcp.Spec.APIPort = nil
- hcp.Spec.APIAllowedCIDRBlocks = nil
- }
-}
-
func (r *HostedClusterReconciler) getARNFromSecret(ctx context.Context, name, namespace string) (string, error) {
creds := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
@@ -4657,109 +4435,32 @@ func (r *HostedClusterReconciler) getARNFromSecret(ctx context.Context, name, na
return credContent.Section("default").Key("role_arn").String(), nil
}
-func configurationFieldsToRawExtensions(config *hyperv1.ClusterConfiguration) ([]runtime.RawExtension, error) {
- var result []runtime.RawExtension
- if config == nil {
- return result, nil
- }
- if config.APIServer != nil {
- result = append(result, runtime.RawExtension{
- Object: &configv1.APIServer{
- Spec: *config.APIServer,
- },
- })
- }
- if config.Authentication != nil {
- result = append(result, runtime.RawExtension{
- Object: &configv1.Authentication{
- Spec: *config.Authentication,
- },
- })
- }
- if config.FeatureGate != nil {
- result = append(result, runtime.RawExtension{
- Object: &configv1.FeatureGate{
- Spec: *config.FeatureGate,
- },
- })
- }
- if config.Image != nil {
- result = append(result, runtime.RawExtension{
- Object: &configv1.Image{
- Spec: *config.Image,
- },
- })
- }
- if config.Ingress != nil {
- result = append(result, runtime.RawExtension{
- Object: &configv1.Ingress{
- Spec: *config.Ingress,
- },
- })
- }
- if config.Network != nil {
- result = append(result, runtime.RawExtension{
- Object: &configv1.Network{
- Spec: *config.Network,
- },
- })
- }
- if config.OAuth != nil {
- result = append(result, runtime.RawExtension{
- Object: &configv1.OAuth{
- Spec: *config.OAuth,
- },
- })
- }
- if config.Scheduler != nil {
- result = append(result, runtime.RawExtension{
- Object: &configv1.Scheduler{
- Spec: *config.Scheduler,
- },
- })
- }
- if config.Proxy != nil {
- result = append(result, runtime.RawExtension{
- Object: &configv1.Proxy{
- Spec: *config.Proxy,
- },
- })
- }
-
- serializer := kjson.NewSerializerWithOptions(
- kjson.DefaultMetaFactory, api.Scheme, api.Scheme,
- kjson.SerializerOptions{Yaml: false, Pretty: false, Strict: true},
- )
- for idx := range result {
- gvk, err := apiutil.GVKForObject(result[idx].Object, api.Scheme)
+func (r *HostedClusterReconciler) dereferenceAWSRoles(ctx context.Context, rolesRef *hyperv1.AWSRolesRef, ns string) error {
+ if strings.HasPrefix(rolesRef.NodePoolManagementARN, "arn-from-secret::") {
+ secretName := strings.TrimPrefix(rolesRef.NodePoolManagementARN, "arn-from-secret::")
+ arn, err := r.getARNFromSecret(ctx, secretName, ns)
if err != nil {
- return nil, fmt.Errorf("failed to get gvk for %T: %w", result[idx].Object, err)
+ return fmt.Errorf("failed to get ARN from secret %s/%s: %w", ns, secretName, err)
}
- result[idx].Object.GetObjectKind().SetGroupVersionKind(gvk)
+ rolesRef.NodePoolManagementARN = arn
+ }
- // We do a DeepEqual in the upsert func, so we must match the deserialized version from
- // the server which has Raw set and Object unset.
- b := &bytes.Buffer{}
- if err := serializer.Encode(result[idx].Object, b); err != nil {
- return nil, fmt.Errorf("failed to marshal %+v: %w", result[idx].Object, err)
+ if strings.HasPrefix(rolesRef.ControlPlaneOperatorARN, "arn-from-secret::") {
+ secretName := strings.TrimPrefix(rolesRef.ControlPlaneOperatorARN, "arn-from-secret::")
+ arn, err := r.getARNFromSecret(ctx, secretName, ns)
+ if err != nil {
+ return fmt.Errorf("failed to get ARN from secret %s/%s: %w", ns, secretName, err)
}
+ rolesRef.ControlPlaneOperatorARN = arn
+ }
- // Remove the status part of the serialized resource. We only have
- // spec to begin with and status causes incompatibilities with previous
- // versions of the CPO
- unstructuredObject := &unstructured.Unstructured{}
- if _, _, err := unstructured.UnstructuredJSONScheme.Decode(b.Bytes(), nil, unstructuredObject); err != nil {
- return nil, fmt.Errorf("failed to decode resource into unstructured: %w", err)
- }
- unstructured.RemoveNestedField(unstructuredObject.Object, "status")
- b = &bytes.Buffer{}
- if err := unstructured.UnstructuredJSONScheme.Encode(unstructuredObject, b); err != nil {
- return nil, fmt.Errorf("failed to serialize unstructured resource: %w", err)
+ if strings.HasPrefix(rolesRef.KubeCloudControllerARN, "arn-from-secret::") {
+ secretName := strings.TrimPrefix(rolesRef.KubeCloudControllerARN, "arn-from-secret::")
+ arn, err := r.getARNFromSecret(ctx, secretName, ns)
+ if err != nil {
+ return fmt.Errorf("failed to get ARN from secret %s/%s: %w", ns, secretName, err)
}
-
- result[idx].Raw = bytes.TrimSuffix(b.Bytes(), []byte("\n"))
- result[idx].Object = nil
+ rolesRef.KubeCloudControllerARN = arn
}
-
- return result, nil
+ return nil
}
diff --git a/hypershift-operator/controllers/hostedcluster/hostedcluster_controller_test.go b/hypershift-operator/controllers/hostedcluster/hostedcluster_controller_test.go
index 729fe5d3614..586c7a1d603 100644
--- a/hypershift-operator/controllers/hostedcluster/hostedcluster_controller_test.go
+++ b/hypershift-operator/controllers/hostedcluster/hostedcluster_controller_test.go
@@ -1,9 +1,7 @@
package hostedcluster
import (
- "bytes"
"context"
- "encoding/json"
"errors"
"fmt"
"testing"
@@ -15,13 +13,11 @@ import (
. "github.com/onsi/gomega"
configv1 "github.com/openshift/api/config/v1"
"github.com/openshift/hypershift/api"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests"
- platformaws "github.com/openshift/hypershift/hypershift-operator/controllers/hostedcluster/internal/platform/aws"
"github.com/openshift/hypershift/hypershift-operator/controllers/hostedcluster/internal/platform/kubevirt"
"github.com/openshift/hypershift/hypershift-operator/controllers/manifests/autoscaler"
"github.com/openshift/hypershift/hypershift-operator/controllers/manifests/controlplaneoperator"
- hyperapi "github.com/openshift/hypershift/support/api"
"github.com/openshift/hypershift/support/capabilities"
fakecapabilities "github.com/openshift/hypershift/support/capabilities/fake"
fakereleaseprovider "github.com/openshift/hypershift/support/releaseinfo/fake"
@@ -34,14 +30,10 @@ import (
"k8s.io/apimachinery/pkg/api/equality"
errors2 "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
- "k8s.io/apimachinery/pkg/runtime"
- serializerjson "k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/clock"
- clocktesting "k8s.io/utils/clock/testing"
"k8s.io/utils/pointer"
capiawsv1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
capibmv1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta1"
@@ -74,7 +66,7 @@ func TestReconcileHostedControlPlaneUpgrades(t *testing.T) {
},
Status: hyperv1.HostedClusterStatus{
Version: &hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{Image: "a"},
+ Desired: configv1.Release{Image: "a"},
History: []configv1.UpdateHistory{
{Image: "a", State: configv1.PartialUpdate},
},
@@ -96,7 +88,7 @@ func TestReconcileHostedControlPlaneUpgrades(t *testing.T) {
},
Status: hyperv1.HostedClusterStatus{
Version: &hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{Image: "a"},
+ Desired: configv1.Release{Image: "a"},
History: []configv1.UpdateHistory{
{Image: "a", State: configv1.CompletedUpdate},
},
@@ -106,7 +98,11 @@ func TestReconcileHostedControlPlaneUpgrades(t *testing.T) {
ControlPlane: hyperv1.HostedControlPlane{
ObjectMeta: metav1.ObjectMeta{CreationTimestamp: Now},
Spec: hyperv1.HostedControlPlaneSpec{ReleaseImage: "a"},
- Status: hyperv1.HostedControlPlaneStatus{ReleaseImage: "a"},
+ Status: hyperv1.HostedControlPlaneStatus{
+ VersionStatus: &hyperv1.ClusterVersionStatus{
+ Desired: configv1.Release{Image: "a"},
+ },
+ },
},
ExpectedImage: "b",
},
@@ -119,7 +115,7 @@ func TestReconcileHostedControlPlaneUpgrades(t *testing.T) {
},
Status: hyperv1.HostedClusterStatus{
Version: &hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{Image: "b"},
+ Desired: configv1.Release{Image: "b"},
History: []configv1.UpdateHistory{
{Image: "b", State: configv1.PartialUpdate},
{Image: "a", State: configv1.CompletedUpdate},
@@ -130,7 +126,11 @@ func TestReconcileHostedControlPlaneUpgrades(t *testing.T) {
ControlPlane: hyperv1.HostedControlPlane{
ObjectMeta: metav1.ObjectMeta{CreationTimestamp: Now},
Spec: hyperv1.HostedControlPlaneSpec{ReleaseImage: "a"},
- Status: hyperv1.HostedControlPlaneStatus{ReleaseImage: "a"},
+ Status: hyperv1.HostedControlPlaneStatus{
+ VersionStatus: &hyperv1.ClusterVersionStatus{
+ Desired: configv1.Release{Image: "a"},
+ },
+ },
},
ExpectedImage: "b",
},
@@ -151,135 +151,6 @@ func TestReconcileHostedControlPlaneUpgrades(t *testing.T) {
}
}
-func TestComputeClusterVersionStatus(t *testing.T) {
- tests := map[string]struct {
- // TODO: incorporate conditions?
- Cluster hyperv1.HostedCluster
- ControlPlane hyperv1.HostedControlPlane
- ExpectedStatus hyperv1.ClusterVersionStatus
- }{
- "missing history causes new rollout": {
- Cluster: hyperv1.HostedCluster{
- Spec: hyperv1.HostedClusterSpec{Release: hyperv1.Release{Image: "a"}},
- },
- ControlPlane: hyperv1.HostedControlPlane{
- Spec: hyperv1.HostedControlPlaneSpec{ReleaseImage: "a"},
- Status: hyperv1.HostedControlPlaneStatus{},
- },
- ExpectedStatus: hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{Image: "a"},
- History: []configv1.UpdateHistory{
- {Image: "a", State: configv1.PartialUpdate, StartedTime: Now},
- },
- },
- },
- "hosted cluster spec is newer than completed control plane spec should not cause update to be completed": {
- Cluster: hyperv1.HostedCluster{
- Spec: hyperv1.HostedClusterSpec{Release: hyperv1.Release{Image: "b"}},
- Status: hyperv1.HostedClusterStatus{
- Version: &hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{Image: "b"},
- History: []configv1.UpdateHistory{
- {Image: "b", Version: "", State: configv1.PartialUpdate, StartedTime: Now},
- {Image: "a", Version: "1.0.0", State: configv1.CompletedUpdate, StartedTime: Now, CompletionTime: &Later},
- },
- },
- },
- },
- ControlPlane: hyperv1.HostedControlPlane{
- Spec: hyperv1.HostedControlPlaneSpec{ReleaseImage: "a"},
- Status: hyperv1.HostedControlPlaneStatus{ReleaseImage: "a", Version: "1.0.0", LastReleaseImageTransitionTime: &Now},
- },
- ExpectedStatus: hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{Image: "b"},
- History: []configv1.UpdateHistory{
- {Image: "b", Version: "", State: configv1.PartialUpdate, StartedTime: Now},
- {Image: "a", Version: "1.0.0", State: configv1.CompletedUpdate, StartedTime: Now, CompletionTime: &Later},
- },
- },
- },
- "completed rollout updates history": {
- Cluster: hyperv1.HostedCluster{
- Spec: hyperv1.HostedClusterSpec{Release: hyperv1.Release{Image: "a"}},
- Status: hyperv1.HostedClusterStatus{
- Version: &hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{Image: "a"},
- History: []configv1.UpdateHistory{
- {Image: "a", State: configv1.PartialUpdate, StartedTime: Now},
- },
- },
- },
- },
- ControlPlane: hyperv1.HostedControlPlane{
- Spec: hyperv1.HostedControlPlaneSpec{ReleaseImage: "a"},
- Status: hyperv1.HostedControlPlaneStatus{ReleaseImage: "a", Version: "1.0.0", LastReleaseImageTransitionTime: &Later},
- },
- ExpectedStatus: hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{Image: "a"},
- History: []configv1.UpdateHistory{
- {Image: "a", Version: "1.0.0", State: configv1.CompletedUpdate, StartedTime: Now, CompletionTime: &Later},
- },
- },
- },
- "new rollout happens after existing rollout completes": {
- Cluster: hyperv1.HostedCluster{
- Spec: hyperv1.HostedClusterSpec{Release: hyperv1.Release{Image: "b"}},
- Status: hyperv1.HostedClusterStatus{
- Version: &hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{Image: "a"},
- History: []configv1.UpdateHistory{
- {Image: "a", State: configv1.CompletedUpdate, StartedTime: Now, CompletionTime: &Later},
- },
- },
- },
- },
- ControlPlane: hyperv1.HostedControlPlane{
- Spec: hyperv1.HostedControlPlaneSpec{ReleaseImage: "a"},
- Status: hyperv1.HostedControlPlaneStatus{ReleaseImage: "a", Version: "1.0.0", LastReleaseImageTransitionTime: &Later},
- },
- ExpectedStatus: hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{Image: "b"},
- History: []configv1.UpdateHistory{
- {Image: "b", State: configv1.PartialUpdate, StartedTime: Now},
- {Image: "a", Version: "1.0.0", State: configv1.CompletedUpdate, StartedTime: Now, CompletionTime: &Later},
- },
- },
- },
- "new rollout is deferred until existing rollout completes": {
- Cluster: hyperv1.HostedCluster{
- Spec: hyperv1.HostedClusterSpec{Release: hyperv1.Release{Image: "b"}},
- Status: hyperv1.HostedClusterStatus{
- Version: &hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{Image: "a"},
- History: []configv1.UpdateHistory{
- {Image: "a", State: configv1.PartialUpdate, StartedTime: Now},
- },
- },
- },
- },
- ControlPlane: hyperv1.HostedControlPlane{
- Spec: hyperv1.HostedControlPlaneSpec{ReleaseImage: "a"},
- Status: hyperv1.HostedControlPlaneStatus{},
- },
- ExpectedStatus: hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{Image: "a"},
- History: []configv1.UpdateHistory{
- {Image: "a", State: configv1.PartialUpdate, StartedTime: Now},
- },
- },
- },
- }
-
- for name, test := range tests {
- t.Run(name, func(t *testing.T) {
- actualStatus := computeClusterVersionStatus(clocktesting.NewFakeClock(Now.Time), &test.Cluster, &test.ControlPlane)
- if !equality.Semantic.DeepEqual(&test.ExpectedStatus, actualStatus) {
- t.Errorf(cmp.Diff(&test.ExpectedStatus, actualStatus))
- }
- })
- }
-}
-
func TestComputeHostedClusterAvailability(t *testing.T) {
tests := map[string]struct {
Cluster hyperv1.HostedCluster
@@ -486,10 +357,6 @@ func TestReconcileHostedControlPlaneAPINetwork(t *testing.T) {
}
g := NewGomegaWithT(t)
if test.networking != nil {
- // deprecated values should still be populated
- g.Expect(hostedControlPlane.Spec.APIPort).To(Equal(test.expectedAPIPort))
- g.Expect(hostedControlPlane.Spec.APIAdvertiseAddress).To(Equal(test.expectedAPIAdvertiseAddress))
-
// new values should also be populated
g.Expect(hostedControlPlane.Spec.Networking.APIServer).ToNot(BeNil())
g.Expect(hostedControlPlane.Spec.Networking.APIServer.Port).To(Equal(test.expectedAPIPort))
@@ -1346,7 +1213,7 @@ func TestValidateReleaseImage(t *testing.T) {
},
Status: hyperv1.HostedClusterStatus{
Version: &hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{
+ Desired: configv1.Release{
Image: "image-4.11.0",
},
},
@@ -1378,7 +1245,7 @@ func TestValidateReleaseImage(t *testing.T) {
},
Status: hyperv1.HostedClusterStatus{
Version: &hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{
+ Desired: configv1.Release{
Image: "image-4.10.0",
},
},
@@ -1410,7 +1277,7 @@ func TestValidateReleaseImage(t *testing.T) {
},
Status: hyperv1.HostedClusterStatus{
Version: &hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{
+ Desired: configv1.Release{
Image: "image-4.10.0",
},
},
@@ -1467,7 +1334,7 @@ func TestValidateReleaseImage(t *testing.T) {
},
Status: hyperv1.HostedClusterStatus{
Version: &hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{
+ Desired: configv1.Release{
Image: "image-4.10.0",
},
},
@@ -1499,7 +1366,7 @@ func TestValidateReleaseImage(t *testing.T) {
},
Status: hyperv1.HostedClusterStatus{
Version: &hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{
+ Desired: configv1.Release{
Image: "image-4.10.1",
},
},
@@ -1531,7 +1398,7 @@ func TestValidateReleaseImage(t *testing.T) {
},
Status: hyperv1.HostedClusterStatus{
Version: &hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{
+ Desired: configv1.Release{
Image: "image-4.11.0",
},
},
@@ -1693,72 +1560,6 @@ func TestDefaultClusterIDsIfNeeded(t *testing.T) {
}
}
-func TestConfigurationFieldsToRawExtensions(t *testing.T) {
- config := &hyperv1.ClusterConfiguration{
- Ingress: &configv1.IngressSpec{Domain: "example.com"},
- Proxy: &configv1.ProxySpec{HTTPProxy: "http://10.0.136.57:3128", HTTPSProxy: "http://10.0.136.57:3128"},
- }
- result, err := configurationFieldsToRawExtensions(config)
- if err != nil {
- t.Fatalf("configurationFieldsToRawExtensions: %v", err)
- }
-
- // Check that serialized resources do not contain a status section
- for i, rawExt := range result {
- unstructuredObj := &unstructured.Unstructured{}
- _, _, err := unstructured.UnstructuredJSONScheme.Decode(rawExt.Raw, nil, unstructuredObj)
- if err != nil {
- t.Fatalf("unexpected decode error: %v", err)
- }
- _, exists, err := unstructured.NestedFieldNoCopy(unstructuredObj.Object, "status")
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if exists {
- t.Errorf("status field exists for resource %d", i)
- }
- }
-
- serialized, err := json.Marshal(result)
- if err != nil {
- t.Fatalf("json.Marshal: %v", err)
- }
-
- var roundtripped []runtime.RawExtension
- if err := json.Unmarshal(serialized, &roundtripped); err != nil {
- t.Fatalf("json.Unmarshal: %v", err)
- }
-
- // CreateOrUpdate does a naive DeepEqual which can not deal with custom unmarshallers, so make
- // sure the output matches a roundtripped result.
- if diff := cmp.Diff(result, roundtripped); diff != "" {
- t.Errorf("output does not match a json-roundtripped version: %s", diff)
- }
-
- var ingress configv1.Ingress
- if err := json.Unmarshal(result[0].Raw, &ingress); err != nil {
- t.Fatalf("failed to unmarshal raw data: %v", err)
- }
- if ingress.APIVersion == "" || ingress.Kind == "" {
- t.Errorf("rawObject has no apiVersion or kind set: %+v", ingress.ObjectMeta)
- }
- if ingress.Spec.Domain != "example.com" {
- t.Errorf("ingress does not have expected domain: %q", ingress.Spec.Domain)
- }
-
- var proxy configv1.Proxy
- if err := json.Unmarshal(result[1].Raw, &proxy); err != nil {
- t.Fatalf("failed to unmarshal raw data: %v", err)
- }
- if proxy.APIVersion == "" || proxy.Kind == "" {
- t.Errorf("rawObject has no apiVersion or kind set: %+v", proxy.ObjectMeta)
- }
- if proxy.Spec.HTTPProxy != "http://10.0.136.57:3128" {
- t.Errorf("proxy does not have expected HTTPProxy: %q", proxy.Spec.HTTPProxy)
- }
-
-}
-
func TestIsUpgradeable(t *testing.T) {
releaseImageFrom := "image:1.2"
releaseImageTo := "image:1.3"
@@ -1793,7 +1594,7 @@ func TestIsUpgradeable(t *testing.T) {
},
Status: hyperv1.HostedClusterStatus{
Version: &hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{
+ Desired: configv1.Release{
Image: releaseImageFrom,
},
},
@@ -1812,7 +1613,7 @@ func TestIsUpgradeable(t *testing.T) {
},
Status: hyperv1.HostedClusterStatus{
Version: &hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{
+ Desired: configv1.Release{
Image: releaseImageFrom,
},
},
@@ -1842,7 +1643,7 @@ func TestIsUpgradeable(t *testing.T) {
},
Status: hyperv1.HostedClusterStatus{
Version: &hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{
+ Desired: configv1.Release{
Image: releaseImageFrom,
},
},
@@ -1872,7 +1673,7 @@ func TestIsUpgradeable(t *testing.T) {
},
Status: hyperv1.HostedClusterStatus{
Version: &hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{
+ Desired: configv1.Release{
Image: releaseImageFrom,
},
},
@@ -1902,367 +1703,6 @@ func TestIsUpgradeable(t *testing.T) {
}
}
-func TestReconcileDeprecatedAWSRoles(t *testing.T) {
- testNamespace := "test"
-
- // Emulate user input secrets pre-created by the CLI.
- kubeCloudControllerARN := "kubeCloudControllerARN"
- kubeCloudControllerSecretName := "kubeCloudControllerCreds"
- kubeCloudControllerSecret := &corev1.Secret{
- TypeMeta: metav1.TypeMeta{
- Kind: "Secret",
- APIVersion: corev1.SchemeGroupVersion.String(),
- },
- ObjectMeta: metav1.ObjectMeta{
- Namespace: testNamespace,
- Name: kubeCloudControllerSecretName,
- },
- Data: map[string][]byte{
- "credentials": []byte(fmt.Sprintf(`[default]
-role_arn = %s
-web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token
-`, kubeCloudControllerARN)),
- },
- }
-
- nodePoolManagementARN := "nodePoolManagementARN"
- nodePoolManagementSecretName := "nodePoolManagementCreds"
- nodePoolManagementSecret := &corev1.Secret{
- TypeMeta: metav1.TypeMeta{
- Kind: "Secret",
- APIVersion: corev1.SchemeGroupVersion.String(),
- },
- ObjectMeta: metav1.ObjectMeta{
- Namespace: testNamespace,
- Name: nodePoolManagementSecretName,
- },
- Data: map[string][]byte{
- "credentials": []byte(fmt.Sprintf(`[default]
-role_arn = %s
-web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token
-`, nodePoolManagementARN)),
- },
- }
-
- controlPlaneOperatorARN := "controlPlaneOperatorARN"
- controlPlaneOperatorSecretName := "controlPlaneOperatorCreds"
- controlPlaneOperatorSecret := &corev1.Secret{
- TypeMeta: metav1.TypeMeta{
- Kind: "Secret",
- APIVersion: corev1.SchemeGroupVersion.String(),
- },
- ObjectMeta: metav1.ObjectMeta{
- Namespace: testNamespace,
- Name: controlPlaneOperatorSecretName,
- },
- Data: map[string][]byte{
- "credentials": []byte(fmt.Sprintf(`[default]
-role_arn = %s
-web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token
-`, controlPlaneOperatorARN)),
- },
- }
-
- // Emulate user input.
- ingressARN := "ingressARN"
- imageRegistryARN := "registryARN"
- storageARN := "ebsARN"
- networkARN := "networkARN"
-
- hc := &hyperv1.HostedCluster{
- ObjectMeta: metav1.ObjectMeta{
- Name: "",
- Namespace: testNamespace,
- },
- Spec: hyperv1.HostedClusterSpec{
- Platform: hyperv1.PlatformSpec{
- Type: hyperv1.AWSPlatform,
- AWS: &hyperv1.AWSPlatformSpec{
- RolesRef: hyperv1.AWSRolesRef{
- IngressARN: "",
- ImageRegistryARN: "",
- StorageARN: "",
- NetworkARN: "",
- KubeCloudControllerARN: "",
- NodePoolManagementARN: "",
- ControlPlaneOperatorARN: "",
- },
- Roles: []hyperv1.AWSRoleCredentials{
- {
- ARN: ingressARN,
- Namespace: "openshift-ingress-operator",
- Name: "cloud-credentials",
- },
- {
- ARN: imageRegistryARN,
- Namespace: "openshift-image-registry",
- Name: "installer-cloud-credentials",
- },
- {
- ARN: storageARN,
- Namespace: "openshift-cluster-csi-drivers",
- Name: "ebs-cloud-credentials",
- },
- {
- ARN: networkARN,
- Namespace: "openshift-cloud-network-config-controller",
- Name: "cloud-credentials",
- },
- },
- KubeCloudControllerCreds: corev1.LocalObjectReference{Name: kubeCloudControllerSecretName},
- NodePoolManagementCreds: corev1.LocalObjectReference{Name: nodePoolManagementSecretName},
- ControlPlaneOperatorCreds: corev1.LocalObjectReference{Name: controlPlaneOperatorSecretName},
- },
- },
- },
- Status: hyperv1.HostedClusterStatus{},
- }
-
- // Expect old fields to be migrated.
- expectedAWSPlatformSpec := &hyperv1.AWSPlatformSpec{
- Region: "",
- CloudProviderConfig: nil,
- ServiceEndpoints: nil,
- RolesRef: hyperv1.AWSRolesRef{
- IngressARN: ingressARN,
- ImageRegistryARN: imageRegistryARN,
- StorageARN: storageARN,
- NetworkARN: networkARN,
- KubeCloudControllerARN: kubeCloudControllerARN,
- NodePoolManagementARN: nodePoolManagementARN,
- ControlPlaneOperatorARN: controlPlaneOperatorARN,
- },
- Roles: nil,
- KubeCloudControllerCreds: corev1.LocalObjectReference{},
- NodePoolManagementCreds: corev1.LocalObjectReference{},
- ControlPlaneOperatorCreds: corev1.LocalObjectReference{},
- ResourceTags: nil,
- EndpointAccess: "",
- }
-
- client := fake.NewClientBuilder().WithScheme(api.Scheme).WithObjects(controlPlaneOperatorSecret, nodePoolManagementSecret, kubeCloudControllerSecret).Build()
- r := &HostedClusterReconciler{Client: client}
-
- g := NewGomegaWithT(t)
- err := r.reconcileDeprecatedAWSRoles(context.Background(), hc)
- g.Expect(err).ToNot(HaveOccurred())
- g.Expect(hc.Spec.Platform.AWS).To(BeEquivalentTo(expectedAWSPlatformSpec))
-}
-
-func TestEnsureHCPAWSRolesBackwardCompatibility(t *testing.T) {
- ingressARN := "ingressARN"
- imageRegistryARN := "imageRegistryARN"
- storageARN := "storageARN"
- networkARN := "networkARN"
-
- hc := &hyperv1.HostedCluster{
- ObjectMeta: metav1.ObjectMeta{
- Name: "",
- Namespace: "",
- },
- Spec: hyperv1.HostedClusterSpec{
- Platform: hyperv1.PlatformSpec{
- Type: hyperv1.AWSPlatform,
- AWS: &hyperv1.AWSPlatformSpec{
- RolesRef: hyperv1.AWSRolesRef{
- IngressARN: ingressARN,
- ImageRegistryARN: imageRegistryARN,
- StorageARN: storageARN,
- NetworkARN: networkARN,
- KubeCloudControllerARN: "anything",
- NodePoolManagementARN: "anything",
- ControlPlaneOperatorARN: "anything",
- },
- },
- },
- },
- }
-
- expectedAWSPlatformSpec := &hyperv1.AWSPlatformSpec{
- Region: "",
- CloudProviderConfig: nil,
- ServiceEndpoints: nil,
- RolesRef: hyperv1.AWSRolesRef{
- IngressARN: ingressARN,
- ImageRegistryARN: imageRegistryARN,
- StorageARN: storageARN,
- NetworkARN: networkARN,
- KubeCloudControllerARN: "anything",
- NodePoolManagementARN: "anything",
- ControlPlaneOperatorARN: "anything",
- },
- Roles: []hyperv1.AWSRoleCredentials{
- {
- ARN: ingressARN,
- Namespace: "openshift-ingress-operator",
- Name: "cloud-credentials",
- },
- {
- ARN: imageRegistryARN,
- Namespace: "openshift-image-registry",
- Name: "installer-cloud-credentials",
- },
- {
- ARN: storageARN,
- Namespace: "openshift-cluster-csi-drivers",
- Name: "ebs-cloud-credentials",
- },
- {
- ARN: networkARN,
- Namespace: "openshift-cloud-network-config-controller",
- Name: "cloud-credentials",
- },
- },
- KubeCloudControllerCreds: corev1.LocalObjectReference{Name: platformaws.KubeCloudControllerCredsSecret("").Name},
- NodePoolManagementCreds: corev1.LocalObjectReference{},
- ControlPlaneOperatorCreds: corev1.LocalObjectReference{},
- ResourceTags: nil,
- EndpointAccess: "",
- }
-
- g := NewGomegaWithT(t)
- hcp := &hyperv1.HostedControlPlane{
- Spec: hyperv1.HostedControlPlaneSpec{
- Platform: hyperv1.PlatformSpec{
- Type: hyperv1.AWSPlatform,
- AWS: &hyperv1.AWSPlatformSpec{
- Region: "",
- CloudProviderConfig: nil,
- ServiceEndpoints: nil,
- RolesRef: hyperv1.AWSRolesRef{},
- Roles: nil,
- KubeCloudControllerCreds: corev1.LocalObjectReference{},
- NodePoolManagementCreds: corev1.LocalObjectReference{},
- ControlPlaneOperatorCreds: corev1.LocalObjectReference{},
- ResourceTags: nil,
- EndpointAccess: "",
- },
- },
- },
- }
- ensureHCPAWSRolesBackwardCompatibility(hc, hcp)
- g.Expect(hcp.Spec.Platform.AWS).To(BeEquivalentTo(expectedAWSPlatformSpec))
-}
-
-func TestReconcileDeprecatedGlobalConfig(t *testing.T) {
- hc := &hyperv1.HostedCluster{}
- hc.Name = "fake-name"
- hc.Namespace = "fake-namespace"
-
- apiServer := &configv1.APIServer{
- TypeMeta: metav1.TypeMeta{
- APIVersion: configv1.SchemeGroupVersion.String(),
- Kind: "APIServer",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "cluster",
- },
- Spec: configv1.APIServerSpec{
- Audit: configv1.Audit{
- // Populate kubebuilder default for comparison
- // https://github.com/openshift/api/blob/f120778bee805ad1a7a4f05a6430332cf5811813/config/v1/types_apiserver.go#L57
- Profile: configv1.DefaultAuditProfileType,
- },
- ClientCA: configv1.ConfigMapNameReference{
- Name: "fake-ca",
- },
- },
- }
-
- jsonSerializer := serializerjson.NewSerializerWithOptions(
- serializerjson.DefaultMetaFactory, hyperapi.Scheme, hyperapi.Scheme,
- serializerjson.SerializerOptions{Yaml: false, Pretty: true, Strict: false},
- )
-
- serializedAPIServer := &bytes.Buffer{}
- err := jsonSerializer.Encode(apiServer, serializedAPIServer)
- if err != nil {
- t.Fatalf("failed to serialize apiserver: %v", err)
- }
-
- hc.Spec.Configuration = &hyperv1.ClusterConfiguration{
- Items: []runtime.RawExtension{
- {
- Raw: serializedAPIServer.Bytes(),
- },
- },
- ConfigMapRefs: []corev1.LocalObjectReference{
- {
- Name: "fake-ca",
- },
- },
- SecretRefs: []corev1.LocalObjectReference{
- {
- Name: "fake-creds",
- },
- },
- }
-
- fakeClient := fake.NewClientBuilder().
- WithScheme(hyperapi.Scheme).
- WithObjects(hc).
- Build()
- reconciler := &HostedClusterReconciler{
- Client: fakeClient,
- }
-
- originalSpec := hc.Spec.DeepCopy()
- if err := reconciler.reconcileDeprecatedGlobalConfig(context.Background(), hc); err != nil {
- t.Fatalf("unexpected reconcile error: %v", err)
- }
-
- // Update fields if required.
- if !equality.Semantic.DeepEqual(&hc.Spec, originalSpec) {
- err := reconciler.Client.Update(context.Background(), hc)
- if err != nil {
- t.Fatalf("unexpected update error: %v", err)
- }
- }
-
- updatedHc := &hyperv1.HostedCluster{}
- if err := fakeClient.Get(context.Background(), crclient.ObjectKeyFromObject(hc), updatedHc); err != nil {
- t.Fatalf("unexpected get error: %v", err)
- }
- if updatedHc.Spec.Configuration == nil {
- t.Fatalf("unexpected nil configuration")
- }
- if len(updatedHc.Spec.Configuration.Items) == 0 {
- t.Errorf("empty deprecated configuration")
- }
- if len(updatedHc.Spec.Configuration.ConfigMapRefs) == 0 {
- t.Errorf("empty configmap refs")
- }
- if len(updatedHc.Spec.Configuration.SecretRefs) == 0 {
- t.Errorf("emtpy secret refs")
- }
- if !equality.Semantic.DeepEqual(&apiServer.Spec, updatedHc.Spec.Configuration.APIServer) {
- t.Errorf("unexpected apiserver spec: %#v", updatedHc.Spec.Configuration.APIServer)
- }
-
- // Update deprecated field, remove test when field is unsupported
- apiServer.Spec.ClientCA.Name = "updated-ca"
- serializedAPIServer.Reset()
- err = jsonSerializer.Encode(apiServer, serializedAPIServer)
- if err != nil {
- t.Fatalf("failed to serialize apiserver: %v", err)
- }
- updatedHc.Spec.Configuration.Items = []runtime.RawExtension{{Raw: serializedAPIServer.Bytes()}}
- if err := reconciler.reconcileDeprecatedGlobalConfig(context.Background(), updatedHc); err != nil {
- t.Fatalf("unexpected reconcile error: %v", err)
- }
- err = reconciler.Client.Update(context.Background(), updatedHc)
- if err != nil {
- t.Fatalf("unexpected update error: %v", err)
- }
- updatedHcAgain := &hyperv1.HostedCluster{}
- if err := fakeClient.Get(context.Background(), crclient.ObjectKeyFromObject(updatedHc), updatedHcAgain); err != nil {
- t.Fatalf("unexpected get error: %v", err)
- }
- if !equality.Semantic.DeepEqual(&apiServer.Spec, updatedHcAgain.Spec.Configuration.APIServer) {
- t.Errorf("unexpected apiserver spec on update: %#v", updatedHcAgain.Spec.Configuration.APIServer)
- }
-}
-
func TestReconciliationSuccessConditionSetting(t *testing.T) {
// Serialization seems to round to seconds, so we have to do the
@@ -2428,7 +1868,7 @@ func TestIsProgressing(t *testing.T) {
},
Status: hyperv1.HostedClusterStatus{
Version: &hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{
+ Desired: configv1.Release{
Image: "release-1.2",
},
},
@@ -2459,7 +1899,7 @@ func TestIsProgressing(t *testing.T) {
},
Status: hyperv1.HostedClusterStatus{
Version: &hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{
+ Desired: configv1.Release{
Image: "release-1.2",
},
},
@@ -2478,7 +1918,7 @@ func TestIsProgressing(t *testing.T) {
},
Status: hyperv1.HostedClusterStatus{
Version: &hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{
+ Desired: configv1.Release{
Image: "release-1.2",
},
},
@@ -2503,7 +1943,7 @@ func TestIsProgressing(t *testing.T) {
},
Status: hyperv1.HostedClusterStatus{
Version: &hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{
+ Desired: configv1.Release{
Image: "release-1.2",
},
},
@@ -2533,7 +1973,7 @@ func TestIsProgressing(t *testing.T) {
},
Status: hyperv1.HostedClusterStatus{
Version: &hyperv1.ClusterVersionStatus{
- Desired: hyperv1.Release{
+ Desired: configv1.Release{
Image: "release-1.2",
},
},
diff --git a/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook.go b/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook.go
index bb4091ce0a2..33b810360de 100644
--- a/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook.go
+++ b/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook.go
@@ -7,9 +7,7 @@ import (
"reflect"
"strings"
- corev1 "k8s.io/api/core/v1"
-
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
@@ -23,10 +21,33 @@ type Webhook struct{}
// SetupWebhookWithManager sets up HostedCluster webhooks.
func SetupWebhookWithManager(mgr ctrl.Manager) error {
- return ctrl.NewWebhookManagedBy(mgr).
+ err := ctrl.NewWebhookManagedBy(mgr).
For(&hyperv1.HostedCluster{}).
WithValidator(&Webhook{}).
Complete()
+ if err != nil {
+ return fmt.Errorf("unable to register hostedcluster webhook: %w", err)
+ }
+ err = ctrl.NewWebhookManagedBy(mgr).
+ For(&hyperv1.NodePool{}).
+ Complete()
+ if err != nil {
+ return fmt.Errorf("unable to register nodepool webhook: %w", err)
+ }
+ err = ctrl.NewWebhookManagedBy(mgr).
+ For(&hyperv1.HostedControlPlane{}).
+ Complete()
+ if err != nil {
+ return fmt.Errorf("unable to register hostedcontrolplane webhook: %w", err)
+ }
+ err = ctrl.NewWebhookManagedBy(mgr).
+ For(&hyperv1.AWSEndpointService{}).
+ Complete()
+ if err != nil {
+ return fmt.Errorf("unable to register awsendpointservice webhook: %w", err)
+ }
+ return nil
+
}
var _ webhook.CustomValidator = &Webhook{}
@@ -76,10 +97,6 @@ func filterMutableHostedClusterSpecFields(spec *hyperv1.HostedClusterSpec) {
spec.Platform.AWS.ResourceTags = nil
// This is to enable reconcileDeprecatedAWSRoles.
spec.Platform.AWS.RolesRef = hyperv1.AWSRolesRef{}
- spec.Platform.AWS.Roles = []hyperv1.AWSRoleCredentials{}
- spec.Platform.AWS.NodePoolManagementCreds = corev1.LocalObjectReference{}
- spec.Platform.AWS.ControlPlaneOperatorCreds = corev1.LocalObjectReference{}
- spec.Platform.AWS.KubeCloudControllerCreds = corev1.LocalObjectReference{}
}
// This is to enable reconcileDeprecatedNetworkSettings
diff --git a/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook_test.go b/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook_test.go
index 60bc1e7926e..2ffe48af92a 100644
--- a/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook_test.go
+++ b/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook_test.go
@@ -8,7 +8,7 @@ import (
"github.com/go-logr/zapr"
configv1 "github.com/openshift/api/config/v1"
hyperapi "github.com/openshift/hypershift/api"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
fakecapabilities "github.com/openshift/hypershift/support/capabilities/fake"
fakereleaseprovider "github.com/openshift/hypershift/support/releaseinfo/fake"
"github.com/openshift/hypershift/support/thirdparty/library-go/pkg/image/dockerv1client"
@@ -198,20 +198,6 @@ func TestValidateHostedClusterUpdate(t *testing.T) {
Type: hyperv1.AWSPlatform,
AWS: &hyperv1.AWSPlatformSpec{
RolesRef: hyperv1.AWSRolesRef{},
- Roles: []hyperv1.AWSRoleCredentials{
- {
- ARN: "test",
- Namespace: "test",
- Name: "test",
- },
- {
- ARN: "test",
- Namespace: "test",
- Name: "test",
- }},
- KubeCloudControllerCreds: corev1.LocalObjectReference{Name: "test"},
- NodePoolManagementCreds: corev1.LocalObjectReference{Name: "test"},
- ControlPlaneOperatorCreds: corev1.LocalObjectReference{Name: "test"},
},
},
},
@@ -230,10 +216,6 @@ func TestValidateHostedClusterUpdate(t *testing.T) {
NodePoolManagementARN: "test",
ControlPlaneOperatorARN: "test",
},
- Roles: nil,
- KubeCloudControllerCreds: corev1.LocalObjectReference{},
- NodePoolManagementCreds: corev1.LocalObjectReference{},
- ControlPlaneOperatorCreds: corev1.LocalObjectReference{},
},
},
},
diff --git a/hypershift-operator/controllers/hostedcluster/internal/platform/agent/agent.go b/hypershift-operator/controllers/hostedcluster/internal/platform/agent/agent.go
index 0468f5e43c6..acbd2b6c6d7 100644
--- a/hypershift-operator/controllers/hostedcluster/internal/platform/agent/agent.go
+++ b/hypershift-operator/controllers/hostedcluster/internal/platform/agent/agent.go
@@ -6,7 +6,7 @@ import (
"os"
agentv1 "github.com/openshift/cluster-api-provider-agent/api/v1alpha1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/hypershift-operator/controllers/manifests/ignitionserver"
hyperutil "github.com/openshift/hypershift/hypershift-operator/controllers/util"
"github.com/openshift/hypershift/support/images"
diff --git a/hypershift-operator/controllers/hostedcluster/internal/platform/agent/agent_test.go b/hypershift-operator/controllers/hostedcluster/internal/platform/agent/agent_test.go
index c4baf021891..b3352c5c424 100644
--- a/hypershift-operator/controllers/hostedcluster/internal/platform/agent/agent_test.go
+++ b/hypershift-operator/controllers/hostedcluster/internal/platform/agent/agent_test.go
@@ -9,7 +9,7 @@ import (
. "github.com/onsi/gomega"
agentv1 "github.com/openshift/cluster-api-provider-agent/api/v1alpha1"
hyperapi "github.com/openshift/hypershift/api"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/hypershift-operator/controllers/manifests"
"github.com/openshift/hypershift/hypershift-operator/controllers/manifests/ignitionserver"
"github.com/openshift/hypershift/support/upsert"
diff --git a/hypershift-operator/controllers/hostedcluster/internal/platform/aws/aws.go b/hypershift-operator/controllers/hostedcluster/internal/platform/aws/aws.go
index 496149773e8..68c11e0335b 100644
--- a/hypershift-operator/controllers/hostedcluster/internal/platform/aws/aws.go
+++ b/hypershift-operator/controllers/hostedcluster/internal/platform/aws/aws.go
@@ -7,7 +7,7 @@ import (
utilerrors "k8s.io/apimachinery/pkg/util/errors"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/kas"
"github.com/openshift/hypershift/support/images"
"github.com/openshift/hypershift/support/upsert"
diff --git a/hypershift-operator/controllers/hostedcluster/internal/platform/aws/aws_test.go b/hypershift-operator/controllers/hostedcluster/internal/platform/aws/aws_test.go
index 334cf874954..1fc99055697 100644
--- a/hypershift-operator/controllers/hostedcluster/internal/platform/aws/aws_test.go
+++ b/hypershift-operator/controllers/hostedcluster/internal/platform/aws/aws_test.go
@@ -4,7 +4,7 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
capiawsv1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
)
diff --git a/hypershift-operator/controllers/hostedcluster/internal/platform/azure/azure.go b/hypershift-operator/controllers/hostedcluster/internal/platform/azure/azure.go
index bb2e1d104f0..9ed6aaf966f 100644
--- a/hypershift-operator/controllers/hostedcluster/internal/platform/azure/azure.go
+++ b/hypershift-operator/controllers/hostedcluster/internal/platform/azure/azure.go
@@ -5,7 +5,7 @@ import (
"fmt"
"os"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/images"
"github.com/openshift/hypershift/support/upsert"
appsv1 "k8s.io/api/apps/v1"
diff --git a/hypershift-operator/controllers/hostedcluster/internal/platform/ibmcloud/ibmcloud.go b/hypershift-operator/controllers/hostedcluster/internal/platform/ibmcloud/ibmcloud.go
index 1e69ec79046..6a6dd7eeaa2 100644
--- a/hypershift-operator/controllers/hostedcluster/internal/platform/ibmcloud/ibmcloud.go
+++ b/hypershift-operator/controllers/hostedcluster/internal/platform/ibmcloud/ibmcloud.go
@@ -8,7 +8,7 @@ import (
corev1 "k8s.io/api/core/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/upsert"
appsv1 "k8s.io/api/apps/v1"
rbacv1 "k8s.io/api/rbac/v1"
diff --git a/hypershift-operator/controllers/hostedcluster/internal/platform/ibmcloud/ibmcloud_test.go b/hypershift-operator/controllers/hostedcluster/internal/platform/ibmcloud/ibmcloud_test.go
index 36fceef701e..582ff40bfba 100644
--- a/hypershift-operator/controllers/hostedcluster/internal/platform/ibmcloud/ibmcloud_test.go
+++ b/hypershift-operator/controllers/hostedcluster/internal/platform/ibmcloud/ibmcloud_test.go
@@ -2,18 +2,19 @@ package ibmcloud
import (
"context"
+ "testing"
+
"github.com/google/go-cmp/cmp"
. "github.com/onsi/gomega"
v1 "github.com/openshift/api/config/v1"
"github.com/openshift/hypershift/api"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
capiibmv1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta1"
capiv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
- "testing"
)
func TestReconcileCAPIInfraCR(t *testing.T) {
diff --git a/hypershift-operator/controllers/hostedcluster/internal/platform/kubevirt/kubevirt.go b/hypershift-operator/controllers/hostedcluster/internal/platform/kubevirt/kubevirt.go
index eae5730ce45..89fc37ae759 100644
--- a/hypershift-operator/controllers/hostedcluster/internal/platform/kubevirt/kubevirt.go
+++ b/hypershift-operator/controllers/hostedcluster/internal/platform/kubevirt/kubevirt.go
@@ -4,7 +4,7 @@ import (
"context"
"os"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/images"
"github.com/openshift/hypershift/support/upsert"
appsv1 "k8s.io/api/apps/v1"
diff --git a/hypershift-operator/controllers/hostedcluster/internal/platform/kubevirt/kubevirt_test.go b/hypershift-operator/controllers/hostedcluster/internal/platform/kubevirt/kubevirt_test.go
index 5fc4cae1af6..f34c496b52c 100644
--- a/hypershift-operator/controllers/hostedcluster/internal/platform/kubevirt/kubevirt_test.go
+++ b/hypershift-operator/controllers/hostedcluster/internal/platform/kubevirt/kubevirt_test.go
@@ -6,7 +6,7 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
capikubevirt "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1"
diff --git a/hypershift-operator/controllers/hostedcluster/internal/platform/none/none.go b/hypershift-operator/controllers/hostedcluster/internal/platform/none/none.go
index 093ba0bc4ee..7aadab82601 100644
--- a/hypershift-operator/controllers/hostedcluster/internal/platform/none/none.go
+++ b/hypershift-operator/controllers/hostedcluster/internal/platform/none/none.go
@@ -3,7 +3,7 @@ package none
import (
"context"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/upsert"
appsv1 "k8s.io/api/apps/v1"
rbacv1 "k8s.io/api/rbac/v1"
diff --git a/hypershift-operator/controllers/hostedcluster/internal/platform/platform.go b/hypershift-operator/controllers/hostedcluster/internal/platform/platform.go
index 23fd7e750d0..e3f3d6b097a 100644
--- a/hypershift-operator/controllers/hostedcluster/internal/platform/platform.go
+++ b/hypershift-operator/controllers/hostedcluster/internal/platform/platform.go
@@ -4,7 +4,7 @@ import (
"context"
"fmt"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/hypershift-operator/controllers/hostedcluster/internal/platform/agent"
"github.com/openshift/hypershift/hypershift-operator/controllers/hostedcluster/internal/platform/aws"
"github.com/openshift/hypershift/hypershift-operator/controllers/hostedcluster/internal/platform/azure"
diff --git a/hypershift-operator/controllers/hostedcluster/internal/platform/powervs/powervs.go b/hypershift-operator/controllers/hostedcluster/internal/platform/powervs/powervs.go
index ac049511210..0fa9f73fff9 100644
--- a/hypershift-operator/controllers/hostedcluster/internal/platform/powervs/powervs.go
+++ b/hypershift-operator/controllers/hostedcluster/internal/platform/powervs/powervs.go
@@ -15,7 +15,7 @@ import (
capiv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/upsert"
)
@@ -241,35 +241,6 @@ func (p PowerVS) ReconcileCredentials(ctx context.Context, c client.Client, crea
return fmt.Errorf("failed to reconcile node pool provider creds: %w", err)
}
- // Reconcile the platform provider node pool management credentials secret by
- // resolving the reference from the HostedCluster and syncing the secret in
- // the control plane namespace.
- err = c.Get(ctx, client.ObjectKey{Namespace: hcluster.GetNamespace(), Name: hcluster.Spec.Platform.PowerVS.ControlPlaneOperatorCreds.Name}, &src)
- if err != nil {
- return fmt.Errorf("failed to get control plane operator provider creds %s: %w", hcluster.Spec.Platform.PowerVS.ControlPlaneOperatorCreds.Name, err)
- }
- dest = &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: controlPlaneNamespace,
- Name: src.Name,
- },
- }
- _, err = createOrUpdate(ctx, c, dest, func() error {
- srcData, srcHasData := src.Data["ibm-credentials.env"]
- if !srcHasData {
- return fmt.Errorf("control plane operator provider credentials secret %q is missing credentials key", src.Name)
- }
- dest.Type = corev1.SecretTypeOpaque
- if dest.Data == nil {
- dest.Data = map[string][]byte{}
- }
- dest.Data["ibm-credentials.env"] = srcData
- return nil
- })
- if err != nil {
- return fmt.Errorf("failed to reconcile control plane operator provider creds: %w", err)
- }
-
// Reconcile the platform provider ingress operator credentials secret by
// resolving the reference from the HostedCluster and syncing the secret in
// the control plane namespace.
diff --git a/hypershift-operator/controllers/manifests/controlplaneoperator/manifests.go b/hypershift-operator/controllers/manifests/controlplaneoperator/manifests.go
index 663489c20e7..e6605f51bdc 100644
--- a/hypershift-operator/controllers/manifests/controlplaneoperator/manifests.go
+++ b/hypershift-operator/controllers/manifests/controlplaneoperator/manifests.go
@@ -1,7 +1,7 @@
package controlplaneoperator
import (
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
prometheusoperatorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
diff --git a/hypershift-operator/controllers/nodepool/agent.go b/hypershift-operator/controllers/nodepool/agent.go
index 3a76161d1de..1592b1643e1 100644
--- a/hypershift-operator/controllers/nodepool/agent.go
+++ b/hypershift-operator/controllers/nodepool/agent.go
@@ -2,7 +2,7 @@ package nodepool
import (
agentv1 "github.com/openshift/cluster-api-provider-agent/api/v1alpha1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
)
func agentMachineTemplateSpec(nodePool *hyperv1.NodePool) *agentv1.AgentMachineTemplateSpec {
diff --git a/hypershift-operator/controllers/nodepool/aws.go b/hypershift-operator/controllers/nodepool/aws.go
index e257d8b5ea1..6cc55debccd 100644
--- a/hypershift-operator/controllers/nodepool/aws.go
+++ b/hypershift-operator/controllers/nodepool/aws.go
@@ -3,7 +3,7 @@ package nodepool
import (
"fmt"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
k8sutilspointer "k8s.io/utils/pointer"
capiaws "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
)
diff --git a/hypershift-operator/controllers/nodepool/aws_test.go b/hypershift-operator/controllers/nodepool/aws_test.go
index ab09b0539a0..87be9af79de 100644
--- a/hypershift-operator/controllers/nodepool/aws_test.go
+++ b/hypershift-operator/controllers/nodepool/aws_test.go
@@ -4,7 +4,7 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"k8s.io/apimachinery/pkg/api/equality"
k8sutilspointer "k8s.io/utils/pointer"
capiaws "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
diff --git a/hypershift-operator/controllers/nodepool/azure.go b/hypershift-operator/controllers/nodepool/azure.go
index d1d481f1fb2..7c324260724 100644
--- a/hypershift-operator/controllers/nodepool/azure.go
+++ b/hypershift-operator/controllers/nodepool/azure.go
@@ -10,7 +10,7 @@ import (
utilpointer "k8s.io/utils/pointer"
capiazure "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
)
func azureMachineTemplateSpec(hcluster *hyperv1.HostedCluster, nodePool *hyperv1.NodePool, existing capiazure.AzureMachineTemplateSpec) (*capiazure.AzureMachineTemplateSpec, error) {
diff --git a/hypershift-operator/controllers/nodepool/azure_test.go b/hypershift-operator/controllers/nodepool/azure_test.go
index fd0d86cccbc..7807b9cfdad 100644
--- a/hypershift-operator/controllers/nodepool/azure_test.go
+++ b/hypershift-operator/controllers/nodepool/azure_test.go
@@ -3,7 +3,7 @@ package nodepool
import (
"testing"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
)
func TestBootImage(t *testing.T) {
diff --git a/hypershift-operator/controllers/nodepool/conditions.go b/hypershift-operator/controllers/nodepool/conditions.go
index 0b8240ae835..2a9ceeff72c 100644
--- a/hypershift-operator/controllers/nodepool/conditions.go
+++ b/hypershift-operator/controllers/nodepool/conditions.go
@@ -3,7 +3,7 @@ package nodepool
import (
"time"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -11,9 +11,9 @@ import (
// setStatusCondition sets the corresponding condition in conditions to newCondition.
// conditions must be non-nil.
-// 1. if the condition of the specified type already exists (all fields of the existing condition are updated to
-// newCondition, LastTransitionTime is set to now if the new status differs from the old status)
-// 2. if a condition of the specified type does not exist (LastTransitionTime is set to now() if unset, and newCondition is appended)
+// 1. if the condition of the specified type already exists (all fields of the existing condition are updated to
+// newCondition, LastTransitionTime is set to now if the new status differs from the old status)
+// 2. if a condition of the specified type does not exist (LastTransitionTime is set to now() if unset, and newCondition is appended)
func setStatusCondition(conditions *[]hyperv1.NodePoolCondition, newCondition hyperv1.NodePoolCondition) {
if conditions == nil {
return
diff --git a/hypershift-operator/controllers/nodepool/haproxy.go b/hypershift-operator/controllers/nodepool/haproxy.go
index b394bec051d..2fca020c9bb 100644
--- a/hypershift-operator/controllers/nodepool/haproxy.go
+++ b/hypershift-operator/controllers/nodepool/haproxy.go
@@ -13,7 +13,7 @@ import (
"github.com/clarketm/json"
ignitionapi "github.com/coreos/ignition/v2/config/v3_2/types"
api "github.com/openshift/hypershift/api"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/ignition"
"github.com/openshift/hypershift/hypershift-operator/controllers/hostedcluster"
"github.com/openshift/hypershift/hypershift-operator/controllers/manifests"
diff --git a/hypershift-operator/controllers/nodepool/inplace.go b/hypershift-operator/controllers/nodepool/inplace.go
index 5446928e8c6..87e35d2dfda 100644
--- a/hypershift-operator/controllers/nodepool/inplace.go
+++ b/hypershift-operator/controllers/nodepool/inplace.go
@@ -5,7 +5,7 @@ import (
"strconv"
"github.com/openshift/hypershift/api"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
corev1 "k8s.io/api/core/v1"
k8sutilspointer "k8s.io/utils/pointer"
capiv1 "sigs.k8s.io/cluster-api/api/v1beta1"
diff --git a/hypershift-operator/controllers/nodepool/kubevirt.go b/hypershift-operator/controllers/nodepool/kubevirt.go
index 4cd841b0c6f..681464188be 100644
--- a/hypershift-operator/controllers/nodepool/kubevirt.go
+++ b/hypershift-operator/controllers/nodepool/kubevirt.go
@@ -4,7 +4,7 @@ import (
"fmt"
"strings"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/releaseinfo"
corev1 "k8s.io/api/core/v1"
apiresource "k8s.io/apimachinery/pkg/api/resource"
diff --git a/hypershift-operator/controllers/nodepool/kubevirt_test.go b/hypershift-operator/controllers/nodepool/kubevirt_test.go
index 9799330317b..437587b0bf6 100644
--- a/hypershift-operator/controllers/nodepool/kubevirt_test.go
+++ b/hypershift-operator/controllers/nodepool/kubevirt_test.go
@@ -5,7 +5,7 @@ import (
"github.com/google/go-cmp/cmp"
. "github.com/onsi/gomega"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
apiresource "k8s.io/apimachinery/pkg/api/resource"
@@ -107,7 +107,7 @@ func generateNodeTemplate(memory string, cpu uint32, image string, volumeSize st
Spec: kubevirtv1.VirtualMachineSpec{
RunStrategy: &runAlways,
DataVolumeTemplates: []kubevirtv1.DataVolumeTemplateSpec{
- kubevirtv1.DataVolumeTemplateSpec{
+ {
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: "rhcos",
diff --git a/hypershift-operator/controllers/nodepool/manifests.go b/hypershift-operator/controllers/nodepool/manifests.go
index f0b94e20f57..9ec911d9ad2 100644
--- a/hypershift-operator/controllers/nodepool/manifests.go
+++ b/hypershift-operator/controllers/nodepool/manifests.go
@@ -3,7 +3,7 @@ package nodepool
import (
"fmt"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
capiv1 "sigs.k8s.io/cluster-api/api/v1beta1"
diff --git a/hypershift-operator/controllers/nodepool/nodepool_controller.go b/hypershift-operator/controllers/nodepool/nodepool_controller.go
index 5a212f74534..3cfd748dffc 100644
--- a/hypershift-operator/controllers/nodepool/nodepool_controller.go
+++ b/hypershift-operator/controllers/nodepool/nodepool_controller.go
@@ -20,7 +20,7 @@ import (
"github.com/openshift/api/operator/v1alpha1"
agentv1 "github.com/openshift/cluster-api-provider-agent/api/v1alpha1"
api "github.com/openshift/hypershift/api"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/hypershift-operator/controllers/manifests"
"github.com/openshift/hypershift/hypershift-operator/controllers/manifests/ignitionserver"
hyperutil "github.com/openshift/hypershift/hypershift-operator/controllers/util"
@@ -195,13 +195,6 @@ func (r *NodePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.HostedCluster, nodePool *hyperv1.NodePool) (ctrl.Result, error) {
log := ctrl.LoggerFrom(ctx)
- // The nodeCount field got renamed, copy the old field for compatibility purposes if the
- // new one is unset.
- if nodePool.Spec.Replicas == nil {
- //lint:ignore SA1019 maintain backward compatibility
- nodePool.Spec.Replicas = nodePool.Spec.NodeCount
- }
-
// HostedCluster owns NodePools. This should ensure orphan NodePools are garbage collected when cascading deleting.
nodePool.OwnerReferences = util.EnsureOwnerRef(nodePool.OwnerReferences, metav1.OwnerReference{
APIVersion: hyperv1.GroupVersion.String(),
@@ -345,7 +338,7 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho
ObservedGeneration: nodePool.Generation,
})
- // Validate platform specific input.
+ // Validate AWS platform specific input.
var ami string
if nodePool.Spec.Platform.Type == hyperv1.AWSPlatform {
if hcluster.Spec.Platform.AWS == nil {
@@ -354,13 +347,13 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho
if nodePool.Spec.Platform.AWS.AMI != "" {
ami = nodePool.Spec.Platform.AWS.AMI
// User-defined AMIs cannot be validated
- removeStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolValidAMIConditionType)
+ removeStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolValidPlatformImageType)
} else {
// TODO: Should the region be included in the NodePool platform information?
ami, err = defaultNodePoolAMI(hcluster.Spec.Platform.AWS.Region, releaseImage)
if err != nil {
setStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{
- Type: hyperv1.NodePoolValidAMIConditionType,
+ Type: hyperv1.NodePoolValidPlatformImageType,
Status: corev1.ConditionFalse,
Reason: hyperv1.NodePoolValidationFailedConditionReason,
Message: fmt.Sprintf("Couldn't discover an AMI for release image %q: %s", nodePool.Spec.Release.Image, err.Error()),
@@ -369,7 +362,7 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho
return ctrl.Result{}, fmt.Errorf("couldn't discover an AMI for release image: %w", err)
}
setStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{
- Type: hyperv1.NodePoolValidAMIConditionType,
+ Type: hyperv1.NodePoolValidPlatformImageType,
Status: corev1.ConditionTrue,
Reason: hyperv1.NodePoolAsExpectedConditionReason,
Message: fmt.Sprintf("Bootstrap AMI is %q", ami),
@@ -386,7 +379,7 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho
coreOSPowerVSImage, powervsImageRegion, err = getPowerVSImage(hcluster.Spec.Platform.PowerVS.Region, releaseImage)
if err != nil {
setStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{
- Type: hyperv1.NodePoolValidPowerVSImageConditionType,
+ Type: hyperv1.NodePoolValidPlatformImageType,
Status: corev1.ConditionFalse,
Reason: hyperv1.NodePoolValidationFailedConditionReason,
Message: fmt.Sprintf("Couldn't discover an PowerVS Image for release image %q: %s", nodePool.Spec.Release.Image, err.Error()),
@@ -396,7 +389,7 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho
}
powervsBootImage = coreOSPowerVSImage.Release
setStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{
- Type: hyperv1.NodePoolValidPowerVSImageConditionType,
+ Type: hyperv1.NodePoolValidPlatformImageType,
Status: corev1.ConditionTrue,
Reason: hyperv1.NodePoolAsExpectedConditionReason,
Message: fmt.Sprintf("Bootstrap PowerVS Image is %q", powervsBootImage),
@@ -409,7 +402,7 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho
if nodePool.Spec.Platform.Type == hyperv1.KubevirtPlatform {
if err := kubevirtPlatformValidation(nodePool); err != nil {
setStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{
- Type: hyperv1.NodePoolValidKubevirtConfigConditionType,
+ Type: hyperv1.NodePoolValidMachineConfigConditionType,
Status: corev1.ConditionFalse,
Reason: hyperv1.NodePoolValidationFailedConditionReason,
Message: fmt.Sprintf("validation of NodePool KubeVirt platform failed: %s", err.Error()),
@@ -417,12 +410,12 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho
})
return ctrl.Result{}, fmt.Errorf("validation of NodePool KubeVirt platform failed: %w", err)
}
- removeStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolValidKubevirtConfigConditionType)
+ removeStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolValidMachineConfigConditionType)
kubevirtBootImage, err = getKubeVirtImage(nodePool, releaseImage)
if err != nil {
setStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{
- Type: hyperv1.NodePoolValidKubeVirtImageConditionType,
+ Type: hyperv1.NodePoolValidPlatformImageType,
Status: corev1.ConditionFalse,
Reason: hyperv1.NodePoolValidationFailedConditionReason,
Message: fmt.Sprintf("Couldn't discover an KubeVirt Image for release image %q: %s", nodePool.Spec.Release.Image, err.Error()),
@@ -431,7 +424,7 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho
return ctrl.Result{}, fmt.Errorf("couldn't discover an KubeVirt disk image in release payload image: %w", err)
}
setStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{
- Type: hyperv1.NodePoolValidKubeVirtImageConditionType,
+ Type: hyperv1.NodePoolValidPlatformImageType,
Status: corev1.ConditionTrue,
Reason: hyperv1.NodePoolAsExpectedConditionReason,
Message: fmt.Sprintf("Bootstrap KubeVirt Image is %q", kubevirtBootImage),
diff --git a/hypershift-operator/controllers/nodepool/nodepool_controller_test.go b/hypershift-operator/controllers/nodepool/nodepool_controller_test.go
index 9637b4f6a29..e005dc8a7c9 100644
--- a/hypershift-operator/controllers/nodepool/nodepool_controller_test.go
+++ b/hypershift-operator/controllers/nodepool/nodepool_controller_test.go
@@ -12,7 +12,7 @@ import (
"github.com/openshift/api/image/docker10"
imagev1 "github.com/openshift/api/image/v1"
api "github.com/openshift/hypershift/api"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/hypershift-operator/controllers/manifests"
"github.com/openshift/hypershift/support/releaseinfo"
"github.com/openshift/hypershift/support/thirdparty/library-go/pkg/image/dockerv1client"
diff --git a/hypershift-operator/controllers/nodepool/powervs.go b/hypershift-operator/controllers/nodepool/powervs.go
index dc73f5e9293..a9e722c14d6 100644
--- a/hypershift-operator/controllers/nodepool/powervs.go
+++ b/hypershift-operator/controllers/nodepool/powervs.go
@@ -4,7 +4,7 @@ import (
"fmt"
"strconv"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/releaseinfo"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -67,7 +67,7 @@ func ibmPowerVSMachineTemplateSpec(hcluster *hyperv1.HostedCluster, nodePool *hy
ImageRef: imageRef,
Network: subnet,
SysType: nodePool.Spec.Platform.PowerVS.SystemType,
- ProcType: nodePool.Spec.Platform.PowerVS.ProcessorType,
+ ProcType: string(nodePool.Spec.Platform.PowerVS.ProcessorType),
Processors: nodePool.Spec.Platform.PowerVS.Processors.String(),
Memory: strconv.Itoa(int(nodePool.Spec.Platform.PowerVS.MemoryGiB)),
},
@@ -111,8 +111,8 @@ func reconcileIBMPowerVSImage(ibmPowerVSImage *capipowervs.IBMPowerVSImage, hclu
Bucket: &img.Bucket,
Object: &img.Object,
Region: ®ion,
- StorageType: nodePool.Spec.Platform.PowerVS.StorageType,
- DeletePolicy: nodePool.Spec.Platform.PowerVS.ImageDeletePolicy,
+ StorageType: string(nodePool.Spec.Platform.PowerVS.StorageType),
+ DeletePolicy: string(nodePool.Spec.Platform.PowerVS.ImageDeletePolicy),
}
return nil
}
diff --git a/hypershift-operator/controllers/platform/aws/controller.go b/hypershift-operator/controllers/platform/aws/controller.go
index ea3cd31c6a8..dc664a8cb30 100644
--- a/hypershift-operator/controllers/platform/aws/controller.go
+++ b/hypershift-operator/controllers/platform/aws/controller.go
@@ -25,7 +25,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/source"
configv1 "github.com/openshift/api/config/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
awsutil "github.com/openshift/hypershift/cmd/infra/aws/util"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
"github.com/openshift/hypershift/hypershift-operator/controllers/hostedcluster"
diff --git a/hypershift-operator/controllers/platform/aws/controller_test.go b/hypershift-operator/controllers/platform/aws/controller_test.go
index 21fba04af0a..c53a67e590b 100644
--- a/hypershift-operator/controllers/platform/aws/controller_test.go
+++ b/hypershift-operator/controllers/platform/aws/controller_test.go
@@ -12,7 +12,7 @@ import (
"github.com/aws/aws-sdk-go/service/elbv2"
"github.com/aws/aws-sdk-go/service/elbv2/elbv2iface"
configv1 "github.com/openshift/api/config/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
hyperapi "github.com/openshift/hypershift/support/api"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
diff --git a/hypershift-operator/controllers/util/deployment.go b/hypershift-operator/controllers/util/deployment.go
index 8a6075926a0..3ba0fb5fe0d 100644
--- a/hypershift-operator/controllers/util/deployment.go
+++ b/hypershift-operator/controllers/util/deployment.go
@@ -4,7 +4,7 @@ import (
"fmt"
"strings"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/hypershift-operator/main.go b/hypershift-operator/main.go
index ccc86a89de9..4a9b9b0b241 100644
--- a/hypershift-operator/main.go
+++ b/hypershift-operator/main.go
@@ -27,7 +27,7 @@ import (
"github.com/go-logr/logr"
operatorv1 "github.com/openshift/api/operator/v1"
hyperapi "github.com/openshift/hypershift/api"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
awsutil "github.com/openshift/hypershift/cmd/infra/aws/util"
"github.com/openshift/hypershift/hypershift-operator/controllers/hostedcluster"
"github.com/openshift/hypershift/hypershift-operator/controllers/nodepool"
diff --git a/hypershift-operator/metrics.go b/hypershift-operator/metrics.go
index 457a25aa2e8..5de016054a0 100644
--- a/hypershift-operator/metrics.go
+++ b/hypershift-operator/metrics.go
@@ -7,7 +7,7 @@ import (
"time"
"github.com/go-logr/logr"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/prometheus/client_golang/prometheus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -192,8 +192,7 @@ func clusterCreationTime(hc *hyperv1.HostedCluster) *float64 {
var expectedNPConditionStates = map[string]bool{
hyperv1.NodePoolValidHostedClusterConditionType: true,
hyperv1.NodePoolValidReleaseImageConditionType: true,
- hyperv1.NodePoolValidAMIConditionType: true,
- hyperv1.NodePoolValidPowerVSImageConditionType: true,
+ hyperv1.NodePoolValidPlatformImageType: true,
hyperv1.NodePoolValidMachineConfigConditionType: true,
hyperv1.NodePoolReadyConditionType: true,
hyperv1.NodePoolUpdatingVersionConditionType: false,
diff --git a/hypershift-operator/metrics_test.go b/hypershift-operator/metrics_test.go
index aa5e36e309b..c7f9d438a60 100644
--- a/hypershift-operator/metrics_test.go
+++ b/hypershift-operator/metrics_test.go
@@ -9,7 +9,7 @@ import (
"github.com/google/go-cmp/cmp"
configv1 "github.com/openshift/api/config/v1"
"github.com/openshift/hypershift/api"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"go.uber.org/zap/zaptest"
diff --git a/ignition-server/cmd/start.go b/ignition-server/cmd/start.go
index aafd7d24fb6..25c4b44435b 100644
--- a/ignition-server/cmd/start.go
+++ b/ignition-server/cmd/start.go
@@ -14,7 +14,7 @@ import (
"time"
hyperapi "github.com/openshift/hypershift/api"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/ignition-server/controllers"
"github.com/openshift/hypershift/pkg/version"
"github.com/openshift/hypershift/support/releaseinfo"
diff --git a/ignition-server/controllers/local_ignitionprovider.go b/ignition-server/controllers/local_ignitionprovider.go
index 7acb740faac..b647fb15c63 100644
--- a/ignition-server/controllers/local_ignitionprovider.go
+++ b/ignition-server/controllers/local_ignitionprovider.go
@@ -15,7 +15,7 @@ import (
"sync"
"time"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
"github.com/openshift/hypershift/support/certs"
"github.com/openshift/hypershift/support/releaseinfo"
diff --git a/ignition-server/controllers/machineconfigserver_ignitionprovider.go b/ignition-server/controllers/machineconfigserver_ignitionprovider.go
index d6df429dc54..ab0c25430c5 100644
--- a/ignition-server/controllers/machineconfigserver_ignitionprovider.go
+++ b/ignition-server/controllers/machineconfigserver_ignitionprovider.go
@@ -14,7 +14,7 @@ import (
"text/template"
"time"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
"github.com/openshift/hypershift/support/releaseinfo"
corev1 "k8s.io/api/core/v1"
diff --git a/ignition-server/controllers/tokensecret_controller.go b/ignition-server/controllers/tokensecret_controller.go
index ee6014145b5..0ca979da5ae 100644
--- a/ignition-server/controllers/tokensecret_controller.go
+++ b/ignition-server/controllers/tokensecret_controller.go
@@ -13,7 +13,7 @@ import (
"github.com/go-logr/logr"
"github.com/google/uuid"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/prometheus/client_golang/prometheus"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -84,14 +84,15 @@ type IgnitionProvider interface {
// stores it in the PayloadsStore, and rotates the token ID periodically.
// A token Secret is by contractual convention:
// type: Secret
-// metadata:
-// annotations:
-// hypershift.openshift.io/ignition-config: "true"
-// data:
-// token:
-// old_token:
-// release:
-// config: |-
+//
+// metadata:
+// annotations:
+// hypershift.openshift.io/ignition-config: "true"
+// data:
+// token:
+// old_token:
+// release:
+// config: |-
type TokenSecretReconciler struct {
client.Client
IgnitionProvider IgnitionProvider
diff --git a/ignition-server/controllers/tokensecret_controller_test.go b/ignition-server/controllers/tokensecret_controller_test.go
index 73b59cfe1c4..5bfd1a7b903 100644
--- a/ignition-server/controllers/tokensecret_controller_test.go
+++ b/ignition-server/controllers/tokensecret_controller_test.go
@@ -9,7 +9,7 @@ import (
"github.com/google/uuid"
. "github.com/onsi/gomega"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/support/api/scheme.go b/support/api/scheme.go
index 66132f08698..8bde72052fc 100644
--- a/support/api/scheme.go
+++ b/support/api/scheme.go
@@ -9,7 +9,8 @@ import (
osinv1 "github.com/openshift/api/osin/v1"
routev1 "github.com/openshift/api/route/v1"
securityv1 "github.com/openshift/api/security/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1alpha1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1beta1 "github.com/openshift/hypershift/api/v1beta1"
mcfgv1 "github.com/openshift/hypershift/thirdparty/machineconfigoperator/pkg/apis/machineconfiguration.openshift.io/v1"
prometheusoperatorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
corev1 "k8s.io/api/core/v1"
@@ -51,7 +52,8 @@ func init() {
clientgoscheme.AddToScheme(Scheme)
auditv1.AddToScheme(Scheme)
apiregistrationv1.AddToScheme(Scheme)
- hyperv1.AddToScheme(Scheme)
+ hyperv1alpha1.AddToScheme(Scheme)
+ hyperv1beta1.AddToScheme(Scheme)
capiv1.AddToScheme(Scheme)
configv1.AddToScheme(Scheme)
securityv1.AddToScheme(Scheme)
diff --git a/support/capabilities/management_cluster_capabilities_test.go b/support/capabilities/management_cluster_capabilities_test.go
index 4d9c629d4b7..76e82f3d9b8 100644
--- a/support/capabilities/management_cluster_capabilities_test.go
+++ b/support/capabilities/management_cluster_capabilities_test.go
@@ -7,7 +7,7 @@ import (
configv1 "github.com/openshift/api/config/v1"
routev1 "github.com/openshift/api/route/v1"
securityv1 "github.com/openshift/api/security/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
diff --git a/support/config/deployment.go b/support/config/deployment.go
index 0398b5911c8..6922022d5dd 100644
--- a/support/config/deployment.go
+++ b/support/config/deployment.go
@@ -1,7 +1,7 @@
package config
import (
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/support/config/deployment_test.go b/support/config/deployment_test.go
index e3b665c37e6..db2b55ae1c1 100644
--- a/support/config/deployment_test.go
+++ b/support/config/deployment_test.go
@@ -5,7 +5,7 @@ import (
"testing"
. "github.com/onsi/gomega"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
diff --git a/support/config/ingress.go b/support/config/ingress.go
index a8177b2fd85..b04348db1b3 100644
--- a/support/config/ingress.go
+++ b/support/config/ingress.go
@@ -3,7 +3,7 @@ package config
import (
"fmt"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
)
func IngressSubdomain(hcp *hyperv1.HostedControlPlane) string {
diff --git a/support/config/ownerref.go b/support/config/ownerref.go
index 8ad3dd3df8a..78e767055e4 100644
--- a/support/config/ownerref.go
+++ b/support/config/ownerref.go
@@ -5,6 +5,9 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+ "github.com/blang/semver"
+ hyperv1alpha1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/api"
"github.com/openshift/hypershift/support/util"
)
@@ -30,3 +33,12 @@ func ControllerOwnerRef(obj client.Object) *metav1.OwnerReference {
}
return metav1.NewControllerRef(obj, gvk)
}
+
+// MutatingOwnerRefFromHCP returns ownerRef with altered API version based on OCP release version
+func MutatingOwnerRefFromHCP(hcp *hyperv1.HostedControlPlane, version semver.Version) OwnerRef {
+ ownerRef := OwnerRefFrom(hcp)
+ if version.Major == 4 && version.Minor < 12 {
+ ownerRef.Reference.APIVersion = hyperv1alpha1.GroupVersion.String()
+ }
+ return ownerRef
+}
diff --git a/support/config/ownerref_test.go b/support/config/ownerref_test.go
new file mode 100644
index 00000000000..d60a45a1f6b
--- /dev/null
+++ b/support/config/ownerref_test.go
@@ -0,0 +1,57 @@
+package config
+
+import (
+ "testing"
+
+ . "github.com/onsi/gomega"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
+
+ "github.com/blang/semver"
+)
+
+func TestMutatingOwnerRefFromHCP(t *testing.T) {
+ tests := []struct {
+ name string
+ releaseVersion semver.Version
+ expected string
+ }{
+ {
+ name: "4.9.0 should use v1alpha1",
+ releaseVersion: semver.MustParse("4.9.0"),
+ expected: "hypershift.openshift.io/v1alpha1",
+ },
+ {
+ name: "4.10.0 should use v1alpha1",
+ releaseVersion: semver.MustParse("4.10.0"),
+ expected: "hypershift.openshift.io/v1alpha1",
+ },
+ {
+ name: "4.11.0 should use v1alpha1",
+ releaseVersion: semver.MustParse("4.11.0"),
+ expected: "hypershift.openshift.io/v1alpha1",
+ },
+ {
+ name: "4.12.0 should use v1beta1",
+ releaseVersion: semver.MustParse("4.12.0"),
+ expected: "hypershift.openshift.io/v1beta1",
+ },
+ {
+ name: "4.13.0 should use v1beta1",
+ releaseVersion: semver.MustParse("4.13.0"),
+ expected: "hypershift.openshift.io/v1beta1",
+ },
+ {
+ name: "default should use v1beta1",
+ releaseVersion: semver.MustParse("0.0.0"),
+ expected: "hypershift.openshift.io/v1beta1",
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ g := NewWithT(t)
+ result := MutatingOwnerRefFromHCP(&hyperv1.HostedControlPlane{}, test.releaseVersion)
+ g.Expect(result.Reference.APIVersion).To(Equal(test.expected))
+ })
+ }
+}
diff --git a/support/conversiontest/fuzz.go b/support/conversiontest/fuzz.go
new file mode 100644
index 00000000000..f33e3fd7269
--- /dev/null
+++ b/support/conversiontest/fuzz.go
@@ -0,0 +1,126 @@
+package conversiontest
+
+import (
+ "math/rand"
+ "testing"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/google/go-cmp/cmp"
+ fuzz "github.com/google/gofuzz"
+ "github.com/onsi/gomega"
+ "k8s.io/apimachinery/pkg/api/apitesting/fuzzer"
+ apiequality "k8s.io/apimachinery/pkg/api/equality"
+ metafuzzer "k8s.io/apimachinery/pkg/apis/meta/fuzzer"
+ "k8s.io/apimachinery/pkg/runtime"
+ runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ "k8s.io/kubectl/pkg/scheme"
+ "sigs.k8s.io/controller-runtime/pkg/conversion"
+)
+
+// Source: https://github.com/kubernetes-sigs/cluster-api/blob/1ec0cd6174f1b860dc466db587241ea7edea0b9f/util/conversion/conversion.go
+
+// GetFuzzer returns a new fuzzer to be used for testing.
+func GetFuzzer(scheme *runtime.Scheme, funcs ...fuzzer.FuzzerFuncs) *fuzz.Fuzzer {
+ funcs = append([]fuzzer.FuzzerFuncs{
+ metafuzzer.Funcs,
+ func(_ runtimeserializer.CodecFactory) []interface{} {
+ return []interface{}{
+ // Custom fuzzer for metav1.Time pointers which weren't
+ // fuzzed and always resulted in `nil` values.
+ // This implementation is somewhat similar to the one provided
+ // in the metafuzzer.Funcs.
+ func(input *metav1.Time, c fuzz.Continue) {
+ if input != nil {
+ var sec, nsec uint32
+ c.Fuzz(&sec)
+ c.Fuzz(&nsec)
+ fuzzed := metav1.Unix(int64(sec), int64(nsec)).Rfc3339Copy()
+ input.Time = fuzzed.Time
+ }
+ },
+ }
+ },
+ }, funcs...)
+ return fuzzer.FuzzerFor(
+ fuzzer.MergeFuzzerFuncs(funcs...),
+ rand.NewSource(rand.Int63()), //nolint:gosec
+ runtimeserializer.NewCodecFactory(scheme),
+ ).NumElements(1, 5)
+}
+
+// FuzzTestFuncInput contains input parameters
+// for the FuzzTestFunc function.
+type FuzzTestFuncInput struct {
+ Scheme *runtime.Scheme
+
+ Hub conversion.Hub
+ HubAfterMutation func(conversion.Hub)
+
+ Spoke conversion.Convertible
+ SpokeAfterMutation func(convertible conversion.Convertible)
+ SkipSpokeAnnotationCleanup bool
+
+ FuzzerFuncs []fuzzer.FuzzerFuncs
+}
+
+// FuzzTestFunc returns a new testing function to be used in tests to make sure conversions between
+// the Hub version of an object and an older version aren't lossy.
+func FuzzTestFunc(input FuzzTestFuncInput) func(*testing.T) {
+ if input.Scheme == nil {
+ input.Scheme = scheme.Scheme
+ }
+
+ return func(t *testing.T) {
+ t.Helper()
+
+ t.Run("spoke-hub-spoke", func(t *testing.T) {
+ g := gomega.NewWithT(t)
+ fuzzer := GetFuzzer(input.Scheme, input.FuzzerFuncs...)
+
+ for i := 0; i < 100; i++ {
+ // Create the spoke and fuzz it
+ spokeBefore := input.Spoke.DeepCopyObject().(conversion.Convertible)
+ fuzzer.Fuzz(spokeBefore)
+
+ // First convert spoke to hub
+ hubCopy := input.Hub.DeepCopyObject().(conversion.Hub)
+ g.Expect(spokeBefore.ConvertTo(hubCopy)).To(gomega.Succeed())
+
+ // Convert hub back to spoke and check if the resulting spoke is equal to the spoke before the round trip
+ spokeAfter := input.Spoke.DeepCopyObject().(conversion.Convertible)
+ g.Expect(spokeAfter.ConvertFrom(hubCopy)).To(gomega.Succeed())
+
+ if input.SpokeAfterMutation != nil {
+ input.SpokeAfterMutation(spokeAfter)
+ }
+
+ g.Expect(apiequality.Semantic.DeepEqual(spokeBefore, spokeAfter)).To(gomega.BeTrue(), cmp.Diff(spokeBefore, spokeAfter))
+ }
+ })
+
+ t.Run("hub-spoke-hub", func(t *testing.T) {
+ g := gomega.NewWithT(t)
+ fuzzer := GetFuzzer(input.Scheme, input.FuzzerFuncs...)
+
+ for i := 0; i < 100; i++ {
+ // Create the hub and fuzz it
+ hubBefore := input.Hub.DeepCopyObject().(conversion.Hub)
+ fuzzer.Fuzz(hubBefore)
+
+ // First convert hub to spoke
+ dstCopy := input.Spoke.DeepCopyObject().(conversion.Convertible)
+ g.Expect(dstCopy.ConvertFrom(hubBefore)).To(gomega.Succeed())
+
+ // Convert spoke back to hub and check if the resulting hub is equal to the hub before the round trip
+ hubAfter := input.Hub.DeepCopyObject().(conversion.Hub)
+ g.Expect(dstCopy.ConvertTo(hubAfter)).To(gomega.Succeed())
+
+ if input.HubAfterMutation != nil {
+ input.HubAfterMutation(hubAfter)
+ }
+ g.Expect(apiequality.Semantic.DeepEqual(hubBefore, hubAfter)).To(gomega.BeTrue(), cmp.Diff(hubBefore, hubAfter))
+ }
+ })
+ }
+}
diff --git a/support/globalconfig/dns.go b/support/globalconfig/dns.go
index 366c15f81dc..9e732b657c7 100644
--- a/support/globalconfig/dns.go
+++ b/support/globalconfig/dns.go
@@ -6,7 +6,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
configv1 "github.com/openshift/api/config/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
)
func DNSConfig() *configv1.DNS {
diff --git a/support/globalconfig/dns_test.go b/support/globalconfig/dns_test.go
index 69b096c9332..005a533ade1 100644
--- a/support/globalconfig/dns_test.go
+++ b/support/globalconfig/dns_test.go
@@ -2,11 +2,12 @@ package globalconfig
import (
"fmt"
+ "testing"
+
. "github.com/onsi/gomega"
configv1 "github.com/openshift/api/config/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "testing"
)
func TestReconcileDNSConfig(t *testing.T) {
diff --git a/support/globalconfig/imagecontentsource.go b/support/globalconfig/imagecontentsource.go
index 0b0cca9253f..eada524a39e 100644
--- a/support/globalconfig/imagecontentsource.go
+++ b/support/globalconfig/imagecontentsource.go
@@ -4,7 +4,7 @@ import (
operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
)
func ImageContentSourcePolicy() *operatorv1alpha1.ImageContentSourcePolicy {
diff --git a/support/globalconfig/infrastructure.go b/support/globalconfig/infrastructure.go
index 3052690507b..e921b16bbbc 100644
--- a/support/globalconfig/infrastructure.go
+++ b/support/globalconfig/infrastructure.go
@@ -7,7 +7,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
configv1 "github.com/openshift/api/config/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
)
func InfrastructureConfig() *configv1.Infrastructure {
diff --git a/support/globalconfig/ingress.go b/support/globalconfig/ingress.go
index ca85edd16af..0a0c71400a0 100644
--- a/support/globalconfig/ingress.go
+++ b/support/globalconfig/ingress.go
@@ -6,7 +6,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
configv1 "github.com/openshift/api/config/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
)
func IngressConfig() *configv1.Ingress {
diff --git a/support/globalconfig/installconfig.go b/support/globalconfig/installconfig.go
index a9eee1b9b5d..50d4e3213ff 100644
--- a/support/globalconfig/installconfig.go
+++ b/support/globalconfig/installconfig.go
@@ -5,7 +5,7 @@ import (
"fmt"
"text/template"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/util"
)
diff --git a/support/globalconfig/network.go b/support/globalconfig/network.go
index ba07439087b..74ee89b4757 100644
--- a/support/globalconfig/network.go
+++ b/support/globalconfig/network.go
@@ -4,7 +4,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
configv1 "github.com/openshift/api/config/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/util"
)
diff --git a/support/globalconfig/observed.go b/support/globalconfig/observed.go
index 9bcbb3dcdcf..6c2bebd3046 100644
--- a/support/globalconfig/observed.go
+++ b/support/globalconfig/observed.go
@@ -19,6 +19,12 @@ const (
observedConfigKey = "config"
)
+type ObservedConfig struct {
+ Image *configv1.Image
+ Build *configv1.Build
+ Project *configv1.Project
+}
+
func ReconcileObservedConfig(cm *corev1.ConfigMap, config runtime.Object) error {
serializedConfig, err := util.SerializeResource(config, api.Scheme)
if err != nil {
diff --git a/support/globalconfig/proxy.go b/support/globalconfig/proxy.go
index 23369c1827d..2451f3ed005 100644
--- a/support/globalconfig/proxy.go
+++ b/support/globalconfig/proxy.go
@@ -5,7 +5,7 @@ import (
"strings"
configv1 "github.com/openshift/api/config/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
)
diff --git a/support/upsert/loopdetector.go b/support/upsert/loopdetector.go
index 047975720e9..b6639f36e6e 100644
--- a/support/upsert/loopdetector.go
+++ b/support/upsert/loopdetector.go
@@ -9,7 +9,7 @@ import (
"github.com/go-logr/logr"
"github.com/google/go-cmp/cmp"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
diff --git a/support/util/networking.go b/support/util/networking.go
index c01b7fb824e..b157112f872 100644
--- a/support/util/networking.go
+++ b/support/util/networking.go
@@ -1,7 +1,7 @@
package util
import (
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
)
func MachineCIDRs(machineNetwork []hyperv1.MachineNetworkEntry) []string {
diff --git a/support/util/ownerref.go b/support/util/ownerref.go
index 66a79197fa5..bd1eb3c03d7 100644
--- a/support/util/ownerref.go
+++ b/support/util/ownerref.go
@@ -21,7 +21,9 @@ func EnsureOwnerRef(resource client.Object, ownerRef *metav1.OwnerReference) {
func getOwnerRefIndex(list []metav1.OwnerReference, ref *metav1.OwnerReference) int {
for i := range list {
- if list[i].Kind == ref.Kind && list[i].APIVersion == ref.APIVersion && list[i].Name == ref.Name {
+ // NOTE: The APIVersion may have changed with a new API Version, however the UID should remain the
+ // same. Use either to identify the owner reference.
+ if list[i].Kind == ref.Kind && (list[i].APIVersion == ref.APIVersion || list[i].UID == ref.UID) && list[i].Name == ref.Name {
return i
}
}
diff --git a/support/util/pausereconcile.go b/support/util/pausereconcile.go
index 807e8d5acb0..7e456c072d3 100644
--- a/support/util/pausereconcile.go
+++ b/support/util/pausereconcile.go
@@ -6,7 +6,7 @@ import (
"time"
"github.com/go-logr/logr"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
diff --git a/support/util/pausereconcile_test.go b/support/util/pausereconcile_test.go
index f566739b710..c8c2be0b37e 100644
--- a/support/util/pausereconcile_test.go
+++ b/support/util/pausereconcile_test.go
@@ -5,7 +5,7 @@ import (
"testing"
"time"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
diff --git a/support/util/public.go b/support/util/public.go
index e0ab1d2b516..1d67f95767e 100644
--- a/support/util/public.go
+++ b/support/util/public.go
@@ -1,7 +1,7 @@
package util
import (
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
)
// HasPrivateAPIServerConnectivity determines if workloads running inside the guest cluster can access
diff --git a/support/util/public_test.go b/support/util/public_test.go
index b2ef827a4d6..55560c9381d 100644
--- a/support/util/public_test.go
+++ b/support/util/public_test.go
@@ -3,7 +3,7 @@ package util
import (
"testing"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
)
func TestConnectsThroughInternetToControlplane(t *testing.T) {
diff --git a/support/util/visibility.go b/support/util/visibility.go
index e80376de2c2..b251719527d 100644
--- a/support/util/visibility.go
+++ b/support/util/visibility.go
@@ -1,7 +1,7 @@
package util
import (
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
)
func IsPrivateHCP(hcp *hyperv1.HostedControlPlane) bool {
diff --git a/support/util/visibility_test.go b/support/util/visibility_test.go
index 4616c37d6ad..8cd30a1ffa6 100644
--- a/support/util/visibility_test.go
+++ b/support/util/visibility_test.go
@@ -3,7 +3,7 @@ package util
import (
"testing"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
)
func TestIsPrivateHCP(t *testing.T) {
diff --git a/test/e2e/autoscaling_test.go b/test/e2e/autoscaling_test.go
index 76fb8848ace..5f787790a35 100644
--- a/test/e2e/autoscaling_test.go
+++ b/test/e2e/autoscaling_test.go
@@ -10,7 +10,7 @@ import (
"testing"
. "github.com/onsi/gomega"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
e2eutil "github.com/openshift/hypershift/test/e2e/util"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
diff --git a/test/e2e/chaos_test.go b/test/e2e/chaos_test.go
index e8bea9d8db0..62e631691ce 100644
--- a/test/e2e/chaos_test.go
+++ b/test/e2e/chaos_test.go
@@ -17,7 +17,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/utils/pointer"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
e2eutil "github.com/openshift/hypershift/test/e2e/util"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/test/e2e/control_plane_upgrade_test.go b/test/e2e/control_plane_upgrade_test.go
index 7618970a86c..6752ec6f00a 100644
--- a/test/e2e/control_plane_upgrade_test.go
+++ b/test/e2e/control_plane_upgrade_test.go
@@ -8,7 +8,7 @@ import (
"testing"
. "github.com/onsi/gomega"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
e2eutil "github.com/openshift/hypershift/test/e2e/util"
crclient "sigs.k8s.io/controller-runtime/pkg/client"
)
diff --git a/test/e2e/create_cluster_test.go b/test/e2e/create_cluster_test.go
index 392ad53bff2..a48e2b15a91 100644
--- a/test/e2e/create_cluster_test.go
+++ b/test/e2e/create_cluster_test.go
@@ -9,7 +9,7 @@ import (
. "github.com/onsi/gomega"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
e2eutil "github.com/openshift/hypershift/test/e2e/util"
crclient "sigs.k8s.io/controller-runtime/pkg/client"
)
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index 7f6e6ece079..670f030731a 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -19,7 +19,7 @@ import (
"time"
"github.com/go-logr/logr"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/cmd/cluster/core"
"github.com/openshift/hypershift/cmd/cluster/kubevirt"
"github.com/openshift/hypershift/cmd/version"
diff --git a/test/e2e/nodepool_machineconfig_test.go b/test/e2e/nodepool_machineconfig_test.go
index e75e5110cbf..8f8c6eacdc6 100644
--- a/test/e2e/nodepool_machineconfig_test.go
+++ b/test/e2e/nodepool_machineconfig_test.go
@@ -13,7 +13,7 @@ import (
. "github.com/onsi/gomega"
ignitionapi "github.com/coreos/ignition/v2/config/v3_2/types"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
hyperapi "github.com/openshift/hypershift/support/api"
e2eutil "github.com/openshift/hypershift/test/e2e/util"
mcfgv1 "github.com/openshift/hypershift/thirdparty/machineconfigoperator/pkg/apis/machineconfiguration.openshift.io/v1"
diff --git a/test/e2e/nodepool_upgrade_test.go b/test/e2e/nodepool_upgrade_test.go
index d11c6457e08..2ea8b536d20 100644
--- a/test/e2e/nodepool_upgrade_test.go
+++ b/test/e2e/nodepool_upgrade_test.go
@@ -14,7 +14,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
. "github.com/onsi/gomega"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/support/releaseinfo"
e2eutil "github.com/openshift/hypershift/test/e2e/util"
"k8s.io/apimachinery/pkg/util/intstr"
diff --git a/test/e2e/util/dump/dump.go b/test/e2e/util/dump/dump.go
index d83b3ab3205..45076593af0 100644
--- a/test/e2e/util/dump/dump.go
+++ b/test/e2e/util/dump/dump.go
@@ -8,7 +8,7 @@ import (
"testing"
"github.com/go-logr/zapr"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/cmd/cluster/core"
consolelogsaws "github.com/openshift/hypershift/cmd/consolelogs/aws"
"github.com/openshift/hypershift/support/upsert"
diff --git a/test/e2e/util/dump/journals.go b/test/e2e/util/dump/journals.go
index eee572786b7..7b9c10e91fe 100644
--- a/test/e2e/util/dump/journals.go
+++ b/test/e2e/util/dump/journals.go
@@ -16,7 +16,7 @@ import (
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
bastionaws "github.com/openshift/hypershift/cmd/bastion/aws"
awsutil "github.com/openshift/hypershift/cmd/infra/aws/util"
cmdutil "github.com/openshift/hypershift/cmd/util"
diff --git a/test/e2e/util/fixture.go b/test/e2e/util/fixture.go
index 32e51677aa9..6bc712fa85b 100644
--- a/test/e2e/util/fixture.go
+++ b/test/e2e/util/fixture.go
@@ -10,7 +10,7 @@ import (
. "github.com/onsi/gomega"
operatorv1 "github.com/openshift/api/operator/v1"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/cmd/cluster/aws"
"github.com/openshift/hypershift/cmd/cluster/azure"
"github.com/openshift/hypershift/cmd/cluster/core"
diff --git a/test/e2e/util/kubevirt.go b/test/e2e/util/kubevirt.go
index 35571a31d38..09d70db9139 100644
--- a/test/e2e/util/kubevirt.go
+++ b/test/e2e/util/kubevirt.go
@@ -16,7 +16,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
crclient "sigs.k8s.io/controller-runtime/pkg/client"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/hypershift-operator/controllers/manifests"
)
diff --git a/test/e2e/util/util.go b/test/e2e/util/util.go
index 158e1cd93ee..10e09e1de93 100644
--- a/test/e2e/util/util.go
+++ b/test/e2e/util/util.go
@@ -29,7 +29,7 @@ import (
capiv1 "sigs.k8s.io/cluster-api/api/v1beta1"
crclient "sigs.k8s.io/controller-runtime/pkg/client"
- hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
+ hyperv1 "github.com/openshift/hypershift/api/v1beta1"
"github.com/openshift/hypershift/hypershift-operator/controllers/manifests"
)
diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go
new file mode 100644
index 00000000000..5db6d52d473
--- /dev/null
+++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:protobuf-gen=package
+// +k8s:openapi-gen=true
+
+// +groupName=imagepolicy.k8s.io
+
+package v1alpha1 // import "k8s.io/api/imagepolicy/v1alpha1"
diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go
new file mode 100644
index 00000000000..990f11c046d
--- /dev/null
+++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go
@@ -0,0 +1,1375 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/kubernetes/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto
+
+package v1alpha1
+
+import (
+ fmt "fmt"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+ github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *ImageReview) Reset() { *m = ImageReview{} }
+func (*ImageReview) ProtoMessage() {}
+func (*ImageReview) Descriptor() ([]byte, []int) {
+ return fileDescriptor_834793af728657a5, []int{0}
+}
+func (m *ImageReview) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageReview) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageReview.Merge(m, src)
+}
+func (m *ImageReview) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageReview) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageReview.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageReview proto.InternalMessageInfo
+
+func (m *ImageReviewContainerSpec) Reset() { *m = ImageReviewContainerSpec{} }
+func (*ImageReviewContainerSpec) ProtoMessage() {}
+func (*ImageReviewContainerSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_834793af728657a5, []int{1}
+}
+func (m *ImageReviewContainerSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageReviewContainerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageReviewContainerSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageReviewContainerSpec.Merge(m, src)
+}
+func (m *ImageReviewContainerSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageReviewContainerSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageReviewContainerSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageReviewContainerSpec proto.InternalMessageInfo
+
+func (m *ImageReviewSpec) Reset() { *m = ImageReviewSpec{} }
+func (*ImageReviewSpec) ProtoMessage() {}
+func (*ImageReviewSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_834793af728657a5, []int{2}
+}
+func (m *ImageReviewSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageReviewSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageReviewSpec.Merge(m, src)
+}
+func (m *ImageReviewSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageReviewSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageReviewSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageReviewSpec proto.InternalMessageInfo
+
+func (m *ImageReviewStatus) Reset() { *m = ImageReviewStatus{} }
+func (*ImageReviewStatus) ProtoMessage() {}
+func (*ImageReviewStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_834793af728657a5, []int{3}
+}
+func (m *ImageReviewStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageReviewStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageReviewStatus.Merge(m, src)
+}
+func (m *ImageReviewStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageReviewStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageReviewStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageReviewStatus proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*ImageReview)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReview")
+ proto.RegisterType((*ImageReviewContainerSpec)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewContainerSpec")
+ proto.RegisterType((*ImageReviewSpec)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewSpec")
+ proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewSpec.AnnotationsEntry")
+ proto.RegisterType((*ImageReviewStatus)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewStatus")
+ proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewStatus.AuditAnnotationsEntry")
+}
+
+func init() {
+ proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto", fileDescriptor_834793af728657a5)
+}
+
+var fileDescriptor_834793af728657a5 = []byte{
+ // 609 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0xcf, 0x6e, 0xd3, 0x4c,
+ 0x14, 0xc5, 0xe3, 0xa4, 0xff, 0x32, 0xf9, 0x3e, 0x9a, 0x0e, 0x20, 0x59, 0x59, 0x38, 0x55, 0x90,
+ 0x50, 0x59, 0x30, 0x43, 0x2b, 0x84, 0x0a, 0x0b, 0x50, 0x5c, 0x55, 0x2a, 0x0b, 0x40, 0x1a, 0x76,
+ 0x5d, 0x31, 0x71, 0x2e, 0x8e, 0x49, 0x3c, 0x63, 0x79, 0xc6, 0x29, 0xd9, 0xf1, 0x04, 0x88, 0x37,
+ 0xe0, 0x45, 0x78, 0x80, 0x2e, 0xbb, 0xec, 0xaa, 0xa2, 0x61, 0xc9, 0x4b, 0x20, 0x8f, 0x9d, 0xd8,
+ 0x24, 0x45, 0x55, 0x76, 0xbe, 0xf7, 0xce, 0xf9, 0xdd, 0xe3, 0xe3, 0x91, 0xd1, 0xc9, 0xf0, 0x50,
+ 0x91, 0x40, 0xd2, 0x61, 0xd2, 0x83, 0x58, 0x80, 0x06, 0x45, 0xc7, 0x20, 0xfa, 0x32, 0xa6, 0xf9,
+ 0x80, 0x47, 0x01, 0x0d, 0x42, 0xee, 0x43, 0x24, 0x47, 0x81, 0x37, 0xa1, 0xe3, 0x7d, 0x3e, 0x8a,
+ 0x06, 0x7c, 0x9f, 0xfa, 0x20, 0x20, 0xe6, 0x1a, 0xfa, 0x24, 0x8a, 0xa5, 0x96, 0xb8, 0x9d, 0x09,
+ 0x08, 0x8f, 0x02, 0x52, 0x12, 0x90, 0x99, 0xa0, 0xf5, 0xd8, 0x0f, 0xf4, 0x20, 0xe9, 0x11, 0x4f,
+ 0x86, 0xd4, 0x97, 0xbe, 0xa4, 0x46, 0xd7, 0x4b, 0x3e, 0x9a, 0xca, 0x14, 0xe6, 0x29, 0xe3, 0xb5,
+ 0x9e, 0x16, 0x06, 0x42, 0xee, 0x0d, 0x02, 0x01, 0xf1, 0x84, 0x46, 0x43, 0x3f, 0x6d, 0x28, 0x1a,
+ 0x82, 0xe6, 0x74, 0xbc, 0xe4, 0xa2, 0x45, 0xff, 0xa5, 0x8a, 0x13, 0xa1, 0x83, 0x10, 0x96, 0x04,
+ 0xcf, 0x6e, 0x13, 0x28, 0x6f, 0x00, 0x21, 0x5f, 0xd4, 0x75, 0xbe, 0x57, 0x51, 0xe3, 0x75, 0xfa,
+ 0x9a, 0x0c, 0xc6, 0x01, 0x9c, 0xe1, 0x0f, 0x68, 0x2b, 0xf5, 0xd4, 0xe7, 0x9a, 0xdb, 0xd6, 0xae,
+ 0xb5, 0xd7, 0x38, 0x78, 0x42, 0x8a, 0x44, 0xe6, 0x68, 0x12, 0x0d, 0xfd, 0xb4, 0xa1, 0x48, 0x7a,
+ 0x9a, 0x8c, 0xf7, 0xc9, 0xbb, 0xde, 0x27, 0xf0, 0xf4, 0x1b, 0xd0, 0xdc, 0xc5, 0xe7, 0x57, 0xed,
+ 0xca, 0xf4, 0xaa, 0x8d, 0x8a, 0x1e, 0x9b, 0x53, 0x31, 0x43, 0x6b, 0x2a, 0x02, 0xcf, 0xae, 0x2e,
+ 0xd1, 0x6f, 0xcc, 0x9b, 0x94, 0xdc, 0xbd, 0x8f, 0xc0, 0x73, 0xff, 0xcb, 0xe9, 0x6b, 0x69, 0xc5,
+ 0x0c, 0x0b, 0x9f, 0xa2, 0x0d, 0xa5, 0xb9, 0x4e, 0x94, 0x5d, 0x33, 0xd4, 0x83, 0x95, 0xa8, 0x46,
+ 0xe9, 0xde, 0xc9, 0xb9, 0x1b, 0x59, 0xcd, 0x72, 0x62, 0xe7, 0x15, 0xb2, 0x4b, 0x87, 0x8f, 0xa4,
+ 0xd0, 0x3c, 0x8d, 0x20, 0xdd, 0x8e, 0x1f, 0xa0, 0x75, 0x43, 0x37, 0x51, 0xd5, 0xdd, 0xff, 0x73,
+ 0xc4, 0x7a, 0x26, 0xc8, 0x66, 0x9d, 0xdf, 0x55, 0xb4, 0xbd, 0xf0, 0x12, 0x38, 0x44, 0xc8, 0x9b,
+ 0x91, 0x94, 0x6d, 0xed, 0xd6, 0xf6, 0x1a, 0x07, 0xcf, 0x57, 0x31, 0xfd, 0x97, 0x8f, 0x22, 0xf1,
+ 0x79, 0x5b, 0xb1, 0xd2, 0x02, 0xfc, 0x19, 0x35, 0xb8, 0x10, 0x52, 0x73, 0x1d, 0x48, 0xa1, 0xec,
+ 0xaa, 0xd9, 0xd7, 0x5d, 0x35, 0x7a, 0xd2, 0x2d, 0x18, 0xc7, 0x42, 0xc7, 0x13, 0xf7, 0x6e, 0xbe,
+ 0xb7, 0x51, 0x9a, 0xb0, 0xf2, 0x2a, 0x4c, 0x51, 0x5d, 0xf0, 0x10, 0x54, 0xc4, 0x3d, 0x30, 0x1f,
+ 0xa7, 0xee, 0xee, 0xe4, 0xa2, 0xfa, 0xdb, 0xd9, 0x80, 0x15, 0x67, 0x5a, 0x2f, 0x51, 0x73, 0x71,
+ 0x0d, 0x6e, 0xa2, 0xda, 0x10, 0x26, 0x59, 0xc8, 0x2c, 0x7d, 0xc4, 0xf7, 0xd0, 0xfa, 0x98, 0x8f,
+ 0x12, 0x30, 0xb7, 0xa8, 0xce, 0xb2, 0xe2, 0x45, 0xf5, 0xd0, 0xea, 0xfc, 0xa8, 0xa2, 0x9d, 0xa5,
+ 0x8f, 0x8b, 0x1f, 0xa1, 0x4d, 0x3e, 0x1a, 0xc9, 0x33, 0xe8, 0x1b, 0xca, 0x96, 0xbb, 0x9d, 0x9b,
+ 0xd8, 0xec, 0x66, 0x6d, 0x36, 0x9b, 0xe3, 0x87, 0x68, 0x23, 0x06, 0xae, 0xa4, 0xc8, 0xd8, 0xc5,
+ 0xbd, 0x60, 0xa6, 0xcb, 0xf2, 0x29, 0xfe, 0x6a, 0xa1, 0x26, 0x4f, 0xfa, 0x81, 0x2e, 0xd9, 0xb5,
+ 0x6b, 0x26, 0xd9, 0x93, 0xd5, 0xaf, 0x1f, 0xe9, 0x2e, 0xa0, 0xb2, 0x80, 0xed, 0x7c, 0x79, 0x73,
+ 0x71, 0xcc, 0x96, 0x76, 0xb7, 0x8e, 0xd0, 0xfd, 0x1b, 0x21, 0xab, 0xc4, 0xe7, 0x1e, 0x9f, 0x5f,
+ 0x3b, 0x95, 0x8b, 0x6b, 0xa7, 0x72, 0x79, 0xed, 0x54, 0xbe, 0x4c, 0x1d, 0xeb, 0x7c, 0xea, 0x58,
+ 0x17, 0x53, 0xc7, 0xba, 0x9c, 0x3a, 0xd6, 0xcf, 0xa9, 0x63, 0x7d, 0xfb, 0xe5, 0x54, 0x4e, 0xdb,
+ 0xb7, 0xfc, 0x55, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x9e, 0x2c, 0xa6, 0xdf, 0x90, 0x05, 0x00,
+ 0x00,
+}
+
+func (m *ImageReview) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageReview) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageReviewContainerSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageReviewContainerSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageReviewContainerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Image)
+ copy(dAtA[i:], m.Image)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageReviewSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageReviewSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+ i--
+ dAtA[i] = 0x1a
+ if len(m.Annotations) > 0 {
+ keysForAnnotations := make([]string, 0, len(m.Annotations))
+ for k := range m.Annotations {
+ keysForAnnotations = append(keysForAnnotations, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+ for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Annotations[string(keysForAnnotations[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForAnnotations[iNdEx])
+ copy(dAtA[i:], keysForAnnotations[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Containers) > 0 {
+ for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageReviewStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageReviewStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.AuditAnnotations) > 0 {
+ keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations))
+ for k := range m.AuditAnnotations {
+ keysForAuditAnnotations = append(keysForAuditAnnotations, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations)
+ for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForAuditAnnotations[iNdEx])
+ copy(dAtA[i:], keysForAuditAnnotations[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuditAnnotations[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x12
+ i--
+ if m.Allowed {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *ImageReview) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ImageReviewContainerSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Image)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ImageReviewSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Containers) > 0 {
+ for _, e := range m.Containers {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Annotations) > 0 {
+ for k, v := range m.Annotations {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ImageReviewStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 2
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.AuditAnnotations) > 0 {
+ for k, v := range m.AuditAnnotations {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *ImageReview) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ImageReview{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ImageReviewSpec", "ImageReviewSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ImageReviewStatus", "ImageReviewStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageReviewContainerSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ImageReviewContainerSpec{`,
+ `Image:` + fmt.Sprintf("%v", this.Image) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageReviewSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForContainers := "[]ImageReviewContainerSpec{"
+ for _, f := range this.Containers {
+ repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "ImageReviewContainerSpec", "ImageReviewContainerSpec", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForContainers += "}"
+ keysForAnnotations := make([]string, 0, len(this.Annotations))
+ for k := range this.Annotations {
+ keysForAnnotations = append(keysForAnnotations, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+ mapStringForAnnotations := "map[string]string{"
+ for _, k := range keysForAnnotations {
+ mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
+ }
+ mapStringForAnnotations += "}"
+ s := strings.Join([]string{`&ImageReviewSpec{`,
+ `Containers:` + repeatedStringForContainers + `,`,
+ `Annotations:` + mapStringForAnnotations + `,`,
+ `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageReviewStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForAuditAnnotations := make([]string, 0, len(this.AuditAnnotations))
+ for k := range this.AuditAnnotations {
+ keysForAuditAnnotations = append(keysForAuditAnnotations, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations)
+ mapStringForAuditAnnotations := "map[string]string{"
+ for _, k := range keysForAuditAnnotations {
+ mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k])
+ }
+ mapStringForAuditAnnotations += "}"
+ s := strings.Join([]string{`&ImageReviewStatus{`,
+ `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `AuditAnnotations:` + mapStringForAuditAnnotations + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *ImageReview) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageReview: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageReview: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageReviewContainerSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageReviewContainerSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageReviewContainerSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Image = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageReviewSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Containers = append(m.Containers, ImageReviewContainerSpec{})
+ if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Annotations == nil {
+ m.Annotations = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Annotations[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageReviewStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageReviewStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Allowed = bool(v != 0)
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuditAnnotations == nil {
+ m.AuditAnnotations = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.AuditAnnotations[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto
new file mode 100644
index 00000000000..51328dde218
--- /dev/null
+++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto
@@ -0,0 +1,88 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package k8s.io.api.imagepolicy.v1alpha1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "k8s.io/api/imagepolicy/v1alpha1";
+
+// ImageReview checks if the set of images in a pod are allowed.
+message ImageReview {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec holds information about the pod being evaluated
+ optional ImageReviewSpec spec = 2;
+
+ // Status is filled in by the backend and indicates whether the pod should be allowed.
+ // +optional
+ optional ImageReviewStatus status = 3;
+}
+
+// ImageReviewContainerSpec is a description of a container within the pod creation request.
+message ImageReviewContainerSpec {
+ // This can be in the form image:tag or image@SHA:012345679abcdef.
+ // +optional
+ optional string image = 1;
+}
+
+// ImageReviewSpec is a description of the pod creation request.
+message ImageReviewSpec {
+ // Containers is a list of a subset of the information in each container of the Pod being created.
+ // +optional
+ repeated ImageReviewContainerSpec containers = 1;
+
+ // Annotations is a list of key-value pairs extracted from the Pod's annotations.
+ // It only includes keys which match the pattern `*.image-policy.k8s.io/*`.
+ // It is up to each webhook backend to determine how to interpret these annotations, if at all.
+ // +optional
+  map<string, string> annotations = 2;
+
+ // Namespace is the namespace the pod is being created in.
+ // +optional
+ optional string namespace = 3;
+}
+
+// ImageReviewStatus is the result of the review for the pod creation request.
+message ImageReviewStatus {
+ // Allowed indicates that all images were allowed to be run.
+ optional bool allowed = 1;
+
+ // Reason should be empty unless Allowed is false in which case it
+ // may contain a short description of what is wrong. Kubernetes
+ // may truncate excessively long errors when displaying to the user.
+ // +optional
+ optional string reason = 2;
+
+ // AuditAnnotations will be added to the attributes object of the
+ // admission controller request using 'AddAnnotation'. The keys should
+ // be prefix-less (i.e., the admission controller will add an
+ // appropriate prefix).
+ // +optional
+  map<string, string> auditAnnotations = 3;
+}
+
diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go
new file mode 100644
index 00000000000..477571bbb27
--- /dev/null
+++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name for this API.
+const GroupName = "imagepolicy.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+ // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ localSchemeBuilder = &SchemeBuilder
+ AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &ImageReview{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/types.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/types.go
new file mode 100644
index 00000000000..151ffb1e9a3
--- /dev/null
+++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/types.go
@@ -0,0 +1,82 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +genclient:noVerbs
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageReview checks if the set of images in a pod are allowed.
+type ImageReview struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec holds information about the pod being evaluated
+ Spec ImageReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status is filled in by the backend and indicates whether the pod should be allowed.
+ // +optional
+ Status ImageReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ImageReviewSpec is a description of the pod creation request.
+type ImageReviewSpec struct {
+ // Containers is a list of a subset of the information in each container of the Pod being created.
+ // +optional
+ Containers []ImageReviewContainerSpec `json:"containers,omitempty" protobuf:"bytes,1,rep,name=containers"`
+ // Annotations is a list of key-value pairs extracted from the Pod's annotations.
+ // It only includes keys which match the pattern `*.image-policy.k8s.io/*`.
+ // It is up to each webhook backend to determine how to interpret these annotations, if at all.
+ // +optional
+ Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,2,rep,name=annotations"`
+ // Namespace is the namespace the pod is being created in.
+ // +optional
+ Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
+}
+
+// ImageReviewContainerSpec is a description of a container within the pod creation request.
+type ImageReviewContainerSpec struct {
+ // This can be in the form image:tag or image@SHA:012345679abcdef.
+ // +optional
+ Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"`
+ // In future, we may add command line overrides, exec health check command lines, and so on.
+}
+
+// ImageReviewStatus is the result of the review for the pod creation request.
+type ImageReviewStatus struct {
+ // Allowed indicates that all images were allowed to be run.
+ Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"`
+ // Reason should be empty unless Allowed is false in which case it
+ // may contain a short description of what is wrong. Kubernetes
+ // may truncate excessively long errors when displaying to the user.
+ // +optional
+ Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"`
+ // AuditAnnotations will be added to the attributes object of the
+ // admission controller request using 'AddAnnotation'. The keys should
+ // be prefix-less (i.e., the admission controller will add an
+ // appropriate prefix).
+ // +optional
+ AuditAnnotations map[string]string `json:"auditAnnotations,omitempty" protobuf:"bytes,3,rep,name=auditAnnotations"`
+}
diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go
new file mode 100644
index 00000000000..8d51e77a08b
--- /dev/null
+++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go
@@ -0,0 +1,72 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_ImageReview = map[string]string{
+ "": "ImageReview checks if the set of images in a pod are allowed.",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec holds information about the pod being evaluated",
+ "status": "Status is filled in by the backend and indicates whether the pod should be allowed.",
+}
+
+func (ImageReview) SwaggerDoc() map[string]string {
+ return map_ImageReview
+}
+
+var map_ImageReviewContainerSpec = map[string]string{
+ "": "ImageReviewContainerSpec is a description of a container within the pod creation request.",
+ "image": "This can be in the form image:tag or image@SHA:012345679abcdef.",
+}
+
+func (ImageReviewContainerSpec) SwaggerDoc() map[string]string {
+ return map_ImageReviewContainerSpec
+}
+
+var map_ImageReviewSpec = map[string]string{
+ "": "ImageReviewSpec is a description of the pod creation request.",
+ "containers": "Containers is a list of a subset of the information in each container of the Pod being created.",
+ "annotations": "Annotations is a list of key-value pairs extracted from the Pod's annotations. It only includes keys which match the pattern `*.image-policy.k8s.io/*`. It is up to each webhook backend to determine how to interpret these annotations, if at all.",
+ "namespace": "Namespace is the namespace the pod is being created in.",
+}
+
+func (ImageReviewSpec) SwaggerDoc() map[string]string {
+ return map_ImageReviewSpec
+}
+
+var map_ImageReviewStatus = map[string]string{
+ "": "ImageReviewStatus is the result of the review for the pod creation request.",
+ "allowed": "Allowed indicates that all images were allowed to be run.",
+ "reason": "Reason should be empty unless Allowed is false in which case it may contain a short description of what is wrong. Kubernetes may truncate excessively long errors when displaying to the user.",
+ "auditAnnotations": "AuditAnnotations will be added to the attributes object of the admission controller request using 'AddAnnotation'. The keys should be prefix-less (i.e., the admission controller will add an appropriate prefix).",
+}
+
+func (ImageReviewStatus) SwaggerDoc() map[string]string {
+ return map_ImageReviewStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 00000000000..f230656f3fe
--- /dev/null
+++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,121 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageReview) DeepCopyInto(out *ImageReview) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReview.
+func (in *ImageReview) DeepCopy() *ImageReview {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageReview)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageReview) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageReviewContainerSpec) DeepCopyInto(out *ImageReviewContainerSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewContainerSpec.
+func (in *ImageReviewContainerSpec) DeepCopy() *ImageReviewContainerSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageReviewContainerSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageReviewSpec) DeepCopyInto(out *ImageReviewSpec) {
+ *out = *in
+ if in.Containers != nil {
+ in, out := &in.Containers, &out.Containers
+ *out = make([]ImageReviewContainerSpec, len(*in))
+ copy(*out, *in)
+ }
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewSpec.
+func (in *ImageReviewSpec) DeepCopy() *ImageReviewSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageReviewSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageReviewStatus) DeepCopyInto(out *ImageReviewStatus) {
+ *out = *in
+ if in.AuditAnnotations != nil {
+ in, out := &in.AuditAnnotations, &out.AuditAnnotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewStatus.
+func (in *ImageReviewStatus) DeepCopy() *ImageReviewStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageReviewStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/apitesting/codec.go b/vendor/k8s.io/apimachinery/pkg/api/apitesting/codec.go
new file mode 100644
index 00000000000..542b0aa275d
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/apitesting/codec.go
@@ -0,0 +1,116 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apitesting
+
+import (
+ "fmt"
+ "mime"
+ "os"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ "k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
+)
+
+var (
+ testCodecMediaType string
+ testStorageCodecMediaType string
+)
+
+// TestCodec returns the codec for the API version to test against, as set by the
+// KUBE_TEST_API_TYPE env var.
+func TestCodec(codecs runtimeserializer.CodecFactory, gvs ...schema.GroupVersion) runtime.Codec {
+ if len(testCodecMediaType) != 0 {
+ serializerInfo, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), testCodecMediaType)
+ if !ok {
+ panic(fmt.Sprintf("no serializer for %s", testCodecMediaType))
+ }
+ return codecs.CodecForVersions(serializerInfo.Serializer, codecs.UniversalDeserializer(), schema.GroupVersions(gvs), nil)
+ }
+ return codecs.LegacyCodec(gvs...)
+}
+
+// TestStorageCodec returns the codec for the API version to test against used in storage, as set by the
+// KUBE_TEST_API_STORAGE_TYPE env var.
+func TestStorageCodec(codecs runtimeserializer.CodecFactory, gvs ...schema.GroupVersion) runtime.Codec {
+ if len(testStorageCodecMediaType) != 0 {
+ serializerInfo, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), testStorageCodecMediaType)
+ if !ok {
+ panic(fmt.Sprintf("no serializer for %s", testStorageCodecMediaType))
+ }
+
+ // etcd2 only supports string data - we must wrap any result before returning
+ // TODO: remove for etcd3 / make parameterizable
+ serializer := serializerInfo.Serializer
+ if !serializerInfo.EncodesAsText {
+ serializer = runtime.NewBase64Serializer(serializer, serializer)
+ }
+
+ decoder := recognizer.NewDecoder(serializer, codecs.UniversalDeserializer())
+ return codecs.CodecForVersions(serializer, decoder, schema.GroupVersions(gvs), nil)
+
+ }
+ return codecs.LegacyCodec(gvs...)
+}
+
+func init() {
+ var err error
+ if apiMediaType := os.Getenv("KUBE_TEST_API_TYPE"); len(apiMediaType) > 0 {
+ testCodecMediaType, _, err = mime.ParseMediaType(apiMediaType)
+ if err != nil {
+ panic(err)
+ }
+ }
+
+ if storageMediaType := os.Getenv("KUBE_TEST_API_STORAGE_TYPE"); len(storageMediaType) > 0 {
+ testStorageCodecMediaType, _, err = mime.ParseMediaType(storageMediaType)
+ if err != nil {
+ panic(err)
+ }
+ }
+}
+
+// InstallOrDieFunc mirrors install functions that require success
+type InstallOrDieFunc func(scheme *runtime.Scheme)
+
+// SchemeForInstallOrDie builds a simple test scheme and codecfactory pair for easy unit testing from higher level install methods
+func SchemeForInstallOrDie(installFns ...InstallOrDieFunc) (*runtime.Scheme, runtimeserializer.CodecFactory) {
+ scheme := runtime.NewScheme()
+ codecFactory := runtimeserializer.NewCodecFactory(scheme)
+ for _, installFn := range installFns {
+ installFn(scheme)
+ }
+
+ return scheme, codecFactory
+}
+
+// InstallFunc mirrors install functions that can return an error
+type InstallFunc func(scheme *runtime.Scheme) error
+
+// SchemeForOrDie builds a simple test scheme and codecfactory pair for easy unit testing from the bare registration methods.
+func SchemeForOrDie(installFns ...InstallFunc) (*runtime.Scheme, runtimeserializer.CodecFactory) {
+ scheme := runtime.NewScheme()
+ codecFactory := runtimeserializer.NewCodecFactory(scheme)
+ for _, installFn := range installFns {
+ if err := installFn(scheme); err != nil {
+ panic(err)
+ }
+ }
+
+ return scheme, codecFactory
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/apitesting/fuzzer/fuzzer.go b/vendor/k8s.io/apimachinery/pkg/api/apitesting/fuzzer/fuzzer.go
new file mode 100644
index 00000000000..f528e9f92de
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/apitesting/fuzzer/fuzzer.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fuzzer
+
+import (
+ "math/rand"
+
+ "github.com/google/gofuzz"
+
+ runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
+)
+
+// FuzzerFuncs returns a list of func(*SomeType, c fuzz.Continue) functions.
+type FuzzerFuncs func(codecs runtimeserializer.CodecFactory) []interface{}
+
+// FuzzerFor can randomly populate api objects that are destined for version.
+func FuzzerFor(funcs FuzzerFuncs, src rand.Source, codecs runtimeserializer.CodecFactory) *fuzz.Fuzzer {
+ f := fuzz.New().NilChance(.5).NumElements(0, 1)
+ if src != nil {
+ f.RandSource(src)
+ }
+ f.Funcs(funcs(codecs)...)
+ return f
+}
+
+// MergeFuzzerFuncs will merge the given funcLists, overriding early funcs with later ones if there first
+// argument has the same type.
+func MergeFuzzerFuncs(funcs ...FuzzerFuncs) FuzzerFuncs {
+ return FuzzerFuncs(func(codecs runtimeserializer.CodecFactory) []interface{} {
+ result := []interface{}{}
+ for _, f := range funcs {
+ if f != nil {
+ result = append(result, f(codecs)...)
+ }
+ }
+ return result
+ })
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/apitesting/fuzzer/valuefuzz.go b/vendor/k8s.io/apimachinery/pkg/api/apitesting/fuzzer/valuefuzz.go
new file mode 100644
index 00000000000..cd71c517da3
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/apitesting/fuzzer/valuefuzz.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fuzzer
+
+import (
+ "reflect"
+)
+
+// ValueFuzz recursively changes all basic type values in an object. Any kind of references will not
+// be touch, i.e. the addresses of slices, maps, pointers will stay unchanged.
+func ValueFuzz(obj interface{}) {
+ valueFuzz(reflect.ValueOf(obj))
+}
+
+func valueFuzz(obj reflect.Value) {
+ switch obj.Kind() {
+ case reflect.Array:
+ for i := 0; i < obj.Len(); i++ {
+ valueFuzz(obj.Index(i))
+ }
+ case reflect.Slice:
+ if obj.IsNil() {
+ // TODO: set non-nil value
+ } else {
+ for i := 0; i < obj.Len(); i++ {
+ valueFuzz(obj.Index(i))
+ }
+ }
+ case reflect.Interface, reflect.Ptr:
+ if obj.IsNil() {
+ // TODO: set non-nil value
+ } else {
+ valueFuzz(obj.Elem())
+ }
+ case reflect.Struct:
+ for i, n := 0, obj.NumField(); i < n; i++ {
+ valueFuzz(obj.Field(i))
+ }
+ case reflect.Map:
+ if obj.IsNil() {
+ // TODO: set non-nil value
+ } else {
+ for _, k := range obj.MapKeys() {
+ // map values are not addressable. We need a copy.
+ v := obj.MapIndex(k)
+ copy := reflect.New(v.Type())
+ copy.Elem().Set(v)
+ valueFuzz(copy.Elem())
+ obj.SetMapIndex(k, copy.Elem())
+ }
+ // TODO: set some new value
+ }
+ case reflect.Func: // ignore, we don't have function types in our API
+ default:
+ if !obj.CanSet() {
+ return
+ }
+ switch obj.Kind() {
+ case reflect.String:
+ obj.SetString(obj.String() + "x")
+ case reflect.Bool:
+ obj.SetBool(!obj.Bool())
+ case reflect.Float32, reflect.Float64:
+ obj.SetFloat(obj.Float()*2.0 + 1.0)
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ obj.SetInt(obj.Int() + 1)
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ obj.SetUint(obj.Uint() + 1)
+ default:
+ }
+ }
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/fuzzer/fuzzer.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/fuzzer/fuzzer.go
new file mode 100644
index 00000000000..6bc1356ed46
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/fuzzer/fuzzer.go
@@ -0,0 +1,337 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fuzzer
+
+import (
+ "fmt"
+ "math/rand"
+ "sort"
+ "strconv"
+ "strings"
+
+ fuzz "github.com/google/gofuzz"
+
+ apitesting "k8s.io/apimachinery/pkg/api/apitesting"
+ "k8s.io/apimachinery/pkg/api/apitesting/fuzzer"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
+ "k8s.io/apimachinery/pkg/runtime"
+ runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ "k8s.io/apimachinery/pkg/types"
+)
+
+func genericFuzzerFuncs(codecs runtimeserializer.CodecFactory) []interface{} {
+ return []interface{}{
+ func(q *resource.Quantity, c fuzz.Continue) {
+ *q = *resource.NewQuantity(c.Int63n(1000), resource.DecimalExponent)
+ },
+ func(j *int, c fuzz.Continue) {
+ *j = int(c.Int31())
+ },
+ func(j **int, c fuzz.Continue) {
+ if c.RandBool() {
+ i := int(c.Int31())
+ *j = &i
+ } else {
+ *j = nil
+ }
+ },
+ func(j *runtime.TypeMeta, c fuzz.Continue) {
+ // We have to customize the randomization of TypeMetas because their
+ // APIVersion and Kind must remain blank in memory.
+ j.APIVersion = ""
+ j.Kind = ""
+ },
+ func(j *runtime.Object, c fuzz.Continue) {
+ // TODO: uncomment when round trip starts from a versioned object
+ if true { //c.RandBool() {
+ *j = &runtime.Unknown{
+ // We do not set TypeMeta here because it is not carried through a round trip
+ Raw: []byte(`{"apiVersion":"unknown.group/unknown","kind":"Something","someKey":"someValue"}`),
+ ContentType: runtime.ContentTypeJSON,
+ }
+ } else {
+ types := []runtime.Object{&metav1.Status{}, &metav1.APIGroup{}}
+ t := types[c.Rand.Intn(len(types))]
+ c.Fuzz(t)
+ *j = t
+ }
+ },
+ func(r *runtime.RawExtension, c fuzz.Continue) {
+ // Pick an arbitrary type and fuzz it
+ types := []runtime.Object{&metav1.Status{}, &metav1.APIGroup{}}
+ obj := types[c.Rand.Intn(len(types))]
+ c.Fuzz(obj)
+
+ // Find a codec for converting the object to raw bytes. This is necessary for the
+ // api version and kind to be correctly set be serialization.
+ var codec = apitesting.TestCodec(codecs, metav1.SchemeGroupVersion)
+
+ // Convert the object to raw bytes
+ bytes, err := runtime.Encode(codec, obj)
+ if err != nil {
+ panic(fmt.Sprintf("Failed to encode object: %v", err))
+ }
+
+ // strip trailing newlines which do not survive roundtrips
+ for len(bytes) >= 1 && bytes[len(bytes)-1] == 10 {
+ bytes = bytes[:len(bytes)-1]
+ }
+
+ // Set the bytes field on the RawExtension
+ r.Raw = bytes
+ },
+ }
+}
+
+// taken from gofuzz internals for RandString
+type charRange struct {
+ first, last rune
+}
+
+func (c *charRange) choose(r *rand.Rand) rune {
+ count := int64(c.last - c.first + 1)
+ ch := c.first + rune(r.Int63n(count))
+
+ return ch
+}
+
+// randomLabelPart produces a valid random label value or name-part
+// of a label key.
+func randomLabelPart(c fuzz.Continue, canBeEmpty bool) string {
+ validStartEnd := []charRange{{'0', '9'}, {'a', 'z'}, {'A', 'Z'}}
+ validMiddle := []charRange{{'0', '9'}, {'a', 'z'}, {'A', 'Z'},
+ {'.', '.'}, {'-', '-'}, {'_', '_'}}
+
+ partLen := c.Rand.Intn(64) // len is [0, 63]
+ if !canBeEmpty {
+ partLen = c.Rand.Intn(63) + 1 // len is [1, 63]
+ }
+
+ runes := make([]rune, partLen)
+ if partLen == 0 {
+ return string(runes)
+ }
+
+ runes[0] = validStartEnd[c.Rand.Intn(len(validStartEnd))].choose(c.Rand)
+ for i := range runes[1:] {
+ runes[i+1] = validMiddle[c.Rand.Intn(len(validMiddle))].choose(c.Rand)
+ }
+ runes[len(runes)-1] = validStartEnd[c.Rand.Intn(len(validStartEnd))].choose(c.Rand)
+
+ return string(runes)
+}
+
+func randomDNSLabel(c fuzz.Continue) string {
+ validStartEnd := []charRange{{'0', '9'}, {'a', 'z'}}
+ validMiddle := []charRange{{'0', '9'}, {'a', 'z'}, {'-', '-'}}
+
+ partLen := c.Rand.Intn(63) + 1 // len is [1, 63]
+ runes := make([]rune, partLen)
+
+ runes[0] = validStartEnd[c.Rand.Intn(len(validStartEnd))].choose(c.Rand)
+ for i := range runes[1:] {
+ runes[i+1] = validMiddle[c.Rand.Intn(len(validMiddle))].choose(c.Rand)
+ }
+ runes[len(runes)-1] = validStartEnd[c.Rand.Intn(len(validStartEnd))].choose(c.Rand)
+
+ return string(runes)
+}
+
+func randomLabelKey(c fuzz.Continue) string {
+ namePart := randomLabelPart(c, false)
+ prefixPart := ""
+
+ usePrefix := c.RandBool()
+ if usePrefix {
+ // we can fit, with dots, at most 3 labels in the 253 allotted characters
+ prefixPartsLen := c.Rand.Intn(2) + 1
+ prefixParts := make([]string, prefixPartsLen)
+ for i := range prefixParts {
+ prefixParts[i] = randomDNSLabel(c)
+ }
+ prefixPart = strings.Join(prefixParts, ".") + "/"
+ }
+
+ return prefixPart + namePart
+}
+
+func v1FuzzerFuncs(codecs runtimeserializer.CodecFactory) []interface{} {
+
+ return []interface{}{
+ func(j *metav1.TypeMeta, c fuzz.Continue) {
+ // We have to customize the randomization of TypeMetas because their
+ // APIVersion and Kind must remain blank in memory.
+ j.APIVersion = ""
+ j.Kind = ""
+ },
+ func(j *metav1.ObjectMeta, c fuzz.Continue) {
+ c.FuzzNoCustom(j)
+
+ j.ResourceVersion = strconv.FormatUint(c.RandUint64(), 10)
+ j.UID = types.UID(c.RandString())
+
+ // Fuzzing sec and nsec in a smaller range (uint32 instead of int64),
+ // so that the result Unix time is a valid date and can be parsed into RFC3339 format.
+ var sec, nsec uint32
+ c.Fuzz(&sec)
+ c.Fuzz(&nsec)
+ j.CreationTimestamp = metav1.Unix(int64(sec), int64(nsec)).Rfc3339Copy()
+
+ if j.DeletionTimestamp != nil {
+ c.Fuzz(&sec)
+ c.Fuzz(&nsec)
+ t := metav1.Unix(int64(sec), int64(nsec)).Rfc3339Copy()
+ j.DeletionTimestamp = &t
+ }
+
+ if len(j.Labels) == 0 {
+ j.Labels = nil
+ } else {
+ delete(j.Labels, "")
+ }
+ if len(j.Annotations) == 0 {
+ j.Annotations = nil
+ } else {
+ delete(j.Annotations, "")
+ }
+ if len(j.OwnerReferences) == 0 {
+ j.OwnerReferences = nil
+ }
+ if len(j.Finalizers) == 0 {
+ j.Finalizers = nil
+ }
+ },
+ func(j *metav1.ResourceVersionMatch, c fuzz.Continue) {
+ matches := []metav1.ResourceVersionMatch{"", metav1.ResourceVersionMatchExact, metav1.ResourceVersionMatchNotOlderThan}
+ *j = matches[c.Rand.Intn(len(matches))]
+ },
+ func(j *metav1.ListMeta, c fuzz.Continue) {
+ j.ResourceVersion = strconv.FormatUint(c.RandUint64(), 10)
+ j.SelfLink = c.RandString()
+ },
+ func(j *metav1.LabelSelector, c fuzz.Continue) {
+ c.FuzzNoCustom(j)
+ // we can't have an entirely empty selector, so force
+ // use of MatchExpression if necessary
+ if len(j.MatchLabels) == 0 && len(j.MatchExpressions) == 0 {
+ j.MatchExpressions = make([]metav1.LabelSelectorRequirement, c.Rand.Intn(2)+1)
+ }
+
+ if j.MatchLabels != nil {
+ fuzzedMatchLabels := make(map[string]string, len(j.MatchLabels))
+ for i := 0; i < len(j.MatchLabels); i++ {
+ fuzzedMatchLabels[randomLabelKey(c)] = randomLabelPart(c, true)
+ }
+ j.MatchLabels = fuzzedMatchLabels
+ }
+
+ validOperators := []metav1.LabelSelectorOperator{
+ metav1.LabelSelectorOpIn,
+ metav1.LabelSelectorOpNotIn,
+ metav1.LabelSelectorOpExists,
+ metav1.LabelSelectorOpDoesNotExist,
+ }
+
+ if j.MatchExpressions != nil {
+ // NB: the label selector parser code sorts match expressions by key, and sorts the values,
+ // so we need to make sure ours are sorted as well here to preserve round-trip comparison.
+ // In practice, not sorting doesn't hurt anything...
+
+ for i := range j.MatchExpressions {
+ req := metav1.LabelSelectorRequirement{}
+ c.Fuzz(&req)
+ req.Key = randomLabelKey(c)
+ req.Operator = validOperators[c.Rand.Intn(len(validOperators))]
+ if req.Operator == metav1.LabelSelectorOpIn || req.Operator == metav1.LabelSelectorOpNotIn {
+ if len(req.Values) == 0 {
+ // we must have some values here, so randomly choose a short length
+ req.Values = make([]string, c.Rand.Intn(2)+1)
+ }
+ for i := range req.Values {
+ req.Values[i] = randomLabelPart(c, true)
+ }
+ sort.Strings(req.Values)
+ } else {
+ req.Values = nil
+ }
+ j.MatchExpressions[i] = req
+ }
+
+ sort.Slice(j.MatchExpressions, func(a, b int) bool { return j.MatchExpressions[a].Key < j.MatchExpressions[b].Key })
+ }
+ },
+ func(j *metav1.ManagedFieldsEntry, c fuzz.Continue) {
+ c.FuzzNoCustom(j)
+ j.FieldsV1 = nil
+ },
+ }
+}
+
+func v1beta1FuzzerFuncs(codecs runtimeserializer.CodecFactory) []interface{} {
+ return []interface{}{
+ func(r *metav1beta1.TableOptions, c fuzz.Continue) {
+ c.FuzzNoCustom(r)
+ // NoHeaders is not serialized to the wire but is allowed within the versioned
+ // type because we don't use meta internal types in the client and API server.
+ r.NoHeaders = false
+ },
+ func(r *metav1beta1.TableRow, c fuzz.Continue) {
+ c.Fuzz(&r.Object)
+ c.Fuzz(&r.Conditions)
+ if len(r.Conditions) == 0 {
+ r.Conditions = nil
+ }
+ n := c.Intn(10)
+ if n > 0 {
+ r.Cells = make([]interface{}, n)
+ }
+ for i := range r.Cells {
+ t := c.Intn(6)
+ switch t {
+ case 0:
+ r.Cells[i] = c.RandString()
+ case 1:
+ r.Cells[i] = c.Int63()
+ case 2:
+ r.Cells[i] = c.RandBool()
+ case 3:
+ x := map[string]interface{}{}
+ for j := c.Intn(10) + 1; j >= 0; j-- {
+ x[c.RandString()] = c.RandString()
+ }
+ r.Cells[i] = x
+ case 4:
+ x := make([]interface{}, c.Intn(10))
+ for i := range x {
+ x[i] = c.Int63()
+ }
+ r.Cells[i] = x
+ default:
+ r.Cells[i] = nil
+ }
+ }
+ },
+ }
+}
+
+var Funcs = fuzzer.MergeFuzzerFuncs(
+ genericFuzzerFuncs,
+ v1FuzzerFuncs,
+ v1beta1FuzzerFuncs,
+)
diff --git a/vendor/k8s.io/kubectl/LICENSE b/vendor/k8s.io/kubectl/LICENSE
new file mode 100644
index 00000000000..8dada3edaf5
--- /dev/null
+++ b/vendor/k8s.io/kubectl/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/k8s.io/kubectl/pkg/scheme/install.go b/vendor/k8s.io/kubectl/pkg/scheme/install.go
new file mode 100644
index 00000000000..0aa436eeb9f
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/scheme/install.go
@@ -0,0 +1,82 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scheme
+
+import (
+ admissionv1 "k8s.io/api/admission/v1"
+ admissionv1beta1 "k8s.io/api/admission/v1beta1"
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+ admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ appsv1 "k8s.io/api/apps/v1"
+ appsv1beta1 "k8s.io/api/apps/v1beta1"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
+ authenticationv1 "k8s.io/api/authentication/v1"
+ authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
+ authorizationv1 "k8s.io/api/authorization/v1"
+ authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
+ autoscalingv1 "k8s.io/api/autoscaling/v1"
+ autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
+ batchv1 "k8s.io/api/batch/v1"
+ batchv1beta1 "k8s.io/api/batch/v1beta1"
+ certificatesv1 "k8s.io/api/certificates/v1"
+ certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
+ corev1 "k8s.io/api/core/v1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+ imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1"
+ networkingv1 "k8s.io/api/networking/v1"
+ policyv1 "k8s.io/api/policy/v1"
+ policyv1beta1 "k8s.io/api/policy/v1beta1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
+ rbacv1beta1 "k8s.io/api/rbac/v1beta1"
+ schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
+ storagev1 "k8s.io/api/storage/v1"
+ storagev1beta1 "k8s.io/api/storage/v1beta1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/client-go/kubernetes/scheme"
+)
+
+// Register all groups in the kubectl's registry, but no componentconfig group since it's not in k8s.io/api
+// The code in this file mostly duplicate the install under k8s.io/kubernetes/pkg/api and k8s.io/kubernetes/pkg/apis,
+// but does NOT register the internal types.
+// NOTE(review): vendored upstream kubectl code — comments only, no token
+// changes, so the vendor copy stays aligned with upstream.
+func init() {
+	// Register external types for Scheme
+	metav1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+	utilruntime.Must(metav1beta1.AddMetaToScheme(Scheme))
+	utilruntime.Must(metav1.AddMetaToScheme(Scheme))
+	utilruntime.Must(scheme.AddToScheme(Scheme))
+
+	// Establish per-group version priorities; versions listed earlier in a
+	// call take precedence when the scheme picks an encoding version.
+	utilruntime.Must(Scheme.SetVersionPriority(corev1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(admissionv1beta1.SchemeGroupVersion, admissionv1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(admissionregistrationv1beta1.SchemeGroupVersion, admissionregistrationv1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(appsv1beta1.SchemeGroupVersion, appsv1beta2.SchemeGroupVersion, appsv1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(authenticationv1.SchemeGroupVersion, authenticationv1beta1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(authorizationv1.SchemeGroupVersion, authorizationv1beta1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(autoscalingv1.SchemeGroupVersion, autoscalingv2beta1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(batchv1.SchemeGroupVersion, batchv1beta1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(certificatesv1.SchemeGroupVersion, certificatesv1beta1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(extensionsv1beta1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(imagepolicyv1alpha1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(networkingv1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(policyv1beta1.SchemeGroupVersion, policyv1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(rbacv1.SchemeGroupVersion, rbacv1beta1.SchemeGroupVersion, rbacv1alpha1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(schedulingv1alpha1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(storagev1.SchemeGroupVersion, storagev1beta1.SchemeGroupVersion))
+}
diff --git a/vendor/k8s.io/kubectl/pkg/scheme/scheme.go b/vendor/k8s.io/kubectl/pkg/scheme/scheme.go
new file mode 100644
index 00000000000..d1d7847b8f5
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/scheme/scheme.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scheme
+
+import (
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+)
+
+// All kubectl code should eventually switch to use this Registry and Scheme instead of the global ones.
+
+// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered.
+// Types are registered by the init function in install.go in this package.
+var Scheme = runtime.NewScheme()
+
+// Codecs provides access to encoding and decoding for the scheme.
+var Codecs = serializer.NewCodecFactory(Scheme)
+
+// ParameterCodec handles versioning of objects that are converted to query parameters.
+var ParameterCodec = runtime.NewParameterCodec(Scheme)
+
+// DefaultJSONEncoder returns a default encoder for our scheme.
+// It encodes registered types via the legacy codec over all prioritized
+// group versions, and falls back to plain JSON for unstructured objects.
+func DefaultJSONEncoder() runtime.Encoder {
+	return unstructured.NewJSONFallbackEncoder(Codecs.LegacyCodec(Scheme.PrioritizedVersionsAllGroups()...))
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 1a9e45a1161..e1bc0311c31 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -731,6 +731,7 @@ k8s.io/api/extensions/v1beta1
k8s.io/api/flowcontrol/v1alpha1
k8s.io/api/flowcontrol/v1beta1
k8s.io/api/flowcontrol/v1beta2
+k8s.io/api/imagepolicy/v1alpha1
k8s.io/api/networking/v1
k8s.io/api/networking/v1beta1
k8s.io/api/node/v1
@@ -753,10 +754,13 @@ k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
# k8s.io/apimachinery v0.24.2
## explicit; go 1.16
+k8s.io/apimachinery/pkg/api/apitesting
+k8s.io/apimachinery/pkg/api/apitesting/fuzzer
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/meta
k8s.io/apimachinery/pkg/api/resource
+k8s.io/apimachinery/pkg/apis/meta/fuzzer
k8s.io/apimachinery/pkg/apis/meta/internalversion
k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme
k8s.io/apimachinery/pkg/apis/meta/v1
@@ -1074,6 +1078,9 @@ k8s.io/kube-openapi/pkg/validation/spec
# k8s.io/kube-scheduler v0.23.1
## explicit; go 1.16
k8s.io/kube-scheduler/config/v1beta2
+# k8s.io/kubectl v0.23.0-alpha.4
+## explicit; go 1.16
+k8s.io/kubectl/pkg/scheme
# k8s.io/pod-security-admission v0.23.5
## explicit; go 1.16
k8s.io/pod-security-admission/admission/api