diff --git a/go.mod b/go.mod index b6f04b9af1c1..65ff990a2aca 100644 --- a/go.mod +++ b/go.mod @@ -61,10 +61,10 @@ require ( github.com/opencontainers/go-digest v1.0.0 github.com/openshift-eng/openshift-tests-extension v0.0.0-20251218142942-7ecc8801b9df github.com/openshift-kni/commatrix v0.0.5-0.20251111204857-e5a931eff73f - github.com/openshift/api v0.0.0-20251015095338-264e80a2b6e7 + github.com/openshift/api v0.0.0-20260116192047-6fb7fdae95fd github.com/openshift/apiserver-library-go v0.0.0-20251015164739-79d04067059d github.com/openshift/build-machinery-go v0.0.0-20250530140348-dc5b2804eeee - github.com/openshift/client-go v0.0.0-20251015124057-db0dee36e235 + github.com/openshift/client-go v0.0.0-20260108185524-48f4ccfc4e13 github.com/openshift/library-go v0.0.0-20251015151611-6fc7a74b67c5 github.com/operator-framework/api v0.36.0 github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20250118001652-a8b9c3c31417 diff --git a/go.sum b/go.sum index fd0f94e7bf83..85085d216f15 100644 --- a/go.sum +++ b/go.sum @@ -828,14 +828,14 @@ github.com/openshift-eng/openshift-tests-extension v0.0.0-20251218142942-7ecc880 github.com/openshift-eng/openshift-tests-extension v0.0.0-20251218142942-7ecc8801b9df/go.mod h1:6gkP5f2HL0meusT0Aim8icAspcD1cG055xxBZ9yC68M= github.com/openshift-kni/commatrix v0.0.5-0.20251111204857-e5a931eff73f h1:E72Zoc+JImPehBrXkgaCbIDbSFuItvyX6RCaZ0FQE5k= github.com/openshift-kni/commatrix v0.0.5-0.20251111204857-e5a931eff73f/go.mod h1:cDVdp0eda7EHE6tLuSeo4IqPWdAX/KJK+ogBirIGtsI= -github.com/openshift/api v0.0.0-20251015095338-264e80a2b6e7 h1:Ot2fbEEPmF3WlPQkyEW/bUCV38GMugH/UmZvxpWceNc= -github.com/openshift/api v0.0.0-20251015095338-264e80a2b6e7/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= +github.com/openshift/api v0.0.0-20260116192047-6fb7fdae95fd h1:Hpjq/55Qb0Gy65RewTSWWlyxpSovaRF86EPjYQkdlRU= +github.com/openshift/api v0.0.0-20260116192047-6fb7fdae95fd/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= github.com/openshift/apiserver-library-go v0.0.0-20251015164739-79d04067059d h1:Mfya3RxHWvidOrKyHj3bmFn5x2B89DLZIvDAhwm+C2s= github.com/openshift/apiserver-library-go v0.0.0-20251015164739-79d04067059d/go.mod h1:zm2/rIUp0p83pz0/1kkSoKTqhTr3uUKSKQ9fP7Z3g7Y= github.com/openshift/build-machinery-go v0.0.0-20250530140348-dc5b2804eeee h1:+Sp5GGnjHDhT/a/nQ1xdp43UscBMr7G5wxsYotyhzJ4= github.com/openshift/build-machinery-go v0.0.0-20250530140348-dc5b2804eeee/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE= -github.com/openshift/client-go v0.0.0-20251015124057-db0dee36e235 h1:9JBeIXmnHlpXTQPi7LPmu1jdxznBhAE7bb1K+3D8gxY= -github.com/openshift/client-go v0.0.0-20251015124057-db0dee36e235/go.mod h1:L49W6pfrZkfOE5iC1PqEkuLkXG4W0BX4w8b+L2Bv7fM= +github.com/openshift/client-go v0.0.0-20260108185524-48f4ccfc4e13 h1:6rd4zSo2UaWQcAPZfHK9yzKVqH0BnMv1hqMzqXZyTds= +github.com/openshift/client-go v0.0.0-20260108185524-48f4ccfc4e13/go.mod h1:YvOmPmV7wcJxpfhTDuFqqs2Xpb3M3ovsM6Qs/i2ptq4= github.com/openshift/kubernetes v1.30.1-0.20251017123720-96593f323733 h1:Mpab1CmJPLVWGB0CNGoWnup/NScvv55MVPe94c8JgUk= github.com/openshift/kubernetes v1.30.1-0.20251017123720-96593f323733/go.mod h1:w3+IfrXNp5RosdDXg3LB55yijJqR/FwouvVntYHQf0o= github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20251017123720-96593f323733 h1:42lm41QwjG8JoSicx4FHcuIG2kxHxlUnz6c+ftg2e0E= diff --git a/test/e2e/upgrade/monitor.go b/test/e2e/upgrade/monitor.go index 9bf41b1d6a21..7fea639c3e17 100644 --- a/test/e2e/upgrade/monitor.go +++ b/test/e2e/upgrade/monitor.go @@ -38,7 +38,10 @@ func (m *versionMonitor) 
Check(initialGeneration int64, desired configv1.Update) m.lastCV = cv if cv.Status.ObservedGeneration > initialGeneration { - if cv.Spec.DesiredUpdate == nil || desired != *cv.Spec.DesiredUpdate { + if cv.Spec.DesiredUpdate == nil || + desired.Architecture != cv.Spec.DesiredUpdate.Architecture || + desired.Version != cv.Spec.DesiredUpdate.Version || + desired.Image != cv.Spec.DesiredUpdate.Image { return nil, "", fmt.Errorf("desired cluster version was changed by someone else: %v", cv.Spec.DesiredUpdate) } } diff --git a/test/extended/imagepolicy/imagepolicy.go b/test/extended/imagepolicy/imagepolicy.go index 4ca0f14fc7d1..8353de7cfede 100644 --- a/test/extended/imagepolicy/imagepolicy.go +++ b/test/extended/imagepolicy/imagepolicy.go @@ -337,10 +337,10 @@ func generateClusterImagePolicies() map[string]configv1.ClusterImagePolicy { ObjectMeta: metav1.ObjectMeta{Name: invalidPublicKeyClusterImagePolicyName}, Spec: configv1.ClusterImagePolicySpec{ Scopes: []configv1.ImageScope{testSignedPolicyScope}, - Policy: configv1.Policy{ + Policy: configv1.ImageSigstoreVerificationPolicy{ RootOfTrust: configv1.PolicyRootOfTrust{ PolicyType: configv1.PublicKeyRootOfTrust, - PublicKey: &configv1.PublicKey{ + PublicKey: &configv1.ImagePolicyPublicKeyRootOfTrust{ KeyData: []byte(`-----BEGIN PUBLIC KEY----- MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEUoFUoYAReKXGy59xe5SQOk2aJ8o+ 2/Yz5Y8GcN3zFE6ViIvkGnHhMlAhXaX/bo0M9R62s0/6q++T7uwNFuOg8A== @@ -361,10 +361,10 @@ MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEUoFUoYAReKXGy59xe5SQOk2aJ8o+ ObjectMeta: metav1.ObjectMeta{Name: publiKeyRekorClusterImagePolicyName}, Spec: configv1.ClusterImagePolicySpec{ Scopes: []configv1.ImageScope{testSignedPolicyScope}, - Policy: configv1.Policy{ + Policy: configv1.ImageSigstoreVerificationPolicy{ RootOfTrust: configv1.PolicyRootOfTrust{ PolicyType: configv1.PublicKeyRootOfTrust, - PublicKey: &configv1.PublicKey{ + PublicKey: &configv1.ImagePolicyPublicKeyRootOfTrust{ KeyData: []byte(`-----BEGIN PUBLIC KEY----- MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEKvZH0CXTk8XQkETuxkzkl3Bi4ms5 60l1/qUU0fRATNSCVORCog5PDFo5z0ZLeblWgwbn4c8xpvuo9jQFwpeOsg== @@ -385,10 +385,10 @@ MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEKvZH0CXTk8XQkETuxkzkl3Bi4ms5 ObjectMeta: metav1.ObjectMeta{Name: invalidPKIClusterImagePolicyName}, Spec: configv1.ClusterImagePolicySpec{ Scopes: []configv1.ImageScope{testPKISignedPolicyScope}, - Policy: configv1.Policy{ + Policy: configv1.ImageSigstoreVerificationPolicy{ RootOfTrust: configv1.PolicyRootOfTrust{ PolicyType: configv1.PKIRootOfTrust, - PKI: &configv1.PKI{ + PKI: &configv1.ImagePolicyPKIRootOfTrust{ CertificateAuthorityRootsData: []byte(`-----BEGIN CERTIFICATE----- MIICYDCCAgagAwIBAgIUTq5IQKTGqI9XDqGzdGzm8mI43qkwCgYIKoZIzj0EAwIw fDELMAkGA1UEBhMCLS0xDjAMBgNVBAgTBVNUQVRFMREwDwYDVQQHEwhMT0NBTElU @@ -423,10 +423,10 @@ TAIhALlR4yZRRYv2iaVPdgaptAI0LoDAtEUiO8Rb9FWJzpAN ObjectMeta: metav1.ObjectMeta{Name: pkiClusterImagePolicyName}, Spec: configv1.ClusterImagePolicySpec{ Scopes: []configv1.ImageScope{testPKISignedPolicyScope}, - Policy: configv1.Policy{ + Policy: configv1.ImageSigstoreVerificationPolicy{ RootOfTrust: configv1.PolicyRootOfTrust{ PolicyType: configv1.PKIRootOfTrust, - PKI: &configv1.PKI{ + PKI: &configv1.ImagePolicyPKIRootOfTrust{ CertificateAuthorityRootsData: []byte(`-----BEGIN CERTIFICATE----- MIIFvzCCA6egAwIBAgIUZnH3ITyYQMAp6lvNYc0fjRzzuBcwDQYJKoZIhvcNAQEL BQAwbjELMAkGA1UEBhMCRVMxETAPBgNVBAcMCFZhbGVuY2lhMQswCQYDVQQKDAJJ @@ -479,10 +479,10 @@ L8ITFP+Nw9Meiw4etw59CTAPCc7l4Zvwr1K2ZTBmVGxrqdasiqpI0utG69aItsPi ObjectMeta: 
metav1.ObjectMeta{Name: invalidEmailPKIClusterImagePolicyName}, Spec: configv1.ClusterImagePolicySpec{ Scopes: []configv1.ImageScope{testPKISignedPolicyScope}, - Policy: configv1.Policy{ + Policy: configv1.ImageSigstoreVerificationPolicy{ RootOfTrust: configv1.PolicyRootOfTrust{ PolicyType: configv1.PKIRootOfTrust, - PKI: &configv1.PKI{ + PKI: &configv1.ImagePolicyPKIRootOfTrust{ CertificateAuthorityRootsData: []byte(`-----BEGIN CERTIFICATE----- MIIFvzCCA6egAwIBAgIUZnH3ITyYQMAp6lvNYc0fjRzzuBcwDQYJKoZIhvcNAQEL BQAwbjELMAkGA1UEBhMCRVMxETAPBgNVBAcMCFZhbGVuY2lhMQswCQYDVQQKDAJJ @@ -541,10 +541,10 @@ func generateImagePolicies() map[string]configv1.ImagePolicy { ObjectMeta: metav1.ObjectMeta{Name: invalidPublicKeyImagePolicyName}, Spec: configv1.ImagePolicySpec{ Scopes: []configv1.ImageScope{testSignedPolicyScope}, - Policy: configv1.Policy{ + Policy: configv1.ImageSigstoreVerificationPolicy{ RootOfTrust: configv1.PolicyRootOfTrust{ PolicyType: configv1.PublicKeyRootOfTrust, - PublicKey: &configv1.PublicKey{ + PublicKey: &configv1.ImagePolicyPublicKeyRootOfTrust{ KeyData: []byte(`-----BEGIN PUBLIC KEY----- MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEUoFUoYAReKXGy59xe5SQOk2aJ8o+ 2/Yz5Y8GcN3zFE6ViIvkGnHhMlAhXaX/bo0M9R62s0/6q++T7uwNFuOg8A== @@ -565,10 +565,10 @@ MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEUoFUoYAReKXGy59xe5SQOk2aJ8o+ ObjectMeta: metav1.ObjectMeta{Name: publiKeyRekorImagePolicyName}, Spec: configv1.ImagePolicySpec{ Scopes: []configv1.ImageScope{testSignedPolicyScope}, - Policy: configv1.Policy{ + Policy: configv1.ImageSigstoreVerificationPolicy{ RootOfTrust: configv1.PolicyRootOfTrust{ PolicyType: configv1.PublicKeyRootOfTrust, - PublicKey: &configv1.PublicKey{ + PublicKey: &configv1.ImagePolicyPublicKeyRootOfTrust{ KeyData: []byte(`-----BEGIN PUBLIC KEY----- MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEKvZH0CXTk8XQkETuxkzkl3Bi4ms5 60l1/qUU0fRATNSCVORCog5PDFo5z0ZLeblWgwbn4c8xpvuo9jQFwpeOsg== @@ -589,10 +589,10 @@ MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEKvZH0CXTk8XQkETuxkzkl3Bi4ms5 ObjectMeta: metav1.ObjectMeta{Name: invalidPKIImagePolicyName}, Spec: configv1.ImagePolicySpec{ Scopes: []configv1.ImageScope{testPKISignedPolicyScope}, - Policy: configv1.Policy{ + Policy: configv1.ImageSigstoreVerificationPolicy{ RootOfTrust: configv1.PolicyRootOfTrust{ PolicyType: configv1.PKIRootOfTrust, - PKI: &configv1.PKI{ + PKI: &configv1.ImagePolicyPKIRootOfTrust{ CertificateAuthorityRootsData: []byte(`-----BEGIN CERTIFICATE----- MIICYDCCAgagAwIBAgIUTq5IQKTGqI9XDqGzdGzm8mI43qkwCgYIKoZIzj0EAwIw fDELMAkGA1UEBhMCLS0xDjAMBgNVBAgTBVNUQVRFMREwDwYDVQQHEwhMT0NBTElU @@ -627,10 +627,10 @@ TAIhALlR4yZRRYv2iaVPdgaptAI0LoDAtEUiO8Rb9FWJzpAN ObjectMeta: metav1.ObjectMeta{Name: pkiImagePolicyName}, Spec: configv1.ImagePolicySpec{ Scopes: []configv1.ImageScope{testPKISignedPolicyScope}, - Policy: configv1.Policy{ + Policy: configv1.ImageSigstoreVerificationPolicy{ RootOfTrust: configv1.PolicyRootOfTrust{ PolicyType: configv1.PKIRootOfTrust, - PKI: &configv1.PKI{ + PKI: &configv1.ImagePolicyPKIRootOfTrust{ CertificateAuthorityRootsData: []byte(`-----BEGIN CERTIFICATE----- MIIFvzCCA6egAwIBAgIUZnH3ITyYQMAp6lvNYc0fjRzzuBcwDQYJKoZIhvcNAQEL BQAwbjELMAkGA1UEBhMCRVMxETAPBgNVBAcMCFZhbGVuY2lhMQswCQYDVQQKDAJJ diff --git a/test/extended/util/configv1shim.go b/test/extended/util/configv1shim.go index 6cedd8c19eaf..12320f2d9425 100644 --- a/test/extended/util/configv1shim.go +++ b/test/extended/util/configv1shim.go @@ -294,8 +294,8 @@ type ConfigV1ClientShim struct { } func (c *ConfigV1ClientShim) InsightsDataGathers() configv1.InsightsDataGatherInterface 
{ - if c.v1Kinds["APIServer"] { - panic(fmt.Errorf("APIServer not implemented")) + if c.v1Kinds["InsightsDataGather"] { + panic(fmt.Errorf("InsightsDataGather not implemented")) } return c.configv1.InsightsDataGathers() } diff --git a/vendor/github.com/openshift/api/.ci-operator.yaml b/vendor/github.com/openshift/api/.ci-operator.yaml index e307e5af6628..284a910090b7 100644 --- a/vendor/github.com/openshift/api/.ci-operator.yaml +++ b/vendor/github.com/openshift/api/.ci-operator.yaml @@ -1,4 +1,4 @@ build_root_image: name: release namespace: openshift - tag: rhel-9-release-golang-1.24-openshift-4.21 + tag: rhel-9-release-golang-1.24-openshift-4.22 diff --git a/vendor/github.com/openshift/api/.coderabbit.yaml b/vendor/github.com/openshift/api/.coderabbit.yaml new file mode 100644 index 000000000000..1cb17f1e19bd --- /dev/null +++ b/vendor/github.com/openshift/api/.coderabbit.yaml @@ -0,0 +1,28 @@ +language: en-US +reviews: + profile: chill + high_level_summary: false + review_status: true + commit_status: true + collapse_walkthrough: true + changed_files_summary: false + sequence_diagrams: false + estimate_code_review_effort: false + poem: false + suggested_labels: false + path_filters: + - "!payload-manifests" + - "!**/zz_generated.crd-manifests/*" # Contains files + - "!**/zz_generated.featuregated-crd-manifests/**" # Contains folders + - "!**/vendor/**" + - "!vendor/**" + tools: + golangci-lint: + enabled: true +knowledge_base: + code_guidelines: + enabled: true + filePatterns: + - AGENTS.md + learnings: + scope: local diff --git a/vendor/github.com/openshift/api/.golangci.yaml b/vendor/github.com/openshift/api/.golangci.yaml index 649f9bed352c..516339b48828 100644 --- a/vendor/github.com/openshift/api/.golangci.yaml +++ b/vendor/github.com/openshift/api/.golangci.yaml @@ -11,15 +11,26 @@ linters: settings: linters: enable: + - forbiddenmarkers - maxlength + - namingconventions - nobools - nomaps - statussubresource + disable: + - statusoptional # This is legacy and not something we currently recommend. lintersConfig: conditions: isFirstField: Warn usePatchStrategy: Ignore useProtobuf: Ignore + forbiddenmarkers: + markers: + - identifier: "openshift:enable:FeatureSets" + - identifier: "openshift:validation:FeatureSetAwareEnum" + - identifier: "openshift:validation:FeatureSetAwareXValidation" + - identifier: "kubebuilder:validation:UniqueItems" + - identifier: "kubebuilder:validation:Pattern" # Use CEL expressions instead optionalfields: pointers: preference: WhenRequired @@ -33,7 +44,7 @@ linters: # This will force omitzero on optional struct fields. # This means they can be omitted correctly and prevents the need for pointers to structs. policy: SuggestFix - requiredFields: + requiredfields: pointers: # This will force pointers when the field is required, but only when the zero # value is a valid user choice, and has a semantic difference to being omitted (e.g. replicas allows 0). @@ -47,7 +58,18 @@ linters: # This will force omitzero on required struct fields. # This means they can be omitted correctly and prevents the need for pointers to structs. 
policy: SuggestFix - uniqueMarkers: + namingconventions: + conventions: + - name: norefs + violationMatcher: "(?i)ref(erence)?s?$" + operation: Drop + message: "reference fields should not need to be named ref(s)/reference(s)" + - name: nokind + violationMatcher: "^Kind$" + operation: Replacement + replacement: "Resource" + message: "API Kinds can be ambiguous and should be replaced with Resource" + uniquemarkers: customMarkers: - identifier: "openshift:validation:FeatureGateAwareEnum" attributes: diff --git a/vendor/github.com/openshift/api/AGENTS.md b/vendor/github.com/openshift/api/AGENTS.md index a009bbb2de2e..991ed62579b0 100644 --- a/vendor/github.com/openshift/api/AGENTS.md +++ b/vendor/github.com/openshift/api/AGENTS.md @@ -32,6 +32,29 @@ make clean # Clean build artifacts make update # Alias for update-codegen-crds ``` +#### Targeted Code Generation +When working on a specific API group/version, you can regenerate only the affected CRDs instead of all CRDs: + +```bash +# Regenerate CRDs for a specific API group/version +make update-codegen API_GROUP_VERSIONS=operator.openshift.io/v1alpha1 +make update-codegen API_GROUP_VERSIONS=config.openshift.io/v1 +make update-codegen API_GROUP_VERSIONS=route.openshift.io/v1 + +# Multiple API groups can be specified with comma separation +make update-codegen API_GROUP_VERSIONS=operator.openshift.io/v1alpha1,config.openshift.io/v1 +``` + +**Important:** While using `API_GROUP_VERSIONS` is faster for iteration (e.g., when developing tests), +it generates invalid OpenAPI data. This targeted generation is useful during development cycles, but you +**must run `make update`** (without `API_GROUP_VERSIONS`) to regenerate all files correctly before +committing changes. The full `make update` ensures all generated files, including OpenAPI schemas, are +properly synchronized. + +**Workflow:** +- During iteration: `make update-codegen API_GROUP_VERSIONS=your.group/v1` (fast feedback) +- Before committing: `make update` (ensures correctness) + ### Testing ```bash make test-unit # Run unit tests diff --git a/vendor/github.com/openshift/api/Dockerfile.ocp b/vendor/github.com/openshift/api/Dockerfile.ocp index 45d24f4fccc2..65bd2228ad05 100644 --- a/vendor/github.com/openshift/api/Dockerfile.ocp +++ b/vendor/github.com/openshift/api/Dockerfile.ocp @@ -1,10 +1,10 @@ -FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.24-openshift-4.21 AS builder +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.24-openshift-4.22 AS builder WORKDIR /go/src/github.com/openshift/api COPY . . ENV GO_PACKAGE github.com/openshift/api RUN make build --warn-undefined-variables -FROM registry.ci.openshift.org/ocp/4.21:base-rhel9 +FROM registry.ci.openshift.org/ocp/4.22:base-rhel9 # copy the built binaries to /usr/bin COPY --from=builder /go/src/github.com/openshift/api/render /usr/bin/ diff --git a/vendor/github.com/openshift/api/Makefile b/vendor/github.com/openshift/api/Makefile index fd4268a789ca..c069d804017a 100644 --- a/vendor/github.com/openshift/api/Makefile +++ b/vendor/github.com/openshift/api/Makefile @@ -1,7 +1,7 @@ all: build .PHONY: all -update: update-codegen-crds +update: update-non-codegen update-codegen RUNTIME ?= podman RUNTIME_IMAGE_NAME ?= registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.24-openshift-4.20 @@ -17,10 +17,8 @@ test-unit: # # BEGIN: Update codegen-crds. Defaults to generating updates for all API packages. # To run a subset of packages: -# - Filter by group with make update-codegen-crds- -# E.g. 
make update-codegen-crds-machine -# - Set API_GROUP_VERSIONS to a space separated list of /. -# E.g. API_GROUP_VERSIONS="apps/v1 build/v1" make update-codegen-crds. +# - Set API_GROUP_VERSIONS to a space separated list of fully qualified /. +# E.g. API_GROUP_VERSIONS="apps.openshift.io/v1 build.openshift.io/v1" make update-codegen-crds. # FeatureSet generation is controlled at the group level by the # .codegen.yaml file. # @@ -57,23 +55,28 @@ verify-lint-fix: make lint-fix 2>/dev/null || true git diff --exit-code -.PHONY: verify-scripts -verify-scripts: - bash -x hack/verify-deepcopy.sh - bash -x hack/verify-openapi.sh +# Verify codegen runs all verifiers in the order they are defined in the root.go file. +# This includes all generators defined in update-codegen, but also the crd-schema-checker and crdify verifiers. +.PHONY: verify-codegen +verify-codegen: + EXTRA_ARGS=--verify hack/update-codegen.sh + +.PHONY: verify-non-codegen +verify-non-codegen: bash -x hack/verify-protobuf.sh - bash -x hack/verify-swagger-docs.sh hack/verify-crds.sh bash -x hack/verify-types.sh - bash -x hack/verify-compatibility.sh bash -x hack/verify-integration-tests.sh bash -x hack/verify-group-versions.sh bash -x hack/verify-prerelease-lifecycle-gen.sh hack/verify-payload-crds.sh hack/verify-payload-featuregates.sh +.PHONY: verify-scripts +verify-scripts: verify-non-codegen verify-codegen + .PHONY: verify -verify: verify-scripts lint verify-crd-schema verify-crdify verify-codegen-crds +verify: verify-scripts lint .PHONY: verify-codegen-crds verify-codegen-crds: @@ -99,8 +102,8 @@ verify-%: ################################################################################################ # # BEGIN: Update scripts. Defaults to generating updates for all API packages. -# Set API_GROUP_VERSIONS to a space separated list of / to limit -# the scope of the updates. Eg API_GROUP_VERSIONS="apps/v1 build/v1" make update-scripts. +# Set API_GROUP_VERSIONS to a space separated list of fully qualified / to limit +# the scope of the updates. Eg API_GROUP_VERSIONS="apps.openshift.io/v1 build.openshift.io/v1" make update-scripts. # Note: Protobuf generation is handled separately, see hack/lib/init.sh. # ################################################################################################ @@ -108,6 +111,19 @@ verify-%: .PHONY: update-scripts update-scripts: update-compatibility update-openapi update-deepcopy update-protobuf update-swagger-docs tests-vendor update-prerelease-lifecycle-gen update-payload-featuregates +# Update codegen runs all generators in the order they are defined in the root.go file. +# The per group generators are:[compatibility, deepcopy, swagger-docs, empty-partial-schema, schema-patch, crd-manifest-merge] +# The multi group generators are:[openapi] +.PHONY: update-codegen +update-codegen: + hack/update-codegen.sh + +# Update non-codegen runs all generators that are not part of the codegen utility, or +# are part of it, but are not run by default when invoking codegen without a specific generator. +# E.g. the payload feature gates which is not part of the generator style, but is still a subcommand. 
+.PHONY: update-non-codegen +update-non-codegen: update-protobuf tests-vendor update-prerelease-lifecycle-gen update-payload-crds update-payload-featuregates + .PHONY: update-compatibility update-compatibility: hack/update-compatibility.sh diff --git a/vendor/github.com/openshift/api/apiextensions/install.go b/vendor/github.com/openshift/api/apiextensions/install.go new file mode 100644 index 000000000000..adaca4d6bade --- /dev/null +++ b/vendor/github.com/openshift/api/apiextensions/install.go @@ -0,0 +1,26 @@ +package apiextensions + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + apiextensionsv1alpha1 "github.com/openshift/api/apiextensions/v1alpha1" +) + +const ( + GroupName = "apiextensions.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(apiextensionsv1alpha1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/apiextensions/v1alpha1/Makefile b/vendor/github.com/openshift/api/apiextensions/v1alpha1/Makefile new file mode 100644 index 000000000000..6cf1a197f78a --- /dev/null +++ b/vendor/github.com/openshift/api/apiextensions/v1alpha1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="apiextensions.openshift.io/v1alpha1" diff --git a/vendor/github.com/openshift/api/apiextensions/v1alpha1/doc.go b/vendor/github.com/openshift/api/apiextensions/v1alpha1/doc.go new file mode 100644 index 000000000000..e5d665fbbc76 --- /dev/null +++ b/vendor/github.com/openshift/api/apiextensions/v1alpha1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true +// +openshift:featuregated-schema-gen=true + +// +groupName=apiextensions.openshift.io +// Package v1alpha1 is the v1alpha1 version of the API. 
+package v1alpha1 diff --git a/vendor/github.com/openshift/api/apiextensions/v1alpha1/register.go b/vendor/github.com/openshift/api/apiextensions/v1alpha1/register.go new file mode 100644 index 000000000000..a2f99e2a50b8 --- /dev/null +++ b/vendor/github.com/openshift/api/apiextensions/v1alpha1/register.go @@ -0,0 +1,39 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "apiextensions.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func addKnownTypes(scheme *runtime.Scheme) error { + metav1.AddToGroupVersion(scheme, GroupVersion) + + scheme.AddKnownTypes(GroupVersion, + &CompatibilityRequirement{}, + &CompatibilityRequirementList{}, + ) + + return nil +} diff --git a/vendor/github.com/openshift/api/apiextensions/v1alpha1/types_compatibilityrequirement.go b/vendor/github.com/openshift/api/apiextensions/v1alpha1/types_compatibilityrequirement.go new file mode 100644 index 000000000000..46e211cd551f --- /dev/null +++ b/vendor/github.com/openshift/api/apiextensions/v1alpha1/types_compatibilityrequirement.go @@ -0,0 +1,387 @@ +package v1alpha1 + +import ( + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CompatibilityRequirement expresses a set of requirements on a target CRD. +// It is used to ensure compatibility between different actors using the same +// CRD. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:file-pattern=cvoRunLevel=0000_20,operatorName=crd-compatibility-checker,operatorOrdering=01 +// +openshift:enable:FeatureGate=CRDCompatibilityRequirementOperator +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=compatibilityrequirements,scope=Cluster +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2479 +type CompatibilityRequirement struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +required + metav1.ObjectMeta `json:"metadata,omitzero"` + + // spec is the specification of the desired behavior of the Compatibility Requirement. + // +required + Spec CompatibilityRequirementSpec `json:"spec,omitzero"` + + // status is the most recently observed status of the Compatibility Requirement. 
+ // +optional + Status CompatibilityRequirementStatus `json:"status,omitzero"` +} + +// CompatibilityRequirementSpec is the specification of the desired behavior of the Compatibility Requirement. +type CompatibilityRequirementSpec struct { + // compatibilitySchema defines the schema used by + // customResourceDefinitionSchemaValidation and objectSchemaValidation. + // This field is required. + // +required + CompatibilitySchema CompatibilitySchema `json:"compatibilitySchema,omitzero"` + + // customResourceDefinitionSchemaValidation ensures that updates to the + // installed CRD are compatible with this compatibility requirement. If not + // specified, admission of the target CRD will not be validated. + // This field is optional. + // +optional + CustomResourceDefinitionSchemaValidation CustomResourceDefinitionSchemaValidation `json:"customResourceDefinitionSchemaValidation,omitzero"` + + // objectSchemaValidation ensures that matching resources conform to + // compatibilitySchema. If not specified, admission of matching resources + // will not be validated. + // This field is optional. + // +optional + ObjectSchemaValidation ObjectSchemaValidation `json:"objectSchemaValidation,omitzero"` +} + +// CRDDataType indicates the type of the CRD data. +// +kubebuilder:validation:Enum=YAML +type CRDDataType string + +const ( + // CRDDataTypeYAML indicates that the CRD data is in YAML format. + CRDDataTypeYAML CRDDataType = "YAML" +) + +// CRDData contains the complete definition of a CRD. +type CRDData struct { + // type indicates the type of the CRD data. The only supported type is "YAML". + // This field is required. + // +required + Type CRDDataType `json:"type,omitempty"` + + // data contains the complete definition of the CRD. This field must be in + // the format specified by the type field. It may not be longer than 1572864 + // characters. + // This field is required. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=1572864 + // +required + Data string `json:"data,omitempty"` +} + +// APIVersionSelectionType specifies a method for automatically selecting a +// set of API versions to require. +// +kubebuilder:validation:Enum=StorageOnly;AllServed +type APIVersionSelectionType string + +const ( + APIVersionSetTypeStorageOnly APIVersionSelectionType = "StorageOnly" + APIVersionSetTypeAllServed APIVersionSelectionType = "AllServed" +) + +// APIVersionString is a string representing a kubernetes API version. +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=63 +// +kubebuilder:validation:XValidation:rule="!format.dns1035Label().validate(self).hasValue()",message="It must contain only lower-case alphanumeric characters and hyphens and must start with an alphabetic character and end with an alphanumeric character" +type APIVersionString string + +// APIVersions specifies a set of API versions of a CRD. +// +kubebuilder:validation:XValidation:rule="self.defaultSelection != 'AllServed' || !has(self.additionalVersions)",message="additionalVersions may not be defined when defaultSelection is 'AllServed'" +type APIVersions struct { + // defaultSelection specifies a method for automatically selecting a set of + // versions to require. + // + // Valid options are StorageOnly and AllServed. + // When set to StorageOnly, only the storage version is selected for + // compatibility assessment. + // When set to AllServed, all served versions are selected for compatibility + // assessment. + // + // This field is required. 
+ // +required + DefaultSelection APIVersionSelectionType `json:"defaultSelection,omitempty"` + + // additionalVersions specifies a set of API versions to require in addition to + // the default selection. It is explicitly permitted to specify a version in + // additionalVersions which was also selected by the default selection. The + // selections will be merged and deduplicated. + // + // Each item must be at most 63 characters in length, and must consist + // of only lowercase alphanumeric characters and hyphens, and must start + // with an alphabetic character and end with an alphanumeric character. + // At most 32 additional versions may be specified. + // + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=32 + // +listType=set + // +optional + AdditionalVersions []APIVersionString `json:"additionalVersions,omitempty"` +} + +// APIExcludedField describes a field in the schema which will not be validated by +// crdSchemaValidation or objectSchemaValidation. +type APIExcludedField struct { + // path is the path to the field in the schema. + // Paths are dot-separated field names (e.g., "fieldA.fieldB.fieldC") representing nested object fields. + // If part of the path is a slice (e.g., "status.conditions") the remaining path is applied to all items in the slice + // (e.g., "status.conditions.lastTransitionTimestamp"). + // Each field name must be a valid Kubernetes CRD field name: start with a letter, contain only + // letters, digits, and underscores, and be between 1 and 63 characters in length. + // A path may contain at most 16 fields. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=1023 + // +kubebuilder:validation:XValidation:rule="self.split('.').size() <= 16",message="There may be at most 16 fields in the path." + // +kubebuilder:validation:XValidation:rule="self.split('.', 16).all(f, f.matches('^[a-zA-Z][a-zA-Z0-9_]{0,62}$'))",message="path must be dot-separated field names, each starting with a letter and containing only letters, digits, and underscores not exceeding 63 characters. There may be at most 16 fields in the path." + // +required + Path string `json:"path,omitempty"` + + // versions are the API versions the field is excluded from. + // When not specified, the field is excluded from all versions. + // + // Each item must be at most 63 characters in length, and must + // consist of only lowercase alphanumeric characters and hyphens, and must + // start with an alphabetic character and end with an alphanumeric + // character. + // At most 32 versions may be specified. + // + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=32 + // +listType=set + // +optional + Versions []APIVersionString `json:"versions,omitempty"` +} + +// CompatibilitySchema defines the schema used by crdSchemaValidation and objectSchemaValidation. +type CompatibilitySchema struct { + // customResourceDefinition contains the complete definition of the CRD for schema and object validation purposes. + // This field is required. + // +required + CustomResourceDefinition CRDData `json:"customResourceDefinition,omitzero"` + + // requiredVersions specifies a subset of the CRD's API versions which will be asserted for compatibility. + // This field is required.
+ // +required + RequiredVersions APIVersions `json:"requiredVersions,omitzero"` + + // excludedFields is a set of fields in the schema which will not be validated by + // crdSchemaValidation or objectSchemaValidation. + // The list may contain at most 64 fields. + // When not specified, all fields in the schema will be validated. + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=64 + // +listType=atomic + // +optional + ExcludedFields []APIExcludedField `json:"excludedFields,omitempty"` +} + +// CustomResourceDefinitionSchemaValidation ensures that updates to the installed CRD are compatible with this compatibility requirement. +type CustomResourceDefinitionSchemaValidation struct { + // action determines whether violations are rejected (Deny) or admitted with an API warning (Warn). + // Valid options are Deny and Warn. + // When set to Deny, incompatible CRDs will be rejected and not admitted to the cluster. + // When set to Warn, incompatible CRDs will be allowed but a warning will be generated in the API response. + // This field is required. + // +required + Action CRDAdmitAction `json:"action,omitempty"` +} + +// ObjectSchemaValidation ensures that matching objects conform to the compatibilitySchema. +type ObjectSchemaValidation struct { + // action determines whether violations are rejected (Deny) or admitted with an API warning (Warn). + // Valid options are Deny and Warn. + // When set to Deny, incompatible Objects will be rejected and not admitted to the cluster. + // When set to Warn, incompatible Objects will be allowed but a warning will be generated in the API response. + // This field is required. + // +required + Action CRDAdmitAction `json:"action,omitempty"` + + // namespaceSelector defines a label selector for namespaces. If defined, + // only objects in a namespace with matching labels will be subject to + // validation. When not specified, objects for validation will not be + // filtered by namespace. + // +kubebuilder:validation:XValidation:rule="size(self.matchLabels) > 0 || size(self.matchExpressions) > 0",message="must have at least one of matchLabels or matchExpressions when specified" + // +optional + NamespaceSelector metav1.LabelSelector `json:"namespaceSelector,omitempty"` + // objectSelector defines a label selector for objects. If defined, only + // objects with matching labels will be subject to validation. When not + // specified, objects for validation will not be filtered by label. + // +kubebuilder:validation:XValidation:rule="size(self.matchLabels) > 0 || size(self.matchExpressions) > 0",message="must have at least one of matchLabels or matchExpressions when specified" + // +optional + ObjectSelector metav1.LabelSelector `json:"objectSelector,omitempty"` + + // matchConditions defines the matchConditions field of the resulting ValidatingWebhookConfiguration. + // When present, must contain between 1 and 64 match conditions. + // When not specified, the webhook will match all requests according to its other selectors. + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=64 + // +optional + MatchConditions []admissionregistrationv1.MatchCondition `json:"matchConditions,omitempty"` +} + +// CRDAdmitAction determines the action taken when a CRD is not compatible. +// +kubebuilder:validation:Enum=Deny;Warn +// +enum +type CRDAdmitAction string + +const ( + // CRDAdmitActionDeny means that incompatible CRDs will be rejected. 
+ CRDAdmitActionDeny CRDAdmitAction = "Deny" + + // CRDAdmitActionWarn means that incompatible CRDs will be allowed but a warning will be generated. + CRDAdmitActionWarn CRDAdmitAction = "Warn" +) + +// CompatibilityRequirement's Progressing condition and corresponding reasons. +const ( + // CompatibilityRequirementProgressing is false if the spec has been + // completely reconciled against the condition's observed generation. + // True indicates that reconciliation is still in progress and the current status does not represent + // a stable state. Progressing false with an error reason indicates that the object cannot be reconciled. + CompatibilityRequirementProgressing string = "Progressing" + + // CompatibilityRequirementConfigurationErrorReason indicates that + // reconciliation cannot progress due to an invalid spec. The controller + // will not reconcile this object again until the spec is updated. + CompatibilityRequirementConfigurationErrorReason string = "ConfigurationError" + + // CompatibilityRequirementTransientErrorReason indicates that + // reconciliation failed due to an error that can be retried. + CompatibilityRequirementTransientErrorReason string = "TransientError" + + // CompatibilityRequirementUpToDateReason surfaces when reconciliation + // completed successfully for the condition's observed generation. + CompatibilityRequirementUpToDateReason string = "UpToDate" +) + +// CompatibilityRequirement's Admitted condition and corresponding reasons. +const ( + // CompatibilityRequirementAdmitted is true if the requirement has been configured in the validating webhook, + // otherwise false. + CompatibilityRequirementAdmitted string = "Admitted" + + // CompatibilityRequirementAdmittedReason surfaces when the requirement has been configured in the validating webhook. + CompatibilityRequirementAdmittedReason string = "Admitted" + + // CompatibilityRequirementNotAdmittedReason surfaces when the requirement has not been configured in the validating webhook. + CompatibilityRequirementNotAdmittedReason string = "NotAdmitted" +) + +// CompatibilityRequirement's Compatible condition and corresponding reasons. +const ( + // CompatibilityRequirementCompatible is true if the observed CRD is compatible with the requirement, + // otherwise false. Note that Compatible may be false when adding a new requirement which the existing + // CRD does not meet. + CompatibilityRequirementCompatible string = "Compatible" + + // CompatibilityRequirementRequirementsNotMetReason surfaces when a CRD exists, and it is not compatible with this requirement. + CompatibilityRequirementRequirementsNotMetReason string = "RequirementsNotMet" + + // CompatibilityRequirementCRDNotFoundReason surfaces when the referenced CRD does not exist. + CompatibilityRequirementCRDNotFoundReason string = "CRDNotFound" + + // CompatibilityRequirementCompatibleWithWarningsReason surfaces when the CRD exists and is compatible with this requirement, but Message contains one or more warning messages. + CompatibilityRequirementCompatibleWithWarningsReason string = "CompatibleWithWarnings" + + // CompatibilityRequirementCompatibleReason surfaces when the CRD exists and is compatible with this requirement. + CompatibilityRequirementCompatibleReason string = "Compatible" +) + +// CompatibilityRequirementStatus defines the observed status of the Compatibility Requirement. 
+// +kubebuilder:validation:MinProperties=1 +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.crdName) || has(self.crdName) && oldSelf.crdName == self.crdName",message="crdName cannot be changed once set" +type CompatibilityRequirementStatus struct { + // conditions is a list of conditions and their status. + // Known condition types are Progressing, Admitted, and Compatible. + // + // The Progressing condition indicates if reconciliation of a CompatibilityRequirement is still + // progressing or has finished. + // + // The Admitted condition indicates if the validating webhook has been configured. + // + // The Compatible condition indicates if the observed CRD is compatible with the requirement. + // + // +optional + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=32 + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // observedCRD documents the uid and generation of the CRD object when the current status was written. + // This field will be omitted if the target CRD does not exist or could not be retrieved. + // +optional + ObservedCRD ObservedCRD `json:"observedCRD,omitzero"` + + // crdName is the name of the target CRD. The target CRD is not required to + // exist, as we may legitimately place requirements on it before it is + // created. The observed CRD is given in status.observedCRD, which will be + // empty if no CRD is observed. + // When present, must be between 1 and 253 characters and conform to RFC 1123 subdomain format: + // lowercase alphanumeric characters, '-' or '.', starting and ending with alphanumeric characters. + // When not specified, the requirement applies to any CRD name discovered from the compatibility schema. + // This field is optional. Once set, the value cannot be changed and must always remain set. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." + // +optional + CRDName string `json:"crdName,omitempty"` +} + +// ObservedCRD contains information about the observed target CRD. +// +kubebuilder:validation:XValidation:rule="oldSelf.uid != self.uid || self.generation >= oldSelf.generation",message="generation may only increase on the same CRD" +type ObservedCRD struct { + // uid is the uid of the observed CRD. + // Must be a valid UUID consisting of lowercase hexadecimal digits in 5 hyphenated blocks (8-4-4-4-12 format). + // Length must be between 1 and 36 characters. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=36 + // +kubebuilder:validation:Format=uuid + // +required + UID string `json:"uid,omitempty"` + + // generation is the observed generation of the CRD. + // Must be a positive integer (minimum value of 1). + // +kubebuilder:validation:Minimum=1 + // +required + Generation int64 `json:"generation,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CompatibilityRequirementList is a collection of CompatibilityRequirements. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. 
+// +openshift:compatibility-gen:level=4 +type CompatibilityRequirementList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitzero"` + + // items is a list of CompatibilityRequirements. + // +kubebuilder:validation:MaxItems=1000 + // +optional + Items []CompatibilityRequirement `json:"items,omitempty"` +} diff --git a/vendor/github.com/openshift/api/apiextensions/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/apiextensions/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000000..a9ff411a88d2 --- /dev/null +++ b/vendor/github.com/openshift/api/apiextensions/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,254 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by codegen. DO NOT EDIT. + +package v1alpha1 + +import ( + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIExcludedField) DeepCopyInto(out *APIExcludedField) { + *out = *in + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]APIVersionString, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIExcludedField. +func (in *APIExcludedField) DeepCopy() *APIExcludedField { + if in == nil { + return nil + } + out := new(APIExcludedField) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIVersions) DeepCopyInto(out *APIVersions) { + *out = *in + if in.AdditionalVersions != nil { + in, out := &in.AdditionalVersions, &out.AdditionalVersions + *out = make([]APIVersionString, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIVersions. +func (in *APIVersions) DeepCopy() *APIVersions { + if in == nil { + return nil + } + out := new(APIVersions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CRDData) DeepCopyInto(out *CRDData) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRDData. +func (in *CRDData) DeepCopy() *CRDData { + if in == nil { + return nil + } + out := new(CRDData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompatibilityRequirement) DeepCopyInto(out *CompatibilityRequirement) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompatibilityRequirement. 
+func (in *CompatibilityRequirement) DeepCopy() *CompatibilityRequirement { + if in == nil { + return nil + } + out := new(CompatibilityRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CompatibilityRequirement) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompatibilityRequirementList) DeepCopyInto(out *CompatibilityRequirementList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CompatibilityRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompatibilityRequirementList. +func (in *CompatibilityRequirementList) DeepCopy() *CompatibilityRequirementList { + if in == nil { + return nil + } + out := new(CompatibilityRequirementList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CompatibilityRequirementList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompatibilityRequirementSpec) DeepCopyInto(out *CompatibilityRequirementSpec) { + *out = *in + in.CompatibilitySchema.DeepCopyInto(&out.CompatibilitySchema) + out.CustomResourceDefinitionSchemaValidation = in.CustomResourceDefinitionSchemaValidation + in.ObjectSchemaValidation.DeepCopyInto(&out.ObjectSchemaValidation) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompatibilityRequirementSpec. +func (in *CompatibilityRequirementSpec) DeepCopy() *CompatibilityRequirementSpec { + if in == nil { + return nil + } + out := new(CompatibilityRequirementSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompatibilityRequirementStatus) DeepCopyInto(out *CompatibilityRequirementStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.ObservedCRD = in.ObservedCRD + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompatibilityRequirementStatus. +func (in *CompatibilityRequirementStatus) DeepCopy() *CompatibilityRequirementStatus { + if in == nil { + return nil + } + out := new(CompatibilityRequirementStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CompatibilitySchema) DeepCopyInto(out *CompatibilitySchema) { + *out = *in + out.CustomResourceDefinition = in.CustomResourceDefinition + in.RequiredVersions.DeepCopyInto(&out.RequiredVersions) + if in.ExcludedFields != nil { + in, out := &in.ExcludedFields, &out.ExcludedFields + *out = make([]APIExcludedField, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompatibilitySchema. +func (in *CompatibilitySchema) DeepCopy() *CompatibilitySchema { + if in == nil { + return nil + } + out := new(CompatibilitySchema) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionSchemaValidation) DeepCopyInto(out *CustomResourceDefinitionSchemaValidation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionSchemaValidation. +func (in *CustomResourceDefinitionSchemaValidation) DeepCopy() *CustomResourceDefinitionSchemaValidation { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionSchemaValidation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectSchemaValidation) DeepCopyInto(out *ObjectSchemaValidation) { + *out = *in + in.NamespaceSelector.DeepCopyInto(&out.NamespaceSelector) + in.ObjectSelector.DeepCopyInto(&out.ObjectSelector) + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]admissionregistrationv1.MatchCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectSchemaValidation. +func (in *ObjectSchemaValidation) DeepCopy() *ObjectSchemaValidation { + if in == nil { + return nil + } + out := new(ObjectSchemaValidation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObservedCRD) DeepCopyInto(out *ObservedCRD) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObservedCRD. 
+func (in *ObservedCRD) DeepCopy() *ObservedCRD { + if in == nil { + return nil + } + out := new(ObservedCRD) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/apiextensions/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/apiextensions/v1alpha1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 000000000000..319f2b335f23 --- /dev/null +++ b/vendor/github.com/openshift/api/apiextensions/v1alpha1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,23 @@ +compatibilityrequirements.apiextensions.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2479 + CRDName: compatibilityrequirements.apiextensions.openshift.io + Capability: "" + Category: "" + FeatureGates: + - CRDCompatibilityRequirementOperator + FilenameOperatorName: crd-compatibility-checker + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_20" + GroupName: apiextensions.openshift.io + HasStatus: true + KindName: CompatibilityRequirement + Labels: {} + PluralName: compatibilityrequirements + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - CRDCompatibilityRequirementOperator + Version: v1alpha1 + diff --git a/vendor/github.com/openshift/api/apiextensions/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/apiextensions/v1alpha1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000000..a9a4e707e7ae --- /dev/null +++ b/vendor/github.com/openshift/api/apiextensions/v1alpha1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,129 @@ +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_APIExcludedField = map[string]string{ + "": "APIExcludedField describes a field in the schema which will not be validated by crdSchemaValidation or objectSchemaValidation.", + "path": "path is the path to the field in the schema. Paths are dot-separated field names (e.g., \"fieldA.fieldB.fieldC\") representing nested object fields. If part of the path is a slice (e.g., \"status.conditions\") the remaining path is applied to all items in the slice (e.g., \"status.conditions.lastTransitionTimestamp\"). Each field name must be a valid Kubernetes CRD field name: start with a letter, contain only letters, digits, and underscores, and be between 1 and 63 characters in length. A path may contain at most 16 fields.", + "versions": "versions are the API versions the field is excluded from. When not specified, the field is excluded from all versions.\n\nEach item must be at most 63 characters in length, and must must consist of only lowercase alphanumeric characters and hyphens, and must start with an alphabetic character and end with an alphanumeric character. 
At most 32 versions may be specified.", +} + +func (APIExcludedField) SwaggerDoc() map[string]string { + return map_APIExcludedField +} + +var map_APIVersions = map[string]string{ + "": "APIVersions specifies a set of API versions of a CRD.", + "defaultSelection": "defaultSelection specifies a method for automatically selecting a set of versions to require.\n\nValid options are StorageOnly and AllServed. When set to StorageOnly, only the storage version is selected for compatibility assessment. When set to AllServed, all served versions are selected for compatibility assessment.\n\nThis field is required.", + "additionalVersions": "additionalVersions specifies a set api versions to require in addition to the default selection. It is explicitly permitted to specify a version in additionalVersions which was also selected by the default selection. The selections will be merged and deduplicated.\n\nEach item must be at most 63 characters in length, and must must consist of only lowercase alphanumeric characters and hyphens, and must start with an alphabetic character and end with an alphanumeric character.// with an alphabetic character and end with an alphanumeric character. At most 32 additional versions may be specified.", +} + +func (APIVersions) SwaggerDoc() map[string]string { + return map_APIVersions +} + +var map_CRDData = map[string]string{ + "": "CRDData contains the complete definition of a CRD.", + "type": "type indicates the type of the CRD data. The only supported type is \"YAML\". This field is required.", + "data": "data contains the complete definition of the CRD. This field must be in the format specified by the type field. It may not be longer than 1572864 characters. This field is required.", +} + +func (CRDData) SwaggerDoc() map[string]string { + return map_CRDData +} + +var map_CompatibilityRequirement = map[string]string{ + "": "CompatibilityRequirement expresses a set of requirements on a target CRD. It is used to ensure compatibility between different actors using the same CRD.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the Compatibility Requirement.", + "status": "status is the most recently observed status of the Compatibility Requirement.", +} + +func (CompatibilityRequirement) SwaggerDoc() map[string]string { + return map_CompatibilityRequirement +} + +var map_CompatibilityRequirementList = map[string]string{ + "": "CompatibilityRequirementList is a collection of CompatibilityRequirements.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of CompatibilityRequirements.", +} + +func (CompatibilityRequirementList) SwaggerDoc() map[string]string { + return map_CompatibilityRequirementList +} + +var map_CompatibilityRequirementSpec = map[string]string{ + "": "CompatibilityRequirementSpec is the specification of the desired behavior of the Compatibility Requirement.", + "compatibilitySchema": "compatibilitySchema defines the schema used by customResourceDefinitionSchemaValidation and objectSchemaValidation. This field is required.", + "customResourceDefinitionSchemaValidation": "customResourceDefinitionSchemaValidation ensures that updates to the installed CRD are compatible with this compatibility requirement. If not specified, admission of the target CRD will not be validated. This field is optional.", + "objectSchemaValidation": "objectSchemaValidation ensures that matching resources conform to compatibilitySchema. If not specified, admission of matching resources will not be validated. This field is optional.", +} + +func (CompatibilityRequirementSpec) SwaggerDoc() map[string]string { + return map_CompatibilityRequirementSpec +} + +var map_CompatibilityRequirementStatus = map[string]string{ + "": "CompatibilityRequirementStatus defines the observed status of the Compatibility Requirement.", + "conditions": "conditions is a list of conditions and their status. Known condition types are Progressing, Admitted, and Compatible.\n\nThe Progressing condition indicates if reconciliation of a CompatibilityRequirement is still progressing or has finished.\n\nThe Admitted condition indicates if the validating webhook has been configured.\n\nThe Compatible condition indicates if the observed CRD is compatible with the requirement.", + "observedCRD": "observedCRD documents the uid and generation of the CRD object when the current status was written. This field will be omitted if the target CRD does not exist or could not be retrieved.", + "crdName": "crdName is the name of the target CRD. The target CRD is not required to exist, as we may legitimately place requirements on it before it is created. The observed CRD is given in status.observedCRD, which will be empty if no CRD is observed. When present, must be between 1 and 253 characters and conform to RFC 1123 subdomain format: lowercase alphanumeric characters, '-' or '.', starting and ending with alphanumeric characters. When not specified, the requirement applies to any CRD name discovered from the compatibility schema. This field is optional. Once set, the value cannot be changed and must always remain set.", +} + +func (CompatibilityRequirementStatus) SwaggerDoc() map[string]string { + return map_CompatibilityRequirementStatus +} + +var map_CompatibilitySchema = map[string]string{ + "": "CompatibilitySchema defines the schema used by crdSchemaValidation and objectSchemaValidation.", + "customResourceDefinition": "customResourceDefinition contains the complete definition of the CRD for schema and object validation purposes. This field is required.", + "requiredVersions": "requiredVersions specifies a subset of the CRD's API versions which will be asserted for compatibility. This field is required.", + "excludedFields": "excludedFields is a set of fields in the schema which will not be validated by crdSchemaValidation or objectSchemaValidation. The list may contain at most 64 fields. 
When not specified, all fields in the schema will be validated.", +} + +func (CompatibilitySchema) SwaggerDoc() map[string]string { + return map_CompatibilitySchema +} + +var map_CustomResourceDefinitionSchemaValidation = map[string]string{ + "": "CustomResourceDefinitionSchemaValidation ensures that updates to the installed CRD are compatible with this compatibility requirement.", + "action": "action determines whether violations are rejected (Deny) or admitted with an API warning (Warn). Valid options are Deny and Warn. When set to Deny, incompatible CRDs will be rejected and not admitted to the cluster. When set to Warn, incompatible CRDs will be allowed but a warning will be generated in the API response. This field is required.", +} + +func (CustomResourceDefinitionSchemaValidation) SwaggerDoc() map[string]string { + return map_CustomResourceDefinitionSchemaValidation +} + +var map_ObjectSchemaValidation = map[string]string{ + "": "ObjectSchemaValidation ensures that matching objects conform to the compatibilitySchema.", + "action": "action determines whether violations are rejected (Deny) or admitted with an API warning (Warn). Valid options are Deny and Warn. When set to Deny, incompatible Objects will be rejected and not admitted to the cluster. When set to Warn, incompatible Objects will be allowed but a warning will be generated in the API response. This field is required.", + "namespaceSelector": "namespaceSelector defines a label selector for namespaces. If defined, only objects in a namespace with matching labels will be subject to validation. When not specified, objects for validation will not be filtered by namespace.", + "objectSelector": "objectSelector defines a label selector for objects. If defined, only objects with matching labels will be subject to validation. When not specified, objects for validation will not be filtered by label.", + "matchConditions": "matchConditions defines the matchConditions field of the resulting ValidatingWebhookConfiguration. When present, must contain between 1 and 64 match conditions. When not specified, the webhook will match all requests according to its other selectors.", +} + +func (ObjectSchemaValidation) SwaggerDoc() map[string]string { + return map_ObjectSchemaValidation +} + +var map_ObservedCRD = map[string]string{ + "": "ObservedCRD contains information about the observed target CRD.", + "uid": "uid is the uid of the observed CRD. Must be a valid UUID consisting of lowercase hexadecimal digits in 5 hyphenated blocks (8-4-4-4-12 format). Length must be between 1 and 36 characters.", + "generation": "generation is the observed generation of the CRD. 
Must be a positive integer (minimum value of 1).", +} + +func (ObservedCRD) SwaggerDoc() map[string]string { + return map_ObservedCRD +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go index 52a41b2fef28..e300d4eabc5f 100644 --- a/vendor/github.com/openshift/api/config/v1/types_authentication.go +++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -5,7 +5,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDC;ExternalOIDCWithUIDAndExtraClaimMappings,rule="!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace == specC.componentNamespace && statusC.componentName == specC.componentName) || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, oldC.componentNamespace == specC.componentNamespace && oldC.componentName == specC.componentName)))))",message="all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDC;ExternalOIDCWithUIDAndExtraClaimMappings;ExternalOIDCWithUpstreamParity,rule="!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace == specC.componentNamespace && statusC.componentName == specC.componentName) || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, oldC.componentNamespace == specC.componentNamespace && oldC.componentName == specC.componentName)))))",message="all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" // Authentication specifies cluster-wide settings for authentication (like OAuth and // webhook token authenticators). The canonical name of an instance is `cluster`. @@ -91,6 +91,7 @@ type AuthenticationSpec struct { // +kubebuilder:validation:MaxItems=1 // +openshift:enable:FeatureGate=ExternalOIDC // +openshift:enable:FeatureGate=ExternalOIDCWithUIDAndExtraClaimMappings + // +openshift:enable:FeatureGate=ExternalOIDCWithUpstreamParity // +optional OIDCProviders []OIDCProvider `json:"oidcProviders,omitempty"` } @@ -243,11 +244,27 @@ type OIDCProvider struct { // +listType=atomic // +optional ClaimValidationRules []TokenClaimValidationRule `json:"claimValidationRules,omitempty"` + + // userValidationRules is an optional field that configures the set of rules + // used to validate the cluster user identity that was constructed via + // mapping token claims to user identity attributes. + // Rules are CEL expressions that must evaluate to 'true' for authentication to succeed. + // If any rule in the chain of rules evaluates to 'false', authentication will fail. 
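The generated manifest and swagger documentation above describe the new CompatibilityRequirement resource (group apiextensions.openshift.io, version v1alpha1, cluster scoped, gated by CRDCompatibilityRequirementOperator). As a rough sketch of how such an object could be composed, the snippet below uses unstructured with the JSON field names documented above; the generated Go types for this group are not part of this hunk, and every concrete value is an illustrative placeholder.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// Field names follow the swagger documentation above; values are placeholders.
	req := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "apiextensions.openshift.io/v1alpha1",
		"kind":       "CompatibilityRequirement",
		"metadata":   map[string]interface{}{"name": "example-compatibility-requirement"},
		"spec": map[string]interface{}{
			"compatibilitySchema": map[string]interface{}{
				// customResourceDefinition carries the complete CRD definition; only YAML is supported.
				"customResourceDefinition": map[string]interface{}{"type": "YAML", "data": "<CRD manifest>"},
				// requiredVersions selects StorageOnly or AllServed, plus optional additionalVersions.
				"requiredVersions": map[string]interface{}{"defaultSelection": "StorageOnly", "additionalVersions": []interface{}{"v1beta1"}},
				// excludedFields lists schema paths skipped by both validation modes.
				"excludedFields": []interface{}{map[string]interface{}{"path": "status.conditions"}},
			},
			// Deny rejects incompatible CRDs or objects; Warn admits them with an API warning.
			"customResourceDefinitionSchemaValidation": map[string]interface{}{"action": "Warn"},
			"objectSchemaValidation":                   map[string]interface{}{"action": "Warn"},
		},
	}}
	fmt.Println(req.GetName(), req.GroupVersionKind())
}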
+ // When specified, at least one rule must be specified and no more than 64 rules may be specified. + // + // +kubebuilder:validation:MaxItems=64 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=expression + // +optional + // +openshift:enable:FeatureGate=ExternalOIDCWithUpstreamParity + UserValidationRules []TokenUserValidationRule `json:"userValidationRules,omitempty"` } // +kubebuilder:validation:MinLength=1 type TokenAudience string +// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDCWithUpstreamParity,rule="self.?discoveryURL.orValue(\"\").size() > 0 ? (self.issuerURL.size() == 0 || self.discoveryURL.find('^.+[^/]') != self.issuerURL.find('^.+[^/]')) : true",message="discoveryURL must be different from issuerURL" type TokenIssuer struct { // issuerURL is a required field that configures the URL used to issue tokens // by the identity provider. @@ -291,6 +308,24 @@ type TokenIssuer struct { // // +optional CertificateAuthority ConfigMapNameReference `json:"issuerCertificateAuthority"` + // discoveryURL is an optional field that, if specified, overrides the default discovery endpoint + // used to retrieve OIDC configuration metadata. By default, the discovery URL is derived from `issuerURL` + // as "{issuerURL}/.well-known/openid-configuration". + // + // The discoveryURL must be a valid absolute HTTPS URL. It must not contain query + // parameters, user information, or fragments. Additionally, it must differ from the value of `url` (ignoring trailing slashes). + // The discoveryURL value must be at least 1 character long and no longer than 2048 characters. + // + // +optional + // +openshift:enable:FeatureGate=ExternalOIDCWithUpstreamParity + // +kubebuilder:validation:XValidation:rule="isURL(self)",message="discoveryURL must be a valid URL" + // +kubebuilder:validation:XValidation:rule="url(self).getScheme() == 'https'",message="discoveryURL must be a valid https URL" + // +kubebuilder:validation:XValidation:rule="url(self).getQuery().size() == 0",message="discoveryURL must not contain query parameters" + // +kubebuilder:validation:XValidation:rule="self.matches('^[^#]*$')",message="discoveryURL must not contain fragments" + // +kubebuilder:validation:XValidation:rule="!self.matches('^https://.+:.+@.+/.*$')",message="discoveryURL must not contain user info" + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=2048 + DiscoveryURL string `json:"discoveryURL,omitempty"` } type TokenClaimMappings struct { @@ -717,37 +752,56 @@ type PrefixedClaimMapping struct { Prefix string `json:"prefix"` } -// TokenValidationRuleType represents the different -// claim validation rule types that can be configured. +// TokenValidationRuleType defines the type of token validation rule. // +enum +// +openshift:validation:FeatureGateAwareEnum:featureGate="",enum="RequiredClaim"; +// +openshift:validation:FeatureGateAwareEnum:featureGate=ExternalOIDC,enum="RequiredClaim"; +// +openshift:validation:FeatureGateAwareEnum:featureGate=ExternalOIDCWithUIDAndExtraClaimMappings,enum="RequiredClaim"; +// +openshift:validation:FeatureGateAwareEnum:featureGate=ExternalOIDCWithUpstreamParity,enum="RequiredClaim";"CEL" type TokenValidationRuleType string const ( + // TokenValidationRuleTypeRequiredClaim indicates that the token must contain a specific claim. + // Used as a value for TokenValidationRuleType. 
TokenValidationRuleTypeRequiredClaim = "RequiredClaim" + // TokenValidationRuleTypeCEL indicates that the token validation is defined via a CEL expression. + // Used as a value for TokenValidationRuleType. + TokenValidationRuleTypeCEL = "CEL" ) +// TokenClaimValidationRule represents a validation rule based on token claims. +// If type is RequiredClaim, requiredClaim must be set. +// If Type is CEL, CEL must be set and RequiredClaim must be omitted. +// +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'RequiredClaim' ? has(self.requiredClaim) : !has(self.requiredClaim)",message="requiredClaim must be set when type is 'RequiredClaim', and forbidden otherwise" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDCWithUpstreamParity,rule="has(self.type) && self.type == 'CEL' ? has(self.cel) : !has(self.cel)",message="cel must be set when type is 'CEL', and forbidden otherwise" type TokenClaimValidationRule struct { // type is an optional field that configures the type of the validation rule. // - // Allowed values are 'RequiredClaim' and omitted (not provided or an empty string). - // - // When set to 'RequiredClaim', the Kubernetes API server - // will be configured to validate that the incoming JWT - // contains the required claim and that its value matches - // the required value. + // Allowed values are "RequiredClaim" and "CEL". // - // Defaults to 'RequiredClaim'. + // When set to 'RequiredClaim', the Kubernetes API server will be configured + // to validate that the incoming JWT contains the required claim and that its + // value matches the required value. // - // +kubebuilder:validation:Enum={"RequiredClaim"} - // +kubebuilder:default="RequiredClaim" + // When set to 'CEL', the Kubernetes API server will be configured + // to validate the incoming JWT against the configured CEL expression. + // +required Type TokenValidationRuleType `json:"type"` - // requiredClaim is an optional field that configures the required claim - // and value that the Kubernetes API server will use to validate if an incoming - // JWT is valid for this identity provider. + // requiredClaim allows configuring a required claim name and its expected value. + // This field is required when `type` is set to RequiredClaim, and must be omitted + // when `type` is set to any other value. The Kubernetes API server uses this field + // to validate if an incoming JWT is valid for this identity provider. // // +optional RequiredClaim *TokenRequiredClaim `json:"requiredClaim,omitempty"` + + // cel holds the CEL expression and message for validation. + // Must be set when Type is "CEL", and forbidden otherwise. + // +optional + // +openshift:enable:FeatureGate=ExternalOIDCWithUpstreamParity + CEL TokenClaimValidationCELRule `json:"cel,omitempty,omitzero"` } type TokenRequiredClaim struct { @@ -771,3 +825,45 @@ type TokenRequiredClaim struct { // +required RequiredValue string `json:"requiredValue"` } + +type TokenClaimValidationCELRule struct { + // expression is a CEL expression evaluated against token claims. + // expression is required, must be at least 1 character in length and must not exceed 1024 characters. + // The expression must return a boolean value where 'true' signals a valid token and 'false' an invalid one. 
+ // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=1024 + // +required + Expression string `json:"expression,omitempty"` + + // message is a required human-readable message to be logged by the Kubernetes API server + // if the CEL expression defined in 'expression' fails. + // message must be at least 1 character in length and must not exceed 256 characters. + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + Message string `json:"message,omitempty"` +} + +// TokenUserValidationRule provides a CEL-based rule used to validate a token subject. +// Each rule contains a CEL expression that is evaluated against the token’s claims. +type TokenUserValidationRule struct { + // expression is a required CEL expression that performs a validation + // on cluster user identity attributes like username, groups, etc. + // The expression must evaluate to a boolean value. + // When the expression evaluates to 'true', the cluster user identity is considered valid. + // When the expression evaluates to 'false', the cluster user identity is not considered valid. + // expression must be at least 1 character in length and must not exceed 1024 characters. + // + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=1024 + Expression string `json:"expression,omitempty"` + // message is a required human-readable message to be logged by the Kubernetes API server + // if the CEL expression defined in 'expression' fails. + // message must be at least 1 character in length and must not exceed 256 characters. + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + Message string `json:"message,omitempty"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_image_policy.go b/vendor/github.com/openshift/api/config/v1/types_cluster_image_policy.go index ca604e05c5bd..491390098c37 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_image_policy.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_image_policy.go @@ -52,7 +52,7 @@ type ClusterImagePolicySpec struct { // policy is a required field that contains configuration to allow scopes to be verified, and defines how // images not matching the verification policy will be treated. // +required - Policy Policy `json:"policy"` + Policy ImageSigstoreVerificationPolicy `json:"policy"` } // +k8s:deepcopy-gen=true diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go index cfac9689e434..5f36f693de1a 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go @@ -72,8 +72,10 @@ type ClusterVersionSpec struct { // // If an upgrade fails the operator will halt and report status // about the failing component. Setting the desired update value back to - // the previous version will cause a rollback to be attempted. Not all - // rollbacks will succeed. + // the previous version will cause a rollback to be attempted if the + // previous version is within the current minor version. Not all + // rollbacks will succeed, and some may unrecoverably break the + // cluster. // // +optional DesiredUpdate *Update `json:"desiredUpdate,omitempty"` @@ -197,9 +199,23 @@ type ClusterVersionStatus struct { // availableUpdates. 
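The TokenClaimValidationRule, TokenClaimValidationCELRule and TokenUserValidationRule types above add CEL-based validation behind the ExternalOIDCWithUpstreamParity gate. A minimal sketch of how a consumer of the vendored config/v1 package might populate these fields follows; the CEL expressions and the claims/user variable names are illustrative assumptions modelled on the upstream structured authentication configuration, and every other OIDCProvider field is left at its zero value.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	provider := configv1.OIDCProvider{
		// A claim validation rule of type CEL sets cel and omits requiredClaim,
		// matching the cross-field validation rules above.
		ClaimValidationRules: []configv1.TokenClaimValidationRule{{
			Type: configv1.TokenValidationRuleTypeCEL,
			CEL: configv1.TokenClaimValidationCELRule{
				Expression: "claims.email_verified == true", // assumed claims variable, as in upstream structured authn config
				Message:    "token must carry a verified email claim",
			},
		}},
		// userValidationRules run against the mapped cluster user identity.
		UserValidationRules: []configv1.TokenUserValidationRule{{
			Expression: "!user.username.startsWith('system:')", // assumed user variable, as in upstream structured authn config
			Message:    "usernames with the system: prefix are reserved",
		}},
	}
	fmt.Printf("%d claim rule(s), %d user rule(s)\n",
		len(provider.ClaimValidationRules), len(provider.UserValidationRules))
}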
This list may be empty if no updates are // recommended, if the update service is unavailable, or if an empty // or invalid channel has been specified. + // +kubebuilder:validation:MaxItems=500 // +listType=atomic // +optional ConditionalUpdates []ConditionalUpdate `json:"conditionalUpdates,omitempty"` + + // conditionalUpdateRisks contains the list of risks associated with conditionalUpdates. + // When performing a conditional update, all its associated risks will be compared with the set of accepted risks in the spec.desiredUpdate.acceptRisks field. + // If all risks for a conditional update are included in the spec.desiredUpdate.acceptRisks set, the conditional update can proceed, otherwise it is blocked. + // The risk names in the list must be unique. + // conditionalUpdateRisks must not contain more than 500 entries. + // +openshift:enable:FeatureGate=ClusterUpdateAcceptRisks + // +kubebuilder:validation:MaxItems=500 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=name + // +optional + ConditionalUpdateRisks []ConditionalUpdateRisk `json:"conditionalUpdateRisks,omitempty"` } // UpdateState is a constant representing whether an update was successfully @@ -256,7 +272,7 @@ type UpdateHistory struct { Verified bool `json:"verified"` // acceptedRisks records risks which were accepted to initiate the update. - // For example, it may menition an Upgradeable=False or missing signature + // For example, it may mention an Upgradeable=False or missing signature // that was overridden via desiredUpdate.force, or an update that was // initiated despite not being in the availableUpdates set of recommended // update targets. @@ -718,14 +734,42 @@ type Update struct { Image string `json:"image"` // force allows an administrator to update to an image that has failed - // verification or upgradeable checks. This option should only - // be used when the authenticity of the provided image has been verified out - // of band because the provided image will run with full administrative access - // to the cluster. Do not use this flag with images that comes from unknown + // verification or upgradeable checks that are designed to keep your + // cluster safe. Only use this if: + // * you are testing unsigned release images in short-lived test clusters or + // * you are working around a known bug in the cluster-version + // operator and you have verified the authenticity of the provided + // image yourself. + // The provided image will run with full administrative access + // to the cluster. Do not use this flag with images that come from unknown // or potentially malicious sources. // // +optional Force bool `json:"force"` + + // acceptRisks is an optional set of names of conditional update risks that are considered acceptable. + // A conditional update is performed only if all of its risks are acceptable. + // This list may contain entries that apply to current, previous or future updates. + // The entries therefore may not map directly to a risk in .status.conditionalUpdateRisks. + // acceptRisks must not contain more than 1000 entries. + // Entries in this list must be unique. + // +openshift:enable:FeatureGate=ClusterUpdateAcceptRisks + // +kubebuilder:validation:MaxItems=1000 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=name + // +optional + AcceptRisks []AcceptRisk `json:"acceptRisks,omitempty"` +} + +// AcceptRisk represents a risk that is considered acceptable. +type AcceptRisk struct { + // name is the name of the acceptable risk. 
+ // It must be a non-empty string and must not exceed 256 characters. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +required + Name string `json:"name,omitempty"` } // Release represents an OpenShift release image and associated metadata. @@ -781,12 +825,27 @@ type ConditionalUpdate struct { // +required Release Release `json:"release"` + // riskNames represents the set of the names of conditionalUpdateRisks that are relevant to this update for some clusters. + // The Applies condition of each conditionalUpdateRisks entry declares if that risk applies to this cluster. + // A conditional update is accepted only if each of its risks either does not apply to the cluster or is considered acceptable by the cluster administrator. + // The latter means that the risk names are included in value of the spec.desiredUpdate.acceptRisks field. + // Entries must be unique and must not exceed 256 characters. + // riskNames must not contain more than 500 entries. + // +openshift:enable:FeatureGate=ClusterUpdateAcceptRisks + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:items:MaxLength=256 + // +kubebuilder:validation:MaxItems=500 + // +listType=set + // +optional + RiskNames []string `json:"riskNames,omitempty"` + // risks represents the range of issues associated with // updating to the target release. The cluster-version // operator will evaluate all entries, and only recommend the // update if there is at least one entry and all entries // recommend the update. // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=200 // +patchMergeKey=name // +patchStrategy=merge // +listType=map @@ -807,6 +866,20 @@ type ConditionalUpdate struct { // for not recommending a conditional update. // +k8s:deepcopy-gen=true type ConditionalUpdateRisk struct { + // conditions represents the observations of the conditional update + // risk's current status. Known types are: + // * Applies, for whether the risk applies to the current cluster. + // The condition's types in the list must be unique. + // conditions must not contain more than one entry. + // +openshift:enable:FeatureGate=ClusterUpdateAcceptRisks + // +kubebuilder:validation:XValidation:rule="self.exists_one(x, x.type == 'Applies')",message="must contain a condition of type 'Applies'" + // +kubebuilder:validation:MaxItems=8 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + // url contains information about this risk. // +kubebuilder:validation:Format=uri // +kubebuilder:validation:MinLength=1 diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go index 169e29c5c5bf..e111d518ab30 100644 --- a/vendor/github.com/openshift/api/config/v1/types_feature.go +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -53,8 +53,12 @@ var ( // your cluster may fail in an unrecoverable way. CustomNoUpgrade FeatureSet = "CustomNoUpgrade" + // OKD turns on features for OKD. Turning this feature set ON is supported for OKD clusters, but NOT for OpenShift clusters. + // Once enabled, this feature set cannot be changed back to Default, but can be changed to other feature sets and it allows upgrades. + OKD FeatureSet = "OKD" + // AllFixedFeatureSets are the featuresets that have known featuregates. Custom doesn't for instance. 
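The acceptRisks, riskNames and conditionalUpdateRisks fields above tie conditional updates to administrator-accepted risks. The sketch below shows one way a client of the vendored config/v1 package could combine them; the risksByName index and the helper functions are an illustration of the documented semantics, not the cluster-version operator's actual algorithm.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// riskApplies reports whether a risk's Applies condition is currently True.
func riskApplies(risk configv1.ConditionalUpdateRisk) bool {
	for _, c := range risk.Conditions {
		if c.Type == "Applies" {
			return c.Status == metav1.ConditionTrue
		}
	}
	return false
}

// updateAcceptable reports whether every applicable risk named by the
// conditional update is listed in spec.desiredUpdate.acceptRisks.
func updateAcceptable(cu configv1.ConditionalUpdate, risksByName map[string]configv1.ConditionalUpdateRisk, accepted []configv1.AcceptRisk) bool {
	acceptedSet := map[string]bool{}
	for _, a := range accepted {
		acceptedSet[a.Name] = true
	}
	for _, name := range cu.RiskNames {
		if risk, ok := risksByName[name]; ok && riskApplies(risk) && !acceptedSet[name] {
			return false
		}
	}
	return true
}

func main() {
	risksByName := map[string]configv1.ConditionalUpdateRisk{
		"ExampleRisk": {Conditions: []metav1.Condition{{Type: "Applies", Status: metav1.ConditionTrue}}},
	}
	cu := configv1.ConditionalUpdate{RiskNames: []string{"ExampleRisk"}}
	desired := configv1.Update{AcceptRisks: []configv1.AcceptRisk{{Name: "ExampleRisk"}}}
	// Prints true: the only applicable risk is in the accepted set.
	fmt.Println(updateAcceptable(cu, risksByName, desired.AcceptRisks))
}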
LatencySensitive is dead - AllFixedFeatureSets = []FeatureSet{Default, TechPreviewNoUpgrade, DevPreviewNoUpgrade} + AllFixedFeatureSets = []FeatureSet{Default, TechPreviewNoUpgrade, DevPreviewNoUpgrade, OKD} ) type FeatureGateSpec struct { @@ -67,10 +71,11 @@ type FeatureGateSelection struct { // Turning on or off features may cause irreversible changes in your cluster which cannot be undone. // +unionDiscriminator // +optional - // +kubebuilder:validation:Enum=CustomNoUpgrade;DevPreviewNoUpgrade;TechPreviewNoUpgrade;"" + // +kubebuilder:validation:Enum=CustomNoUpgrade;DevPreviewNoUpgrade;TechPreviewNoUpgrade;OKD;"" // +kubebuilder:validation:XValidation:rule="oldSelf == 'CustomNoUpgrade' ? self == 'CustomNoUpgrade' : true",message="CustomNoUpgrade may not be changed" // +kubebuilder:validation:XValidation:rule="oldSelf == 'TechPreviewNoUpgrade' ? self == 'TechPreviewNoUpgrade' : true",message="TechPreviewNoUpgrade may not be changed" // +kubebuilder:validation:XValidation:rule="oldSelf == 'DevPreviewNoUpgrade' ? self == 'DevPreviewNoUpgrade' : true",message="DevPreviewNoUpgrade may not be changed" + // +kubebuilder:validation:XValidation:rule="oldSelf == 'OKD' ? self != '' : true",message="OKD cannot transition to Default" FeatureSet FeatureSet `json:"featureSet,omitempty"` // customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. diff --git a/vendor/github.com/openshift/api/config/v1/types_image_policy.go b/vendor/github.com/openshift/api/config/v1/types_image_policy.go index 54bd21adb4ed..3cc46141c9c9 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image_policy.go +++ b/vendor/github.com/openshift/api/config/v1/types_image_policy.go @@ -51,7 +51,7 @@ type ImagePolicySpec struct { // policy is a required field that contains configuration to allow scopes to be verified, and defines how // images not matching the verification policy will be treated. // +required - Policy Policy `json:"policy"` + Policy ImageSigstoreVerificationPolicy `json:"policy"` } // +kubebuilder:validation:XValidation:rule="size(self.split('/')[0].split('.')) == 1 ? self.split('/')[0].split('.')[0].split(':')[0] == 'localhost' : true",message="invalid image scope format, scope must contain a fully qualified domain name or 'localhost'" @@ -60,8 +60,8 @@ type ImagePolicySpec struct { // +kubebuilder:validation:MaxLength=512 type ImageScope string -// Policy defines the verification policy for the items in the scopes list. -type Policy struct { +// ImageSigstoreVerificationPolicy defines the verification policy for the items in the scopes list. +type ImageSigstoreVerificationPolicy struct { // rootOfTrust is a required field that defines the root of trust for verifying image signatures during retrieval. // This allows image consumers to specify policyType and corresponding configuration of the policy, matching how the policy was generated. // +required @@ -82,25 +82,25 @@ type PolicyRootOfTrust struct { // Allowed values are "PublicKey", "FulcioCAWithRekor", and "PKI". // When set to "PublicKey", the policy relies on a sigstore publicKey and may optionally use a Rekor verification. // When set to "FulcioCAWithRekor", the policy is based on the Fulcio certification and incorporates a Rekor verification. - // When set to "PKI", the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). This value is enabled by turning on the SigstoreImageVerificationPKI feature gate. 
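Referring back to the FeatureGate change above, the new OKD feature set is selected like the other fixed feature sets, and the added validation rule only prevents it from transitioning back to Default. A minimal sketch with the vendored config/v1 package:

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	selection := configv1.FeatureGateSelection{FeatureSet: configv1.OKD}
	// OKD is now listed in AllFixedFeatureSets alongside Default,
	// TechPreviewNoUpgrade and DevPreviewNoUpgrade.
	for _, fs := range configv1.AllFixedFeatureSets {
		if fs == selection.FeatureSet {
			fmt.Println("selected a fixed feature set:", fs)
		}
	}
}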
+ // When set to "PKI", the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). // +unionDiscriminator // +required PolicyType PolicyType `json:"policyType"` // publicKey defines the root of trust configuration based on a sigstore public key. Optionally include a Rekor public key for Rekor verification. // publicKey is required when policyType is PublicKey, and forbidden otherwise. // +optional - PublicKey *PublicKey `json:"publicKey,omitempty"` + PublicKey *ImagePolicyPublicKeyRootOfTrust `json:"publicKey,omitempty"` // fulcioCAWithRekor defines the root of trust configuration based on the Fulcio certificate and the Rekor public key. // fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise // For more information about Fulcio and Rekor, please refer to the document at: // https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor // +optional - FulcioCAWithRekor *FulcioCAWithRekor `json:"fulcioCAWithRekor,omitempty"` + FulcioCAWithRekor *ImagePolicyFulcioCAWithRekorRootOfTrust `json:"fulcioCAWithRekor,omitempty"` // pki defines the root of trust configuration based on Bring Your Own Public Key Infrastructure (BYOPKI) Root CA(s) and corresponding intermediate certificates. // pki is required when policyType is PKI, and forbidden otherwise. // +optional // +openshift:enable:FeatureGate=SigstoreImageVerificationPKI - PKI *PKI `json:"pki,omitempty"` + PKI *ImagePolicyPKIRootOfTrust `json:"pki,omitempty"` } // +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=PublicKey;FulcioCAWithRekor @@ -113,8 +113,8 @@ const ( PKIRootOfTrust PolicyType = "PKI" ) -// PublicKey defines the root of trust based on a sigstore public key. -type PublicKey struct { +// ImagePolicyPublicKeyRootOfTrust defines the root of trust based on a sigstore public key. +type ImagePolicyPublicKeyRootOfTrust struct { // keyData is a required field contains inline base64-encoded data for the PEM format public key. // keyData must be at most 8192 characters. // +required @@ -132,8 +132,8 @@ type PublicKey struct { RekorKeyData []byte `json:"rekorKeyData,omitempty"` } -// FulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key. -type FulcioCAWithRekor struct { +// ImagePolicyFulcioCAWithRekorRootOfTrust defines the root of trust based on the Fulcio certificate and the Rekor public key. +type ImagePolicyFulcioCAWithRekorRootOfTrust struct { // fulcioCAData is a required field contains inline base64-encoded data for the PEM format fulcio CA. // fulcioCAData must be at most 8192 characters. // +required @@ -172,8 +172,8 @@ type PolicyFulcioSubject struct { SignedEmail string `json:"signedEmail"` } -// PKI defines the root of trust based on Root CA(s) and corresponding intermediate certificates. -type PKI struct { +// ImagePolicyPKIRootOfTrust defines the root of trust based on Root CA(s) and corresponding intermediate certificates. +type ImagePolicyPKIRootOfTrust struct { // caRootsData contains base64-encoded data of a certificate bundle PEM file, which contains one or more CA roots in the PEM format. The total length of the data must not exceed 8192 characters. 
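The renames above (Policy to ImageSigstoreVerificationPolicy, PublicKey to ImagePolicyPublicKeyRootOfTrust, FulcioCAWithRekor to ImagePolicyFulcioCAWithRekorRootOfTrust, PKI to ImagePolicyPKIRootOfTrust) are breaking for Go consumers of the vendored config/v1 package, while the JSON tags and therefore the serialized form stay the same. The sketch below shows the new names in use; the RootOfTrust and KeyData field names are assumptions inferred from the rootOfTrust and keyData documentation, since their declarations fall outside this hunk.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	rootOfTrust := configv1.PolicyRootOfTrust{
		// Allowed policyType values per the docs above: PublicKey, FulcioCAWithRekor, PKI.
		PolicyType: configv1.PolicyType("PublicKey"),
		// Previously *configv1.PublicKey.
		PublicKey: &configv1.ImagePolicyPublicKeyRootOfTrust{
			KeyData: []byte("<base64-encoded PEM public key>"), // assumed field name, per the keyData docs
		},
	}
	spec := configv1.ImagePolicySpec{
		// Previously configv1.Policy; the json tag "policy" is unchanged.
		Policy: configv1.ImageSigstoreVerificationPolicy{
			RootOfTrust: rootOfTrust, // assumed field name, per the rootOfTrust docs
		},
	}
	fmt.Println(spec.Policy.RootOfTrust.PolicyType)
}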
// +required // +kubebuilder:validation:MaxLength=8192 diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index effafde644f2..313ed57a4144 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -183,6 +183,17 @@ const ( LoadBalancerTypeOpenShiftManagedDefault PlatformLoadBalancerType = "OpenShiftManagedDefault" ) +// DNSRecordsType defines whether api, api-int, and ingress records are provided by +// the internal DNS infrastructure or must be configured external to the cluster. +// +kubebuilder:validation:Enum=Internal;External +// +enum +type DNSRecordsType string + +const ( + DNSRecordsTypeExternal DNSRecordsType = "External" + DNSRecordsTypeInternal DNSRecordsType = "Internal" +) + // PlatformType is a specific supported infrastructure provider. // +kubebuilder:validation:Enum="";AWS;Azure;BareMetal;GCP;Libvirt;OpenStack;None;VSphere;oVirt;IBMCloud;KubeVirt;EquinixMetal;PowerVS;AlibabaCloud;Nutanix;External type PlatformType string @@ -491,6 +502,21 @@ type AWSServiceEndpoint struct { URL string `json:"url"` } +// IPFamilyType represents the IP protocol family that cloud platform resources should use. +// +kubebuilder:validation:Enum=IPv4;DualStackIPv6Primary;DualStackIPv4Primary +type IPFamilyType string + +const ( + // IPv4 indicates that cloud platform resources should use IPv4 addressing only. + IPv4 IPFamilyType = "IPv4" + + // DualStackIPv6Primary indicates that cloud platform resources should use dual-stack networking with IPv6 as primary. + DualStackIPv6Primary IPFamilyType = "DualStackIPv6Primary" + + // DualStackIPv4Primary indicates that cloud platform resources should use dual-stack networking with IPv4 as primary. + DualStackIPv4Primary IPFamilyType = "DualStackIPv4Primary" +) + // AWSPlatformSpec holds the desired state of the Amazon Web Services infrastructure provider. // This only includes fields that can be modified in the cluster. type AWSPlatformSpec struct { @@ -536,6 +562,18 @@ type AWSPlatformStatus struct { // +optional // +nullable CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"` + + // ipFamily specifies the IP protocol family that should be used for AWS + // network resources. This controls whether AWS resources are created with + // IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary + // protocol family. + // + // +default="IPv4" + // +kubebuilder:default="IPv4" + // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="ipFamily is immutable once set" + // +openshift:enable:FeatureGate=AWSDualStackInstall + // +optional + IPFamily IPFamilyType `json:"ipFamily,omitempty"` } // AWSResourceTag is a tag to apply to AWS resources created for the cluster. @@ -607,6 +645,18 @@ type AzurePlatformStatus struct { // +openshift:enable:FeatureGate=AzureClusterHostedDNSInstall // +optional CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"` + + // ipFamily specifies the IP protocol family that should be used for Azure + // network resources. This controls whether Azure resources are created with + // IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary + // protocol family. 
+ // + // +default="IPv4" + // +kubebuilder:default="IPv4" + // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="ipFamily is immutable once set" + // +openshift:enable:FeatureGate=AzureDualStackInstall + // +optional + IPFamily IPFamilyType `json:"ipFamily,omitempty"` } // AzureResourceTag is a tag to apply to Azure resources created for the cluster. @@ -649,74 +699,43 @@ const ( AzureStackCloud AzureCloudEnvironment = "AzureStackCloud" ) +// Start: TOMBSTONE + // GCPServiceEndpointName is the name of the GCP Service Endpoint. // +kubebuilder:validation:Enum=Compute;Container;CloudResourceManager;DNS;File;IAM;IAMCredentials;OAuth;ServiceUsage;Storage;STS -type GCPServiceEndpointName string - -const ( - // GCPServiceEndpointNameCompute is the name used for the GCP Compute Service endpoint. - GCPServiceEndpointNameCompute GCPServiceEndpointName = "Compute" - - // GCPServiceEndpointNameContainer is the name used for the GCP Container Service endpoint. - GCPServiceEndpointNameContainer GCPServiceEndpointName = "Container" - - // GCPServiceEndpointNameCloudResource is the name used for the GCP Resource Manager Service endpoint. - GCPServiceEndpointNameCloudResource GCPServiceEndpointName = "CloudResourceManager" - - // GCPServiceEndpointNameDNS is the name used for the GCP DNS Service endpoint. - GCPServiceEndpointNameDNS GCPServiceEndpointName = "DNS" - - // GCPServiceEndpointNameFile is the name used for the GCP File Service endpoint. - GCPServiceEndpointNameFile GCPServiceEndpointName = "File" - - // GCPServiceEndpointNameIAM is the name used for the GCP IAM Service endpoint. - GCPServiceEndpointNameIAM GCPServiceEndpointName = "IAM" - - // GCPServiceEndpointNameIAMCredentials is the name used for the GCP IAM Credentials Service endpoint. - GCPServiceEndpointNameIAMCredentials GCPServiceEndpointName = "IAMCredentials" - - // GCPServiceEndpointNameOAuth is the name used for the GCP OAuth2 Service endpoint. - GCPServiceEndpointNameOAuth GCPServiceEndpointName = "OAuth" - - // GCPServiceEndpointNameServiceUsage is the name used for the GCP Service Usage Service endpoint. - GCPServiceEndpointNameServiceUsage GCPServiceEndpointName = "ServiceUsage" - - // GCPServiceEndpointNameStorage is the name used for the GCP Storage Service endpoint. - GCPServiceEndpointNameStorage GCPServiceEndpointName = "Storage" - - // GCPServiceEndpointNameSTS is the name used for the GCP STS Service endpoint. - GCPServiceEndpointNameSTS GCPServiceEndpointName = "STS" -) +//type GCPServiceEndpointName string // GCPServiceEndpoint store the configuration of a custom url to // override existing defaults of GCP Services. -type GCPServiceEndpoint struct { - // name is the name of the GCP service whose endpoint is being overridden. - // This must be provided and cannot be empty. - // - // Allowed values are Compute, Container, CloudResourceManager, DNS, File, IAM, ServiceUsage, - // Storage, and TagManager. - // - // As an example, when setting the name to Compute all requests made by the caller to the GCP Compute - // Service will be directed to the endpoint specified in the url field. - // - // +required - Name GCPServiceEndpointName `json:"name"` +// type GCPServiceEndpoint struct { +// name is the name of the GCP service whose endpoint is being overridden. +// This must be provided and cannot be empty. +// +// Allowed values are Compute, Container, CloudResourceManager, DNS, File, IAM, ServiceUsage, +// Storage, and TagManager. 
+// +// As an example, when setting the name to Compute all requests made by the caller to the GCP Compute +// Service will be directed to the endpoint specified in the url field. +// +// +required +// Name GCPServiceEndpointName `json:"name"` - // url is a fully qualified URI that overrides the default endpoint for a client using the GCP service specified - // in the name field. - // url is required, must use the scheme https, must not be more than 253 characters in length, - // and must be a valid URL according to Go's net/url package (https://pkg.go.dev/net/url#URL) - // - // An example of a valid endpoint that overrides the Compute Service: "https://compute-myendpoint1.p.googleapis.com" - // - // +required - // +kubebuilder:validation:MaxLength=253 - // +kubebuilder:validation:XValidation:rule="isURL(self)",message="must be a valid URL" - // +kubebuilder:validation:XValidation:rule="isURL(self) ? (url(self).getScheme() == \"https\") : true",message="scheme must be https" - // +kubebuilder:validation:XValidation:rule="url(self).getEscapedPath() == \"\" || url(self).getEscapedPath() == \"/\"",message="url must consist only of a scheme and domain. The url path must be empty." - URL string `json:"url"` -} +// url is a fully qualified URI that overrides the default endpoint for a client using the GCP service specified +// in the name field. +// url is required, must use the scheme https, must not be more than 253 characters in length, +// and must be a valid URL according to Go's net/url package (https://pkg.go.dev/net/url#URL) +// +// An example of a valid endpoint that overrides the Compute Service: "https://compute-myendpoint1.p.googleapis.com" +// +// +required +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:XValidation:rule="isURL(self)",message="must be a valid URL" +// +kubebuilder:validation:XValidation:rule="isURL(self) ? (url(self).getScheme() == \"https\") : true",message="scheme must be https" +// +kubebuilder:validation:XValidation:rule="url(self).getEscapedPath() == \"\" || url(self).getEscapedPath() == \"/\"",message="url must consist only of a scheme and domain. The url path must be empty." +// URL string `json:"url"` +//} + +// End: TOMBSTONE // GCPPlatformSpec holds the desired state of the Google Cloud Platform infrastructure provider. // This only includes fields that can be modified in the cluster. @@ -772,18 +791,21 @@ type GCPPlatformStatus struct { // +nullable CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"` + // This field was introduced and removed under tech preview. // serviceEndpoints specifies endpoints that override the default endpoints // used when creating clients to interact with GCP services. // When not specified, the default endpoint for the GCP region will be used. // Only 1 endpoint override is permitted for each GCP service. // The maximum number of endpoint overrides allowed is 11. + // To avoid conflicts with serialisation, this field name may never be used again. + // Tombstone the field as a reminder. 
// +listType=map // +listMapKey=name // +kubebuilder:validation:MaxItems=11 // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x.name == y.name))",message="only 1 endpoint override is permitted per GCP service name" // +optional // +openshift:enable:FeatureGate=GCPCustomAPIEndpointsInstall - ServiceEndpoints []GCPServiceEndpoint `json:"serviceEndpoints,omitempty"` + // ServiceEndpoints []GCPServiceEndpoint `json:"serviceEndpoints,omitempty"` } // GCPResourceLabel is a label to apply to GCP resources created for the cluster. @@ -983,6 +1005,7 @@ type BareMetalPlatformSpec struct { // BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider. // For more information about the network architecture used with the BareMetal platform type, see: // https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md +// +openshift:validation:FeatureGateAwareXValidation:featureGate=OnPremDNSRecords,rule="!has(self.dnsRecordsType) || self.dnsRecordsType == 'Internal' || (has(self.loadBalancer) && self.loadBalancer.type == 'UserManaged')",message="dnsRecordsType may only be set to External when loadBalancer.type is UserManaged" type BareMetalPlatformStatus struct { // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used // by components inside the cluster, like kubelets using the infrastructure rather @@ -1035,6 +1058,22 @@ type BareMetalPlatformStatus struct { // +optional LoadBalancer *BareMetalPlatformLoadBalancer `json:"loadBalancer,omitempty"` + // dnsRecordsType determines whether records for api, api-int, and ingress + // are provided by the internal DNS service or externally. + // Allowed values are `Internal`, `External`, and omitted. + // When set to `Internal`, records are provided by the internal infrastructure and + // no additional user configuration is required for the cluster to function. + // When set to `External`, records are not provided by the internal infrastructure + // and must be configured by the user on a DNS server outside the cluster. + // Cluster nodes must use this external server for their upstream DNS requests. + // This value may only be set when loadBalancer.type is set to UserManaged. + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default is `Internal`. + // +openshift:enable:FeatureGate=OnPremDNSRecords + // +optional + DNSRecordsType DNSRecordsType `json:"dnsRecordsType,omitempty"` + // machineNetworks are IP networks used to connect all the OpenShift cluster nodes. // +listType=atomic // +kubebuilder:validation:MaxItems=32 @@ -1111,6 +1150,7 @@ type OpenStackPlatformSpec struct { } // OpenStackPlatformStatus holds the current status of the OpenStack infrastructure provider. 
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=OnPremDNSRecords,rule="!has(self.dnsRecordsType) || self.dnsRecordsType == 'Internal' || (has(self.loadBalancer) && self.loadBalancer.type == 'UserManaged')",message="dnsRecordsType may only be set to External when loadBalancer.type is UserManaged" type OpenStackPlatformStatus struct { // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used // by components inside the cluster, like kubelets using the infrastructure rather @@ -1167,6 +1207,22 @@ type OpenStackPlatformStatus struct { // +optional LoadBalancer *OpenStackPlatformLoadBalancer `json:"loadBalancer,omitempty"` + // dnsRecordsType determines whether records for api, api-int, and ingress + // are provided by the internal DNS service or externally. + // Allowed values are `Internal`, `External`, and omitted. + // When set to `Internal`, records are provided by the internal infrastructure and + // no additional user configuration is required for the cluster to function. + // When set to `External`, records are not provided by the internal infrastructure + // and must be configured by the user on a DNS server outside the cluster. + // Cluster nodes must use this external server for their upstream DNS requests. + // This value may only be set when loadBalancer.type is set to UserManaged. + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default is `Internal`. + // +openshift:enable:FeatureGate=OnPremDNSRecords + // +optional + DNSRecordsType DNSRecordsType `json:"dnsRecordsType,omitempty"` + // machineNetworks are IP networks used to connect all the OpenShift cluster nodes. // +listType=atomic // +kubebuilder:validation:MaxItems=32 @@ -1201,6 +1257,7 @@ type OvirtPlatformLoadBalancer struct { type OvirtPlatformSpec struct{} // OvirtPlatformStatus holds the current status of the oVirt infrastructure provider. +// +openshift:validation:FeatureGateAwareXValidation:featureGate=OnPremDNSRecords,rule="!has(self.dnsRecordsType) || self.dnsRecordsType == 'Internal' || (has(self.loadBalancer) && self.loadBalancer.type == 'UserManaged')",message="dnsRecordsType may only be set to External when loadBalancer.type is UserManaged" type OvirtPlatformStatus struct { // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used // by components inside the cluster, like kubelets using the infrastructure rather @@ -1247,6 +1304,22 @@ type OvirtPlatformStatus struct { // +kubebuilder:default={"type": "OpenShiftManagedDefault"} // +optional LoadBalancer *OvirtPlatformLoadBalancer `json:"loadBalancer,omitempty"` + + // dnsRecordsType determines whether records for api, api-int, and ingress + // are provided by the internal DNS service or externally. + // Allowed values are `Internal`, `External`, and omitted. + // When set to `Internal`, records are provided by the internal infrastructure and + // no additional user configuration is required for the cluster to function. + // When set to `External`, records are not provided by the internal infrastructure + // and must be configured by the user on a DNS server outside the cluster. + // Cluster nodes must use this external server for their upstream DNS requests. + // This value may only be set when loadBalancer.type is set to UserManaged. 
+ // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default is `Internal`. + // +openshift:enable:FeatureGate=OnPremDNSRecords + // +optional + DNSRecordsType DNSRecordsType `json:"dnsRecordsType,omitempty"` } // VSpherePlatformLoadBalancer defines the load balancer used by the cluster on VSphere platform. @@ -1644,6 +1717,7 @@ type VSpherePlatformSpec struct { } // VSpherePlatformStatus holds the current status of the vSphere infrastructure provider. +// +openshift:validation:FeatureGateAwareXValidation:featureGate=OnPremDNSRecords,rule="!has(self.dnsRecordsType) || self.dnsRecordsType == 'Internal' || (has(self.loadBalancer) && self.loadBalancer.type == 'UserManaged')",message="dnsRecordsType may only be set to External when loadBalancer.type is UserManaged" type VSpherePlatformStatus struct { // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used // by components inside the cluster, like kubelets using the infrastructure rather @@ -1696,6 +1770,22 @@ type VSpherePlatformStatus struct { // +optional LoadBalancer *VSpherePlatformLoadBalancer `json:"loadBalancer,omitempty"` + // dnsRecordsType determines whether records for api, api-int, and ingress + // are provided by the internal DNS service or externally. + // Allowed values are `Internal`, `External`, and omitted. + // When set to `Internal`, records are provided by the internal infrastructure and + // no additional user configuration is required for the cluster to function. + // When set to `External`, records are not provided by the internal infrastructure + // and must be configured by the user on a DNS server outside the cluster. + // Cluster nodes must use this external server for their upstream DNS requests. + // This value may only be set when loadBalancer.type is set to UserManaged. + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default is `Internal`. + // +openshift:enable:FeatureGate=OnPremDNSRecords + // +optional + DNSRecordsType DNSRecordsType `json:"dnsRecordsType,omitempty"` + // machineNetworks are IP networks used to connect all the OpenShift cluster nodes. // +listType=atomic // +kubebuilder:validation:MaxItems=32 @@ -2069,6 +2159,7 @@ type NutanixPrismElementEndpoint struct { } // NutanixPlatformStatus holds the current status of the Nutanix infrastructure provider. +// +openshift:validation:FeatureGateAwareXValidation:featureGate=OnPremDNSRecords,rule="!has(self.dnsRecordsType) || self.dnsRecordsType == 'Internal' || (has(self.loadBalancer) && self.loadBalancer.type == 'UserManaged')",message="dnsRecordsType may only be set to External when loadBalancer.type is UserManaged" type NutanixPlatformStatus struct { // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used // by components inside the cluster, like kubelets using the infrastructure rather @@ -2112,6 +2203,22 @@ type NutanixPlatformStatus struct { // +kubebuilder:default={"type": "OpenShiftManagedDefault"} // +optional LoadBalancer *NutanixPlatformLoadBalancer `json:"loadBalancer,omitempty"` + + // dnsRecordsType determines whether records for api, api-int, and ingress + // are provided by the internal DNS service or externally. + // Allowed values are `Internal`, `External`, and omitted. 
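The dnsRecordsType and ipFamily additions above repeat across the on-prem and cloud platform status types. The sketch below shows a consumer of the vendored config/v1 package reading them; treating an omitted dnsRecordsType as Internal follows the documented default, and the values set here are illustrative.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

// externalDNSRequired reports whether api, api-int and ingress records must be
// provided outside the cluster. An omitted value is treated as Internal, the
// documented default.
func externalDNSRequired(t configv1.DNSRecordsType) bool {
	return t == configv1.DNSRecordsTypeExternal
}

func main() {
	bareMetal := configv1.BareMetalPlatformStatus{
		// Per the CEL rule above, External is only admitted when
		// loadBalancer.type is UserManaged.
		DNSRecordsType: configv1.DNSRecordsTypeExternal,
	}
	fmt.Println("user-managed DNS records needed:", externalDNSRequired(bareMetal.DNSRecordsType))

	aws := configv1.AWSPlatformStatus{
		// New ipFamily field: IPv4 (default), DualStackIPv4Primary or DualStackIPv6Primary.
		IPFamily: configv1.DualStackIPv6Primary,
	}
	fmt.Println("AWS IP family:", aws.IPFamily)
}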
+ // When set to `Internal`, records are provided by the internal infrastructure and + // no additional user configuration is required for the cluster to function. + // When set to `External`, records are not provided by the internal infrastructure + // and must be configured by the user on a DNS server outside the cluster. + // Cluster nodes must use this external server for their upstream DNS requests. + // This value may only be set when loadBalancer.type is set to UserManaged. + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default is `Internal`. + // +openshift:enable:FeatureGate=OnPremDNSRecords + // +optional + DNSRecordsType DNSRecordsType `json:"dnsRecordsType,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/openshift/api/config/v1/types_node.go b/vendor/github.com/openshift/api/config/v1/types_node.go index 1282f3315816..2f627be11e90 100644 --- a/vendor/github.com/openshift/api/config/v1/types_node.go +++ b/vendor/github.com/openshift/api/config/v1/types_node.go @@ -79,7 +79,6 @@ type CgroupMode string const ( CgroupModeEmpty CgroupMode = "" // Empty string indicates to honor user set value on the system that should not be overridden by OpenShift - CgroupModeV1 CgroupMode = "v1" CgroupModeV2 CgroupMode = "v2" CgroupModeDefault CgroupMode = CgroupModeV2 ) diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/vendor/github.com/openshift/api/config/v1/types_scheduling.go index c90d5633f68d..a81ed9f30c80 100644 --- a/vendor/github.com/openshift/api/config/v1/types_scheduling.go +++ b/vendor/github.com/openshift/api/config/v1/types_scheduling.go @@ -48,7 +48,9 @@ type SchedulerSpec struct { // +optional Profile SchedulerProfile `json:"profile,omitempty"` // profileCustomizations contains configuration for modifying the default behavior of existing scheduler profiles. - // +openshift:enable:FeatureGate=DynamicResourceAllocation + // Deprecated: no longer needed, since DRA is GA starting with 4.21, and + // is enabled by' default in the cluster, this field will be removed in 4.24. + // +openshift:enable:FeatureGate=HyperShiftOnlyDynamicResourceAllocation // +optional ProfileCustomizations ProfileCustomizations `json:"profileCustomizations"` // defaultNodeSelector helps set the cluster-wide default node selector to diff --git a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go index b18ef647c2fc..1e5189796e4c 100644 --- a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go +++ b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go @@ -4,178 +4,130 @@ package v1 // is used by operators to apply TLS security settings to operands. // +union type TLSSecurityProfile struct { - // type is one of Old, Intermediate, Modern or Custom. Custom provides - // the ability to specify individual TLS security profile parameters. - // Old, Intermediate and Modern are TLS security profiles based on: + // type is one of Old, Intermediate, Modern or Custom. Custom provides the + // ability to specify individual TLS security profile parameters. 
// - // https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations + // The profiles are currently based on version 5.0 of the Mozilla Server Side TLS + // configuration guidelines (released 2019-06-28) with TLS 1.3 ciphers added for + // forward compatibility. See: https://ssl-config.mozilla.org/guidelines/5.0.json // - // The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers - // are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be - // reduced. - // - // Note that the Modern profile is currently not supported because it is not - // yet well adopted by common software libraries. + // The profiles are intent based, so they may change over time as new ciphers are + // developed and existing ciphers are found to be insecure. Depending on + // precisely which ciphers are available to a process, the list may be reduced. // // +unionDiscriminator // +optional Type TLSProfileType `json:"type"` - // old is a TLS security profile based on: - // - // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility + + // old is a TLS profile for use when services need to be accessed by very old + // clients or libraries and should be used only as a last resort. // - // and looks like this (yaml): + // The cipher list includes TLS 1.3 ciphers for forward compatibility, followed + // by the "old" profile ciphers. // + // This profile is equivalent to a Custom profile specified as: + // minTLSVersion: VersionTLS10 // ciphers: - // // - TLS_AES_128_GCM_SHA256 - // // - TLS_AES_256_GCM_SHA384 - // // - TLS_CHACHA20_POLY1305_SHA256 - // // - ECDHE-ECDSA-AES128-GCM-SHA256 - // // - ECDHE-RSA-AES128-GCM-SHA256 - // // - ECDHE-ECDSA-AES256-GCM-SHA384 - // // - ECDHE-RSA-AES256-GCM-SHA384 - // // - ECDHE-ECDSA-CHACHA20-POLY1305 - // // - ECDHE-RSA-CHACHA20-POLY1305 - // // - DHE-RSA-AES128-GCM-SHA256 - // // - DHE-RSA-AES256-GCM-SHA384 - // // - DHE-RSA-CHACHA20-POLY1305 - // // - ECDHE-ECDSA-AES128-SHA256 - // // - ECDHE-RSA-AES128-SHA256 - // // - ECDHE-ECDSA-AES128-SHA - // // - ECDHE-RSA-AES128-SHA - // // - ECDHE-ECDSA-AES256-SHA384 - // // - ECDHE-RSA-AES256-SHA384 - // // - ECDHE-ECDSA-AES256-SHA - // // - ECDHE-RSA-AES256-SHA - // // - DHE-RSA-AES128-SHA256 - // // - DHE-RSA-AES256-SHA256 - // // - AES128-GCM-SHA256 - // // - AES256-GCM-SHA384 - // // - AES128-SHA256 - // // - AES256-SHA256 - // // - AES128-SHA - // // - AES256-SHA - // // - DES-CBC3-SHA // - // minTLSVersion: VersionTLS10 - // // +optional // +nullable Old *OldTLSProfile `json:"old,omitempty"` - // intermediate is a TLS security profile based on: - // - // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 + + // intermediate is a TLS profile for use when you do not need compatibility with + // legacy clients and want to remain highly secure while being compatible with + // most clients currently in use. // - // and looks like this (yaml): + // The cipher list includes TLS 1.3 ciphers for forward compatibility, followed + // by the "intermediate" profile ciphers. 
// + // This profile is equivalent to a Custom profile specified as: + // minTLSVersion: VersionTLS12 // ciphers: - // // - TLS_AES_128_GCM_SHA256 - // // - TLS_AES_256_GCM_SHA384 - // // - TLS_CHACHA20_POLY1305_SHA256 - // // - ECDHE-ECDSA-AES128-GCM-SHA256 - // // - ECDHE-RSA-AES128-GCM-SHA256 - // // - ECDHE-ECDSA-AES256-GCM-SHA384 - // // - ECDHE-RSA-AES256-GCM-SHA384 - // // - ECDHE-ECDSA-CHACHA20-POLY1305 - // // - ECDHE-RSA-CHACHA20-POLY1305 - // // - DHE-RSA-AES128-GCM-SHA256 - // // - DHE-RSA-AES256-GCM-SHA384 // - // minTLSVersion: VersionTLS12 - // // +optional // +nullable Intermediate *IntermediateTLSProfile `json:"intermediate,omitempty"` - // modern is a TLS security profile based on: - // - // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility - // - // and looks like this (yaml): + + // modern is a TLS security profile for use with clients that support TLS 1.3 and + // do not need backward compatibility for older clients. // + // This profile is equivalent to a Custom profile specified as: + // minTLSVersion: VersionTLS13 // ciphers: - // // - TLS_AES_128_GCM_SHA256 - // // - TLS_AES_256_GCM_SHA384 - // // - TLS_CHACHA20_POLY1305_SHA256 // - // minTLSVersion: VersionTLS13 - // // +optional // +nullable Modern *ModernTLSProfile `json:"modern,omitempty"` + // custom is a user-defined TLS security profile. Be extremely careful using a custom // profile as invalid configurations can be catastrophic. An example custom profile // looks like this: // + // minTLSVersion: VersionTLS11 // ciphers: - // // - ECDHE-ECDSA-CHACHA20-POLY1305 - // // - ECDHE-RSA-CHACHA20-POLY1305 - // // - ECDHE-RSA-AES128-GCM-SHA256 - // // - ECDHE-ECDSA-AES128-GCM-SHA256 // - // minTLSVersion: VersionTLS11 - // // +optional // +nullable Custom *CustomTLSProfile `json:"custom,omitempty"` } -// OldTLSProfile is a TLS security profile based on: -// https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility +// OldTLSProfile is a TLS security profile based on the "old" configuration of +// the Mozilla Server Side TLS configuration guidelines. type OldTLSProfile struct{} -// IntermediateTLSProfile is a TLS security profile based on: -// https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29 +// IntermediateTLSProfile is a TLS security profile based on the "intermediate" +// configuration of the Mozilla Server Side TLS configuration guidelines. type IntermediateTLSProfile struct{} -// ModernTLSProfile is a TLS security profile based on: -// https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility +// ModernTLSProfile is a TLS security profile based on the "modern" configuration +// of the Mozilla Server Side TLS configuration guidelines. type ModernTLSProfile struct{} // CustomTLSProfile is a user-defined TLS security profile. Be extremely careful @@ -189,16 +141,19 @@ type CustomTLSProfile struct { type TLSProfileType string const ( - // Old is a TLS security profile based on: - // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility + // TLSProfileOldType sets parameters based on the "old" configuration of + // the Mozilla Server Side TLS configuration guidelines. 
TLSProfileOldType TLSProfileType = "Old" - // Intermediate is a TLS security profile based on: - // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29 + + // TLSProfileIntermediateType sets parameters based on the "intermediate" + // configuration of the Mozilla Server Side TLS configuration guidelines. TLSProfileIntermediateType TLSProfileType = "Intermediate" - // Modern is a TLS security profile based on: - // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + + // TLSProfileModernType sets parameters based on the "modern" configuration + // of the Mozilla Server Side TLS configuration guidelines. TLSProfileModernType TLSProfileType = "Modern" - // Custom is a TLS security profile that allows for user-defined parameters. + + // TLSProfileCustomType is a TLS security profile that allows for user-defined parameters. TLSProfileCustomType TLSProfileType = "Custom" ) @@ -219,8 +174,6 @@ type TLSProfileSpec struct { // // minTLSVersion: VersionTLS11 // - // NOTE: currently the highest minTLSVersion allowed is VersionTLS12 - // MinTLSVersion TLSProtocolVersion `json:"minTLSVersion"` } @@ -245,11 +198,16 @@ const ( VersionTLS13 TLSProtocolVersion = "VersionTLS13" ) -// TLSProfiles Contains a map of TLSProfileType names to TLSProfileSpec. +// TLSProfiles contains a map of TLSProfileType names to TLSProfileSpec. +// +// These profiles are based on version 5.0 of the Mozilla Server Side TLS +// configuration guidelines (2019-06-28) with TLS 1.3 cipher suites prepended for +// forward compatibility. See: https://ssl-config.mozilla.org/guidelines/5.0.json // -// NOTE: The caller needs to make sure to check that these constants are valid for their binary. Not all -// entries map to values for all binaries. In the case of ties, the kube-apiserver wins. Do not fail, -// just be sure to whitelist only and everything will be ok. +// NOTE: The caller needs to make sure to check that these constants are valid +// for their binary. Not all entries map to values for all binaries. In the case +// of ties, the kube-apiserver wins. Do not fail, just be sure to include only +// valid entries and everything will be ok. var TLSProfiles = map[TLSProfileType]*TLSProfileSpec{ TLSProfileOldType: { Ciphers: []string{ diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index 0863934f22a6..30b85b78e966 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -316,6 +316,22 @@ func (in *AWSServiceEndpoint) DeepCopy() *AWSServiceEndpoint { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcceptRisk) DeepCopyInto(out *AcceptRisk) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceptRisk. +func (in *AcceptRisk) DeepCopy() *AcceptRisk { + if in == nil { + return nil + } + out := new(AcceptRisk) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
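// Illustrative sketch (not part of the vendored change): how a consumer might resolve
// the rewritten TLS profile documentation above into concrete settings by looking up
// the vendored TLSProfiles map. The embedding of TLSProfileSpec inside CustomTLSProfile
// is assumed from the existing API; the helper name below is hypothetical.
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

// resolveTLSProfile maps a TLSSecurityProfile union value to the effective
// cipher list and minimum TLS version, defaulting to Intermediate when unset.
func resolveTLSProfile(p *configv1.TLSSecurityProfile) *configv1.TLSProfileSpec {
	if p != nil && p.Type == configv1.TLSProfileCustomType && p.Custom != nil {
		// Custom carries its own TLSProfileSpec (assumed embedded, as in the existing API).
		return &p.Custom.TLSProfileSpec
	}
	if p != nil {
		if spec, ok := configv1.TLSProfiles[p.Type]; ok {
			return spec
		}
	}
	return configv1.TLSProfiles[configv1.TLSProfileIntermediateType]
}

func main() {
	profile := &configv1.TLSSecurityProfile{
		Type:         configv1.TLSProfileIntermediateType,
		Intermediate: &configv1.IntermediateTLSProfile{},
	}
	spec := resolveTLSProfile(profile)
	fmt.Println("minTLSVersion:", spec.MinTLSVersion)
	fmt.Println("ciphers:", spec.Ciphers)
}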
func (in *AdmissionConfig) DeepCopyInto(out *AdmissionConfig) { *out = *in @@ -1393,7 +1409,7 @@ func (in *ClusterVersionSpec) DeepCopyInto(out *ClusterVersionSpec) { if in.DesiredUpdate != nil { in, out := &in.DesiredUpdate, &out.DesiredUpdate *out = new(Update) - **out = **in + (*in).DeepCopyInto(*out) } if in.Capabilities != nil { in, out := &in.Capabilities, &out.Capabilities @@ -1456,6 +1472,13 @@ func (in *ClusterVersionStatus) DeepCopyInto(out *ClusterVersionStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ConditionalUpdateRisks != nil { + in, out := &in.ConditionalUpdateRisks, &out.ConditionalUpdateRisks + *out = make([]ConditionalUpdateRisk, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -1544,6 +1567,11 @@ func (in *ComponentRouteStatus) DeepCopy() *ComponentRouteStatus { func (in *ConditionalUpdate) DeepCopyInto(out *ConditionalUpdate) { *out = *in in.Release.DeepCopyInto(&out.Release) + if in.RiskNames != nil { + in, out := &in.RiskNames, &out.RiskNames + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.Risks != nil { in, out := &in.Risks, &out.Risks *out = make([]ConditionalUpdateRisk, len(*in)) @@ -1574,6 +1602,13 @@ func (in *ConditionalUpdate) DeepCopy() *ConditionalUpdate { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConditionalUpdateRisk) DeepCopyInto(out *ConditionalUpdateRisk) { *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.MatchingRules != nil { in, out := &in.MatchingRules, &out.MatchingRules *out = make([]ClusterCondition, len(*in)) @@ -2361,33 +2396,6 @@ func (in *FeatureGateTests) DeepCopy() *FeatureGateTests { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FulcioCAWithRekor) DeepCopyInto(out *FulcioCAWithRekor) { - *out = *in - if in.FulcioCAData != nil { - in, out := &in.FulcioCAData, &out.FulcioCAData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.RekorKeyData != nil { - in, out := &in.RekorKeyData, &out.RekorKeyData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - out.FulcioSubject = in.FulcioSubject - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FulcioCAWithRekor. -func (in *FulcioCAWithRekor) DeepCopy() *FulcioCAWithRekor { - if in == nil { - return nil - } - out := new(FulcioCAWithRekor) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GCPPlatformSpec) DeepCopyInto(out *GCPPlatformSpec) { *out = *in @@ -2422,11 +2430,6 @@ func (in *GCPPlatformStatus) DeepCopyInto(out *GCPPlatformStatus) { *out = new(CloudLoadBalancerConfig) (*in).DeepCopyInto(*out) } - if in.ServiceEndpoints != nil { - in, out := &in.ServiceEndpoints, &out.ServiceEndpoints - *out = make([]GCPServiceEndpoint, len(*in)) - copy(*out, *in) - } return } @@ -2472,22 +2475,6 @@ func (in *GCPResourceTag) DeepCopy() *GCPResourceTag { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GCPServiceEndpoint) DeepCopyInto(out *GCPServiceEndpoint) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPServiceEndpoint. -func (in *GCPServiceEndpoint) DeepCopy() *GCPServiceEndpoint { - if in == nil { - return nil - } - out := new(GCPServiceEndpoint) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GatherConfig) DeepCopyInto(out *GatherConfig) { *out = *in @@ -3165,6 +3152,33 @@ func (in *ImagePolicy) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyFulcioCAWithRekorRootOfTrust) DeepCopyInto(out *ImagePolicyFulcioCAWithRekorRootOfTrust) { + *out = *in + if in.FulcioCAData != nil { + in, out := &in.FulcioCAData, &out.FulcioCAData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.RekorKeyData != nil { + in, out := &in.RekorKeyData, &out.RekorKeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + out.FulcioSubject = in.FulcioSubject + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyFulcioCAWithRekorRootOfTrust. +func (in *ImagePolicyFulcioCAWithRekorRootOfTrust) DeepCopy() *ImagePolicyFulcioCAWithRekorRootOfTrust { + if in == nil { + return nil + } + out := new(ImagePolicyFulcioCAWithRekorRootOfTrust) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImagePolicyList) DeepCopyInto(out *ImagePolicyList) { *out = *in @@ -3198,6 +3212,59 @@ func (in *ImagePolicyList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyPKIRootOfTrust) DeepCopyInto(out *ImagePolicyPKIRootOfTrust) { + *out = *in + if in.CertificateAuthorityRootsData != nil { + in, out := &in.CertificateAuthorityRootsData, &out.CertificateAuthorityRootsData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.CertificateAuthorityIntermediatesData != nil { + in, out := &in.CertificateAuthorityIntermediatesData, &out.CertificateAuthorityIntermediatesData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + out.PKICertificateSubject = in.PKICertificateSubject + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyPKIRootOfTrust. +func (in *ImagePolicyPKIRootOfTrust) DeepCopy() *ImagePolicyPKIRootOfTrust { + if in == nil { + return nil + } + out := new(ImagePolicyPKIRootOfTrust) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyPublicKeyRootOfTrust) DeepCopyInto(out *ImagePolicyPublicKeyRootOfTrust) { + *out = *in + if in.KeyData != nil { + in, out := &in.KeyData, &out.KeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.RekorKeyData != nil { + in, out := &in.RekorKeyData, &out.RekorKeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyPublicKeyRootOfTrust. 
+func (in *ImagePolicyPublicKeyRootOfTrust) DeepCopy() *ImagePolicyPublicKeyRootOfTrust { + if in == nil { + return nil + } + out := new(ImagePolicyPublicKeyRootOfTrust) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImagePolicySpec) DeepCopyInto(out *ImagePolicySpec) { *out = *in @@ -3243,6 +3310,28 @@ func (in *ImagePolicyStatus) DeepCopy() *ImagePolicyStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSigstoreVerificationPolicy) DeepCopyInto(out *ImageSigstoreVerificationPolicy) { + *out = *in + in.RootOfTrust.DeepCopyInto(&out.RootOfTrust) + if in.SignedIdentity != nil { + in, out := &in.SignedIdentity, &out.SignedIdentity + *out = new(PolicyIdentity) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSigstoreVerificationPolicy. +func (in *ImageSigstoreVerificationPolicy) DeepCopy() *ImageSigstoreVerificationPolicy { + if in == nil { + return nil + } + out := new(ImageSigstoreVerificationPolicy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImageSpec) DeepCopyInto(out *ImageSpec) { *out = *in @@ -4753,6 +4842,11 @@ func (in *OIDCProvider) DeepCopyInto(out *OIDCProvider) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.UserValidationRules != nil { + in, out := &in.UserValidationRules, &out.UserValidationRules + *out = make([]TokenUserValidationRule, len(*in)) + copy(*out, *in) + } return } @@ -5130,33 +5224,6 @@ func (in *OvirtPlatformStatus) DeepCopy() *OvirtPlatformStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PKI) DeepCopyInto(out *PKI) { - *out = *in - if in.CertificateAuthorityRootsData != nil { - in, out := &in.CertificateAuthorityRootsData, &out.CertificateAuthorityRootsData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.CertificateAuthorityIntermediatesData != nil { - in, out := &in.CertificateAuthorityIntermediatesData, &out.CertificateAuthorityIntermediatesData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - out.PKICertificateSubject = in.PKICertificateSubject - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PKI. -func (in *PKI) DeepCopy() *PKI { - if in == nil { - return nil - } - out := new(PKI) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PKICertificateSubject) DeepCopyInto(out *PKICertificateSubject) { *out = *in @@ -5378,28 +5445,6 @@ func (in *PlatformStatus) DeepCopy() *PlatformStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Policy) DeepCopyInto(out *Policy) { - *out = *in - in.RootOfTrust.DeepCopyInto(&out.RootOfTrust) - if in.SignedIdentity != nil { - in, out := &in.SignedIdentity, &out.SignedIdentity - *out = new(PolicyIdentity) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. 
-func (in *Policy) DeepCopy() *Policy { - if in == nil { - return nil - } - out := new(Policy) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PolicyFulcioSubject) DeepCopyInto(out *PolicyFulcioSubject) { *out = *in @@ -5479,17 +5524,17 @@ func (in *PolicyRootOfTrust) DeepCopyInto(out *PolicyRootOfTrust) { *out = *in if in.PublicKey != nil { in, out := &in.PublicKey, &out.PublicKey - *out = new(PublicKey) + *out = new(ImagePolicyPublicKeyRootOfTrust) (*in).DeepCopyInto(*out) } if in.FulcioCAWithRekor != nil { in, out := &in.FulcioCAWithRekor, &out.FulcioCAWithRekor - *out = new(FulcioCAWithRekor) + *out = new(ImagePolicyFulcioCAWithRekorRootOfTrust) (*in).DeepCopyInto(*out) } if in.PKI != nil { in, out := &in.PKI, &out.PKI - *out = new(PKI) + *out = new(ImagePolicyPKIRootOfTrust) (*in).DeepCopyInto(*out) } return @@ -5805,32 +5850,6 @@ func (in *ProxyStatus) DeepCopy() *ProxyStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PublicKey) DeepCopyInto(out *PublicKey) { - *out = *in - if in.KeyData != nil { - in, out := &in.KeyData, &out.KeyData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.RekorKeyData != nil { - in, out := &in.RekorKeyData, &out.RekorKeyData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicKey. -func (in *PublicKey) DeepCopy() *PublicKey { - if in == nil { - return nil - } - out := new(PublicKey) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) { *out = *in @@ -6426,6 +6445,22 @@ func (in *TokenClaimOrExpressionMapping) DeepCopy() *TokenClaimOrExpressionMappi return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenClaimValidationCELRule) DeepCopyInto(out *TokenClaimValidationCELRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimValidationCELRule. +func (in *TokenClaimValidationCELRule) DeepCopy() *TokenClaimValidationCELRule { + if in == nil { + return nil + } + out := new(TokenClaimValidationCELRule) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenClaimValidationRule) DeepCopyInto(out *TokenClaimValidationRule) { *out = *in @@ -6434,6 +6469,7 @@ func (in *TokenClaimValidationRule) DeepCopyInto(out *TokenClaimValidationRule) *out = new(TokenRequiredClaim) **out = **in } + out.CEL = in.CEL return } @@ -6506,9 +6542,30 @@ func (in *TokenRequiredClaim) DeepCopy() *TokenRequiredClaim { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenUserValidationRule) DeepCopyInto(out *TokenUserValidationRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenUserValidationRule. 
+func (in *TokenUserValidationRule) DeepCopy() *TokenUserValidationRule { + if in == nil { + return nil + } + out := new(TokenUserValidationRule) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Update) DeepCopyInto(out *Update) { *out = *in + if in.AcceptRisks != nil { + in, out := &in.AcceptRisks, &out.AcceptRisks + *out = make([]AcceptRisk, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml index d8d6b502eea2..5d4794e4bac5 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml @@ -31,6 +31,7 @@ authentications.config.openshift.io: FeatureGates: - ExternalOIDC - ExternalOIDCWithUIDAndExtraClaimMappings + - ExternalOIDCWithUpstreamParity FilenameOperatorName: config-operator FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_10" @@ -141,6 +142,7 @@ clusterversions.config.openshift.io: Capability: "" Category: "" FeatureGates: + - ClusterUpdateAcceptRisks - ImageStreamImportMode - SignatureStores FilenameOperatorName: cluster-version-operator @@ -362,14 +364,16 @@ infrastructures.config.openshift.io: Category: "" FeatureGates: - AWSClusterHostedDNSInstall + - AWSDualStackInstall - AzureClusterHostedDNSInstall + - AzureDualStackInstall - DualReplica - DyanmicServiceEndpointIBMCloud - GCPClusterHostedDNSInstall - - GCPCustomAPIEndpointsInstall - HighlyAvailableArbiter - HighlyAvailableArbiter+DualReplica - NutanixMultiSubnets + - OnPremDNSRecords - VSphereHostVMGroupZonal - VSphereMultiNetworks FilenameOperatorName: config-operator @@ -572,7 +576,7 @@ schedulers.config.openshift.io: Capability: "" Category: "" FeatureGates: - - DynamicResourceAllocation + - HyperShiftOnlyDynamicResourceAllocation FilenameOperatorName: config-operator FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_10" diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index 31aab4dfe89d..e7bc0aebb336 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -449,6 +449,7 @@ var map_OIDCProvider = map[string]string{ "oidcClients": "oidcClients is an optional field that configures how on-cluster, platform clients should request tokens from the identity provider. oidcClients must not exceed 20 entries and entries must have unique namespace/name pairs.", "claimMappings": "claimMappings is a required field that configures the rules to be used by the Kubernetes API server for translating claims in a JWT token, issued by the identity provider, to a cluster identity.", "claimValidationRules": "claimValidationRules is an optional field that configures the rules to be used by the Kubernetes API server for validating the claims in a JWT token issued by the identity provider.\n\nValidation rules are joined via an AND operation.", + "userValidationRules": "userValidationRules is an optional field that configures the set of rules used to validate the cluster user identity that was constructed via mapping token claims to user identity attributes. 
Rules are CEL expressions that must evaluate to 'true' for authentication to succeed. If any rule in the chain of rules evaluates to 'false', authentication will fail. When specified, at least one rule must be specified and no more than 64 rules may be specified.", } func (OIDCProvider) SwaggerDoc() map[string]string { @@ -494,9 +495,20 @@ func (TokenClaimOrExpressionMapping) SwaggerDoc() map[string]string { return map_TokenClaimOrExpressionMapping } +var map_TokenClaimValidationCELRule = map[string]string{ + "expression": "expression is a CEL expression evaluated against token claims. expression is required, must be at least 1 character in length and must not exceed 1024 characters. The expression must return a boolean value where 'true' signals a valid token and 'false' an invalid one.", + "message": "message is a required human-readable message to be logged by the Kubernetes API server if the CEL expression defined in 'expression' fails. message must be at least 1 character in length and must not exceed 256 characters.", +} + +func (TokenClaimValidationCELRule) SwaggerDoc() map[string]string { + return map_TokenClaimValidationCELRule +} + var map_TokenClaimValidationRule = map[string]string{ - "type": "type is an optional field that configures the type of the validation rule.\n\nAllowed values are 'RequiredClaim' and omitted (not provided or an empty string).\n\nWhen set to 'RequiredClaim', the Kubernetes API server will be configured to validate that the incoming JWT contains the required claim and that its value matches the required value.\n\nDefaults to 'RequiredClaim'.", - "requiredClaim": "requiredClaim is an optional field that configures the required claim and value that the Kubernetes API server will use to validate if an incoming JWT is valid for this identity provider.", + "": "TokenClaimValidationRule represents a validation rule based on token claims. If type is RequiredClaim, requiredClaim must be set. If Type is CEL, CEL must be set and RequiredClaim must be omitted.", + "type": "type is an optional field that configures the type of the validation rule.\n\nAllowed values are \"RequiredClaim\" and \"CEL\".\n\nWhen set to 'RequiredClaim', the Kubernetes API server will be configured to validate that the incoming JWT contains the required claim and that its value matches the required value.\n\nWhen set to 'CEL', the Kubernetes API server will be configured to validate the incoming JWT against the configured CEL expression.", + "requiredClaim": "requiredClaim allows configuring a required claim name and its expected value. This field is required when `type` is set to RequiredClaim, and must be omitted when `type` is set to any other value. The Kubernetes API server uses this field to validate if an incoming JWT is valid for this identity provider.", + "cel": "cel holds the CEL expression and message for validation. Must be set when Type is \"CEL\", and forbidden otherwise.", } func (TokenClaimValidationRule) SwaggerDoc() map[string]string { @@ -507,6 +519,7 @@ var map_TokenIssuer = map[string]string{ "issuerURL": "issuerURL is a required field that configures the URL used to issue tokens by the identity provider. The Kubernetes API server determines how authentication tokens should be handled by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers.\n\nMust be at least 1 character and must not exceed 512 characters in length. 
Must be a valid URL that uses the 'https' scheme and does not contain a query, fragment or user.", "audiences": "audiences is a required field that configures the acceptable audiences the JWT token, issued by the identity provider, must be issued to. At least one of the entries must match the 'aud' claim in the JWT token.\n\naudiences must contain at least one entry and must not exceed ten entries.", "issuerCertificateAuthority": "issuerCertificateAuthority is an optional field that configures the certificate authority, used by the Kubernetes API server, to validate the connection to the identity provider when fetching discovery information.\n\nWhen not specified, the system trust is used.\n\nWhen specified, it must reference a ConfigMap in the openshift-config namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' key in the data field of the ConfigMap.", + "discoveryURL": "discoveryURL is an optional field that, if specified, overrides the default discovery endpoint used to retrieve OIDC configuration metadata. By default, the discovery URL is derived from `issuerURL` as \"{issuerURL}/.well-known/openid-configuration\".\n\nThe discoveryURL must be a valid absolute HTTPS URL. It must not contain query parameters, user information, or fragments. Additionally, it must differ from the value of `url` (ignoring trailing slashes). The discoveryURL value must be at least 1 character long and no longer than 2048 characters.", } func (TokenIssuer) SwaggerDoc() map[string]string { @@ -522,6 +535,16 @@ func (TokenRequiredClaim) SwaggerDoc() map[string]string { return map_TokenRequiredClaim } +var map_TokenUserValidationRule = map[string]string{ + "": "TokenUserValidationRule provides a CEL-based rule used to validate a token subject. Each rule contains a CEL expression that is evaluated against the token’s claims.", + "expression": "expression is a required CEL expression that performs a validation on cluster user identity attributes like username, groups, etc. The expression must evaluate to a boolean value. When the expression evaluates to 'true', the cluster user identity is considered valid. When the expression evaluates to 'false', the cluster user identity is not considered valid. expression must be at least 1 character in length and must not exceed 1024 characters.", + "message": "message is a required human-readable message to be logged by the Kubernetes API server if the CEL expression defined in 'expression' fails. message must be at least 1 character in length and must not exceed 256 characters.", +} + +func (TokenUserValidationRule) SwaggerDoc() map[string]string { + return map_TokenUserValidationRule +} + var map_UsernameClaimMapping = map[string]string{ "claim": "claim is a required field that configures the JWT token claim whose value is assigned to the cluster identity field associated with this mapping.\n\nclaim must not be an empty string (\"\") and must not exceed 256 characters.", "prefixPolicy": "prefixPolicy is an optional field that configures how a prefix should be applied to the value of the JWT claim specified in the 'claim' field.\n\nAllowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string).\n\nWhen set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim. 
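// Illustrative sketch (not part of the vendored change): constructing the new CEL-based
// validation rules documented above. Field names (Expression, Message, UserValidationRules)
// follow the generated deepcopy functions and swagger keys in this diff; the example CEL
// expressions and messages are placeholders, and wiring the provider into an Authentication
// resource is not shown here.
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	// A claim-level CEL rule, intended to pair with a claimValidationRules entry of type "CEL".
	claimRule := configv1.TokenClaimValidationCELRule{
		Expression: "claims.email_verified == true", // placeholder expression
		Message:    "token email must be verified",
	}

	// User-level CEL rules evaluated against the mapped cluster identity (username, groups, ...).
	provider := configv1.OIDCProvider{}
	provider.UserValidationRules = []configv1.TokenUserValidationRule{
		{
			Expression: "!user.username.startsWith('system:')", // placeholder expression
			Message:    "usernames reserved for the system are rejected",
		},
	}

	fmt.Println("claim rule message:", claimRule.Message)
	fmt.Println("user validation rules:", len(provider.UserValidationRules))
}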
The prefix field must be set when prefixPolicy is 'Prefix'.\n\nWhen set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim.\n\nWhen omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'. As an example, consider the following scenario:\n `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n - \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n - \"email\": the mapped value will be \"userA@myoidc.tld\"", @@ -724,6 +747,15 @@ func (OperandVersion) SwaggerDoc() map[string]string { return map_OperandVersion } +var map_AcceptRisk = map[string]string{ + "": "AcceptRisk represents a risk that is considered acceptable.", + "name": "name is the name of the acceptable risk. It must be a non-empty string and must not exceed 256 characters.", +} + +func (AcceptRisk) SwaggerDoc() map[string]string { + return map_AcceptRisk +} + var map_ClusterCondition = map[string]string{ "": "ClusterCondition is a union of typed cluster conditions. The 'type' property determines which of the type-specific properties are relevant. When evaluated on a cluster, the condition may match, not match, or fail to evaluate.", "type": "type represents the cluster-condition type. This defines the members and semantics of any additional properties.", @@ -777,7 +809,7 @@ func (ClusterVersionList) SwaggerDoc() map[string]string { var map_ClusterVersionSpec = map[string]string{ "": "ClusterVersionSpec is the desired version state of the cluster. It includes the version the cluster should be at, how the cluster is identified, and where the cluster should look for version updates.", "clusterID": "clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field.", - "desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail.\n\nSome of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. image is specified, version is specified, architecture is not specified. The version extracted from the referenced image must match the specified version. 3. image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. 
API validation error.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed.", + "desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail.\n\nSome of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. image is specified, version is specified, architecture is not specified. The version extracted from the referenced image must match the specified version. 3. image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted if the previous version is within the current minor version. Not all rollbacks will succeed, and some may unrecoverably break the cluster.", "upstream": "upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region.", "channel": "channel is an identifier for explicitly requesting a non-default set of updates to be applied to this cluster. The default channel will contain stable updates that are appropriate for production clusters.", "capabilities": "capabilities configures the installation of optional, core cluster components. A null value here is identical to an empty object; see the child properties for default semantics.", @@ -790,15 +822,16 @@ func (ClusterVersionSpec) SwaggerDoc() map[string]string { } var map_ClusterVersionStatus = map[string]string{ - "": "ClusterVersionStatus reports the status of the cluster versioning, including any upgrades that are in progress. The current field will be set to whichever version the cluster is reconciling to, and the conditions array will report whether the update succeeded, is in progress, or is failing.", - "desired": "desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag.", - "history": "history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency. 
Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved.", - "observedGeneration": "observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version.", - "versionHash": "versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only.", - "capabilities": "capabilities describes the state of optional, core cluster components.", - "conditions": "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. The condition \"Progressing\" is set to true if an update is being applied. The condition \"Degraded\" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.", - "availableUpdates": "availableUpdates contains updates recommended for this cluster. Updates which appear in conditionalUpdates but not in availableUpdates may expose this cluster to known issues. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified.", - "conditionalUpdates": "conditionalUpdates contains the list of updates that may be recommended for this cluster if it meets specific required conditions. Consumers interested in the set of updates that are actually recommended for this cluster should use availableUpdates. This list may be empty if no updates are recommended, if the update service is unavailable, or if an empty or invalid channel has been specified.", + "": "ClusterVersionStatus reports the status of the cluster versioning, including any upgrades that are in progress. The current field will be set to whichever version the cluster is reconciling to, and the conditions array will report whether the update succeeded, is in progress, or is failing.", + "desired": "desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag.", + "history": "history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency. Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved.", + "observedGeneration": "observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version.", + "versionHash": "versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only.", + "capabilities": "capabilities describes the state of optional, core cluster components.", + "conditions": "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. 
The condition \"Progressing\" is set to true if an update is being applied. The condition \"Degraded\" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.", + "availableUpdates": "availableUpdates contains updates recommended for this cluster. Updates which appear in conditionalUpdates but not in availableUpdates may expose this cluster to known issues. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified.", + "conditionalUpdates": "conditionalUpdates contains the list of updates that may be recommended for this cluster if it meets specific required conditions. Consumers interested in the set of updates that are actually recommended for this cluster should use availableUpdates. This list may be empty if no updates are recommended, if the update service is unavailable, or if an empty or invalid channel has been specified.", + "conditionalUpdateRisks": "conditionalUpdateRisks contains the list of risks associated with conditionalUpdates. When performing a conditional update, all its associated risks will be compared with the set of accepted risks in the spec.desiredUpdate.acceptRisks field. If all risks for a conditional update are included in the spec.desiredUpdate.acceptRisks set, the conditional update can proceed, otherwise it is blocked. The risk names in the list must be unique. conditionalUpdateRisks must not contain more than 500 entries.", } func (ClusterVersionStatus) SwaggerDoc() map[string]string { @@ -821,6 +854,7 @@ func (ComponentOverride) SwaggerDoc() map[string]string { var map_ConditionalUpdate = map[string]string{ "": "ConditionalUpdate represents an update which is recommended to some clusters on the version the current cluster is reconciling, but which may not be recommended for the current cluster.", "release": "release is the target of the update.", + "riskNames": "riskNames represents the set of the names of conditionalUpdateRisks that are relevant to this update for some clusters. The Applies condition of each conditionalUpdateRisks entry declares if that risk applies to this cluster. A conditional update is accepted only if each of its risks either does not apply to the cluster or is considered acceptable by the cluster administrator. The latter means that the risk names are included in value of the spec.desiredUpdate.acceptRisks field. Entries must be unique and must not exceed 256 characters. riskNames must not contain more than 500 entries.", "risks": "risks represents the range of issues associated with updating to the target release. The cluster-version operator will evaluate all entries, and only recommend the update if there is at least one entry and all entries recommend the update.", "conditions": "conditions represents the observations of the conditional update's current status. Known types are: * Recommended, for whether the update is recommended for the current cluster.", } @@ -831,6 +865,7 @@ func (ConditionalUpdate) SwaggerDoc() map[string]string { var map_ConditionalUpdateRisk = map[string]string{ "": "ConditionalUpdateRisk represents a reason and cluster-state for not recommending a conditional update.", + "conditions": "conditions represents the observations of the conditional update risk's current status. Known types are: * Applies, for whether the risk applies to the current cluster. 
The condition's types in the list must be unique. conditions must not contain more than one entry.", "url": "url contains information about this risk.", "name": "name is the CamelCase reason for not recommending a conditional update, in the event that matchingRules match the cluster state.", "message": "message provides additional information about the risk of updating, in the event that matchingRules match the cluster state. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines.", @@ -878,7 +913,8 @@ var map_Update = map[string]string{ "architecture": "architecture is an optional field that indicates the desired value of the cluster architecture. In this context cluster architecture means either a single architecture or a multi architecture. architecture can only be set to Multi thereby only allowing updates from single to multi architecture. If architecture is set, image cannot be set and version must be set. Valid values are 'Multi' and empty.", "version": "version is a semantic version identifying the update version. version is required if architecture is specified. If both version and image are set, the version extracted from the referenced image must match the specified version.", "image": "image is a container image location that contains the update. image should be used when the desired version does not exist in availableUpdates or history. When image is set, architecture cannot be specified. If both version and image are set, the version extracted from the referenced image must match the specified version.", - "force": "force allows an administrator to update to an image that has failed verification or upgradeable checks. This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that comes from unknown or potentially malicious sources.", + "force": "force allows an administrator to update to an image that has failed verification or upgradeable checks that are designed to keep your cluster safe. Only use this if: * you are testing unsigned release images in short-lived test clusters or * you are working around a known bug in the cluster-version\n operator and you have verified the authenticity of the provided\n image yourself.\nThe provided image will run with full administrative access to the cluster. Do not use this flag with images that come from unknown or potentially malicious sources.", + "acceptRisks": "acceptRisks is an optional set of names of conditional update risks that are considered acceptable. A conditional update is performed only if all of its risks are acceptable. This list may contain entries that apply to current, previous or future updates. The entries therefore may not map directly to a risk in .status.conditionalUpdateRisks. acceptRisks must not contain more than 1000 entries. Entries in this list must be unique.", } func (Update) SwaggerDoc() map[string]string { @@ -893,7 +929,7 @@ var map_UpdateHistory = map[string]string{ "version": "version is a semantic version identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty.", "image": "image is a container image location that contains the update. This value is always populated.", "verified": "verified indicates whether the provided update was properly verified before it was installed. 
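// Illustrative sketch (not part of the vendored change): the gating behavior described by
// the new riskNames/acceptRisks documentation above, simplified to ignore each risk's
// Applies condition. Only the field names (Update.AcceptRisks, AcceptRisk.Name,
// ConditionalUpdate.RiskNames, Update.Version) come from this diff; the helper and the
// example values are hypothetical.
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

// allRisksAccepted reports whether every risk name attached to the conditional
// update is covered by the administrator's accepted set in desiredUpdate.acceptRisks.
func allRisksAccepted(cu configv1.ConditionalUpdate, accepted []configv1.AcceptRisk) bool {
	acceptedNames := map[string]bool{}
	for _, a := range accepted {
		acceptedNames[a.Name] = true
	}
	for _, name := range cu.RiskNames {
		if !acceptedNames[name] {
			return false
		}
	}
	return true
}

func main() {
	desired := configv1.Update{
		Version: "4.21.3", // hypothetical target version
		AcceptRisks: []configv1.AcceptRisk{
			{Name: "ExampleInfrastructureRisk"}, // hypothetical risk name
		},
	}
	cu := configv1.ConditionalUpdate{RiskNames: []string{"ExampleInfrastructureRisk"}}
	fmt.Println("conditional update acceptable:", allRisksAccepted(cu, desired.AcceptRisks))
}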
If this is false the cluster may not be trusted. Verified does not cover upgradeable checks that depend on the cluster state at the time when the update target was accepted.", - "acceptedRisks": "acceptedRisks records risks which were accepted to initiate the update. For example, it may menition an Upgradeable=False or missing signature that was overridden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets.", + "acceptedRisks": "acceptedRisks records risks which were accepted to initiate the update. For example, it may mention an Upgradeable=False or missing signature that was overridden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets.", } func (UpdateHistory) SwaggerDoc() map[string]string { @@ -1214,17 +1250,6 @@ func (ImageDigestMirrors) SwaggerDoc() map[string]string { return map_ImageDigestMirrors } -var map_FulcioCAWithRekor = map[string]string{ - "": "FulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key.", - "fulcioCAData": "fulcioCAData is a required field contains inline base64-encoded data for the PEM format fulcio CA. fulcioCAData must be at most 8192 characters. ", - "rekorKeyData": "rekorKeyData is a required field contains inline base64-encoded data for the PEM format from the Rekor public key. rekorKeyData must be at most 8192 characters. ", - "fulcioSubject": "fulcioSubject is a required field specifies OIDC issuer and the email of the Fulcio authentication configuration.", -} - -func (FulcioCAWithRekor) SwaggerDoc() map[string]string { - return map_FulcioCAWithRekor -} - var map_ImagePolicy = map[string]string{ "": "ImagePolicy holds namespace-wide configuration for image signature verification\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -1236,6 +1261,17 @@ func (ImagePolicy) SwaggerDoc() map[string]string { return map_ImagePolicy } +var map_ImagePolicyFulcioCAWithRekorRootOfTrust = map[string]string{ + "": "ImagePolicyFulcioCAWithRekorRootOfTrust defines the root of trust based on the Fulcio certificate and the Rekor public key.", + "fulcioCAData": "fulcioCAData is a required field contains inline base64-encoded data for the PEM format fulcio CA. fulcioCAData must be at most 8192 characters. ", + "rekorKeyData": "rekorKeyData is a required field contains inline base64-encoded data for the PEM format from the Rekor public key. rekorKeyData must be at most 8192 characters. ", + "fulcioSubject": "fulcioSubject is a required field specifies OIDC issuer and the email of the Fulcio authentication configuration.", +} + +func (ImagePolicyFulcioCAWithRekorRootOfTrust) SwaggerDoc() map[string]string { + return map_ImagePolicyFulcioCAWithRekorRootOfTrust +} + var map_ImagePolicyList = map[string]string{ "": "ImagePolicyList is a list of ImagePolicy resources\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -1246,6 +1282,27 @@ func (ImagePolicyList) SwaggerDoc() map[string]string { return map_ImagePolicyList } +var map_ImagePolicyPKIRootOfTrust = map[string]string{ + "": "ImagePolicyPKIRootOfTrust defines the root of trust based on Root CA(s) and corresponding intermediate certificates.", + "caRootsData": "caRootsData contains base64-encoded data of a certificate bundle PEM file, which contains one or more CA roots in the PEM format. The total length of the data must not exceed 8192 characters. ", + "caIntermediatesData": "caIntermediatesData contains base64-encoded data of a certificate bundle PEM file, which contains one or more intermediate certificates in the PEM format. The total length of the data must not exceed 8192 characters. caIntermediatesData requires caRootsData to be set. ", + "pkiCertificateSubject": "pkiCertificateSubject defines the requirements imposed on the subject to which the certificate was issued.", +} + +func (ImagePolicyPKIRootOfTrust) SwaggerDoc() map[string]string { + return map_ImagePolicyPKIRootOfTrust +} + +var map_ImagePolicyPublicKeyRootOfTrust = map[string]string{ + "": "ImagePolicyPublicKeyRootOfTrust defines the root of trust based on a sigstore public key.", + "keyData": "keyData is a required field contains inline base64-encoded data for the PEM format public key. keyData must be at most 8192 characters. ", + "rekorKeyData": "rekorKeyData is an optional field contains inline base64-encoded data for the PEM format from the Rekor public key. rekorKeyData must be at most 8192 characters. ", +} + +func (ImagePolicyPublicKeyRootOfTrust) SwaggerDoc() map[string]string { + return map_ImagePolicyPublicKeyRootOfTrust +} + var map_ImagePolicySpec = map[string]string{ "": "ImagePolicySpec is the specification of the ImagePolicy CRD.", "scopes": "scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. This support no more than 256 scopes in one object. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. 
For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker", @@ -1264,15 +1321,14 @@ func (ImagePolicyStatus) SwaggerDoc() map[string]string { return map_ImagePolicyStatus } -var map_PKI = map[string]string{ - "": "PKI defines the root of trust based on Root CA(s) and corresponding intermediate certificates.", - "caRootsData": "caRootsData contains base64-encoded data of a certificate bundle PEM file, which contains one or more CA roots in the PEM format. The total length of the data must not exceed 8192 characters. ", - "caIntermediatesData": "caIntermediatesData contains base64-encoded data of a certificate bundle PEM file, which contains one or more intermediate certificates in the PEM format. The total length of the data must not exceed 8192 characters. caIntermediatesData requires caRootsData to be set. ", - "pkiCertificateSubject": "pkiCertificateSubject defines the requirements imposed on the subject to which the certificate was issued.", +var map_ImageSigstoreVerificationPolicy = map[string]string{ + "": "ImageSigstoreVerificationPolicy defines the verification policy for the items in the scopes list.", + "rootOfTrust": "rootOfTrust is a required field that defines the root of trust for verifying image signatures during retrieval. This allows image consumers to specify policyType and corresponding configuration of the policy, matching how the policy was generated.", + "signedIdentity": "signedIdentity is an optional field specifies what image identity the signature claims about the image. This is useful when the image identity in the signature differs from the original image spec, such as when mirror registry is configured for the image scope, the signature from the mirror registry contains the image identity of the mirror instead of the original scope. The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity, the default matchPolicy is \"MatchRepoDigestOrExact\".", } -func (PKI) SwaggerDoc() map[string]string { - return map_PKI +func (ImageSigstoreVerificationPolicy) SwaggerDoc() map[string]string { + return map_ImageSigstoreVerificationPolicy } var map_PKICertificateSubject = map[string]string{ @@ -1285,16 +1341,6 @@ func (PKICertificateSubject) SwaggerDoc() map[string]string { return map_PKICertificateSubject } -var map_Policy = map[string]string{ - "": "Policy defines the verification policy for the items in the scopes list.", - "rootOfTrust": "rootOfTrust is a required field that defines the root of trust for verifying image signatures during retrieval. This allows image consumers to specify policyType and corresponding configuration of the policy, matching how the policy was generated.", - "signedIdentity": "signedIdentity is an optional field specifies what image identity the signature claims about the image. This is useful when the image identity in the signature differs from the original image spec, such as when mirror registry is configured for the image scope, the signature from the mirror registry contains the image identity of the mirror instead of the original scope. 
The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity, the default matchPolicy is \"MatchRepoDigestOrExact\".", -} - -func (Policy) SwaggerDoc() map[string]string { - return map_Policy -} - var map_PolicyFulcioSubject = map[string]string{ "": "PolicyFulcioSubject defines the OIDC issuer and the email of the Fulcio authentication configuration.", "oidcIssuer": "oidcIssuer is a required filed contains the expected OIDC issuer. The oidcIssuer must be a valid URL and at most 2048 characters in length. It will be verified that the Fulcio-issued certificate contains a (Fulcio-defined) certificate extension pointing at this OIDC issuer URL. When Fulcio issues certificates, it includes a value based on an URL inside the client-provided ID token. Example: \"https://expected.OIDC.issuer/\"", @@ -1335,7 +1381,7 @@ func (PolicyMatchRemapIdentity) SwaggerDoc() map[string]string { var map_PolicyRootOfTrust = map[string]string{ "": "PolicyRootOfTrust defines the root of trust based on the selected policyType.", - "policyType": "policyType is a required field specifies the type of the policy for verification. This field must correspond to how the policy was generated. Allowed values are \"PublicKey\", \"FulcioCAWithRekor\", and \"PKI\". When set to \"PublicKey\", the policy relies on a sigstore publicKey and may optionally use a Rekor verification. When set to \"FulcioCAWithRekor\", the policy is based on the Fulcio certification and incorporates a Rekor verification. When set to \"PKI\", the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). This value is enabled by turning on the SigstoreImageVerificationPKI feature gate.", + "policyType": "policyType is a required field specifies the type of the policy for verification. This field must correspond to how the policy was generated. Allowed values are \"PublicKey\", \"FulcioCAWithRekor\", and \"PKI\". When set to \"PublicKey\", the policy relies on a sigstore publicKey and may optionally use a Rekor verification. When set to \"FulcioCAWithRekor\", the policy is based on the Fulcio certification and incorporates a Rekor verification. When set to \"PKI\", the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI).", "publicKey": "publicKey defines the root of trust configuration based on a sigstore public key. Optionally include a Rekor public key for Rekor verification. publicKey is required when policyType is PublicKey, and forbidden otherwise.", "fulcioCAWithRekor": "fulcioCAWithRekor defines the root of trust configuration based on the Fulcio certificate and the Rekor public key. fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise For more information about Fulcio and Rekor, please refer to the document at: https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor", "pki": "pki defines the root of trust configuration based on Bring Your Own Public Key Infrastructure (BYOPKI) Root CA(s) and corresponding intermediate certificates. 
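The publicKey, fulcioCAWithRekor and pki entries above all follow the same rule (the pki sentence completes just below): the member named by policyType is required, and the other members are forbidden. A minimal sketch of that rule, using illustrative stand-in fields rather than the real API structs:

```go
package main

import (
	"errors"
	"fmt"
)

// rootOfTrust is a simplified stand-in for the discriminated union described above.
type rootOfTrust struct {
	policyType        string // "PublicKey", "FulcioCAWithRekor", or "PKI"
	publicKey         bool   // true stands in for a non-nil publicKey block
	fulcioCAWithRekor bool
	pki               bool
}

func validate(r rootOfTrust) error {
	members := map[string]bool{"PublicKey": r.publicKey, "FulcioCAWithRekor": r.fulcioCAWithRekor, "PKI": r.pki}
	required, ok := members[r.policyType]
	if !ok {
		return fmt.Errorf("unknown policyType %q", r.policyType)
	}
	if !required {
		return fmt.Errorf("the %s root of trust is required when policyType is %q", r.policyType, r.policyType)
	}
	set := 0
	for _, present := range members {
		if present {
			set++
		}
	}
	if set > 1 {
		return errors.New("roots of trust not matching policyType are forbidden")
	}
	return nil
}

func main() {
	fmt.Println(validate(rootOfTrust{policyType: "PKI", pki: true}))                        // <nil>
	fmt.Println(validate(rootOfTrust{policyType: "PublicKey", fulcioCAWithRekor: true}))    // missing publicKey
	fmt.Println(validate(rootOfTrust{policyType: "PublicKey", publicKey: true, pki: true})) // extra member set
}
```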
pki is required when policyType is PKI, and forbidden otherwise.", @@ -1345,16 +1391,6 @@ func (PolicyRootOfTrust) SwaggerDoc() map[string]string { return map_PolicyRootOfTrust } -var map_PublicKey = map[string]string{ - "": "PublicKey defines the root of trust based on a sigstore public key.", - "keyData": "keyData is a required field contains inline base64-encoded data for the PEM format public key. keyData must be at most 8192 characters. ", - "rekorKeyData": "rekorKeyData is an optional field contains inline base64-encoded data for the PEM format from the Rekor public key. rekorKeyData must be at most 8192 characters. ", -} - -func (PublicKey) SwaggerDoc() map[string]string { - return map_PublicKey -} - var map_ImageTagMirrorSet = map[string]string{ "": "ImageTagMirrorSet holds cluster-wide information about how to handle registry mirror rules on using tag pull specification. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -1410,6 +1446,7 @@ var map_AWSPlatformStatus = map[string]string{ "serviceEndpoints": "serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.", "resourceTags": "resourceTags is a list of additional tags to apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user.", "cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.", + "ipFamily": "ipFamily specifies the IP protocol family that should be used for AWS network resources. This controls whether AWS resources are created with IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary protocol family.", } func (AWSPlatformStatus) SwaggerDoc() map[string]string { @@ -1481,6 +1518,7 @@ var map_AzurePlatformStatus = map[string]string{ "armEndpoint": "armEndpoint specifies a URL to use for resource management in non-soverign clouds such as Azure Stack.", "resourceTags": "resourceTags is a list of additional tags to apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags for information on tagging Azure resources. Due to limitations on Automation, Content Delivery Network, DNS Azure resources, a maximum of 15 tags may be applied. OpenShift reserves 5 tags for internal use, allowing 10 tags for user configuration.", "cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. 
When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.", + "ipFamily": "ipFamily specifies the IP protocol family that should be used for Azure network resources. This controls whether Azure resources are created with IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary protocol family.", } func (AzurePlatformStatus) SwaggerDoc() map[string]string { @@ -1525,6 +1563,7 @@ var map_BareMetalPlatformStatus = map[string]string{ "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", + "dnsRecordsType": "dnsRecordsType determines whether records for api, api-int, and ingress are provided by the internal DNS service or externally. Allowed values are `Internal`, `External`, and omitted. When set to `Internal`, records are provided by the internal infrastructure and no additional user configuration is required for the cluster to function. When set to `External`, records are not provided by the internal infrastructure and must be configured by the user on a DNS server outside the cluster. Cluster nodes must use this external server for their upstream DNS requests. This value may only be set when loadBalancer.type is set to UserManaged. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `Internal`.", "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes.", } @@ -1613,7 +1652,6 @@ var map_GCPPlatformStatus = map[string]string{ "resourceLabels": "resourceLabels is a list of additional labels to apply to GCP resources created for the cluster. See https://cloud.google.com/compute/docs/labeling-resources for information on labeling GCP resources. GCP supports a maximum of 64 labels per resource. OpenShift reserves 32 labels for internal use, allowing 32 labels for user configuration.", "resourceTags": "resourceTags is a list of additional tags to apply to GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on tagging GCP resources. GCP supports a maximum of 50 tags per resource.", "cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.", - "serviceEndpoints": "serviceEndpoints specifies endpoints that override the default endpoints used when creating clients to interact with GCP services. 
When not specified, the default endpoint for the GCP region will be used. Only 1 endpoint override is permitted for each GCP service. The maximum number of endpoint overrides allowed is 11.", } func (GCPPlatformStatus) SwaggerDoc() map[string]string { @@ -1641,16 +1679,6 @@ func (GCPResourceTag) SwaggerDoc() map[string]string { return map_GCPResourceTag } -var map_GCPServiceEndpoint = map[string]string{ - "": "GCPServiceEndpoint store the configuration of a custom url to override existing defaults of GCP Services.", - "name": "name is the name of the GCP service whose endpoint is being overridden. This must be provided and cannot be empty.\n\nAllowed values are Compute, Container, CloudResourceManager, DNS, File, IAM, ServiceUsage, Storage, and TagManager.\n\nAs an example, when setting the name to Compute all requests made by the caller to the GCP Compute Service will be directed to the endpoint specified in the url field.", - "url": "url is a fully qualified URI that overrides the default endpoint for a client using the GCP service specified in the name field. url is required, must use the scheme https, must not be more than 253 characters in length, and must be a valid URL according to Go's net/url package (https://pkg.go.dev/net/url#URL)\n\nAn example of a valid endpoint that overrides the Compute Service: \"https://compute-myendpoint1.p.googleapis.com\"", -} - -func (GCPServiceEndpoint) SwaggerDoc() map[string]string { - return map_GCPServiceEndpoint -} - var map_IBMCloudPlatformSpec = map[string]string{ "": "IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider. This only includes fields that can be modified in the cluster.", "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints. A maximum of 13 service endpoints overrides are supported.", @@ -1787,6 +1815,7 @@ var map_NutanixPlatformStatus = map[string]string{ "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.", "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", + "dnsRecordsType": "dnsRecordsType determines whether records for api, api-int, and ingress are provided by the internal DNS service or externally. Allowed values are `Internal`, `External`, and omitted. When set to `Internal`, records are provided by the internal infrastructure and no additional user configuration is required for the cluster to function. When set to `External`, records are not provided by the internal infrastructure and must be configured by the user on a DNS server outside the cluster. Cluster nodes must use this external server for their upstream DNS requests. This value may only be set when loadBalancer.type is set to UserManaged. 
When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `Internal`.", } func (NutanixPlatformStatus) SwaggerDoc() map[string]string { @@ -1853,6 +1882,7 @@ var map_OpenStackPlatformStatus = map[string]string{ "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for OpenStack deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", + "dnsRecordsType": "dnsRecordsType determines whether records for api, api-int, and ingress are provided by the internal DNS service or externally. Allowed values are `Internal`, `External`, and omitted. When set to `Internal`, records are provided by the internal infrastructure and no additional user configuration is required for the cluster to function. When set to `External`, records are not provided by the internal infrastructure and must be configured by the user on a DNS server outside the cluster. Cluster nodes must use this external server for their upstream DNS requests. This value may only be set when loadBalancer.type is set to UserManaged. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `Internal`.", "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes.", } @@ -1885,6 +1915,7 @@ var map_OvirtPlatformStatus = map[string]string{ "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", "nodeDNSIP": "deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release.", "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", + "dnsRecordsType": "dnsRecordsType determines whether records for api, api-int, and ingress are provided by the internal DNS service or externally. Allowed values are `Internal`, `External`, and omitted. When set to `Internal`, records are provided by the internal infrastructure and no additional user configuration is required for the cluster to function. When set to `External`, records are not provided by the internal infrastructure and must be configured by the user on a DNS server outside the cluster. Cluster nodes must use this external server for their upstream DNS requests. This value may only be set when loadBalancer.type is set to UserManaged. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. 
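The dnsRecordsType text above is repeated verbatim for several platform statuses (the Ovirt and vSphere entries continue below), and the operative constraint is easy to miss in the prose: the field may only be set when loadBalancer.type is UserManaged, and an omitted value currently defaults to Internal. A minimal sketch of that check, with invented stand-in types rather than the real openshift/api structs:

```go
package main

import (
	"errors"
	"fmt"
)

// platformStatus is an illustrative stand-in for the platform status fields discussed above.
type platformStatus struct {
	loadBalancerType string // e.g. "UserManaged"
	dnsRecordsType   string // "", "Internal", or "External"
}

func validateDNSRecordsType(s platformStatus) error {
	switch s.dnsRecordsType {
	case "", "Internal", "External":
		// allowed values per the documentation above
	default:
		return fmt.Errorf("invalid dnsRecordsType %q", s.dnsRecordsType)
	}
	if s.dnsRecordsType != "" && s.loadBalancerType != "UserManaged" {
		return errors.New("dnsRecordsType may only be set when loadBalancer.type is UserManaged")
	}
	return nil
}

func main() {
	fmt.Println(validateDNSRecordsType(platformStatus{loadBalancerType: "UserManaged", dnsRecordsType: "External"})) // <nil>
	fmt.Println(validateDNSRecordsType(platformStatus{dnsRecordsType: "External"}))                                  // rejected
}
```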
The current default is `Internal`.", } func (OvirtPlatformStatus) SwaggerDoc() map[string]string { @@ -2067,6 +2098,7 @@ var map_VSpherePlatformStatus = map[string]string{ "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for vSphere deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", + "dnsRecordsType": "dnsRecordsType determines whether records for api, api-int, and ingress are provided by the internal DNS service or externally. Allowed values are `Internal`, `External`, and omitted. When set to `Internal`, records are provided by the internal infrastructure and no additional user configuration is required for the cluster to function. When set to `External`, records are not provided by the internal infrastructure and must be configured by the user on a DNS server outside the cluster. Cluster nodes must use this external server for their upstream DNS requests. This value may only be set when loadBalancer.type is set to UserManaged. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `Internal`.", "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes.", } @@ -2894,7 +2926,7 @@ func (SchedulerList) SwaggerDoc() map[string]string { var map_SchedulerSpec = map[string]string{ "policy": "DEPRECATED: the scheduler Policy API has been deprecated and will be removed in a future release. policy is a reference to a ConfigMap containing scheduler policy which has user specified predicates and priorities. If this ConfigMap is not available scheduler will default to use DefaultAlgorithmProvider. The namespace for this configmap is openshift-config.", "profile": "profile sets which scheduling profile should be set in order to configure scheduling decisions for new pods.\n\nValid values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\" Defaults to \"LowNodeUtilization\"", - "profileCustomizations": "profileCustomizations contains configuration for modifying the default behavior of existing scheduler profiles.", + "profileCustomizations": "profileCustomizations contains configuration for modifying the default behavior of existing scheduler profiles. Deprecated: no longer needed, since DRA is GA starting with 4.21, and is enabled by' default in the cluster, this field will be removed in 4.24.", "defaultNodeSelector": "defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces and creates an intersection with any existing nodeSelectors already set on a pod, additionally constraining that pod's selector. For example, defaultNodeSelector: \"type=user-node,region=east\" would set nodeSelector field in pod spec to \"type=user-node,region=east\" to all pods created in all namespaces. 
Namespaces having project-wide node selectors won't be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector='type=user-node,region=east', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: \"type=user-node,region=west\" means that the default of \"type=user-node,region=east\" set in defaultNodeSelector would not be applied.", "mastersSchedulable": "mastersSchedulable allows masters nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.", } @@ -2947,7 +2979,7 @@ func (CustomTLSProfile) SwaggerDoc() map[string]string { } var map_IntermediateTLSProfile = map[string]string{ - "": "IntermediateTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29", + "": "IntermediateTLSProfile is a TLS security profile based on the \"intermediate\" configuration of the Mozilla Server Side TLS configuration guidelines.", } func (IntermediateTLSProfile) SwaggerDoc() map[string]string { @@ -2955,7 +2987,7 @@ func (IntermediateTLSProfile) SwaggerDoc() map[string]string { } var map_ModernTLSProfile = map[string]string{ - "": "ModernTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility", + "": "ModernTLSProfile is a TLS security profile based on the \"modern\" configuration of the Mozilla Server Side TLS configuration guidelines.", } func (ModernTLSProfile) SwaggerDoc() map[string]string { @@ -2963,7 +2995,7 @@ func (ModernTLSProfile) SwaggerDoc() map[string]string { } var map_OldTLSProfile = map[string]string{ - "": "OldTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility", + "": "OldTLSProfile is a TLS security profile based on the \"old\" configuration of the Mozilla Server Side TLS configuration guidelines.", } func (OldTLSProfile) SwaggerDoc() map[string]string { @@ -2973,7 +3005,7 @@ func (OldTLSProfile) SwaggerDoc() map[string]string { var map_TLSProfileSpec = map[string]string{ "": "TLSProfileSpec is the desired behavior of a TLSSecurityProfile.", "ciphers": "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml):\n\n ciphers:\n - DES-CBC3-SHA", - "minTLSVersion": "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):\n\n minTLSVersion: VersionTLS11\n\nNOTE: currently the highest minTLSVersion allowed is VersionTLS12", + "minTLSVersion": "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. 
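Stepping back to the SchedulerSpec entry above before the TLS profile documentation resumes below: the defaultNodeSelector text boils down to a precedence rule, namely that a project-level openshift.io/node-selector annotation wins over the cluster-wide default. A minimal sketch of that rule with an invented helper:

```go
package main

import "fmt"

// effectiveNodeSelector is an illustrative helper, not part of openshift/api:
// when the namespace carries the openshift.io/node-selector annotation, that
// value is used in preference to the cluster-wide defaultNodeSelector.
func effectiveNodeSelector(defaultNodeSelector string, nsAnnotations map[string]string) string {
	if v, ok := nsAnnotations["openshift.io/node-selector"]; ok {
		return v
	}
	return defaultNodeSelector
}

func main() {
	annotated := map[string]string{"openshift.io/node-selector": "type=user-node,region=west"}
	fmt.Println(effectiveNodeSelector("type=user-node,region=east", annotated)) // type=user-node,region=west
	fmt.Println(effectiveNodeSelector("type=user-node,region=east", nil))       // type=user-node,region=east
}
```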
For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):\n\n minTLSVersion: VersionTLS11", } func (TLSProfileSpec) SwaggerDoc() map[string]string { @@ -2982,11 +3014,11 @@ func (TLSProfileSpec) SwaggerDoc() map[string]string { var map_TLSSecurityProfile = map[string]string{ "": "TLSSecurityProfile defines the schema for a TLS security profile. This object is used by operators to apply TLS security settings to operands.", - "type": "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. Old, Intermediate and Modern are TLS security profiles based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations\n\nThe profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced.\n\nNote that the Modern profile is currently not supported because it is not yet well adopted by common software libraries.", - "old": "old is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n\n - TLS_AES_128_GCM_SHA256\n\n - TLS_AES_256_GCM_SHA384\n\n - TLS_CHACHA20_POLY1305_SHA256\n\n - ECDHE-ECDSA-AES128-GCM-SHA256\n\n - ECDHE-RSA-AES128-GCM-SHA256\n\n - ECDHE-ECDSA-AES256-GCM-SHA384\n\n - ECDHE-RSA-AES256-GCM-SHA384\n\n - ECDHE-ECDSA-CHACHA20-POLY1305\n\n - ECDHE-RSA-CHACHA20-POLY1305\n\n - DHE-RSA-AES128-GCM-SHA256\n\n - DHE-RSA-AES256-GCM-SHA384\n\n - DHE-RSA-CHACHA20-POLY1305\n\n - ECDHE-ECDSA-AES128-SHA256\n\n - ECDHE-RSA-AES128-SHA256\n\n - ECDHE-ECDSA-AES128-SHA\n\n - ECDHE-RSA-AES128-SHA\n\n - ECDHE-ECDSA-AES256-SHA384\n\n - ECDHE-RSA-AES256-SHA384\n\n - ECDHE-ECDSA-AES256-SHA\n\n - ECDHE-RSA-AES256-SHA\n\n - DHE-RSA-AES128-SHA256\n\n - DHE-RSA-AES256-SHA256\n\n - AES128-GCM-SHA256\n\n - AES256-GCM-SHA384\n\n - AES128-SHA256\n\n - AES256-SHA256\n\n - AES128-SHA\n\n - AES256-SHA\n\n - DES-CBC3-SHA\n\n minTLSVersion: VersionTLS10", - "intermediate": "intermediate is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29\n\nand looks like this (yaml):\n\n ciphers:\n\n - TLS_AES_128_GCM_SHA256\n\n - TLS_AES_256_GCM_SHA384\n\n - TLS_CHACHA20_POLY1305_SHA256\n\n - ECDHE-ECDSA-AES128-GCM-SHA256\n\n - ECDHE-RSA-AES128-GCM-SHA256\n\n - ECDHE-ECDSA-AES256-GCM-SHA384\n\n - ECDHE-RSA-AES256-GCM-SHA384\n\n - ECDHE-ECDSA-CHACHA20-POLY1305\n\n - ECDHE-RSA-CHACHA20-POLY1305\n\n - DHE-RSA-AES128-GCM-SHA256\n\n - DHE-RSA-AES256-GCM-SHA384\n\n minTLSVersion: VersionTLS12", - "modern": "modern is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n\n - TLS_AES_128_GCM_SHA256\n\n - TLS_AES_256_GCM_SHA384\n\n - TLS_CHACHA20_POLY1305_SHA256\n\n minTLSVersion: VersionTLS13", - "custom": "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this:\n\n ciphers:\n\n - ECDHE-ECDSA-CHACHA20-POLY1305\n\n - ECDHE-RSA-CHACHA20-POLY1305\n\n - ECDHE-RSA-AES128-GCM-SHA256\n\n - ECDHE-ECDSA-AES128-GCM-SHA256\n\n minTLSVersion: VersionTLS11", + "type": "type is one of Old, Intermediate, Modern or Custom. 
Custom provides the ability to specify individual TLS security profile parameters.\n\nThe profiles are currently based on version 5.0 of the Mozilla Server Side TLS configuration guidelines (released 2019-06-28) with TLS 1.3 ciphers added for forward compatibility. See: https://ssl-config.mozilla.org/guidelines/5.0.json\n\nThe profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced.", + "old": "old is a TLS profile for use when services need to be accessed by very old clients or libraries and should be used only as a last resort.\n\nThe cipher list includes TLS 1.3 ciphers for forward compatibility, followed by the \"old\" profile ciphers.\n\nThis profile is equivalent to a Custom profile specified as:\n minTLSVersion: VersionTLS10\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384\n - DHE-RSA-CHACHA20-POLY1305\n - ECDHE-ECDSA-AES128-SHA256\n - ECDHE-RSA-AES128-SHA256\n - ECDHE-ECDSA-AES128-SHA\n - ECDHE-RSA-AES128-SHA\n - ECDHE-ECDSA-AES256-SHA384\n - ECDHE-RSA-AES256-SHA384\n - ECDHE-ECDSA-AES256-SHA\n - ECDHE-RSA-AES256-SHA\n - DHE-RSA-AES128-SHA256\n - DHE-RSA-AES256-SHA256\n - AES128-GCM-SHA256\n - AES256-GCM-SHA384\n - AES128-SHA256\n - AES256-SHA256\n - AES128-SHA\n - AES256-SHA\n - DES-CBC3-SHA", + "intermediate": "intermediate is a TLS profile for use when you do not need compatibility with legacy clients and want to remain highly secure while being compatible with most clients currently in use.\n\nThe cipher list includes TLS 1.3 ciphers for forward compatibility, followed by the \"intermediate\" profile ciphers.\n\nThis profile is equivalent to a Custom profile specified as:\n minTLSVersion: VersionTLS12\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384", + "modern": "modern is a TLS security profile for use with clients that support TLS 1.3 and do not need backward compatibility for older clients.\n\nThis profile is equivalent to a Custom profile specified as:\n minTLSVersion: VersionTLS13\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256", + "custom": "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. 
An example custom profile looks like this:\n\n minTLSVersion: VersionTLS11\n ciphers:\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256", } func (TLSSecurityProfile) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go index 107b9e29a484..e8d7603d7b69 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go @@ -51,7 +51,7 @@ type ClusterImagePolicySpec struct { // policy contains configuration to allow scopes to be verified, and defines how // images not matching the verification policy will be treated. // +required - Policy Policy `json:"policy"` + Policy ImageSigstoreVerificationPolicy `json:"policy"` } // +k8s:deepcopy-gen=true diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go index f6d4cd3420b1..0653eeb5a5ed 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go @@ -33,7 +33,7 @@ import ( // +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1929 // +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 // +kubebuilder:object:root=true -// +kubebuilder:resource:path=clustermonitoring,scope=Cluster +// +kubebuilder:resource:path=clustermonitorings,scope=Cluster // +kubebuilder:subresource:status // +kubebuilder:metadata:annotations="description=Cluster Monitoring Operators configuration API" // +openshift:enable:FeatureGate=ClusterMonitoringConfig diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go b/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go index 64a89e4a63f1..977ca3dde325 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go @@ -50,7 +50,7 @@ type ImagePolicySpec struct { // policy contains configuration to allow scopes to be verified, and defines how // images not matching the verification policy will be treated. // +required - Policy Policy `json:"policy"` + Policy ImageSigstoreVerificationPolicy `json:"policy"` } // +kubebuilder:validation:XValidation:rule="size(self.split('/')[0].split('.')) == 1 ? self.split('/')[0].split('.')[0].split(':')[0] == 'localhost' : true",message="invalid image scope format, scope must contain a fully qualified domain name or 'localhost'" @@ -59,8 +59,8 @@ type ImagePolicySpec struct { // +kubebuilder:validation:MaxLength=512 type ImageScope string -// Policy defines the verification policy for the items in the scopes list. -type Policy struct { +// ImageSigstoreVerificationPolicy defines the verification policy for the items in the scopes list. +type ImageSigstoreVerificationPolicy struct { // rootOfTrust specifies the root of trust for the policy. // +required RootOfTrust PolicyRootOfTrust `json:"rootOfTrust"` @@ -84,16 +84,16 @@ type PolicyRootOfTrust struct { PolicyType PolicyType `json:"policyType"` // publicKey defines the root of trust based on a sigstore public key. 
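The custom profile example just quoted can also be expressed with the config/v1 Go types. The type and constant names in this sketch (TLSSecurityProfile, CustomTLSProfile, TLSProfileSpec, TLSProfileCustomType, VersionTLS11) come from the openshift/api config/v1 package rather than from the hunks shown here, so treat them as assumptions; the image-policy type definitions continue below.

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	// The same custom profile as in the documentation above, written as a Go literal.
	profile := configv1.TLSSecurityProfile{
		Type: configv1.TLSProfileCustomType,
		Custom: &configv1.CustomTLSProfile{
			TLSProfileSpec: configv1.TLSProfileSpec{
				MinTLSVersion: configv1.VersionTLS11,
				Ciphers: []string{
					"ECDHE-ECDSA-CHACHA20-POLY1305",
					"ECDHE-RSA-CHACHA20-POLY1305",
					"ECDHE-RSA-AES128-GCM-SHA256",
					"ECDHE-ECDSA-AES128-GCM-SHA256",
				},
			},
		},
	}
	fmt.Println(profile.Type, profile.Custom.MinTLSVersion)
}
```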
// +optional - PublicKey *PublicKey `json:"publicKey,omitempty"` + PublicKey *ImagePolicyPublicKeyRootOfTrust `json:"publicKey,omitempty"` // fulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key. // For more information about Fulcio and Rekor, please refer to the document at: // https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor // +optional - FulcioCAWithRekor *FulcioCAWithRekor `json:"fulcioCAWithRekor,omitempty"` + FulcioCAWithRekor *ImagePolicyFulcioCAWithRekorRootOfTrust `json:"fulcioCAWithRekor,omitempty"` // pki defines the root of trust based on Bring Your Own Public Key Infrastructure (BYOPKI) Root CA(s) and corresponding intermediate certificates. // +optional // +openshift:enable:FeatureGate=SigstoreImageVerificationPKI - PKI *PKI `json:"pki,omitempty"` + PKI *ImagePolicyPKIRootOfTrust `json:"pki,omitempty"` } // +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=PublicKey;FulcioCAWithRekor @@ -106,8 +106,8 @@ const ( PKIRootOfTrust PolicyType = "PKI" ) -// PublicKey defines the root of trust based on a sigstore public key. -type PublicKey struct { +// ImagePolicyPublicKeyRootOfTrust defines the root of trust based on a sigstore public key. +type ImagePolicyPublicKeyRootOfTrust struct { // keyData contains inline base64-encoded data for the PEM format public key. // KeyData must be at most 8192 characters. // +required @@ -120,8 +120,8 @@ type PublicKey struct { RekorKeyData []byte `json:"rekorKeyData,omitempty"` } -// FulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key. -type FulcioCAWithRekor struct { +// ImagePolicyFulcioCAWithRekorRootOfTrust defines the root of trust based on the Fulcio certificate and the Rekor public key. +type ImagePolicyFulcioCAWithRekorRootOfTrust struct { // fulcioCAData contains inline base64-encoded data for the PEM format fulcio CA. // fulcioCAData must be at most 8192 characters. // +required @@ -151,8 +151,8 @@ type PolicyFulcioSubject struct { SignedEmail string `json:"signedEmail"` } -// PKI defines the root of trust based on Root CA(s) and corresponding intermediate certificates. -type PKI struct { +// ImagePolicyPKIRootOfTrust defines the root of trust based on Root CA(s) and corresponding intermediate certificates. +type ImagePolicyPKIRootOfTrust struct { // caRootsData contains base64-encoded data of a certificate bundle PEM file, which contains one or more CA roots in the PEM format. The total length of the data must not exceed 8192 characters. // +required // +kubebuilder:validation:MaxLength=8192 diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go index 6549f6cbe437..9ead6aba26b3 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go @@ -429,33 +429,6 @@ func (in *EtcdBackupSpec) DeepCopy() *EtcdBackupSpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
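The renames in types_image_policy.go above are mechanical, but a short sketch shows what a consumer of config/v1alpha1 writes after them (the regenerated deepcopy functions follow below). The Scopes field name is assumed from the swagger documentation rather than shown in these hunks, and the certificate data is a placeholder; real validation would also require fields such as pkiCertificateSubject that this sketch omits.

```go
package main

import (
	"fmt"

	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
)

func main() {
	spec := configv1alpha1.ImagePolicySpec{
		Scopes: []configv1alpha1.ImageScope{"registry.example.com/myorg/myapp"},
		// The policy field was previously of type Policy.
		Policy: configv1alpha1.ImageSigstoreVerificationPolicy{
			RootOfTrust: configv1alpha1.PolicyRootOfTrust{
				PolicyType: configv1alpha1.PKIRootOfTrust, // "PKI"
				// This field was previously *PKI.
				PKI: &configv1alpha1.ImagePolicyPKIRootOfTrust{
					CertificateAuthorityRootsData: []byte("placeholder base64 PEM bundle"),
				},
			},
		},
	}
	fmt.Println(spec.Policy.RootOfTrust.PolicyType)
}
```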
-func (in *FulcioCAWithRekor) DeepCopyInto(out *FulcioCAWithRekor) { - *out = *in - if in.FulcioCAData != nil { - in, out := &in.FulcioCAData, &out.FulcioCAData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.RekorKeyData != nil { - in, out := &in.RekorKeyData, &out.RekorKeyData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - out.FulcioSubject = in.FulcioSubject - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FulcioCAWithRekor. -func (in *FulcioCAWithRekor) DeepCopy() *FulcioCAWithRekor { - if in == nil { - return nil - } - out := new(FulcioCAWithRekor) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GatherConfig) DeepCopyInto(out *GatherConfig) { *out = *in @@ -510,6 +483,33 @@ func (in *ImagePolicy) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyFulcioCAWithRekorRootOfTrust) DeepCopyInto(out *ImagePolicyFulcioCAWithRekorRootOfTrust) { + *out = *in + if in.FulcioCAData != nil { + in, out := &in.FulcioCAData, &out.FulcioCAData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.RekorKeyData != nil { + in, out := &in.RekorKeyData, &out.RekorKeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + out.FulcioSubject = in.FulcioSubject + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyFulcioCAWithRekorRootOfTrust. +func (in *ImagePolicyFulcioCAWithRekorRootOfTrust) DeepCopy() *ImagePolicyFulcioCAWithRekorRootOfTrust { + if in == nil { + return nil + } + out := new(ImagePolicyFulcioCAWithRekorRootOfTrust) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImagePolicyList) DeepCopyInto(out *ImagePolicyList) { *out = *in @@ -543,6 +543,59 @@ func (in *ImagePolicyList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyPKIRootOfTrust) DeepCopyInto(out *ImagePolicyPKIRootOfTrust) { + *out = *in + if in.CertificateAuthorityRootsData != nil { + in, out := &in.CertificateAuthorityRootsData, &out.CertificateAuthorityRootsData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.CertificateAuthorityIntermediatesData != nil { + in, out := &in.CertificateAuthorityIntermediatesData, &out.CertificateAuthorityIntermediatesData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + out.PKICertificateSubject = in.PKICertificateSubject + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyPKIRootOfTrust. +func (in *ImagePolicyPKIRootOfTrust) DeepCopy() *ImagePolicyPKIRootOfTrust { + if in == nil { + return nil + } + out := new(ImagePolicyPKIRootOfTrust) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImagePolicyPublicKeyRootOfTrust) DeepCopyInto(out *ImagePolicyPublicKeyRootOfTrust) { + *out = *in + if in.KeyData != nil { + in, out := &in.KeyData, &out.KeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.RekorKeyData != nil { + in, out := &in.RekorKeyData, &out.RekorKeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyPublicKeyRootOfTrust. +func (in *ImagePolicyPublicKeyRootOfTrust) DeepCopy() *ImagePolicyPublicKeyRootOfTrust { + if in == nil { + return nil + } + out := new(ImagePolicyPublicKeyRootOfTrust) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImagePolicySpec) DeepCopyInto(out *ImagePolicySpec) { *out = *in @@ -588,6 +641,24 @@ func (in *ImagePolicyStatus) DeepCopy() *ImagePolicyStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSigstoreVerificationPolicy) DeepCopyInto(out *ImageSigstoreVerificationPolicy) { + *out = *in + in.RootOfTrust.DeepCopyInto(&out.RootOfTrust) + in.SignedIdentity.DeepCopyInto(&out.SignedIdentity) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSigstoreVerificationPolicy. +func (in *ImageSigstoreVerificationPolicy) DeepCopy() *ImageSigstoreVerificationPolicy { + if in == nil { + return nil + } + out := new(ImageSigstoreVerificationPolicy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InsightsDataGather) DeepCopyInto(out *InsightsDataGather) { *out = *in @@ -727,33 +798,6 @@ func (in *MetricsServerConfig) DeepCopy() *MetricsServerConfig { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PKI) DeepCopyInto(out *PKI) { - *out = *in - if in.CertificateAuthorityRootsData != nil { - in, out := &in.CertificateAuthorityRootsData, &out.CertificateAuthorityRootsData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.CertificateAuthorityIntermediatesData != nil { - in, out := &in.CertificateAuthorityIntermediatesData, &out.CertificateAuthorityIntermediatesData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - out.PKICertificateSubject = in.PKICertificateSubject - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PKI. -func (in *PKI) DeepCopy() *PKI { - if in == nil { - return nil - } - out := new(PKI) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PKICertificateSubject) DeepCopyInto(out *PKICertificateSubject) { *out = *in @@ -803,24 +847,6 @@ func (in *PersistentVolumeConfig) DeepCopy() *PersistentVolumeConfig { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Policy) DeepCopyInto(out *Policy) { - *out = *in - in.RootOfTrust.DeepCopyInto(&out.RootOfTrust) - in.SignedIdentity.DeepCopyInto(&out.SignedIdentity) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. 
-func (in *Policy) DeepCopy() *Policy { - if in == nil { - return nil - } - out := new(Policy) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PolicyFulcioSubject) DeepCopyInto(out *PolicyFulcioSubject) { *out = *in @@ -900,17 +926,17 @@ func (in *PolicyRootOfTrust) DeepCopyInto(out *PolicyRootOfTrust) { *out = *in if in.PublicKey != nil { in, out := &in.PublicKey, &out.PublicKey - *out = new(PublicKey) + *out = new(ImagePolicyPublicKeyRootOfTrust) (*in).DeepCopyInto(*out) } if in.FulcioCAWithRekor != nil { in, out := &in.FulcioCAWithRekor, &out.FulcioCAWithRekor - *out = new(FulcioCAWithRekor) + *out = new(ImagePolicyFulcioCAWithRekorRootOfTrust) (*in).DeepCopyInto(*out) } if in.PKI != nil { in, out := &in.PKI, &out.PKI - *out = new(PKI) + *out = new(ImagePolicyPKIRootOfTrust) (*in).DeepCopyInto(*out) } return @@ -926,32 +952,6 @@ func (in *PolicyRootOfTrust) DeepCopy() *PolicyRootOfTrust { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PublicKey) DeepCopyInto(out *PublicKey) { - *out = *in - if in.KeyData != nil { - in, out := &in.KeyData, &out.KeyData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.RekorKeyData != nil { - in, out := &in.RekorKeyData, &out.RekorKeyData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicKey. -func (in *PublicKey) DeepCopy() *PublicKey { - if in == nil { - return nil - } - out := new(PublicKey) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
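The regenerated deepcopy functions above and below only change receiver types, but the guarantee they provide is worth a short demonstration: byte-slice fields such as KeyData are allocated fresh, so mutating a copy never touches the original. The key bytes in this sketch are placeholders.

```go
package main

import (
	"fmt"

	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
)

func main() {
	orig := &configv1alpha1.ImagePolicyPublicKeyRootOfTrust{
		KeyData: []byte("AAAA"), // placeholder, not a real PEM public key
	}
	cp := orig.DeepCopy()
	cp.KeyData[0] = 'B'
	// Prints "AAAA BAAA": DeepCopy made a new KeyData slice, exactly as the
	// generated code above does with make([]byte, ...) and copy.
	fmt.Println(string(orig.KeyData), string(cp.KeyData))
}
```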
func (in *RetentionNumberConfig) DeepCopyInto(out *RetentionNumberConfig) { *out = *in diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml index b9dca71a9259..2f79f801dd40 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml @@ -45,11 +45,11 @@ clusterimagepolicies.config.openshift.io: - SigstoreImageVerification Version: v1alpha1 -clustermonitoring.config.openshift.io: +clustermonitorings.config.openshift.io: Annotations: description: Cluster Monitoring Operators configuration API ApprovedPRNumber: https://github.com/openshift/api/pull/1929 - CRDName: clustermonitoring.config.openshift.io + CRDName: clustermonitorings.config.openshift.io Capability: "" Category: "" FeatureGates: @@ -61,7 +61,7 @@ clustermonitoring.config.openshift.io: HasStatus: true KindName: ClusterMonitoring Labels: {} - PluralName: clustermonitoring + PluralName: clustermonitorings PrinterColumns: [] Scope: Cluster ShortNames: null diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go index 6ba6ad11f440..59a5b3708568 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go @@ -226,17 +226,6 @@ func (UserDefinedMonitoring) SwaggerDoc() map[string]string { return map_UserDefinedMonitoring } -var map_FulcioCAWithRekor = map[string]string{ - "": "FulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key.", - "fulcioCAData": "fulcioCAData contains inline base64-encoded data for the PEM format fulcio CA. fulcioCAData must be at most 8192 characters.", - "rekorKeyData": "rekorKeyData contains inline base64-encoded data for the PEM format from the Rekor public key. rekorKeyData must be at most 8192 characters.", - "fulcioSubject": "fulcioSubject specifies OIDC issuer and the email of the Fulcio authentication configuration.", -} - -func (FulcioCAWithRekor) SwaggerDoc() map[string]string { - return map_FulcioCAWithRekor -} - var map_ImagePolicy = map[string]string{ "": "ImagePolicy holds namespace-wide configuration for image signature verification\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -248,6 +237,17 @@ func (ImagePolicy) SwaggerDoc() map[string]string { return map_ImagePolicy } +var map_ImagePolicyFulcioCAWithRekorRootOfTrust = map[string]string{ + "": "ImagePolicyFulcioCAWithRekorRootOfTrust defines the root of trust based on the Fulcio certificate and the Rekor public key.", + "fulcioCAData": "fulcioCAData contains inline base64-encoded data for the PEM format fulcio CA. fulcioCAData must be at most 8192 characters.", + "rekorKeyData": "rekorKeyData contains inline base64-encoded data for the PEM format from the Rekor public key. 
rekorKeyData must be at most 8192 characters.", + "fulcioSubject": "fulcioSubject specifies OIDC issuer and the email of the Fulcio authentication configuration.", +} + +func (ImagePolicyFulcioCAWithRekorRootOfTrust) SwaggerDoc() map[string]string { + return map_ImagePolicyFulcioCAWithRekorRootOfTrust +} + var map_ImagePolicyList = map[string]string{ "": "ImagePolicyList is a list of ImagePolicy resources\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -257,6 +257,27 @@ func (ImagePolicyList) SwaggerDoc() map[string]string { return map_ImagePolicyList } +var map_ImagePolicyPKIRootOfTrust = map[string]string{ + "": "ImagePolicyPKIRootOfTrust defines the root of trust based on Root CA(s) and corresponding intermediate certificates.", + "caRootsData": "caRootsData contains base64-encoded data of a certificate bundle PEM file, which contains one or more CA roots in the PEM format. The total length of the data must not exceed 8192 characters. ", + "caIntermediatesData": "caIntermediatesData contains base64-encoded data of a certificate bundle PEM file, which contains one or more intermediate certificates in the PEM format. The total length of the data must not exceed 8192 characters. caIntermediatesData requires caRootsData to be set. ", + "pkiCertificateSubject": "pkiCertificateSubject defines the requirements imposed on the subject to which the certificate was issued.", +} + +func (ImagePolicyPKIRootOfTrust) SwaggerDoc() map[string]string { + return map_ImagePolicyPKIRootOfTrust +} + +var map_ImagePolicyPublicKeyRootOfTrust = map[string]string{ + "": "ImagePolicyPublicKeyRootOfTrust defines the root of trust based on a sigstore public key.", + "keyData": "keyData contains inline base64-encoded data for the PEM format public key. KeyData must be at most 8192 characters.", + "rekorKeyData": "rekorKeyData contains inline base64-encoded data for the PEM format from the Rekor public key. rekorKeyData must be at most 8192 characters.", +} + +func (ImagePolicyPublicKeyRootOfTrust) SwaggerDoc() map[string]string { + return map_ImagePolicyPublicKeyRootOfTrust +} + var map_ImagePolicySpec = map[string]string{ "": "ImagePolicySpec is the specification of the ImagePolicy CRD.", "scopes": "scopes defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. 
In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker", @@ -275,15 +296,14 @@ func (ImagePolicyStatus) SwaggerDoc() map[string]string { return map_ImagePolicyStatus } -var map_PKI = map[string]string{ - "": "PKI defines the root of trust based on Root CA(s) and corresponding intermediate certificates.", - "caRootsData": "caRootsData contains base64-encoded data of a certificate bundle PEM file, which contains one or more CA roots in the PEM format. The total length of the data must not exceed 8192 characters. ", - "caIntermediatesData": "caIntermediatesData contains base64-encoded data of a certificate bundle PEM file, which contains one or more intermediate certificates in the PEM format. The total length of the data must not exceed 8192 characters. caIntermediatesData requires caRootsData to be set. ", - "pkiCertificateSubject": "pkiCertificateSubject defines the requirements imposed on the subject to which the certificate was issued.", +var map_ImageSigstoreVerificationPolicy = map[string]string{ + "": "ImageSigstoreVerificationPolicy defines the verification policy for the items in the scopes list.", + "rootOfTrust": "rootOfTrust specifies the root of trust for the policy.", + "signedIdentity": "signedIdentity specifies what image identity the signature claims about the image. The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity, the default matchPolicy is \"MatchRepoDigestOrExact\".", } -func (PKI) SwaggerDoc() map[string]string { - return map_PKI +func (ImageSigstoreVerificationPolicy) SwaggerDoc() map[string]string { + return map_ImageSigstoreVerificationPolicy } var map_PKICertificateSubject = map[string]string{ @@ -296,16 +316,6 @@ func (PKICertificateSubject) SwaggerDoc() map[string]string { return map_PKICertificateSubject } -var map_Policy = map[string]string{ - "": "Policy defines the verification policy for the items in the scopes list.", - "rootOfTrust": "rootOfTrust specifies the root of trust for the policy.", - "signedIdentity": "signedIdentity specifies what image identity the signature claims about the image. The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity, the default matchPolicy is \"MatchRepoDigestOrExact\".", -} - -func (Policy) SwaggerDoc() map[string]string { - return map_Policy -} - var map_PolicyFulcioSubject = map[string]string{ "": "PolicyFulcioSubject defines the OIDC issuer and the email of the Fulcio authentication configuration.", "oidcIssuer": "oidcIssuer contains the expected OIDC issuer. It will be verified that the Fulcio-issued certificate contains a (Fulcio-defined) certificate extension pointing at this OIDC issuer URL. 
When Fulcio issues certificates, it includes a value based on an URL inside the client-provided ID token. Example: \"https://expected.OIDC.issuer/\"", @@ -356,16 +366,6 @@ func (PolicyRootOfTrust) SwaggerDoc() map[string]string { return map_PolicyRootOfTrust } -var map_PublicKey = map[string]string{ - "": "PublicKey defines the root of trust based on a sigstore public key.", - "keyData": "keyData contains inline base64-encoded data for the PEM format public key. KeyData must be at most 8192 characters.", - "rekorKeyData": "rekorKeyData contains inline base64-encoded data for the PEM format from the Rekor public key. rekorKeyData must be at most 8192 characters.", -} - -func (PublicKey) SwaggerDoc() map[string]string { - return map_PublicKey -} - var map_GatherConfig = map[string]string{ "": "gatherConfig provides data gathering configuration options.", "dataPolicy": "dataPolicy allows user to enable additional global obfuscation of the IP addresses and base domain in the Insights archive data. Valid values are \"None\" and \"ObfuscateNetworking\". When set to None the data is not obfuscated. When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.", diff --git a/vendor/github.com/openshift/api/console/v1/types.go b/vendor/github.com/openshift/api/console/v1/types.go index 416eaa3e873a..24dcd5ca0b8c 100644 --- a/vendor/github.com/openshift/api/console/v1/types.go +++ b/vendor/github.com/openshift/api/console/v1/types.go @@ -4,7 +4,7 @@ package v1 type Link struct { // text is the display text for the link Text string `json:"text"` - // href is the absolute secure URL for the link (must use https) - // +kubebuilder:validation:Pattern=`^https://` + // href is the absolute URL for the link. Must use https:// for web URLs or mailto: for email links. + // +kubebuilder:validation:Pattern=`^(https://|mailto:)` Href string `json:"href"` } diff --git a/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go index a02cbf7c1760..606b95cafcd8 100644 --- a/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go @@ -14,7 +14,7 @@ package v1 var map_Link = map[string]string{ "": "Represents a standard link that could be generated in HTML", "text": "text is the display text for the link", - "href": "href is the absolute secure URL for the link (must use https)", + "href": "href is the absolute URL for the link. 
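The relaxed href validation above now also admits mailto: links (the corresponding swagger text continues below). A quick sketch of what the new kubebuilder pattern accepts, using invented sample URLs:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The same expression used in the +kubebuilder:validation:Pattern marker above.
	href := regexp.MustCompile(`^(https://|mailto:)`)

	fmt.Println(href.MatchString("https://docs.example.com/help")) // true
	fmt.Println(href.MatchString("mailto:support@example.com"))    // true
	fmt.Println(href.MatchString("http://insecure.example.com"))   // false, plain http is still rejected
}
```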
Must use https:// for web URLs or mailto: for email links.", } func (Link) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/envtest-releases.yaml b/vendor/github.com/openshift/api/envtest-releases.yaml index e8688e2b0610..e495e02796e0 100644 --- a/vendor/github.com/openshift/api/envtest-releases.yaml +++ b/vendor/github.com/openshift/api/envtest-releases.yaml @@ -64,3 +64,16 @@ releases: envtest-v1.33.2-linux-arm64.tar.gz: hash: 9936eba66fd0170808268da4c0609b7e7d4d1b0de8607b0d3a9091539b4ec881041a9e08e7b4839708b11139bcc850acd34dfc0305ed955cc61fc3fae9da58f5 selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.33.2-linux-arm64.tar.gz + v1.34.1: + envtest-v1.34.1-darwin-amd64.tar.gz: + hash: 3bf575e77d35803b81685969915d70ae23f2267bafd1fe17087126d6fcdfe67590d2f51ce59ff8f0d06e5d94b0f4d0ac3c16de1544008e9c617499cfc51844c5 + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.34.1-darwin-amd64.tar.gz + envtest-v1.34.1-darwin-arm64.tar.gz: + hash: 3c9c1d457d3fbb5c5cfb6e6c4ac31b41172cf413b9a81f8f53ac717a643f730d135d4d09549f9d78685c23704a7e3f12c891896dc23c4b1a211a10e1fd9bc043 + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.34.1-darwin-arm64.tar.gz + envtest-v1.34.1-linux-amd64.tar.gz: + hash: e5aeda6d9f9456e27c5c001bc4476a0bccc06f1431c2a9752a2ac040f69671927204dcc254bba8ebb2fb91d0e32620abfaba6daad6a80dbe376d93e57fcd2431 + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.34.1-linux-amd64.tar.gz + envtest-v1.34.1-linux-arm64.tar.gz: + hash: e2ee7e47ceeba56624fd869922ab9851200482ef835c09fe3dd57c9806a992a7e1f56641906510ebb095514953aa8a3af68d45a82be45b94981a50e894ac6e42 + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.34.1-linux-arm64.tar.gz diff --git a/vendor/github.com/openshift/api/features.md b/vendor/github.com/openshift/api/features.md index 54c9fd3deadf..025ed84f94f8 100644 --- a/vendor/github.com/openshift/api/features.md +++ b/vendor/github.com/openshift/api/features.md @@ -1,108 +1,119 @@ -| FeatureGate | Default on Hypershift | Default on SelfManagedHA | DevPreviewNoUpgrade on Hypershift | DevPreviewNoUpgrade on SelfManagedHA | TechPreviewNoUpgrade on Hypershift | TechPreviewNoUpgrade on SelfManagedHA | -| ------ | --- | --- | --- | --- | --- | --- | -| ClusterAPIInstall| | | | | | | -| EventedPLEG| | | | | | | -| MachineAPIOperatorDisableMachineHealthCheckController| | | | | | | -| MultiArchInstallAzure| | | | | | | -| ShortCertRotation| | | | | | | -| BootImageSkewEnforcement| | | Enabled | Enabled | | | -| ClusterAPIMachineManagementVSphere| | | Enabled | Enabled | | | -| Example2| | | Enabled | Enabled | | | -| ExternalSnapshotMetadata| | | Enabled | Enabled | | | -| NewOLMCatalogdAPIV1Metas| | | | Enabled | | Enabled | -| NewOLMOwnSingleNamespace| | | | Enabled | | Enabled | -| NewOLMPreflightPermissionChecks| | | | Enabled | | Enabled | -| NewOLMWebhookProviderOpenshiftServiceCA| | | | Enabled | | Enabled | -| NoRegistryClusterOperations| | | | Enabled | | Enabled | -| VSphereMixedNodeEnv| | | Enabled | Enabled | | | -| NewOLM| | Enabled | | Enabled | | Enabled | -| AWSClusterHostedDNS| | | Enabled | Enabled | Enabled | Enabled | -| AWSClusterHostedDNSInstall| | | Enabled | Enabled | Enabled | Enabled | -| AWSDedicatedHosts| | | Enabled | Enabled | Enabled | Enabled | -| AWSDualStackInstall| | | Enabled | Enabled | Enabled | Enabled | -| AWSServiceLBNetworkSecurityGroup| | | Enabled | Enabled | Enabled | 
Enabled | -| AutomatedEtcdBackup| | | Enabled | Enabled | Enabled | Enabled | -| AzureClusterHostedDNSInstall| | | Enabled | Enabled | Enabled | Enabled | -| AzureDedicatedHosts| | | Enabled | Enabled | Enabled | Enabled | -| AzureDualStackInstall| | | Enabled | Enabled | Enabled | Enabled | -| AzureMultiDisk| | | Enabled | Enabled | Enabled | Enabled | -| BootcNodeManagement| | | Enabled | Enabled | Enabled | Enabled | -| CBORServingAndStorage| | | Enabled | Enabled | Enabled | Enabled | -| ClientsAllowCBOR| | | Enabled | Enabled | Enabled | Enabled | -| ClientsPreferCBOR| | | Enabled | Enabled | Enabled | Enabled | -| ClusterAPIInstallIBMCloud| | | Enabled | Enabled | Enabled | Enabled | -| ClusterMonitoringConfig| | | Enabled | Enabled | Enabled | Enabled | -| ClusterVersionOperatorConfiguration| | | Enabled | Enabled | Enabled | Enabled | -| DNSNameResolver| | | Enabled | Enabled | Enabled | Enabled | -| DualReplica| | | Enabled | Enabled | Enabled | Enabled | -| DyanmicServiceEndpointIBMCloud| | | Enabled | Enabled | Enabled | Enabled | -| DynamicResourceAllocation| | | Enabled | Enabled | Enabled | Enabled | -| EtcdBackendQuota| | | Enabled | Enabled | Enabled | Enabled | -| EventTTL| | | Enabled | Enabled | Enabled | Enabled | -| Example| | | Enabled | Enabled | Enabled | Enabled | -| GCPClusterHostedDNS| | | Enabled | Enabled | Enabled | Enabled | -| GCPCustomAPIEndpoints| | | Enabled | Enabled | Enabled | Enabled | -| GCPCustomAPIEndpointsInstall| | | Enabled | Enabled | Enabled | Enabled | -| GCPDualStackInstall| | | Enabled | Enabled | Enabled | Enabled | -| ImageModeStatusReporting| | | Enabled | Enabled | Enabled | Enabled | -| ImageStreamImportMode| | | Enabled | Enabled | Enabled | Enabled | -| IngressControllerDynamicConfigurationManager| | | Enabled | Enabled | Enabled | Enabled | -| InsightsConfig| | | Enabled | Enabled | Enabled | Enabled | -| InsightsOnDemandDataGather| | | Enabled | Enabled | Enabled | Enabled | -| IrreconcilableMachineConfig| | | Enabled | Enabled | Enabled | Enabled | -| KMSEncryptionProvider| | | Enabled | Enabled | Enabled | Enabled | -| MachineAPIMigration| | | Enabled | Enabled | Enabled | Enabled | -| ManagedBootImagesAzure| | | Enabled | Enabled | Enabled | Enabled | -| ManagedBootImagesCPMS| | | Enabled | Enabled | Enabled | Enabled | -| ManagedBootImagesvSphere| | | Enabled | Enabled | Enabled | Enabled | -| MaxUnavailableStatefulSet| | | Enabled | Enabled | Enabled | Enabled | -| MinimumKubeletVersion| | | Enabled | Enabled | Enabled | Enabled | -| MixedCPUsAllocation| | | Enabled | Enabled | Enabled | Enabled | -| MultiDiskSetup| | | Enabled | Enabled | Enabled | Enabled | -| MutatingAdmissionPolicy| | | Enabled | Enabled | Enabled | Enabled | -| NutanixMultiSubnets| | | Enabled | Enabled | Enabled | Enabled | -| OVNObservability| | | Enabled | Enabled | Enabled | Enabled | -| PreconfiguredUDNAddresses| | | Enabled | Enabled | Enabled | Enabled | -| SELinuxMount| | | Enabled | Enabled | Enabled | Enabled | -| SignatureStores| | | Enabled | Enabled | Enabled | Enabled | -| SigstoreImageVerificationPKI| | | Enabled | Enabled | Enabled | Enabled | -| TranslateStreamCloseWebsocketRequests| | | Enabled | Enabled | Enabled | Enabled | -| VSphereConfigurableMaxAllowedBlockVolumesPerNode| | | Enabled | Enabled | Enabled | Enabled | -| VSphereHostVMGroupZonal| | | Enabled | Enabled | Enabled | Enabled | -| VolumeGroupSnapshot| | | Enabled | Enabled | Enabled | Enabled | -| AdditionalRoutingCapabilities| Enabled | Enabled | Enabled | Enabled | 
Enabled | Enabled | -| AdminNetworkPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| AlibabaPlatform| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| AzureWorkloadIdentity| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| BuildCSIVolumes| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| CPMSMachineNamePrefix| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| ConsolePluginContentSecurityPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| ExternalOIDC| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| ExternalOIDCWithUIDAndExtraClaimMappings| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| GCPClusterHostedDNSInstall| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| GatewayAPI| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| GatewayAPIController| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| HighlyAvailableArbiter| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| ImageVolume| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| KMSv1| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| MachineConfigNodes| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| ManagedBootImages| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| ManagedBootImagesAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| MetricsCollectionProfiles| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| NetworkDiagnosticsConfig| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| NetworkLiveMigration| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| NetworkSegmentation| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| OpenShiftPodSecurityAdmission| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| PinnedImages| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| ProcMountType| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| RouteAdvertisements| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| RouteExternalCertificate| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| ServiceAccountTokenNodeBinding| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| SigstoreImageVerification| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| StoragePerformantSecurityPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| UpgradeStatus| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| UserNamespacesPodSecurityStandards| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| UserNamespacesSupport| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| VSphereMultiDisk| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| VSphereMultiNetworks| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| VolumeAttributesClass| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| FeatureGate | Default on Hypershift | Default on SelfManagedHA | DevPreviewNoUpgrade on Hypershift | DevPreviewNoUpgrade on SelfManagedHA | OKD on Hypershift | OKD on SelfManagedHA | TechPreviewNoUpgrade on Hypershift | TechPreviewNoUpgrade on SelfManagedHA | +| ------ | --- | --- | --- | --- | --- | --- | --- | --- | +| ClientsAllowCBOR| | | | | | | | | +| ClusterAPIInstall| | | | | | | | | +| EventedPLEG| | | | | | | | | +| MachineAPIOperatorDisableMachineHealthCheckController| | | | | | | | | +| MultiArchInstallAzure| | | | | 
| | | | +| NewOLMBoxCutterRuntime| | | | | | | | | +| ShortCertRotation| | | | | | | | | +| ClusterAPIMachineManagementVSphere| | | Enabled | Enabled | | | | | +| Example2| | | Enabled | Enabled | | | | | +| ExternalSnapshotMetadata| | | Enabled | Enabled | | | | | +| IngressControllerDynamicConfigurationManager| | | Enabled | Enabled | | | | | +| NetworkConnect| | | Enabled | Enabled | | | | | +| NewOLMCatalogdAPIV1Metas| | | | Enabled | | | | Enabled | +| NewOLMPreflightPermissionChecks| | | | Enabled | | | | Enabled | +| NoRegistryClusterInstall| | | | Enabled | | | | Enabled | +| ProvisioningRequestAvailable| | | Enabled | Enabled | | | | | +| AWSClusterHostedDNS| | | Enabled | Enabled | | | Enabled | Enabled | +| AWSClusterHostedDNSInstall| | | Enabled | Enabled | | | Enabled | Enabled | +| AWSDedicatedHosts| | | Enabled | Enabled | | | Enabled | Enabled | +| AWSDualStackInstall| | | Enabled | Enabled | | | Enabled | Enabled | +| AWSServiceLBNetworkSecurityGroup| | | Enabled | Enabled | | | Enabled | Enabled | +| AutomatedEtcdBackup| | | Enabled | Enabled | | | Enabled | Enabled | +| AzureClusterHostedDNSInstall| | | Enabled | Enabled | | | Enabled | Enabled | +| AzureDedicatedHosts| | | Enabled | Enabled | | | Enabled | Enabled | +| AzureDualStackInstall| | | Enabled | Enabled | | | Enabled | Enabled | +| AzureMultiDisk| | | Enabled | Enabled | | | Enabled | Enabled | +| BootImageSkewEnforcement| | | Enabled | Enabled | | | Enabled | Enabled | +| BootcNodeManagement| | | Enabled | Enabled | | | Enabled | Enabled | +| CBORServingAndStorage| | | Enabled | Enabled | | | Enabled | Enabled | +| CRDCompatibilityRequirementOperator| | | Enabled | Enabled | | | Enabled | Enabled | +| ClientsPreferCBOR| | | Enabled | Enabled | | | Enabled | Enabled | +| ClusterAPIInstallIBMCloud| | | Enabled | Enabled | | | Enabled | Enabled | +| ClusterAPIMachineManagement| | | Enabled | Enabled | | | Enabled | Enabled | +| ClusterMonitoringConfig| | | Enabled | Enabled | | | Enabled | Enabled | +| ClusterUpdateAcceptRisks| | | Enabled | Enabled | | | Enabled | Enabled | +| ClusterVersionOperatorConfiguration| | | Enabled | Enabled | | | Enabled | Enabled | +| ConfigurablePKI| | | Enabled | Enabled | | | Enabled | Enabled | +| DNSNameResolver| | | Enabled | Enabled | | | Enabled | Enabled | +| DualReplica| | | Enabled | Enabled | | | Enabled | Enabled | +| DyanmicServiceEndpointIBMCloud| | | Enabled | Enabled | | | Enabled | Enabled | +| EVPN| | | Enabled | Enabled | | | Enabled | Enabled | +| EtcdBackendQuota| | | Enabled | Enabled | | | Enabled | Enabled | +| EventTTL| | | Enabled | Enabled | | | Enabled | Enabled | +| Example| | | Enabled | Enabled | | | Enabled | Enabled | +| ExternalOIDCWithUpstreamParity| | | Enabled | Enabled | | | Enabled | Enabled | +| GCPClusterHostedDNS| | | Enabled | Enabled | | | Enabled | Enabled | +| GCPCustomAPIEndpoints| | | Enabled | Enabled | | | Enabled | Enabled | +| GCPCustomAPIEndpointsInstall| | | Enabled | Enabled | | | Enabled | Enabled | +| GCPDualStackInstall| | | Enabled | Enabled | | | Enabled | Enabled | +| HyperShiftOnlyDynamicResourceAllocation| Enabled | | Enabled | | Enabled | | Enabled | | +| ImageModeStatusReporting| | | Enabled | Enabled | | | Enabled | Enabled | +| InsightsConfig| | | Enabled | Enabled | | | Enabled | Enabled | +| InsightsOnDemandDataGather| | | Enabled | Enabled | | | Enabled | Enabled | +| IrreconcilableMachineConfig| | | Enabled | Enabled | | | Enabled | Enabled | +| KMSEncryptionProvider| | | Enabled | Enabled | | | Enabled | 
Enabled | +| MachineAPIMigration| | | Enabled | Enabled | | | Enabled | Enabled | +| ManagedBootImagesCPMS| | | Enabled | Enabled | | | Enabled | Enabled | +| MaxUnavailableStatefulSet| | | Enabled | Enabled | | | Enabled | Enabled | +| MinimumKubeletVersion| | | Enabled | Enabled | | | Enabled | Enabled | +| MixedCPUsAllocation| | | Enabled | Enabled | | | Enabled | Enabled | +| MultiDiskSetup| | | Enabled | Enabled | | | Enabled | Enabled | +| MutableCSINodeAllocatableCount| | | Enabled | Enabled | | | Enabled | Enabled | +| MutatingAdmissionPolicy| | | Enabled | Enabled | | | Enabled | Enabled | +| NewOLM| | Enabled | | Enabled | | Enabled | | Enabled | +| NewOLMOwnSingleNamespace| | Enabled | | Enabled | | Enabled | | Enabled | +| NewOLMWebhookProviderOpenshiftServiceCA| | Enabled | | Enabled | | Enabled | | Enabled | +| NutanixMultiSubnets| | | Enabled | Enabled | | | Enabled | Enabled | +| OSStreams| | | Enabled | Enabled | | | Enabled | Enabled | +| OVNObservability| | | Enabled | Enabled | | | Enabled | Enabled | +| OnPremDNSRecords| | | Enabled | Enabled | | | Enabled | Enabled | +| SELinuxMount| | | Enabled | Enabled | | | Enabled | Enabled | +| SignatureStores| | | Enabled | Enabled | | | Enabled | Enabled | +| VSphereConfigurableMaxAllowedBlockVolumesPerNode| | | Enabled | Enabled | | | Enabled | Enabled | +| VSphereHostVMGroupZonal| | | Enabled | Enabled | | | Enabled | Enabled | +| VSphereMixedNodeEnv| | | Enabled | Enabled | | | Enabled | Enabled | +| VolumeGroupSnapshot| | | Enabled | Enabled | | | Enabled | Enabled | +| AdditionalRoutingCapabilities| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| AdminNetworkPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| AlibabaPlatform| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| AzureWorkloadIdentity| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| BuildCSIVolumes| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| CPMSMachineNamePrefix| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ConsolePluginContentSecurityPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ExternalOIDC| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ExternalOIDCWithUIDAndExtraClaimMappings| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| GCPClusterHostedDNSInstall| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| GatewayAPI| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| GatewayAPIController| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| HighlyAvailableArbiter| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ImageStreamImportMode| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ImageVolume| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| KMSv1| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| MachineConfigNodes| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ManagedBootImages| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ManagedBootImagesAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| 
ManagedBootImagesAzure| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ManagedBootImagesvSphere| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| MetricsCollectionProfiles| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| NetworkDiagnosticsConfig| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| NetworkLiveMigration| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| NetworkSegmentation| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| OpenShiftPodSecurityAdmission| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| PinnedImages| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| PreconfiguredUDNAddresses| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ProcMountType| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| RouteAdvertisements| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| RouteExternalCertificate| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ServiceAccountTokenNodeBinding| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| SigstoreImageVerification| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| SigstoreImageVerificationPKI| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| StoragePerformantSecurityPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| UpgradeStatus| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| UserNamespacesPodSecurityStandards| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| UserNamespacesSupport| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| VSphereMultiDisk| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| VSphereMultiNetworks| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| VolumeAttributesClass| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | diff --git a/vendor/github.com/openshift/api/features/features.go b/vendor/github.com/openshift/api/features/features.go index 5d3836a4366d..289cf106bdce 100644 --- a/vendor/github.com/openshift/api/features/features.go +++ b/vendor/github.com/openshift/api/features/features.go @@ -40,7 +40,7 @@ var ( reportProblemsToJiraComponent("Management Console"). contactPerson("jhadvig"). productScope(ocpSpecific). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). enhancementPR("https://github.com/openshift/enhancements/pull/1706"). mustRegister() @@ -49,7 +49,7 @@ var ( contactPerson("ibihim"). productScope(kubernetes). enhancementPR("https://github.com/kubernetes/enhancements/issues/4193"). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateMutatingAdmissionPolicy = newFeatureGate("MutatingAdmissionPolicy"). @@ -65,7 +65,7 @@ var ( contactPerson("miciah"). 
productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateOpenShiftPodSecurityAdmission = newFeatureGate("OpenShiftPodSecurityAdmission"). @@ -73,7 +73,7 @@ var ( contactPerson("ibihim"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/899"). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateBuildCSIVolumes = newFeatureGate("BuildCSIVolumes"). @@ -81,23 +81,15 @@ var ( contactPerson("adkaplan"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() - FeatureGateDynamicResourceAllocation = newFeatureGate("DynamicResourceAllocation"). - reportProblemsToJiraComponent("scheduling"). - contactPerson("jchaloup"). - productScope(kubernetes). - enhancementPR("https://github.com/kubernetes/enhancements/issues/4381"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateAzureWorkloadIdentity = newFeatureGate("AzureWorkloadIdentity"). reportProblemsToJiraComponent("cloud-credential-operator"). contactPerson("abutcher"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateAzureDedicatedHosts = newFeatureGate("AzureDedicatedHosts"). @@ -128,7 +120,7 @@ var ( contactPerson("sgrunert"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). mustRegister() FeatureGateSigstoreImageVerificationPKI = newFeatureGate("SigstoreImageVerificationPKI"). @@ -136,7 +128,7 @@ var ( contactPerson("QiWang"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1658"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). mustRegister() FeatureGateAlibabaPlatform = newFeatureGate("AlibabaPlatform"). @@ -144,7 +136,7 @@ var ( contactPerson("jspeed"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateVSphereHostVMGroupZonal = newFeatureGate("VSphereHostVMGroupZonal"). @@ -160,7 +152,7 @@ var ( contactPerson("vr4manta"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1709"). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). 
+ enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateRouteExternalCertificate = newFeatureGate("RouteExternalCertificate"). @@ -168,7 +160,7 @@ var ( contactPerson("chiragkyal"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateCPMSMachineNamePrefix = newFeatureGate("CPMSMachineNamePrefix"). @@ -176,7 +168,7 @@ var ( contactPerson("chiragkyal"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1714"). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateAdminNetworkPolicy = newFeatureGate("AdminNetworkPolicy"). @@ -184,7 +176,7 @@ var ( contactPerson("tssurya"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateNetworkSegmentation = newFeatureGate("NetworkSegmentation"). @@ -192,7 +184,15 @@ var ( contactPerson("tssurya"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1623"). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateNetworkConnect = newFeatureGate("NetworkConnect"). + reportProblemsToJiraComponent("Networking/ovn-kubernetes"). + contactPerson("tssurya"). + productScope(ocpSpecific). + enhancementPR("https://github.com/ovn-kubernetes/ovn-kubernetes/pull/5246"). + enableIn(configv1.DevPreviewNoUpgrade). mustRegister() FeatureGateAdditionalRoutingCapabilities = newFeatureGate("AdditionalRoutingCapabilities"). @@ -200,7 +200,7 @@ var ( contactPerson("jcaamano"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateRouteAdvertisements = newFeatureGate("RouteAdvertisements"). @@ -208,15 +208,23 @@ var ( contactPerson("jcaamano"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() + FeatureGateEVPN = newFeatureGate("EVPN"). + reportProblemsToJiraComponent("Networking/ovn-kubernetes"). + contactPerson("jcaamano"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1862"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + FeatureGateNetworkLiveMigration = newFeatureGate("NetworkLiveMigration"). reportProblemsToJiraComponent("Networking/ovn-kubernetes"). contactPerson("pliu"). productScope(ocpSpecific). 
enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateNetworkDiagnosticsConfig = newFeatureGate("NetworkDiagnosticsConfig"). @@ -224,7 +232,7 @@ var ( contactPerson("kyrtapz"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateOVNObservability = newFeatureGate("OVNObservability"). @@ -271,7 +279,7 @@ var ( contactPerson("ijanssen"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1765"). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateImageModeStatusReporting = newFeatureGate("ImageModeStatusReporting"). @@ -326,7 +334,7 @@ var ( contactPerson("djoshy"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateManagedBootImagesAWS = newFeatureGate("ManagedBootImagesAWS"). @@ -334,7 +342,7 @@ var ( contactPerson("djoshy"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateManagedBootImagesvSphere = newFeatureGate("ManagedBootImagesvSphere"). @@ -342,7 +350,7 @@ var ( contactPerson("rsaini"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1496"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateManagedBootImagesAzure = newFeatureGate("ManagedBootImagesAzure"). @@ -350,7 +358,7 @@ var ( contactPerson("djoshy"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1761"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateManagedBootImagesCPMS = newFeatureGate("ManagedBootImagesCPMS"). @@ -366,7 +374,7 @@ var ( contactPerson("djoshy"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1761"). - enableIn(configv1.DevPreviewNoUpgrade). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateBootcNodeManagement = newFeatureGate("BootcNodeManagement"). @@ -390,7 +398,7 @@ var ( contactPerson("dgrisonnet"). productScope(kubernetes). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). 
mustRegister() FeatureGatePinnedImages = newFeatureGate("PinnedImages"). @@ -398,7 +406,7 @@ var ( contactPerson("RishabhSaini"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateUpgradeStatus = newFeatureGate("UpgradeStatus"). @@ -406,23 +414,15 @@ var ( contactPerson("pmuller"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). mustRegister() - FeatureGateTranslateStreamCloseWebsocketRequests = newFeatureGate("TranslateStreamCloseWebsocketRequests"). - reportProblemsToJiraComponent("kube-apiserver"). - contactPerson("akashem"). - productScope(kubernetes). - enhancementPR("https://github.com/kubernetes/enhancements/issues/4006"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateVolumeAttributesClass = newFeatureGate("VolumeAttributesClass"). reportProblemsToJiraComponent("Storage / Kubernetes External Components"). contactPerson("dfajmon"). productScope(kubernetes). enhancementPR("https://github.com/kubernetes/enhancements/issues/3751"). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateVolumeGroupSnapshot = newFeatureGate("VolumeGroupSnapshot"). @@ -446,7 +446,7 @@ var ( contactPerson("liouk"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1596"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). mustRegister() FeatureGateExternalOIDCWithAdditionalClaimMappings = newFeatureGate("ExternalOIDCWithUIDAndExtraClaimMappings"). @@ -454,9 +454,17 @@ var ( contactPerson("bpalmer"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1777"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). mustRegister() + FeatureGateExternalOIDCWithUpstreamParity = newFeatureGate("ExternalOIDCWithUpstreamParity"). + reportProblemsToJiraComponent("authentication"). + contactPerson("saldawam"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1763"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + FeatureGateExample = newFeatureGate("Example"). reportProblemsToJiraComponent("cluster-config"). contactPerson("deads"). @@ -478,7 +486,7 @@ var ( contactPerson("joe"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableForClusterProfile(SelfManaged, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default). + enableForClusterProfile(SelfManaged, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). 
mustRegister() FeatureGateNewOLMCatalogdAPIV1Metas = newFeatureGate("NewOLMCatalogdAPIV1Metas"). @@ -501,18 +509,25 @@ var ( reportProblemsToJiraComponent("olm"). contactPerson("nschieder"). productScope(ocpSpecific). - enhancementPR("https://github.com/openshift/enhancements/pull/1774"). - enableForClusterProfile(SelfManaged, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enhancementPR("https://github.com/openshift/enhancements/pull/1849"). + enableForClusterProfile(SelfManaged, configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateNewOLMWebhookProviderOpenshiftServiceCA = newFeatureGate("NewOLMWebhookProviderOpenshiftServiceCA"). reportProblemsToJiraComponent("olm"). contactPerson("pegoncal"). productScope(ocpSpecific). - enhancementPR("https://github.com/openshift/enhancements/pull/1799"). - enableForClusterProfile(SelfManaged, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enhancementPR("https://github.com/openshift/enhancements/pull/1844"). + enableForClusterProfile(SelfManaged, configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() + FeatureGateNewOLMBoxCutterRuntime = newFeatureGate("NewOLMBoxCutterRuntime"). + reportProblemsToJiraComponent("olm"). + contactPerson("pegoncal"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1890"). + mustRegister() + FeatureGateInsightsOnDemandDataGather = newFeatureGate("InsightsOnDemandDataGather"). reportProblemsToJiraComponent("insights"). contactPerson("tremes"). @@ -534,7 +549,7 @@ var ( contactPerson("rexagod"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateClusterAPIInstallIBMCloud = newFeatureGate("ClusterAPIInstallIBMCloud"). @@ -553,6 +568,14 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() + FeatureGateClusterAPIMachineManagement = newFeatureGate("ClusterAPIMachineManagement"). + reportProblemsToJiraComponent("Cloud Compute / Cluster API Providers"). + contactPerson("ddonati"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1465"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + FeatureGateClusterAPIMachineManagementVSphere = newFeatureGate("ClusterAPIMachineManagementVSphere"). reportProblemsToJiraComponent("SPLAT"). contactPerson("jcpowermac"). @@ -581,7 +604,7 @@ var ( contactPerson("psundara"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateUserNamespacesSupport = newFeatureGate("UserNamespacesSupport"). @@ -589,7 +612,7 @@ var ( contactPerson("haircommander"). productScope(kubernetes). enhancementPR("https://github.com/kubernetes/enhancements/issues/127"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). 
mustRegister() // Note: this feature is perma-alpha, but it is safe and desireable to enable. @@ -600,7 +623,7 @@ var ( contactPerson("haircommander"). productScope(kubernetes). enhancementPR("https://github.com/kubernetes/enhancements/issues/127"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). mustRegister() FeatureGateProcMountType = newFeatureGate("ProcMountType"). @@ -608,7 +631,7 @@ var ( contactPerson("haircommander"). productScope(kubernetes). enhancementPR("https://github.com/kubernetes/enhancements/issues/4265"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). mustRegister() FeatureGateVSphereMultiNetworks = newFeatureGate("VSphereMultiNetworks"). @@ -616,7 +639,7 @@ var ( contactPerson("rvanderp"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateIngressControllerDynamicConfigurationManager = newFeatureGate("IngressControllerDynamicConfigurationManager"). @@ -624,7 +647,7 @@ var ( contactPerson("miciah"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.DevPreviewNoUpgrade). mustRegister() FeatureGateMinimumKubeletVersion = newFeatureGate("MinimumKubeletVersion"). @@ -656,7 +679,7 @@ var ( contactPerson("eggfoobar"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1674"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). mustRegister() FeatureGateCVOConfiguration = newFeatureGate("ClusterVersionOperatorConfiguration"). @@ -667,6 +690,14 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() + FeatureGateClusterUpdateAcceptRisks = newFeatureGate("ClusterUpdateAcceptRisks"). + reportProblemsToJiraComponent("Cluster Version Operator"). + contactPerson("hongkliu"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1807"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + FeatureGateGCPCustomAPIEndpoints = newFeatureGate("GCPCustomAPIEndpoints"). reportProblemsToJiraComponent("Installer"). contactPerson("barbacbd"). @@ -709,7 +740,7 @@ var ( // A dedicated feature gate now controls the Gateway Controller to distinguish // its production readiness from that of the CRDs. enhancementPR("https://github.com/openshift/enhancements/pull/1756"). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureShortCertRotation = newFeatureGate("ShortCertRotation"). @@ -740,7 +771,7 @@ var ( contactPerson("hekumar"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1804"). 
- enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). mustRegister() FeatureGateMultiDiskSetup = newFeatureGate("MultiDiskSetup"). @@ -752,8 +783,8 @@ var ( mustRegister() FeatureGateAWSDedicatedHosts = newFeatureGate("AWSDedicatedHosts"). - reportProblemsToJiraComponent("Installer"). - contactPerson("faermanj"). + reportProblemsToJiraComponent("splat"). + contactPerson("rvanderp3"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1781"). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). @@ -764,7 +795,7 @@ var ( contactPerson("vr4manta"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1772"). - enableIn(configv1.DevPreviewNoUpgrade). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGatePreconfiguredUDNAddresses = newFeatureGate("PreconfiguredUDNAddresses"). @@ -772,7 +803,7 @@ var ( contactPerson("kyrtapz"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1793"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). mustRegister() FeatureGateAWSServiceLBNetworkSecurityGroup = newFeatureGate("AWSServiceLBNetworkSecurityGroup"). @@ -788,10 +819,10 @@ var ( contactPerson("haircommander"). productScope(kubernetes). enhancementPR("https://github.com/openshift/enhancements/pull/1792"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). mustRegister() - FeatureGateNoRegistryClusterOperations = newFeatureGate("NoRegistryClusterOperations"). + FeatureGateNoRegistryClusterInstall = newFeatureGate("NoRegistryClusterInstall"). reportProblemsToJiraComponent("Installer / Agent based installation"). contactPerson("andfasano"). productScope(ocpSpecific). @@ -804,7 +835,7 @@ var ( contactPerson("barbacbd"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1468"). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateAWSClusterHostedDNSInstall = newFeatureGate("AWSClusterHostedDNSInstall"). @@ -867,7 +898,6 @@ var ( contactPerson("benluddy"). productScope(kubernetes). enhancementPR("https://github.com/kubernetes/enhancements/issues/4222"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureClientsPreferCBOR = newFeatureGate("ClientsPreferCBOR"). @@ -885,4 +915,58 @@ var ( enhancementPR("https://github.com/openshift/enhancements/pull/1857"). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() + + FeatureGateMutableCSINodeAllocatableCount = newFeatureGate("MutableCSINodeAllocatableCount"). + reportProblemsToJiraComponent("Storage / Kubernetes External Components"). + contactPerson("jsafrane"). + productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/4876"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + FeatureGateOSStreams = newFeatureGate("OSStreams"). 
+ reportProblemsToJiraComponent("MachineConfigOperator"). + contactPerson("pabrodri"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1874"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateCRDCompatibilityRequirementOperator = newFeatureGate("CRDCompatibilityRequirementOperator"). + reportProblemsToJiraComponent("Cloud Compute / Cluster API Providers"). + contactPerson("ddonati"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1845"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + FeatureGateOnPremDNSRecords = newFeatureGate("OnPremDNSRecords"). + reportProblemsToJiraComponent("Networking / On-Prem DNS"). + contactPerson("bnemec"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1803"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateProvisioningRequestAvailable = newFeatureGate("ProvisioningRequestAvailable"). + reportProblemsToJiraComponent("Cluster Autoscaler"). + contactPerson("elmiko"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1752"). + enableIn(configv1.DevPreviewNoUpgrade). + mustRegister() + + FeatureGateHyperShiftOnlyDynamicResourceAllocation = newFeatureGate("HyperShiftOnlyDynamicResourceAllocation"). + reportProblemsToJiraComponent("hypershift"). + contactPerson("csrwng"). + productScope(ocpSpecific). + enhancementPR("https://github.com/kubernetes/enhancements/issues/4381"). + enableForClusterProfile(Hypershift, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). + mustRegister() + + FeatureGateConfigurablePKI = newFeatureGate("ConfigurablePKI"). + reportProblemsToJiraComponent("kube-apiserver"). + contactPerson("sanchezl"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1882"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() ) diff --git a/vendor/github.com/openshift/api/features/util.go b/vendor/github.com/openshift/api/features/util.go index 59bb7bff4078..8606b6befd5d 100644 --- a/vendor/github.com/openshift/api/features/util.go +++ b/vendor/github.com/openshift/api/features/util.go @@ -2,9 +2,10 @@ package features import ( "fmt" - configv1 "github.com/openshift/api/config/v1" "net/url" "strings" + + configv1 "github.com/openshift/api/config/v1" ) // FeatureGateDescription is a golang-only interface used to contains details for a feature gate. 
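Editorial aside, not part of the diff: every gate added or touched in features.go above follows the same featureGateBuilder chain. A minimal, purely hypothetical declaration, with placeholder name, Jira component, contact, and enhancement PR number, would sit inside the package's var block and look roughly like this:

	// Hypothetical gate, shown only to illustrate the builder chain used
	// throughout this file; the name, component, contact, and PR number
	// below are placeholders, not real values from this bump.
	FeatureGateExampleNewGate = newFeatureGate("ExampleNewGate").
		reportProblemsToJiraComponent("example-component").
		contactPerson("jdoe").
		productScope(ocpSpecific).
		enhancementPR("https://github.com/openshift/enhancements/pull/9999").
		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
		mustRegister()

mustRegister panics if the builder is incomplete, which is what the register() validation in features/util.go enforces.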
@@ -133,8 +134,10 @@ func (b *featureGateBuilder) register() (configv1.FeatureGateName, error) { case len(b.enhancementPRURL) == 0: return "", fmt.Errorf("FeatureGate/%s is missing an enhancementPR with GA Graduation Criteria like https://github.com/openshift/enhancements/pull/#### or https://github.com/kubernetes/enhancements/issues/####", b.name) - case !strings.HasPrefix(b.enhancementPRURL, "https://github.com/openshift/enhancements/pull/") && !strings.HasPrefix(b.enhancementPRURL, "https://github.com/kubernetes/enhancements/issues/"): - return "", fmt.Errorf("FeatureGate/%s enhancementPR format is incorrect; must be like https://github.com/openshift/enhancements/pull/#### or https://github.com/kubernetes/enhancements/issues/####", b.name) + case !strings.HasPrefix(b.enhancementPRURL, "https://github.com/openshift/enhancements/pull/") && + !strings.HasPrefix(b.enhancementPRURL, "https://github.com/kubernetes/enhancements/issues/") && + !strings.HasPrefix(b.enhancementPRURL, "https://github.com/ovn-kubernetes/ovn-kubernetes/pull/"): + return "", fmt.Errorf("FeatureGate/%s enhancementPR format is incorrect; must be like https://github.com/openshift/enhancements/pull/#### or https://github.com/kubernetes/enhancements/issues/#### or https://github.com/ovn-kubernetes/ovn-kubernetes/pull/####", b.name) case enhancementPRErr != nil: return "", fmt.Errorf("FeatureGate/%s is enhancementPR is invalid: %w", b.name, enhancementPRErr) diff --git a/vendor/github.com/openshift/api/install.go b/vendor/github.com/openshift/api/install.go index cc9115000915..ea5f349708ca 100644 --- a/vendor/github.com/openshift/api/install.go +++ b/vendor/github.com/openshift/api/install.go @@ -47,6 +47,7 @@ import ( kstoragev1beta1 "k8s.io/api/storage/v1beta1" "k8s.io/apimachinery/pkg/runtime" + "github.com/openshift/api/apiextensions" "github.com/openshift/api/apiserver" "github.com/openshift/api/apps" "github.com/openshift/api/authorization" @@ -83,6 +84,7 @@ import ( var ( schemeBuilder = runtime.NewSchemeBuilder( + apiextensions.Install, apiserver.Install, apps.Install, authorization.Install, diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go index b3b38bc6cc14..d1d5941fa836 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go @@ -114,6 +114,15 @@ type AWSMachineProviderConfig struct { // If this value is selected, capacityReservationID must be specified to identify the target reservation. // +optional MarketType MarketType `json:"marketType,omitempty"` + + // Tombstone: This field was moved into the Placement struct to belong w/ the Tenancy field due to involvement with the setting. + // hostPlacement configures placement on AWS Dedicated Hosts. This allows admins to assign instances to specific host + // for a variety of needs including for regulatory compliance, to leverage existing per-socket or per-core software licenses (BYOL), + // and to gain visibility and control over instance placement on a physical server. + // When omitted, the instance is not constrained to a dedicated host. + // +openshift:enable:FeatureGate=AWSDedicatedHosts + // +optional + //HostPlacement *HostPlacement `json:"hostPlacement,omitempty"` } // AWSConfidentialComputePolicy represents the confidential compute configuration for the instance. 
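Not part of the diff, just a reading aid for the register() change in features/util.go above: the updated switch case widens the allowlist of enhancement-PR prefixes to include ovn-kubernetes pull requests. A standalone sketch expressing the same check (the helper name is mine, not the library's):

	package features

	import "strings"

	// hasAllowedEnhancementPrefix mirrors the allowlist enforced by the updated
	// register() check: openshift/enhancements pulls, kubernetes/enhancements
	// issues, or (newly allowed in this bump) ovn-kubernetes/ovn-kubernetes pulls.
	func hasAllowedEnhancementPrefix(url string) bool {
		for _, prefix := range []string{
			"https://github.com/openshift/enhancements/pull/",
			"https://github.com/kubernetes/enhancements/issues/",
			"https://github.com/ovn-kubernetes/ovn-kubernetes/pull/",
		} {
			if strings.HasPrefix(url, prefix) {
				return true
			}
		}
		return false
	}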
@@ -205,6 +214,19 @@ type EBSBlockDeviceSpec struct { // it is not used in requests to create gp2, st1, sc1, or standard volumes. // +optional Iops *int64 `json:"iops,omitempty"` + // throughputMib to provision in MiB/s supported for the volume type. Not applicable to all types. + // + // This parameter is valid only for gp3 volumes. + // Valid Range: Minimum value of 125. Maximum value of 2000. + // + // When omitted, this means no opinion, and the platform is left to + // choose a reasonable default, which is subject to change over time. + // The current default is 125. + // + // +kubebuilder:validation:Minimum:=125 + // +kubebuilder:validation:Maximum:=2000 + // +optional + ThroughputMib *int32 `json:"throughputMib,omitempty"` // The size of the volume, in GiB. // // Constraints: 1-16384 for General Purpose SSD (gp2), 4-16384 for Provisioned @@ -217,7 +239,7 @@ type EBSBlockDeviceSpec struct { // a volume size, the default is the snapshot size. // +optional VolumeSize *int64 `json:"volumeSize,omitempty"` - // The volume type: gp2, io1, st1, sc1, or standard. + // volumeType can be of type gp2, gp3, io1, st1, sc1, or standard. // Default: standard // +optional VolumeType *string `json:"volumeType,omitempty"` @@ -273,6 +295,7 @@ type AWSResourceReference struct { } // Placement indicates where to create the instance in AWS +// +kubebuilder:validation:XValidation:rule="has(self.tenancy) && self.tenancy == 'host' ? true : !has(self.host)",message="host may only be specified when tenancy is host" type Placement struct { // region is the region to use to create the instance // +optional @@ -282,8 +305,19 @@ type Placement struct { AvailabilityZone string `json:"availabilityZone,omitempty"` // tenancy indicates if instance should run on shared or single-tenant hardware. There are // supported 3 options: default, dedicated and host. + // When set to default Runs on shared multi-tenant hardware. + // When dedicated Runs on single-tenant hardware (any dedicated instance hardware). + // When host and the host object is not provided: Runs on Dedicated Host; best-effort restart on same host. + // When `host` and `host` object is provided with affinity `dedicatedHost` defined: Runs on specified Dedicated Host. // +optional Tenancy InstanceTenancy `json:"tenancy,omitempty"` + // host configures placement on AWS Dedicated Hosts. This allows admins to assign instances to specific host + // for a variety of needs including for regulatory compliance, to leverage existing per-socket or per-core software licenses (BYOL), + // and to gain visibility and control over instance placement on a physical server. + // When omitted, the instance is not constrained to a dedicated host. + // +openshift:enable:FeatureGate=AWSDedicatedHosts + // +optional + Host *HostPlacement `json:"host,omitempty"` } // Filter is a filter used to identify an AWS resource @@ -393,3 +427,48 @@ const ( // When set to CapacityBlock the instance utilizes pre-purchased compute capacity (capacity blocks) with AWS Capacity Reservations. MarketTypeCapacityBlock MarketType = "CapacityBlock" ) + +// HostPlacement is the type that will be used to configure the placement of AWS instances. +// +kubebuilder:validation:XValidation:rule="has(self.affinity) && self.affinity == 'DedicatedHost' ? has(self.dedicatedHost) : true",message="dedicatedHost is required when affinity is DedicatedHost, and optional otherwise" +// +union +type HostPlacement struct { + // affinity specifies the affinity setting for the instance. 
+ // Allowed values are AnyAvailable and DedicatedHost. + // When Affinity is set to DedicatedHost, an instance started onto a specific host always restarts on the same host if stopped. In this scenario, the `dedicatedHost` field must be set. + // When Affinity is set to AnyAvailable, and you stop and restart the instance, it can be restarted on any available host. + // When Affinity is set to AnyAvailable and the `dedicatedHost` field is defined, it runs on specified Dedicated Host, but may move if stopped. + // +required + // +unionDiscriminator + Affinity *HostAffinity `json:"affinity,omitempty"` + + // dedicatedHost specifies the exact host that an instance should be restarted on if stopped. + // dedicatedHost is required when 'affinity' is set to DedicatedHost, and optional otherwise. + // +optional + // +unionMember + DedicatedHost *DedicatedHost `json:"dedicatedHost,omitempty"` +} + +// HostAffinity selects how an instance should be placed on AWS Dedicated Hosts. +// +kubebuilder:validation:Enum:=DedicatedHost;AnyAvailable +type HostAffinity string + +const ( + // HostAffinityAnyAvailable lets the platform select any available dedicated host. + HostAffinityAnyAvailable HostAffinity = "AnyAvailable" + + // HostAffinityDedicatedHost requires specifying a particular host via dedicatedHost.host.hostID. + HostAffinityDedicatedHost HostAffinity = "DedicatedHost" +) + +// DedicatedHost represents the configuration for the usage of dedicated host. +type DedicatedHost struct { + // id identifies the AWS Dedicated Host on which the instance must run. + // The value must start with "h-" followed by either 8 or 17 lowercase hexadecimal characters (0-9 and a-f). + // The use of 8 lowercase hexadecimal characters is for older legacy hosts that may not have been migrated to newer format. + // Must be either 10 or 19 characters in length. + // +kubebuilder:validation:XValidation:rule="self.matches('^h-([0-9a-f]{8}|[0-9a-f]{17})$')",message="hostID must start with 'h-' followed by either 8 or 17 lowercase hexadecimal characters (0-9 and a-f)" + // +kubebuilder:validation:MinLength=10 + // +kubebuilder:validation:MaxLength=19 + // +required + ID string `json:"id,omitempty"` +} diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go index 72a31b5bddaf..9713a4e4a87f 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go @@ -25,6 +25,14 @@ const ( RestartPolicyNever GCPRestartPolicyType = "Never" ) +// GCPProvisioningModelType is a type representing acceptable values for ProvisioningModel field in GCPMachineProviderSpec +type GCPProvisioningModelType string + +const ( + // GCPSpotInstance enables the GCP instances as spot instances which provide significant cost savings but may be preempted by Google Cloud Platform when resources are needed elsewhere. + GCPSpotInstance GCPProvisioningModelType = "Spot" +) + // SecureBootPolicy represents the secure boot configuration for the GCP machine. type SecureBootPolicy string @@ -129,6 +137,14 @@ type GCPMachineProviderSpec struct { // preemptible indicates if created instance is preemptible. // +optional Preemptible bool `json:"preemptible,omitempty"` + // provisioningModel is an optional field that determines the provisioning model for the GCP machine instance. + // Valid values are "Spot" and omitted. 
+ // When set to Spot, the instance runs as a Google Cloud Spot instance which provides significant cost savings but may be preempted by Google Cloud Platform when resources are needed elsewhere. + // When omitted, the machine will be provisioned as a standard on-demand instance. + // This field cannot be used together with the preemptible field. + // +optional + // +kubebuilder:validation:Enum=Spot + ProvisioningModel *GCPProvisioningModelType `json:"provisioningModel,omitempty"` // onHostMaintenance determines the behavior when a maintenance event occurs that might cause the instance to reboot. // This is required to be set to "Terminate" if you want to provision machine with attached GPUs. // Otherwise, allowed values are "Migrate" and "Terminate". diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go index 33f472f9264d..6bfe850812b8 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go @@ -185,6 +185,18 @@ const ( MachineAuthorityMigrating MachineAuthority = "Migrating" ) +// SynchronizedAPI holds the last stable value of authoritativeAPI. +// +kubebuilder:validation:Enum=MachineAPI;ClusterAPI +type SynchronizedAPI string + +const ( + // MachineAPISynchronized indicates that the Machine API is the last synchronized API. + MachineAPISynchronized SynchronizedAPI = "MachineAPI" + + // ClusterAPISynchronized indicates that the Cluster API is the last synchronized API. + ClusterAPISynchronized SynchronizedAPI = "ClusterAPI" +) + // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -317,6 +329,7 @@ type LifecycleHook struct { // MachineStatus defines the observed state of Machine // +openshift:validation:FeatureGateAwareXValidation:featureGate=MachineAPIMigration,rule="!has(oldSelf.synchronizedGeneration) || (has(self.synchronizedGeneration) && self.synchronizedGeneration >= oldSelf.synchronizedGeneration) || (oldSelf.authoritativeAPI == 'Migrating' && self.authoritativeAPI != 'Migrating')",message="synchronizedGeneration must not decrease unless authoritativeAPI is transitioning from Migrating to another value" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=MachineAPIMigration,rule="has(self.authoritativeAPI) || !has(oldSelf.authoritativeAPI)",message="authoritativeAPI may not be removed once set" type MachineStatus struct { // nodeRef will point to the corresponding Node if it exists. // +optional @@ -406,6 +419,14 @@ type MachineStatus struct { // +optional AuthoritativeAPI MachineAuthority `json:"authoritativeAPI,omitempty"` + // synchronizedAPI holds the last stable value of authoritativeAPI. + // It is used to detect migration cancellation requests and to restore the resource to its previous state. + // Valid values are "MachineAPI" and "ClusterAPI". + // When omitted, the resource has not yet been reconciled by the migration controller. + // +openshift:enable:FeatureGate=MachineAPIMigration + // +optional + SynchronizedAPI SynchronizedAPI `json:"synchronizedAPI,omitempty"` + // synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. // This field is set when the authoritative resource is updated and the sync controller has updated the non-authoritative resource to match. 
// +kubebuilder:validation:Minimum=0 diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go index a2343dc3982f..be5476344b12 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go @@ -112,6 +112,7 @@ type MachineTemplateSpec struct { // MachineSetStatus defines the observed state of MachineSet // +openshift:validation:FeatureGateAwareXValidation:featureGate=MachineAPIMigration,rule="!has(oldSelf.synchronizedGeneration) || (has(self.synchronizedGeneration) && self.synchronizedGeneration >= oldSelf.synchronizedGeneration) || (oldSelf.authoritativeAPI == 'Migrating' && self.authoritativeAPI != 'Migrating')",message="synchronizedGeneration must not decrease unless authoritativeAPI is transitioning from Migrating to another value" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=MachineAPIMigration,rule="has(self.authoritativeAPI) || !has(oldSelf.authoritativeAPI)",message="authoritativeAPI may not be removed once set" type MachineSetStatus struct { // replicas is the most recently observed number of replicas. // +optional @@ -168,6 +169,14 @@ type MachineSetStatus struct { // +optional AuthoritativeAPI MachineAuthority `json:"authoritativeAPI,omitempty"` + // synchronizedAPI holds the last stable value of authoritativeAPI. + // It is used to detect migration cancellation requests and to restore the resource to its previous state. + // Valid values are "MachineAPI" and "ClusterAPI". + // When omitted, the resource has not yet been reconciled by the migration controller. + // +openshift:enable:FeatureGate=MachineAPIMigration + // +optional + SynchronizedAPI SynchronizedAPI `json:"synchronizedAPI,omitempty"` + // synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. // This field is set when the authoritative resource is updated and the sync controller has updated the non-authoritative resource to match. // +kubebuilder:validation:Minimum=0 diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go index 5aa4f90a497a..d08906c7d871 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go @@ -61,7 +61,7 @@ func (in *AWSMachineProviderConfig) DeepCopyInto(out *AWSMachineProviderConfig) } } in.Subnet.DeepCopyInto(&out.Subnet) - out.Placement = in.Placement + in.Placement.DeepCopyInto(&out.Placement) if in.LoadBalancers != nil { in, out := &in.LoadBalancers, &out.LoadBalancers *out = make([]LoadBalancerReference, len(*in)) @@ -509,6 +509,22 @@ func (in *DataDiskManagedDiskParameters) DeepCopy() *DataDiskManagedDiskParamete return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DedicatedHost) DeepCopyInto(out *DedicatedHost) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DedicatedHost. +func (in *DedicatedHost) DeepCopy() *DedicatedHost { + if in == nil { + return nil + } + out := new(DedicatedHost) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DiskEncryptionSetParameters) DeepCopyInto(out *DiskEncryptionSetParameters) { *out = *in @@ -560,6 +576,11 @@ func (in *EBSBlockDeviceSpec) DeepCopyInto(out *EBSBlockDeviceSpec) { *out = new(int64) **out = **in } + if in.ThroughputMib != nil { + in, out := &in.ThroughputMib, &out.ThroughputMib + *out = new(int32) + **out = **in + } if in.VolumeSize != nil { in, out := &in.VolumeSize, &out.VolumeSize *out = new(int64) @@ -762,6 +783,11 @@ func (in *GCPMachineProviderSpec) DeepCopyInto(out *GCPMachineProviderSpec) { *out = make([]GCPGPUConfig, len(*in)) copy(*out, *in) } + if in.ProvisioningModel != nil { + in, out := &in.ProvisioningModel, &out.ProvisioningModel + *out = new(GCPProvisioningModelType) + **out = **in + } out.ShieldedInstanceConfig = in.ShieldedInstanceConfig if in.ResourceManagerTags != nil { in, out := &in.ResourceManagerTags, &out.ResourceManagerTags @@ -898,6 +924,32 @@ func (in *GCPShieldedInstanceConfig) DeepCopy() *GCPShieldedInstanceConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostPlacement) DeepCopyInto(out *HostPlacement) { + *out = *in + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(HostAffinity) + **out = **in + } + if in.DedicatedHost != nil { + in, out := &in.DedicatedHost, &out.DedicatedHost + *out = new(DedicatedHost) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPlacement. +func (in *HostPlacement) DeepCopy() *HostPlacement { + if in == nil { + return nil + } + out := new(HostPlacement) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Image) DeepCopyInto(out *Image) { *out = *in @@ -1577,6 +1629,11 @@ func (in *ObjectMeta) DeepCopy() *ObjectMeta { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Placement) DeepCopyInto(out *Placement) { *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(HostPlacement) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go index 4a1b969a81ed..0d043ad603db 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go @@ -92,14 +92,24 @@ func (CPUOptions) SwaggerDoc() map[string]string { return map_CPUOptions } +var map_DedicatedHost = map[string]string{ + "": "DedicatedHost represents the configuration for the usage of dedicated host.", + "id": "id identifies the AWS Dedicated Host on which the instance must run. The value must start with \"h-\" followed by either 8 or 17 lowercase hexadecimal characters (0-9 and a-f). The use of 8 lowercase hexadecimal characters is for older legacy hosts that may not have been migrated to newer format. Must be either 10 or 19 characters in length.", +} + +func (DedicatedHost) SwaggerDoc() map[string]string { + return map_DedicatedHost +} + var map_EBSBlockDeviceSpec = map[string]string{ "": "EBSBlockDeviceSpec describes a block device for an EBS volume. 
https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsBlockDevice", "deleteOnTermination": "Indicates whether the EBS volume is deleted on machine termination.\n\nDeprecated: setting this field has no effect.", "encrypted": "Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes may only be attached to machines that support Amazon EBS encryption.", "kmsKey": "Indicates the KMS key that should be used to encrypt the Amazon EBS volume.", "iops": "The number of I/O operations per second (IOPS) that the volume supports. For io1, this represents the number of IOPS that are provisioned for the volume. For gp2, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information about General Purpose SSD baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the Amazon Elastic Compute Cloud User Guide.\n\nMinimal and maximal IOPS for io1 and gp2 are constrained. Please, check https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html for precise boundaries for individual volumes.\n\nCondition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.", + "throughputMib": "throughputMib is the throughput, in MiB/s, to provision for the volume. It is not applicable to all volume types.\n\nThis parameter is valid only for gp3 volumes. Valid Range: Minimum value of 125. Maximum value of 2000.\n\nWhen omitted, this means no opinion, and the platform is left to choose a reasonable default, which is subject to change over time. The current default is 125.", "volumeSize": "The size of the volume, in GiB.\n\nConstraints: 1-16384 for General Purpose SSD (gp2), 4-16384 for Provisioned IOPS SSD (io1), 500-16384 for Throughput Optimized HDD (st1), 500-16384 for Cold HDD (sc1), and 1-1024 for Magnetic (standard) volumes. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.\n\nDefault: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.", - "volumeType": "The volume type: gp2, io1, st1, sc1, or standard. Default: standard", + "volumeType": "volumeType can be of type gp2, gp3, io1, st1, sc1, or standard. Default: standard", } func (EBSBlockDeviceSpec) SwaggerDoc() map[string]string { @@ -116,6 +126,16 @@ func (Filter) SwaggerDoc() map[string]string { return map_Filter } +var map_HostPlacement = map[string]string{ + "": "HostPlacement is the type that will be used to configure the placement of AWS instances.", + "affinity": "affinity specifies the affinity setting for the instance. Allowed values are AnyAvailable and DedicatedHost. When Affinity is set to DedicatedHost, an instance started on a specific host always restarts on the same host if stopped. In this scenario, the `dedicatedHost` field must be set. When Affinity is set to AnyAvailable, and you stop and restart the instance, it can be restarted on any available host. When Affinity is set to AnyAvailable and the `dedicatedHost` field is defined, it runs on the specified Dedicated Host, but may move if stopped.", + "dedicatedHost": "dedicatedHost specifies the exact host that an instance should be restarted on if stopped. 
dedicatedHost is required when 'affinity' is set to DedicatedHost, and optional otherwise.", +} + +func (HostPlacement) SwaggerDoc() map[string]string { + return map_HostPlacement +} + var map_LoadBalancerReference = map[string]string{ "": "LoadBalancerReference is a reference to a load balancer on AWS.", } @@ -137,7 +157,8 @@ var map_Placement = map[string]string{ "": "Placement indicates where to create the instance in AWS", "region": "region is the region to use to create the instance", "availabilityZone": "availabilityZone is the availability zone of the instance", - "tenancy": "tenancy indicates if instance should run on shared or single-tenant hardware. There are supported 3 options: default, dedicated and host.", + "tenancy": "tenancy indicates if instance should run on shared or single-tenant hardware. There are supported 3 options: default, dedicated and host. When set to default, the instance runs on shared multi-tenant hardware. When set to dedicated, the instance runs on single-tenant hardware (any dedicated instance hardware). When set to host and the host object is not provided, the instance runs on a Dedicated Host with a best-effort restart on the same host. When set to host and the host object is provided with affinity `dedicatedHost` defined, the instance runs on the specified Dedicated Host.", + "host": "host configures placement on AWS Dedicated Hosts. This allows admins to assign instances to a specific host for a variety of needs, including regulatory compliance, to leverage existing per-socket or per-core software licenses (BYOL), and to gain visibility and control over instance placement on a physical server. When omitted, the instance is not constrained to a dedicated host.", } func (Placement) SwaggerDoc() map[string]string { @@ -452,6 +473,7 @@ var map_GCPMachineProviderSpec = map[string]string{ "projectID": "projectID is the project in which the GCP machine provider will create the VM.", "gpus": "gpus is a list of GPUs to be attached to the VM.", "preemptible": "preemptible indicates if created instance is preemptible.", + "provisioningModel": "provisioningModel is an optional field that determines the provisioning model for the GCP machine instance. Valid values are \"Spot\" and omitted. When set to Spot, the instance runs as a Google Cloud Spot instance which provides significant cost savings but may be preempted by Google Cloud Platform when resources are needed elsewhere. When omitted, the machine will be provisioned as a standard on-demand instance. This field cannot be used together with the preemptible field.", "onHostMaintenance": "onHostMaintenance determines the behavior when a maintenance event occurs that might cause the instance to reboot. This is required to be set to \"Terminate\" if you want to provision machine with attached GPUs. Otherwise, allowed values are \"Migrate\" and \"Terminate\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is \"Migrate\".", "restartPolicy": "restartPolicy determines the behavior when an instance crashes or the underlying infrastructure provider stops the instance as part of a maintenance event (default \"Always\"). Cannot be \"Always\" with preemptible instances. Otherwise, allowed values are \"Always\" and \"Never\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is \"Always\". 
RestartPolicy represents AutomaticRestart in GCP compute api", "shieldedInstanceConfig": "shieldedInstanceConfig is the Shielded VM configuration for the VM", @@ -604,6 +626,7 @@ var map_MachineStatus = map[string]string{ "phase": "phase represents the current phase of machine actuation. One of: Failed, Provisioning, Provisioned, Running, Deleting", "conditions": "conditions defines the current state of the Machine", "authoritativeAPI": "authoritativeAPI is the API that is authoritative for this resource. Valid values are MachineAPI, ClusterAPI and Migrating. This value is updated by the migration controller to reflect the authoritative API. Machine API and Cluster API controllers use this value to determine whether or not to reconcile the resource. When set to Migrating, the migration controller is currently performing the handover of authority from one API to the other.", + "synchronizedAPI": "synchronizedAPI holds the last stable value of authoritativeAPI. It is used to detect migration cancellation requests and to restore the resource to its previous state. Valid values are \"MachineAPI\" and \"ClusterAPI\". When omitted, the resource has not yet been reconciled by the migration controller.", "synchronizedGeneration": "synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. This field is set when the authoritative resource is updated and the sync controller has updated the non-authoritative resource to match.", } @@ -707,6 +730,7 @@ var map_MachineSetStatus = map[string]string{ "errorReason": "In the event that there is a terminal problem reconciling the replicas, both ErrorReason and ErrorMessage will be set. ErrorReason will be populated with a succinct value suitable for machine interpretation, while ErrorMessage will contain a more verbose string suitable for logging and human consumption.\n\nThese fields should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the MachineTemplate's spec or the configuration of the machine controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the machine controller, or the responsible machine controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the MachineSet object and/or logged in the controller's output.", "conditions": "conditions defines the current state of the MachineSet", "authoritativeAPI": "authoritativeAPI is the API that is authoritative for this resource. Valid values are MachineAPI, ClusterAPI and Migrating. This value is updated by the migration controller to reflect the authoritative API. Machine API and Cluster API controllers use this value to determine whether or not to reconcile the resource. When set to Migrating, the migration controller is currently performing the handover of authority from one API to the other.", + "synchronizedAPI": "synchronizedAPI holds the last stable value of authoritativeAPI. It is used to detect migration cancellation requests and to restore the resource to its previous state. Valid values are \"MachineAPI\" and \"ClusterAPI\". 
When omitted, the resource has not yet been reconciled by the migration controller.", "synchronizedGeneration": "synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. This field is set when the authoritative resource is updated and the sync controller has updated the non-authoritative resource to match.", } diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/types.go b/vendor/github.com/openshift/api/machineconfiguration/v1/types.go index cbb1fe077f4e..6673adeb1b28 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/types.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/types.go @@ -452,6 +452,34 @@ type MachineConfigPoolSpec struct { // +listMapKey=name // +kubebuilder:validation:MaxItems=100 PinnedImageSets []PinnedImageSetRef `json:"pinnedImageSets,omitempty"` + + // osImageStream specifies an OS stream to be used for the pool. + // + // This field can be optionally set to a known OSImageStream name to change the + // OS and Extension images with a well-known, tested, release-provided set of images. + // This enables a streamlined way of switching the pool's node OS to a different version + // than the cluster default, such as transitioning to a major RHEL version. + // + // When set, the referenced stream overrides the cluster-wide OS + // images for the pool with the OS and Extensions associated to stream. + // When omitted, the pool uses the cluster-wide default OS images. + // + // +openshift:enable:FeatureGate=OSStreams + // +optional + OSImageStream OSImageStreamReference `json:"osImageStream,omitempty,omitzero"` +} + +type OSImageStreamReference struct { + // name is a required reference to an OSImageStream to be used for the pool. + // + // It must be a valid RFC 1123 subdomain between 1 and 253 characters in length, + // consisting of lowercase alphanumeric characters, hyphens ('-'), and periods ('.'). + // + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." + Name string `json:"name,omitempty"` } type PinnedImageSetRef struct { @@ -517,6 +545,13 @@ type MachineConfigPoolStatus struct { // +listMapKey=poolSynchronizerType // +optional PoolSynchronizersStatus []PoolSynchronizerStatus `json:"poolSynchronizersStatus,omitempty"` + + // osImageStream specifies the last updated OSImageStream for the pool. + // + // When omitted, the pool is using the cluster-wide default OS images. 
+ // +openshift:enable:FeatureGate=OSStreams + // +optional + OSImageStream OSImageStreamReference `json:"osImageStream,omitempty,omitzero"` } // +kubebuilder:validation:XValidation:rule="self.machineCount >= self.updatedMachineCount", message="machineCount must be greater than or equal to updatedMachineCount" diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/types_machineconfignode.go b/vendor/github.com/openshift/api/machineconfiguration/v1/types_machineconfignode.go index 97460171b0a7..a51620fc5a57 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/types_machineconfignode.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/types_machineconfignode.go @@ -158,8 +158,89 @@ type MachineConfigNodeStatus struct { // +kubebuilder:validation:MaxItems=32 // +optional IrreconcilableChanges []IrreconcilableChangeDiff `json:"irreconcilableChanges,omitempty"` + // internalReleaseImage describes the status of the release payloads stored in the node. + // When specified, an internalReleaseImage custom resource exists on the cluster, and the specified images will be made available on the control plane nodes. + // This field will reflect the actual on-disk state of those release images. + // +openshift:enable:FeatureGate=NoRegistryClusterInstall + // +optional + InternalReleaseImage MachineConfigNodeStatusInternalReleaseImage `json:"internalReleaseImage,omitzero,omitempty"` +} + +// MachineConfigNodeStatusInternalReleaseImage holds information about the current and discovered release bundles for the observed machine +// config node. +type MachineConfigNodeStatusInternalReleaseImage struct { + // releases is a list of the release bundles currently owned and managed by the + // cluster. + // A release bundle content could be safely pulled only when its Conditions field + // contains at least an Available entry set to "True" and Degraded to "False". + // Entries must be unique, keyed on the name field. + // releases must contain at least one entry and must not exceed 32 entries. + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=32 + // +required + Releases []MachineConfigNodeStatusInternalReleaseImageRef `json:"releases,omitempty"` +} + +// MachineConfigNodeStatusInternalReleaseImageRef is used to provide a more detailed reference for +// a release bundle. +type MachineConfigNodeStatusInternalReleaseImageRef struct { + // conditions represent the observations of an internal release image current state. Valid types are: + // Mounted, Installing, Available, Removing and Degraded. + // + // If Mounted is true, that means that a valid ISO has been mounted on the current node. + // If Installing is true, that means that a new release bundle is currently being copied on the current node, and not yet completed. + // If Available is true, it means that the release has been previously installed on the current node, and it can be used. + // If Removing is true, it means that a release deletion is in progress on the current node, and not yet completed. + // If Degraded is true, that means something has gone wrong in the current node. + // + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=5 + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + // name indicates the desired release bundle identifier. This field is required and must be between 1 and 64 characters long. 
+ // The expected name format is ocp-release-bundle--. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=64 + // +kubebuilder:validation:XValidation:rule=`self.matches('^ocp-release-bundle-[0-9]+\\.[0-9]+\\.[0-9]+-[A-Za-z0-9._-]+$')`,message="must be ocp-release-bundle-- and <= 64 chars" + // +required + Name string `json:"name,omitempty"` + // image is an OCP release image referenced by digest. + // The format of the image pull spec is: host[:port][/namespace]/name@sha256:<digest>, + // where the digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. + // The length of the whole spec must be between 1 and 447 characters. + // The field is optional, and it will be provided after a release has been successfully installed. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=447 + // +kubebuilder:validation:XValidation:rule=`(self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$'))`,message="the OCI Image reference must end with a valid '@sha256:<digest>' suffix, where '<digest>' is 64 characters long" + // +kubebuilder:validation:XValidation:rule=`(self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$'))`,message="the OCI Image name should follow the host[:port][/namespace]/name format, resembling a valid URL without the scheme" + // +optional + Image string `json:"image,omitempty"` } +// InternalReleaseImageConditionType is each possible state for each possible InternalReleaseImageBundleStatus +// conditions type. +// +enum +type InternalReleaseImageConditionType string + +const ( + // InternalReleaseImageConditionTypeMounted describes a new release, not yet installed, that has been discovered when an ISO has been attached to + // the current node + InternalReleaseImageConditionTypeMounted InternalReleaseImageConditionType = "Mounted" + // InternalReleaseImageConditionTypeInstalling describes a new release that is getting installed on the current node. Due to the size of the data + // transferred, the operation could take several minutes + InternalReleaseImageConditionTypeInstalling InternalReleaseImageConditionType = "Installing" + // InternalReleaseImageConditionTypeAvailable describes a release that has been previously installed on the current node, ready to be consumed + InternalReleaseImageConditionTypeAvailable InternalReleaseImageConditionType = "Available" + // InternalReleaseImageConditionTypeRemoving describes an existing release that is getting removed from the current node + InternalReleaseImageConditionTypeRemoving InternalReleaseImageConditionType = "Removing" + // InternalReleaseImageConditionTypeDegraded describes a failure for the current release + InternalReleaseImageConditionTypeDegraded InternalReleaseImageConditionType = "Degraded" +) + // IrreconcilableChangeDiff holds an individual diff between the initial install-time MachineConfig // and the latest applied one caused by the presence of irreconcilable changes. 
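// A minimal sketch of how a consumer might interpret the release-bundle conditions defined
// above: a bundle is treated as ready only when Available reports True and Degraded does not.
// This is not part of the vendored API diff; the package name, the function name, and the use
// of the apimachinery condition helpers are assumptions for illustration only.
package example

import (
	mcfgv1 "github.com/openshift/api/machineconfiguration/v1"
	"k8s.io/apimachinery/pkg/api/meta"
)

// releaseBundleReady reports whether a release bundle observed on a node can be safely
// consumed, following the condition semantics documented on the conditions field.
func releaseBundleReady(ref mcfgv1.MachineConfigNodeStatusInternalReleaseImageRef) bool {
	available := meta.IsStatusConditionTrue(ref.Conditions, string(mcfgv1.InternalReleaseImageConditionTypeAvailable))
	degraded := meta.IsStatusConditionTrue(ref.Conditions, string(mcfgv1.InternalReleaseImageConditionTypeDegraded))
	return available && !degraded
}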
type IrreconcilableChangeDiff struct { diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.deepcopy.go index c8a7667fe764..5061d8b822e7 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.deepcopy.go @@ -820,6 +820,7 @@ func (in *MachineConfigNodeStatus) DeepCopyInto(out *MachineConfigNodeStatus) { *out = make([]IrreconcilableChangeDiff, len(*in)) copy(*out, *in) } + in.InternalReleaseImage.DeepCopyInto(&out.InternalReleaseImage) return } @@ -849,6 +850,52 @@ func (in *MachineConfigNodeStatusConfigImage) DeepCopy() *MachineConfigNodeStatu return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineConfigNodeStatusInternalReleaseImage) DeepCopyInto(out *MachineConfigNodeStatusInternalReleaseImage) { + *out = *in + if in.Releases != nil { + in, out := &in.Releases, &out.Releases + *out = make([]MachineConfigNodeStatusInternalReleaseImageRef, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigNodeStatusInternalReleaseImage. +func (in *MachineConfigNodeStatusInternalReleaseImage) DeepCopy() *MachineConfigNodeStatusInternalReleaseImage { + if in == nil { + return nil + } + out := new(MachineConfigNodeStatusInternalReleaseImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineConfigNodeStatusInternalReleaseImageRef) DeepCopyInto(out *MachineConfigNodeStatusInternalReleaseImageRef) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigNodeStatusInternalReleaseImageRef. +func (in *MachineConfigNodeStatusInternalReleaseImageRef) DeepCopy() *MachineConfigNodeStatusInternalReleaseImageRef { + if in == nil { + return nil + } + out := new(MachineConfigNodeStatusInternalReleaseImageRef) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MachineConfigNodeStatusMachineConfigVersion) DeepCopyInto(out *MachineConfigNodeStatusMachineConfigVersion) { *out = *in @@ -999,6 +1046,7 @@ func (in *MachineConfigPoolSpec) DeepCopyInto(out *MachineConfigPoolSpec) { *out = make([]PinnedImageSetRef, len(*in)) copy(*out, *in) } + out.OSImageStream = in.OSImageStream return } @@ -1035,6 +1083,7 @@ func (in *MachineConfigPoolStatus) DeepCopyInto(out *MachineConfigPoolStatus) { *out = make([]PoolSynchronizerStatus, len(*in)) copy(*out, *in) } + out.OSImageStream = in.OSImageStream return } @@ -1441,6 +1490,22 @@ func (in *NetworkInfo) DeepCopy() *NetworkInfo { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OSImageStreamReference) DeepCopyInto(out *OSImageStreamReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSImageStreamReference. +func (in *OSImageStreamReference) DeepCopy() *OSImageStreamReference { + if in == nil { + return nil + } + out := new(OSImageStreamReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { *out = *in diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml index 7d78ab96464f..1d96519e7e5c 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml @@ -29,14 +29,17 @@ controllerconfigs.machineconfiguration.openshift.io: Category: "" FeatureGates: - AWSClusterHostedDNSInstall + - AWSDualStackInstall - AzureClusterHostedDNSInstall + - AzureDualStackInstall - DualReplica - DyanmicServiceEndpointIBMCloud - GCPClusterHostedDNSInstall - - GCPCustomAPIEndpointsInstall - HighlyAvailableArbiter - HighlyAvailableArbiter+DualReplica - NutanixMultiSubnets + - OnPremDNSRecords + - VSphereHostVMGroupZonal - VSphereMultiNetworks FilenameOperatorName: machine-config FilenameOperatorOrdering: "01" @@ -120,6 +123,7 @@ machineconfignodes.machineconfiguration.openshift.io: - ImageModeStatusReporting - IrreconcilableMachineConfig - MachineConfigNodes + - NoRegistryClusterInstall FilenameOperatorName: machine-config FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_80" @@ -198,6 +202,7 @@ machineconfigpools.machineconfiguration.openshift.io: Capability: "" Category: "" FeatureGates: + - OSStreams - PinnedImages FilenameOperatorName: machine-config FilenameOperatorOrdering: "01" diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.swagger_doc_generated.go index 3a0b0646a697..650fc1709daf 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.swagger_doc_generated.go @@ -289,6 +289,7 @@ var map_MachineConfigPoolSpec = map[string]string{ "maxUnavailable": "maxUnavailable defines either an integer number or percentage of nodes in the pool that can go Unavailable during an update. This includes nodes Unavailable for any reason, including user initiated cordons, failing nodes, etc. The default value is 1.\n\nA value larger than 1 will mean multiple nodes going unavailable during the update, which may affect your workload stress on the remaining nodes. You cannot set this value to 0 to stop updates (it will default back to 1); to stop updates, use the 'paused' property instead. Drain will respect Pod Disruption Budgets (PDBs) such as etcd quorum guards, even if maxUnavailable is greater than one.", "configuration": "The targeted MachineConfig object for the machine config pool.", "pinnedImageSets": "pinnedImageSets specifies a sequence of PinnedImageSetRef objects for the pool. Nodes within this pool will preload and pin images defined in the PinnedImageSet. 
Before pulling images the MachineConfigDaemon will ensure the total uncompressed size of all the images does not exceed available resources. If the total size of the images exceeds the available resources the controller will report a Degraded status to the MachineConfigPool and not attempt to pull any images. Also to help ensure the kubelet can mitigate storage risk, the pinned_image configuration and subsequent service reload will happen only after all of the images have been pulled for each set. Images from multiple PinnedImageSets are loaded and pinned sequentially as listed. Duplicate and existing images will be skipped.\n\nAny failure to prefetch or pin images will result in a Degraded pool. Resolving these failures is the responsibility of the user. The admin should be proactive in ensuring adequate storage and proper image authentication exists in advance.", + "osImageStream": "osImageStream specifies an OS stream to be used for the pool.\n\nThis field can be optionally set to a known OSImageStream name to change the OS and Extension images with a well-known, tested, release-provided set of images. This enables a streamlined way of switching the pool's node OS to a different version than the cluster default, such as transitioning to a major RHEL version.\n\nWhen set, the referenced stream overrides the cluster-wide OS images for the pool with the OS and Extensions associated to stream. When omitted, the pool uses the cluster-wide default OS images.", } func (MachineConfigPoolSpec) SwaggerDoc() map[string]string { @@ -307,6 +308,7 @@ var map_MachineConfigPoolStatus = map[string]string{ "conditions": "conditions represents the latest available observations of current state.", "certExpirys": "certExpirys keeps track of important certificate expiration data", "poolSynchronizersStatus": "poolSynchronizersStatus is the status of the machines managed by the pool synchronizers.", + "osImageStream": "osImageStream specifies the last updated OSImageStream for the pool.\n\nWhen omitted, the pool is using the cluster-wide default OS images.", } func (MachineConfigPoolStatus) SwaggerDoc() map[string]string { @@ -346,6 +348,14 @@ func (NetworkInfo) SwaggerDoc() map[string]string { return map_NetworkInfo } +var map_OSImageStreamReference = map[string]string{ + "name": "name is a required reference to an OSImageStream to be used for the pool.\n\nIt must be a valid RFC 1123 subdomain between 1 and 253 characters in length, consisting of lowercase alphanumeric characters, hyphens ('-'), and periods ('.').", +} + +func (OSImageStreamReference) SwaggerDoc() map[string]string { + return map_OSImageStreamReference +} + var map_PinnedImageSetRef = map[string]string{ "name": "name is a reference to the name of a PinnedImageSet. Must adhere to RFC-1123 (https://tools.ietf.org/html/rfc1123). Made up of one of more period-separated (.) segments, where each segment consists of alphanumeric characters and hyphens (-), must begin and end with an alphanumeric character, and is at most 63 characters in length. The total length of the name must not exceed 253 characters.", } @@ -446,6 +456,7 @@ var map_MachineConfigNodeStatus = map[string]string{ "configImage": "configImage is an optional field for configuring the OS image to be used for this node. This field will only exist if the node belongs to a pool opted into on-cluster image builds, and will override any MachineConfig referenced OSImageURL fields. 
When omitted, this means that the Image Mode feature is not being used and the node will be up to date with the specific current rendered config version for the nodes MachinePool. When specified, the Image Mode feature is enabled and the contents of this field show the observed state of the node image. When Image Mode is enabled and a new MachineConfig is applied such that a new OS image build is not created, only the configVersion field will change. When Image Mode is enabled and a new MachineConfig is applied such that a new OS image build is created, then only the configImage field will change. It is also possible that both the configImage and configVersion change during the same update.", "pinnedImageSets": "pinnedImageSets describes the current and desired pinned image sets for this node.", "irreconcilableChanges": "irreconcilableChanges is an optional field that contains the observed differences between this nodes configuration and the target rendered MachineConfig. This field will be set when there are changes to the target rendered MachineConfig that can only be applied to new nodes joining the cluster. Entries must be unique, keyed on the fieldPath field. Must not exceed 32 entries.", + "internalReleaseImage": "internalReleaseImage describes the status of the release payloads stored in the node. When specified, an internalReleaseImage custom resource exists on the cluster, and the specified images will be made available on the control plane nodes. This field will reflect the actual on-disk state of those release images.", } func (MachineConfigNodeStatus) SwaggerDoc() map[string]string { @@ -462,6 +473,26 @@ func (MachineConfigNodeStatusConfigImage) SwaggerDoc() map[string]string { return map_MachineConfigNodeStatusConfigImage } +var map_MachineConfigNodeStatusInternalReleaseImage = map[string]string{ + "": "MachineConfigNodeStatusInternalReleaseImage holds information about the current and discovered release bundles for the observed machine config node.", + "releases": "releases is a list of the release bundles currently owned and managed by the cluster. A release bundle content could be safely pulled only when its Conditions field contains at least an Available entry set to \"True\" and Degraded to \"False\". Entries must be unique, keyed on the name field. releases must contain at least one entry and must not exceed 32 entries.", +} + +func (MachineConfigNodeStatusInternalReleaseImage) SwaggerDoc() map[string]string { + return map_MachineConfigNodeStatusInternalReleaseImage +} + +var map_MachineConfigNodeStatusInternalReleaseImageRef = map[string]string{ + "": "MachineConfigNodeStatusInternalReleaseImageRef is used to provide a more detailed reference for a release bundle.", + "conditions": "conditions represent the observations of an internal release image current state. Valid types are: Mounted, Installing, Available, Removing and Degraded.\n\nIf Mounted is true, that means that a valid ISO has been mounted on the current node. If Installing is true, that means that a new release bundle is currently being copied on the current node, and not yet completed. If Available is true, it means that the release has been previously installed on the current node, and it can be used. If Removing is true, it means that a release deletion is in progress on the current node, and not yet completed. If Degraded is true, that means something has gone wrong in the current node.", + "name": "name indicates the desired release bundle identifier. 
This field is required and must be between 1 and 64 characters long. The expected name format is ocp-release-bundle--.", + "image": "image is an OCP release image referenced by digest. The format of the image pull spec is: host[:port][/namespace]/name@sha256:<digest>, where the digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. The length of the whole spec must be between 1 and 447 characters. The field is optional, and it will be provided after a release has been successfully installed.", +} + +func (MachineConfigNodeStatusInternalReleaseImageRef) SwaggerDoc() map[string]string { + return map_MachineConfigNodeStatusInternalReleaseImageRef +} + var map_MachineConfigNodeStatusMachineConfigVersion = map[string]string{ "": "MachineConfigNodeStatusMachineConfigVersion holds the current and desired config versions as last updated in the MCN status. When the current and desired versions do not match, the machine config pool is processing an upgrade and the machine config node will monitor the upgrade process. When the current and desired versions do match, the machine config node will ignore these events given that certain operations happen both during the MCO's upgrade mode and the daily operations mode.", "current": "current is the name of the machine config currently in use on the node. This value is updated once the machine config daemon has completed the update of the configuration for the node. This value should match the desired version unless an upgrade is in progress. Must be a lowercase RFC-1123 subdomain name (https://tools.ietf.org/html/rfc1123) consisting of only lowercase alphanumeric characters, hyphens (-), and periods (.), and must start and end with an alphanumeric character, and be at most 253 characters in length.", diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/register.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/register.go index c60f521f9401..27610a91bc2e 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/register.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/register.go @@ -28,6 +28,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &MachineConfigNodeList{}, &PinnedImageSet{}, &PinnedImageSetList{}, + &OSImageStream{}, + &OSImageStreamList{}, + &InternalReleaseImage{}, + &InternalReleaseImageList{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) return nil diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types.go new file mode 100644 index 000000000000..dd5792602b59 --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types.go @@ -0,0 +1,10 @@ +package v1alpha1 + +// ImageDigestFormat is a type that conforms to the format host[:port][/namespace]/name@sha256:<digest>. +// The digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. +// The length of the field must be between 1 and 447 characters. 
+// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=447 +// +kubebuilder:validation:XValidation:rule=`(self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$'))`,message="the OCI Image reference must end with a valid '@sha256:<digest>' suffix, where '<digest>' is 64 characters long" +// +kubebuilder:validation:XValidation:rule=`(self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$'))`,message="the OCI Image name should follow the host[:port][/namespace]/name format, resembling a valid URL without the scheme" +type ImageDigestFormat string diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_internalreleaseimage.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_internalreleaseimage.go new file mode 100644 index 000000000000..cd888c967835 --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_internalreleaseimage.go @@ -0,0 +1,178 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=internalreleaseimages,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2510 +// +openshift:file-pattern=cvoRunLevel=0000_80,operatorName=machine-config,operatorOrdering=01 +// +openshift:enable:FeatureGate=NoRegistryClusterInstall +// +kubebuilder:metadata:labels=openshift.io/operator-managed= +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'cluster'",message="internalreleaseimage is a singleton, .metadata.name must be 'cluster'" + +// InternalReleaseImage is used to keep track of and manage a set +// of release bundles (OCP and OLM operator images) that are stored +// on the control plane nodes. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type InternalReleaseImage struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +required + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec describes the configuration of this internal release image. + // +required + Spec InternalReleaseImageSpec `json:"spec,omitempty,omitzero"` + + // status describes the last observed state of this internal release image. + // +optional + Status InternalReleaseImageStatus `json:"status,omitempty,omitzero"` +} + +// InternalReleaseImageSpec defines the desired state of an InternalReleaseImage. +type InternalReleaseImageSpec struct { + // releases is a list of release bundle identifiers that the user wants to + // add/remove to/from the control plane nodes. + // Entries must be unique, keyed on the name field. + // releases must contain at least one entry and must not exceed 16 entries. + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=16 + // +listType=map + // +listMapKey=name + // +required + Releases []InternalReleaseImageRef `json:"releases,omitempty"` +} + +// InternalReleaseImageRef is used to provide a simple reference for a release +// bundle. 
Currently it contains only the name field. +type InternalReleaseImageRef struct { + // name indicates the desired release bundle identifier. This field is required and must be between 1 and 64 characters long. + // The expected name format is ocp-release-bundle--. + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=64 + // +kubebuilder:validation:XValidation:rule=`self.matches('^ocp-release-bundle-[0-9]+\\.[0-9]+\\.[0-9]+-[A-Za-z0-9._-]+$')`,message="must be ocp-release-bundle-- and <= 64 chars" + Name string `json:"name,omitempty"` +} + +// InternalReleaseImageStatus describes the current state of a InternalReleaseImage. +type InternalReleaseImageStatus struct { + // conditions represent the observations of the InternalReleaseImage controller current state. + // Valid types are: Degraded. + // If Degraded is true, that means something has gone wrong in the controller. + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=20 + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + // releases is a list of the release bundles currently owned and managed by the + // cluster. + // A release bundle content could be safely pulled only when its Conditions field + // contains at least an Available entry set to "True" and Degraded to "False". + // Entries must be unique, keyed on the name field. + // releases must contain at least one entry and must not exceed 32 entries. + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=32 + // +required + Releases []InternalReleaseImageBundleStatus `json:"releases,omitempty"` +} + +// InternalReleaseImageStatusConditionType describes the possible states for InternalReleaseImageStatus. +// +enum +type InternalReleaseImageStatusConditionType string + +const ( + // InternalReleaseImageStatusConditionTypeDegraded describes a failure in the controller. + InternalReleaseImageStatusConditionTypeDegraded InternalReleaseImageStatusConditionType = "Degraded" +) + +type InternalReleaseImageBundleStatus struct { + // conditions represent the observations of an internal release image current state. Valid types are: + // Mounted, Installing, Available, Removing and Degraded. + // + // If Mounted is true, that means that a valid ISO has been discovered and mounted on one of the cluster nodes. + // If Installing is true, that means that a new release bundle is currently being copied on one (or more) cluster nodes, and not yet completed. + // If Available is true, it means that the release has been previously installed on all the cluster nodes, and it can be used. + // If Removing is true, it means that a release deletion is in progress on one (or more) cluster nodes, and not yet completed. + // If Degraded is true, that means something has gone wrong (possibly on one or more cluster nodes). + // + // In general, after installing a new release bundle, it is required to wait for the Conditions "Available" to become "True" (and all + // the other conditions to be equal to "False") before being able to pull its content. + // + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=5 + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + // name indicates the desired release bundle identifier. This field is required and must be between 1 and 64 characters long. 
+ // The expected name format is ocp-release-bundle--. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=64 + // +kubebuilder:validation:XValidation:rule=`self.matches('^ocp-release-bundle-[0-9]+\\.[0-9]+\\.[0-9]+-[A-Za-z0-9._-]+$')`,message="must be ocp-release-bundle-- and <= 64 chars" + // +required + Name string `json:"name,omitempty"` + // image is an OCP release image referenced by digest. + // The format of the image pull spec is: host[:port][/namespace]/name@sha256:<digest>, + // where the digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. + // The length of the whole spec must be between 1 and 447 characters. + // The field is optional, and it will be provided after a release has been successfully installed. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=447 + // +kubebuilder:validation:XValidation:rule=`(self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$'))`,message="the OCI Image reference must end with a valid '@sha256:<digest>' suffix, where '<digest>' is 64 characters long" + // +kubebuilder:validation:XValidation:rule=`(self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$'))`,message="the OCI Image name should follow the host[:port][/namespace]/name format, resembling a valid URL without the scheme" + // +optional + Image string `json:"image,omitempty"` +} + +// InternalReleaseImageConditionType is each possible state for each possible InternalReleaseImageBundleStatus +// conditions type. +// +enum +type InternalReleaseImageConditionType string + +const ( + // InternalReleaseImageConditionTypeMounted describes a new release, not yet installed, that has been discovered when an ISO has been attached to + // one of the control plane nodes + InternalReleaseImageConditionTypeMounted InternalReleaseImageConditionType = "Mounted" + // InternalReleaseImageConditionTypeInstalling describes a new release that is getting installed in the cluster. Due to the size of the data + // transferred, the operation could take several minutes. The condition will remain in this state until all the control plane nodes have + // completed the installing operation + InternalReleaseImageConditionTypeInstalling InternalReleaseImageConditionType = "Installing" + // InternalReleaseImageConditionTypeAvailable describes a release that has been successfully installed in the cluster, ready to be consumed. This + // means that the release has been successfully installed on all the control plane nodes + InternalReleaseImageConditionTypeAvailable InternalReleaseImageConditionType = "Available" + // InternalReleaseImageConditionTypeRemoving describes an existing release that is getting removed from the cluster. The condition will remain in this + // state until all the control plane nodes have completed the removal operation + InternalReleaseImageConditionTypeRemoving InternalReleaseImageConditionType = "Removing" + // InternalReleaseImageConditionTypeDegraded describes a failure that happened in one or more control plane nodes for the current release + InternalReleaseImageConditionTypeDegraded InternalReleaseImageConditionType = "Degraded" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InternalReleaseImageList is a list of InternalReleaseImage resources +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type InternalReleaseImageList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []InternalReleaseImage `json:"items"` +} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_osimagestream.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_osimagestream.go new file mode 100644 index 000000000000..cb6163ddb739 --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_osimagestream.go @@ -0,0 +1,131 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OSImageStream describes a set of streams and associated images available +// for the MachineConfigPools to be used as base OS images. +// +// The resource is a singleton named "cluster". +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=osimagestreams,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2555 +// +openshift:file-pattern=cvoRunLevel=0000_80,operatorName=machine-config,operatorOrdering=01 +// +openshift:enable:FeatureGate=OSStreams +// +kubebuilder:metadata:labels=openshift.io/operator-managed= +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'cluster'",message="osimagestream is a singleton, .metadata.name must be 'cluster'" +type OSImageStream struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec contains the desired OSImageStream configuration. + // +required + Spec *OSImageStreamSpec `json:"spec,omitempty"` + + // status describes the last observed state of this OSImageStream. + // Populated by the MachineConfigOperator after reading release metadata. + // When not present, the controller has not yet reconciled this resource. + // +optional + Status OSImageStreamStatus `json:"status,omitempty,omitzero"` +} + +// OSImageStreamStatus describes the current state of an OSImageStream. +// +kubebuilder:validation:XValidation:rule="self.defaultStream in self.availableStreams.map(s, s.name)",message="defaultStream must reference a stream name from availableStreams" +type OSImageStreamStatus struct { + + // availableStreams is a list of the available OS Image Streams that can be + // used as the base image for MachineConfigPools. + // availableStreams is required, must have at least one item, must not exceed + // 100 items, and must have unique entries keyed on the name field.
+ // + // +required + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=100 + // +listType=map + // +listMapKey=name + AvailableStreams []OSImageStreamSet `json:"availableStreams,omitempty"` + + // defaultStream is the name of the stream that should be used as the default + // when no specific stream is requested by a MachineConfigPool. + // + // It must be a valid RFC 1123 subdomain between 1 and 253 characters in length, + // consisting of lowercase alphanumeric characters, hyphens ('-'), and periods ('.'), + // and must reference the name of one of the streams in availableStreams. + // + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." + DefaultStream string `json:"defaultStream,omitempty"` +} + +// OSImageStreamSpec defines the desired state of an OSImageStream. +type OSImageStreamSpec struct { +} + +type OSImageStreamSet struct { + // name is the required identifier of the stream. + // + // name is determined by the operator based on the OCI label of the + // discovered OS or Extension Image. + // + // Must be a valid RFC 1123 subdomain between 1 and 253 characters in length, + // consisting of lowercase alphanumeric characters, hyphens ('-'), and periods ('.'). + // + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." + Name string `json:"name,omitempty"` + + // osImage is a required OS Image referenced by digest. + // + // osImage contains the immutable, fundamental operating system components, including the kernel + // and base utilities, that define the core environment for the node's host operating system. + // + // The format of the image pull spec is: host[:port][/namespace]/name@sha256:<digest>, + // where the digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. + // The length of the whole spec must be between 1 and 447 characters. + // +required + OSImage ImageDigestFormat `json:"osImage,omitempty"` + + // osExtensionsImage is a required OS Extensions Image referenced by digest. + // + // osExtensionsImage bundles the extra repositories used to enable extensions, augmenting + // the base operating system without modifying the underlying immutable osImage. + // + // The format of the image pull spec is: host[:port][/namespace]/name@sha256:<digest>, + // where the digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. + // The length of the whole spec must be between 1 and 447 characters. + // +required + OSExtensionsImage ImageDigestFormat `json:"osExtensionsImage,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OSImageStreamList is a list of OSImageStream resources +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
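// Illustrative sketch, not part of the API or of this change: the XValidation rule on
// OSImageStreamStatus guarantees that defaultStream always names an entry of
// availableStreams, so resolving the default stream set is a plain lookup. The helper
// name defaultStreamSet is an assumption for illustration only.
//
//	// defaultStreamSet returns the OSImageStreamSet named by defaultStream,
//	// or false if the status has not been populated yet.
//	func defaultStreamSet(st OSImageStreamStatus) (OSImageStreamSet, bool) {
//		for _, s := range st.AvailableStreams {
//			if s.Name == st.DefaultStream {
//				return s, true
//			}
//		}
//		return OSImageStreamSet{}, false
//	}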
+// +openshift:compatibility-gen:level=4 +type OSImageStreamList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []OSImageStream `json:"items"` +} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_pinnedimageset.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_pinnedimageset.go index 7373c610a0c0..4708609fc5d0 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_pinnedimageset.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_pinnedimageset.go @@ -67,15 +67,11 @@ type PinnedImageSetSpec struct { type PinnedImageRef struct { // name is an OCI Image referenced by digest. - // - // The format of the image ref is: - // host[:port][/namespace]/name@sha256: + // The format of the image pull spec is: host[:port][/namespace]/name@sha256:, + // where the digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. + // The length of the whole spec must be between 1 to 447 characters. // +required - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=447 - // +kubebuilder:validation:XValidation:rule=`self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$')`,message="the OCI Image reference must end with a valid '@sha256:' suffix, where '' is 64 characters long" - // +kubebuilder:validation:XValidation:rule=`self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$')`,message="the OCI Image name should follow the host[:port][/namespace]/name format, resembling a valid URL without the scheme" - Name string `json:"name"` + Name ImageDigestFormat `json:"name,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.deepcopy.go index 5e9e7a8c089c..69b63f67789e 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.deepcopy.go @@ -10,6 +10,157 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InternalReleaseImage) DeepCopyInto(out *InternalReleaseImage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InternalReleaseImage. +func (in *InternalReleaseImage) DeepCopy() *InternalReleaseImage { + if in == nil { + return nil + } + out := new(InternalReleaseImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InternalReleaseImage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InternalReleaseImageBundleStatus) DeepCopyInto(out *InternalReleaseImageBundleStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InternalReleaseImageBundleStatus. +func (in *InternalReleaseImageBundleStatus) DeepCopy() *InternalReleaseImageBundleStatus { + if in == nil { + return nil + } + out := new(InternalReleaseImageBundleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InternalReleaseImageList) DeepCopyInto(out *InternalReleaseImageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]InternalReleaseImage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InternalReleaseImageList. +func (in *InternalReleaseImageList) DeepCopy() *InternalReleaseImageList { + if in == nil { + return nil + } + out := new(InternalReleaseImageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InternalReleaseImageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InternalReleaseImageRef) DeepCopyInto(out *InternalReleaseImageRef) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InternalReleaseImageRef. +func (in *InternalReleaseImageRef) DeepCopy() *InternalReleaseImageRef { + if in == nil { + return nil + } + out := new(InternalReleaseImageRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InternalReleaseImageSpec) DeepCopyInto(out *InternalReleaseImageSpec) { + *out = *in + if in.Releases != nil { + in, out := &in.Releases, &out.Releases + *out = make([]InternalReleaseImageRef, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InternalReleaseImageSpec. +func (in *InternalReleaseImageSpec) DeepCopy() *InternalReleaseImageSpec { + if in == nil { + return nil + } + out := new(InternalReleaseImageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InternalReleaseImageStatus) DeepCopyInto(out *InternalReleaseImageStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Releases != nil { + in, out := &in.Releases, &out.Releases + *out = make([]InternalReleaseImageBundleStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InternalReleaseImageStatus. +func (in *InternalReleaseImageStatus) DeepCopy() *InternalReleaseImageStatus { + if in == nil { + return nil + } + out := new(InternalReleaseImageStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MCOObjectReference) DeepCopyInto(out *MCOObjectReference) { *out = *in @@ -183,6 +334,124 @@ func (in *MachineConfigNodeStatusPinnedImageSet) DeepCopy() *MachineConfigNodeSt return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OSImageStream) DeepCopyInto(out *OSImageStream) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(OSImageStreamSpec) + **out = **in + } + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSImageStream. +func (in *OSImageStream) DeepCopy() *OSImageStream { + if in == nil { + return nil + } + out := new(OSImageStream) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OSImageStream) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OSImageStreamList) DeepCopyInto(out *OSImageStreamList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OSImageStream, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSImageStreamList. +func (in *OSImageStreamList) DeepCopy() *OSImageStreamList { + if in == nil { + return nil + } + out := new(OSImageStreamList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OSImageStreamList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OSImageStreamSet) DeepCopyInto(out *OSImageStreamSet) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSImageStreamSet. 
+func (in *OSImageStreamSet) DeepCopy() *OSImageStreamSet { + if in == nil { + return nil + } + out := new(OSImageStreamSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OSImageStreamSpec) DeepCopyInto(out *OSImageStreamSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSImageStreamSpec. +func (in *OSImageStreamSpec) DeepCopy() *OSImageStreamSpec { + if in == nil { + return nil + } + out := new(OSImageStreamSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OSImageStreamStatus) DeepCopyInto(out *OSImageStreamStatus) { + *out = *in + if in.AvailableStreams != nil { + in, out := &in.AvailableStreams, &out.AvailableStreams + *out = make([]OSImageStreamSet, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSImageStreamStatus. +func (in *OSImageStreamStatus) DeepCopy() *OSImageStreamStatus { + if in == nil { + return nil + } + out := new(OSImageStreamStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PinnedImageRef) DeepCopyInto(out *PinnedImageRef) { *out = *in diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.featuregated-crd-manifests.yaml index 940491093038..dc82dc2049a6 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.featuregated-crd-manifests.yaml @@ -1,3 +1,27 @@ +internalreleaseimages.machineconfiguration.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2510 + CRDName: internalreleaseimages.machineconfiguration.openshift.io + Capability: "" + Category: "" + FeatureGates: + - NoRegistryClusterInstall + FilenameOperatorName: machine-config + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_80" + GroupName: machineconfiguration.openshift.io + HasStatus: true + KindName: InternalReleaseImage + Labels: + openshift.io/operator-managed: "" + PluralName: internalreleaseimages + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - NoRegistryClusterInstall + Version: v1alpha1 + machineconfignodes.machineconfiguration.openshift.io: Annotations: {} ApprovedPRNumber: https://github.com/openshift/api/pull/2256 @@ -74,6 +98,30 @@ machineconfignodes.machineconfiguration.openshift.io: - MachineConfigNodes Version: v1alpha1 +osimagestreams.machineconfiguration.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2555 + CRDName: osimagestreams.machineconfiguration.openshift.io + Capability: "" + Category: "" + FeatureGates: + - OSStreams + FilenameOperatorName: machine-config + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_80" + GroupName: machineconfiguration.openshift.io + HasStatus: true + KindName: OSImageStream + Labels: + openshift.io/operator-managed: "" + PluralName: osimagestreams + PrinterColumns: [] + Scope: Cluster + ShortNames: null + 
TopLevelFeatureGates: + - OSStreams + Version: v1alpha1 + pinnedimagesets.machineconfiguration.openshift.io: Annotations: {} ApprovedPRNumber: https://github.com/openshift/api/pull/1713 diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.swagger_doc_generated.go index a5b0dcfb31a4..144d295afeb2 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.swagger_doc_generated.go @@ -11,6 +11,64 @@ package v1alpha1 // Those methods can be generated by using hack/update-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE +var map_InternalReleaseImage = map[string]string{ + "": "InternalReleaseImage is used to keep track and manage a set of release bundles (OCP and OLM operators images) that are stored into the control planes nodes.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec describes the configuration of this internal release image.", + "status": "status describes the last observed state of this internal release image.", +} + +func (InternalReleaseImage) SwaggerDoc() map[string]string { + return map_InternalReleaseImage +} + +var map_InternalReleaseImageBundleStatus = map[string]string{ + "conditions": "conditions represent the observations of an internal release image current state. Valid types are: Mounted, Installing, Available, Removing and Degraded.\n\nIf Mounted is true, that means that a valid ISO has been discovered and mounted on one of the cluster nodes. If Installing is true, that means that a new release bundle is currently being copied on one (or more) cluster nodes, and not yet completed. If Available is true, it means that the release has been previously installed on all the cluster nodes, and it can be used. If Removing is true, it means that a release deletion is in progress on one (or more) cluster nodes, and not yet completed. If Degraded is true, that means something has gone wrong (possibly on one or more cluster nodes).\n\nIn general, after installing a new release bundle, it is required to wait for the Conditions \"Available\" to become \"True\" (and all the other conditions to be equal to \"False\") before being able to pull its content.", + "name": "name indicates the desired release bundle identifier. This field is required and must be between 1 and 64 characters long. The expected name format is ocp-release-bundle--.", + "image": "image is an OCP release image referenced by digest. The format of the image pull spec is: host[:port][/namespace]/name@sha256:, where the digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. The length of the whole spec must be between 1 to 447 characters. 
The field is optional, and it will be provided after a release will be successfully installed.", +} + +func (InternalReleaseImageBundleStatus) SwaggerDoc() map[string]string { + return map_InternalReleaseImageBundleStatus +} + +var map_InternalReleaseImageList = map[string]string{ + "": "InternalReleaseImageList is a list of InternalReleaseImage resources\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (InternalReleaseImageList) SwaggerDoc() map[string]string { + return map_InternalReleaseImageList +} + +var map_InternalReleaseImageRef = map[string]string{ + "": "InternalReleaseImageRef is used to provide a simple reference for a release bundle. Currently it contains only the name field.", + "name": "name indicates the desired release bundle identifier. This field is required and must be between 1 and 64 characters long. The expected name format is ocp-release-bundle--.", +} + +func (InternalReleaseImageRef) SwaggerDoc() map[string]string { + return map_InternalReleaseImageRef +} + +var map_InternalReleaseImageSpec = map[string]string{ + "": "InternalReleaseImageSpec defines the desired state of a InternalReleaseImage.", + "releases": "releases is a list of release bundle identifiers that the user wants to add/remove to/from the control plane nodes. Entries must be unique, keyed on the name field. releases must contain at least one entry and must not exceed 16 entries.", +} + +func (InternalReleaseImageSpec) SwaggerDoc() map[string]string { + return map_InternalReleaseImageSpec +} + +var map_InternalReleaseImageStatus = map[string]string{ + "": "InternalReleaseImageStatus describes the current state of a InternalReleaseImage.", + "conditions": "conditions represent the observations of the InternalReleaseImage controller current state. Valid types are: Degraded. If Degraded is true, that means something has gone wrong in the controller.", + "releases": "releases is a list of the release bundles currently owned and managed by the cluster. A release bundle content could be safely pulled only when its Conditions field contains at least an Available entry set to \"True\" and Degraded to \"False\". Entries must be unique, keyed on the name field. releases must contain at least one entry and must not exceed 32 entries.", +} + +func (InternalReleaseImageStatus) SwaggerDoc() map[string]string { + return map_InternalReleaseImageStatus +} + var map_MCOObjectReference = map[string]string{ "": "MCOObjectReference holds information about an object the MCO either owns or modifies in some way", "name": "name is the name of the object being referenced. For example, this can represent a machine config pool or node name. 
Must be a lowercase RFC-1123 subdomain name (https://tools.ietf.org/html/rfc1123) consisting of only lowercase alphanumeric characters, hyphens (-), and periods (.), and must start and end with an alphanumeric character, and be at most 253 characters in length.", @@ -96,8 +154,56 @@ func (MachineConfigNodeStatusPinnedImageSet) SwaggerDoc() map[string]string { return map_MachineConfigNodeStatusPinnedImageSet } +var map_OSImageStream = map[string]string{ + "": "OSImageStream describes a set of streams and associated images available for the MachineConfigPools to be used as base OS images.\n\nThe resource is a singleton named \"cluster\".\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec contains the desired OSImageStream config configuration.", + "status": "status describes the last observed state of this OSImageStream. Populated by the MachineConfigOperator after reading release metadata. When not present, the controller has not yet reconciled this resource.", +} + +func (OSImageStream) SwaggerDoc() map[string]string { + return map_OSImageStream +} + +var map_OSImageStreamList = map[string]string{ + "": "OSImageStreamList is a list of OSImageStream resources\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (OSImageStreamList) SwaggerDoc() map[string]string { + return map_OSImageStreamList +} + +var map_OSImageStreamSet = map[string]string{ + "name": "name is the required identifier of the stream.\n\nname is determined by the operator based on the OCI label of the discovered OS or Extension Image.\n\nMust be a valid RFC 1123 subdomain between 1 and 253 characters in length, consisting of lowercase alphanumeric characters, hyphens ('-'), and periods ('.').", + "osImage": "osImage is a required OS Image referenced by digest.\n\nosImage contains the immutable, fundamental operating system components, including the kernel and base utilities, that define the core environment for the node's host operating system.\n\nThe format of the image pull spec is: host[:port][/namespace]/name@sha256:, where the digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. The length of the whole spec must be between 1 to 447 characters.", + "osExtensionsImage": "osExtensionsImage is a required OS Extensions Image referenced by digest.\n\nosExtensionsImage bundles the extra repositories used to enable extensions, augmenting the base operating system without modifying the underlying immutable osImage.\n\nThe format of the image pull spec is: host[:port][/namespace]/name@sha256:, where the digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. 
The length of the whole spec must be between 1 to 447 characters.", +} + +func (OSImageStreamSet) SwaggerDoc() map[string]string { + return map_OSImageStreamSet +} + +var map_OSImageStreamSpec = map[string]string{ + "": "OSImageStreamSpec defines the desired state of a OSImageStream.", +} + +func (OSImageStreamSpec) SwaggerDoc() map[string]string { + return map_OSImageStreamSpec +} + +var map_OSImageStreamStatus = map[string]string{ + "": "OSImageStreamStatus describes the current state of a OSImageStream", + "availableStreams": "availableStreams is a list of the available OS Image Streams that can be used as the base image for MachineConfigPools. availableStreams is required, must have at least one item, must not exceed 100 items, and must have unique entries keyed on the name field.", + "defaultStream": "defaultStream is the name of the stream that should be used as the default when no specific stream is requested by a MachineConfigPool.\n\nIt must be a valid RFC 1123 subdomain between 1 and 253 characters in length, consisting of lowercase alphanumeric characters, hyphens ('-'), and periods ('.'), and must reference the name of one of the streams in availableStreams.", +} + +func (OSImageStreamStatus) SwaggerDoc() map[string]string { + return map_OSImageStreamStatus +} + var map_PinnedImageRef = map[string]string{ - "name": "name is an OCI Image referenced by digest.\n\nThe format of the image ref is: host[:port][/namespace]/name@sha256:", + "name": "name is an OCI Image referenced by digest. The format of the image pull spec is: host[:port][/namespace]/name@sha256:, where the digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. The length of the whole spec must be between 1 to 447 characters.", } func (PinnedImageRef) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/operator/v1/types_console.go b/vendor/github.com/openshift/api/operator/v1/types_console.go index e030a65c8601..35795b2b71b8 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_console.go +++ b/vendor/github.com/openshift/api/operator/v1/types_console.go @@ -107,6 +107,9 @@ const ( // gettingStartedBanner is the name of the 'Getting started resources' banner in the console UI Overview page. GettingStartedBanner ConsoleCapabilityName = "GettingStartedBanner" + + // guidedTour is the name of the 'Guided Tour' feature in console UI. + GuidedTour ConsoleCapabilityName = "GuidedTour" ) // CapabilityState defines the state of the capability in the console UI. @@ -134,8 +137,8 @@ type CapabilityVisibility struct { // Capabilities contains set of UI capabilities and their state in the console UI. type Capability struct { // name is the unique name of a capability. - // Available capabilities are LightspeedButton and GettingStartedBanner. - // +kubebuilder:validation:Enum:="LightspeedButton";"GettingStartedBanner" + // Available capabilities are LightspeedButton, GettingStartedBanner, and GuidedTour. + // +kubebuilder:validation:Enum:="LightspeedButton";"GettingStartedBanner";"GuidedTour" // +required Name ConsoleCapabilityName `json:"name"` // visibility defines the visibility state of the capability. @@ -281,10 +284,10 @@ type ConsoleCustomization struct { // capabilities defines an array of capabilities that can be interacted with in the console UI. // Each capability defines a visual state that can be interacted with the console to render in the UI. - // Available capabilities are LightspeedButton and GettingStartedBanner. 
+ // Available capabilities are LightspeedButton, GettingStartedBanner, and GuidedTour. // Each of the available capabilities may appear only once in the list. // +kubebuilder:validation:MinItems=1 - // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:MaxItems=3 // +listType=map // +listMapKey=name // +optional diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go index 279990448249..53c71aabb657 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go @@ -81,7 +81,6 @@ const ( CinderCSIDriver CSIDriverName = "cinder.csi.openstack.org" VSphereCSIDriver CSIDriverName = "csi.vsphere.vmware.com" ManilaCSIDriver CSIDriverName = "manila.csi.openstack.org" - OvirtCSIDriver CSIDriverName = "csi.ovirt.org" KubevirtCSIDriver CSIDriverName = "csi.kubevirt.io" SharedResourcesCSIDriver CSIDriverName = "csi.sharedresource.openshift.io" AlibabaDiskCSIDriver CSIDriverName = "diskplugin.csi.alibabacloud.com" diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go index 2dac08f099bb..d54352f2ceed 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/operator/v1/types_ingress.go @@ -35,6 +35,7 @@ import ( // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 +// +kubebuilder:validation:XValidation:rule="!has(self.spec.domain) || size('router-' + self.metadata.name + '.' + self.spec.domain) <= 253",message="The combined 'router-' + metadata.name + '.' + .spec.domain cannot exceed 253 characters" type IngressController struct { metav1.TypeMeta `json:",inline"` @@ -68,6 +69,22 @@ type IngressControllerSpec struct { // // If empty, defaults to ingress.config.openshift.io/cluster .spec.domain. // + // The domain value must be a valid DNS name. It must consist of lowercase + // alphanumeric characters, '-' or '.', and each label must start and end + // with an alphanumeric character and not exceed 63 characters. Maximum + // length of a valid DNS domain is 253 characters. + // + // The implementation may add a prefix such as "router-default." to the domain + // when constructing the router canonical hostname. To ensure the resulting + // hostname does not exceed the DNS maximum length of 253 characters, + // the domain length is additionally validated at the IngressController object + // level. For the maximum length of the domain value itself, the shortest + // possible variant of the prefix and the ingress controller name was considered + // for example "router-a." + // + // +kubebuilder:validation:MaxLength=244 + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="domain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" + // +kubebuilder:validation:XValidation:rule="self.split('.').all(label, size(label) <= 63)",message="each DNS label must not exceed 63 characters" // +optional Domain string `json:"domain,omitempty"` @@ -281,9 +298,9 @@ type IngressControllerSpec struct { // case HAProxy handles it in the old process and closes // the connection after sending the response. 
// - // - HAProxy's `timeout http-keep-alive` duration expires - // (300 seconds in OpenShift's configuration, not - // configurable). + // - HAProxy's `timeout http-keep-alive` duration expires. + // By default this is 300 seconds, but it can be changed + // using httpKeepAliveTimeout tuning option. // // - The client's keep-alive timeout expires, causing the // client to close the connection. @@ -327,6 +344,47 @@ type IngressControllerSpec struct { // +kubebuilder:default:="Immediate" // +default="Immediate" IdleConnectionTerminationPolicy IngressControllerConnectionTerminationPolicy `json:"idleConnectionTerminationPolicy,omitempty"` + + // closedClientConnectionPolicy controls how the IngressController + // behaves when the client closes the TCP connection while the TLS + // handshake or HTTP request is in progress. This option maps directly + // to HAProxy’s "abortonclose" option. + // + // Valid values are: "Abort" and "Continue". + // The default value is "Continue". + // + // When set to "Abort", the router will stop processing the TLS handshake + // if it is in progress, and it will not send an HTTP request to the backend server + // if the request has not yet been sent when the client closes the connection. + // + // When set to "Continue", the router will complete the TLS handshake + // if it is in progress, or send an HTTP request to the backend server + // and wait for the backend server's response, regardless of + // whether the client has closed the connection. + // + // Setting "Abort" can help free CPU resources otherwise spent on TLS computation + // for connections the client has already closed, and can reduce request queue + // size, thereby reducing the load on saturated backend servers. + // + // Important Considerations: + // + // - The default policy ("Continue") is HTTP-compliant, and requests + // for aborted client connections will still be served. + // Use the "Continue" policy to allow a client to send a request + // and then immediately close its side of the connection while + // still receiving a response on the half-closed connection. + // + // - When clients use keep-alive connections, the most common case for premature + // closure is when the user wants to cancel the transfer or when a timeout + // occurs. In that case, the "Abort" policy may be used to reduce resource consumption. + // + // - Using RSA keys larger than 2048 bits can significantly slow down + // TLS computations. Consider using the "Abort" policy to reduce CPU usage. + // + // +optional + // +kubebuilder:default:="Continue" + // +default="Continue" + ClosedClientConnectionPolicy IngressControllerClosedClientConnectionPolicy `json:"closedClientConnectionPolicy,omitempty"` } // httpCompressionPolicy turns on compression for the specified MIME types. @@ -1867,6 +1925,36 @@ type IngressControllerTuningOptions struct { // +optional ConnectTimeout *metav1.Duration `json:"connectTimeout,omitempty"` + // httpKeepAliveTimeout defines the maximum allowed time to wait for + // a new HTTP request to appear on a connection from the client to the router. + // + // This field expects an unsigned duration string of a decimal number, with optional + // fraction and a unit suffix, e.g. "300ms", "1.5s" or "2m45s". + // Valid time units are "ms", "s", "m". + // The allowed range is from 1 millisecond to 15 minutes. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose a reasonable default. This default is subject to change over time. 
+ // The current default is 300s. + // + // Low values (tens of milliseconds or less) can cause clients to close and reopen connections + // for each request, leading to reduced connection sharing. + // For HTTP/2, special care should be taken with low values. + // A few seconds is a reasonable starting point to avoid holding idle connections open + // while still allowing subsequent requests to reuse the connection. + // + // High values (minutes or more) favor connection reuse but may cause idle + // connections to linger longer. + // + // +kubebuilder:validation:Type:=string + // +kubebuilder:validation:XValidation:rule="self.matches('^([0-9]+(\\\\.[0-9]+)?(ms|s|m))+$')",message="httpKeepAliveTimeout must be a valid duration string composed of an unsigned integer value, optionally followed by a decimal fraction and a unit suffix (ms, s, m)" + // +kubebuilder:validation:XValidation:rule="!self.matches('^([0-9]+(\\\\.[0-9]+)?(ms|s|m))+$') || duration(self) <= duration('15m')",message="httpKeepAliveTimeout must be less than or equal to 15 minutes" + // +kubebuilder:validation:XValidation:rule="!self.matches('^([0-9]+(\\\\.[0-9]+)?(ms|s|m))+$') || duration(self) >= duration('1ms')",message="httpKeepAliveTimeout must be greater than or equal to 1 millisecond" + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=16 + // +optional + HTTPKeepAliveTimeout *metav1.Duration `json:"httpKeepAliveTimeout,omitempty"` + // tlsInspectDelay defines how long the router can hold data to find a // matching route. // @@ -2123,3 +2211,34 @@ const ( // connection. IngressControllerConnectionTerminationPolicyDeferred IngressControllerConnectionTerminationPolicy = "Deferred" ) + +// IngressControllerClosedClientConnectionPolicy controls how the IngressController +// behaves when the client closes the TCP connection while the TLS +// handshake or HTTP request is in progress. +// +// +kubebuilder:validation:Enum=Abort;Continue +type IngressControllerClosedClientConnectionPolicy string + +const ( + // IngressControllerClosedClientConnectionPolicyAbort aborts processing early when the client + // closes the connection. + // + // This affects two types of processing: TLS handshake computation on the router + // and request handling. + // + // When the client closes the connection, the router will stop processing + // the TLS handshake, preventing unnecessary CPU work. + // + // If the HTTP request has not yet been sent to the backend, it will be aborted. + // If the request is already being processed by the backend, the router will + // half-close the connection to signal this condition to the backend server, + // which can then decide how to proceed. + IngressControllerClosedClientConnectionPolicyAbort IngressControllerClosedClientConnectionPolicy = "Abort" + + // IngressControllerClosedClientConnectionPolicyContinue continues processing even if the client + // closes the connection. + // + // The router will complete the TLS handshake and wait for the backend + // server's response regardless of the client having closed the connection. 
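// Illustrative sketch, not part of the API or of this change: from Go, the two options
// introduced above could be set on an IngressController spec roughly as follows. The
// variable name ic and the chosen values are assumptions for illustration; metav1 and
// time imports are assumed, and the timeout stays within the documented range of 1ms
// to 15m.
//
//	ic.Spec.TuningOptions.HTTPKeepAliveTimeout = &metav1.Duration{Duration: 30 * time.Second}
//	ic.Spec.ClosedClientConnectionPolicy = IngressControllerClosedClientConnectionPolicyAbort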
+ IngressControllerClosedClientConnectionPolicyContinue IngressControllerClosedClientConnectionPolicy = "Continue" +) diff --git a/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go index c6bcd22bc0fc..f5836af0f8cc 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go +++ b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go @@ -18,7 +18,8 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 // +openshift:validation:FeatureGateAwareXValidation:featureGate=BootImageSkewEnforcement,rule="self.?status.bootImageSkewEnforcementStatus.mode.orValue(\"\") == 'Automatic' ? self.?spec.managedBootImages.hasValue() || self.?status.managedBootImagesStatus.hasValue() : true",message="when skew enforcement is in Automatic mode, a boot image configuration is required" -// +openshift:validation:FeatureGateAwareXValidation:featureGate=BootImageSkewEnforcement,rule="self.?status.bootImageSkewEnforcementStatus.mode.orValue(\"\") == 'Automatic' ? !(self.?spec.managedBootImages.machineManagers.hasValue()) || self.spec.managedBootImages.machineManagers.exists(m, m.selection.mode == 'All' && m.resource == 'machinesets' && m.apiGroup == 'machine.openshift.io') : true",message="when skew enforcement is in Automatic mode, managedBootImages must contain a MachineManager opting in all MachineAPI MachineSets" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=BootImageSkewEnforcement,rule="self.?status.bootImageSkewEnforcementStatus.mode.orValue(\"\") == 'Automatic' ? !(self.?spec.managedBootImages.machineManagers.hasValue()) || size(self.spec.managedBootImages.machineManagers) > 0 : true",message="when skew enforcement is in Automatic mode, managedBootImages.machineManagers must not be an empty list" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=BootImageSkewEnforcement,rule="self.?status.bootImageSkewEnforcementStatus.mode.orValue(\"\") == 'Automatic' ? !(self.?spec.managedBootImages.machineManagers.hasValue()) || !self.spec.managedBootImages.machineManagers.exists(m, m.resource == 'machinesets' && m.apiGroup == 'machine.openshift.io') || self.spec.managedBootImages.machineManagers.exists(m, m.resource == 'machinesets' && m.apiGroup == 'machine.openshift.io' && m.selection.mode == 'All') : true",message="when skew enforcement is in Automatic mode, any MachineAPI MachineSet MachineManager must use selection mode 'All'" // +openshift:validation:FeatureGateAwareXValidation:featureGate=BootImageSkewEnforcement,rule="self.?status.bootImageSkewEnforcementStatus.mode.orValue(\"\") == 'Automatic' ? 
!(self.?status.managedBootImagesStatus.machineManagers.hasValue()) || self.status.managedBootImagesStatus.machineManagers.exists(m, m.selection.mode == 'All' && m.resource == 'machinesets' && m.apiGroup == 'machine.openshift.io'): true",message="when skew enforcement is in Automatic mode, managedBootImagesStatus must contain a MachineManager opting in all MachineAPI MachineSets" type MachineConfiguration struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go index fd83694c23fa..3bc6b81de467 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go @@ -2564,6 +2564,11 @@ func (in *IngressControllerTuningOptions) DeepCopyInto(out *IngressControllerTun *out = new(metav1.Duration) **out = **in } + if in.HTTPKeepAliveTimeout != nil { + in, out := &in.HTTPKeepAliveTimeout, &out.HTTPKeepAliveTimeout + *out = new(metav1.Duration) + **out = **in + } if in.TLSInspectDelay != nil { in, out := &in.TLSInspectDelay, &out.TLSInspectDelay *out = new(metav1.Duration) diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go index 483d9720da96..64aac26eb383 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -210,7 +210,7 @@ func (AddPage) SwaggerDoc() map[string]string { var map_Capability = map[string]string{ "": "Capabilities contains set of UI capabilities and their state in the console UI.", - "name": "name is the unique name of a capability. Available capabilities are LightspeedButton and GettingStartedBanner.", + "name": "name is the unique name of a capability. Available capabilities are LightspeedButton, GettingStartedBanner, and GuidedTour.", "visibility": "visibility defines the visibility state of the capability.", } @@ -259,7 +259,7 @@ func (ConsoleConfigRoute) SwaggerDoc() map[string]string { var map_ConsoleCustomization = map[string]string{ "": "ConsoleCustomization defines a list of optional configuration for the console UI. Ensure that Logos and CustomLogoFile cannot be set at the same time.", "logos": "logos is used to replace the OpenShift Masthead and Favicon logos in the console UI with custom logos. logos is an optional field that allows a list of logos. Only one of logos or customLogoFile can be set at a time. If logos is set, customLogoFile must be unset. When specified, there must be at least one entry and no more than 2 entries. Each type must appear only once in the list.", - "capabilities": "capabilities defines an array of capabilities that can be interacted with in the console UI. Each capability defines a visual state that can be interacted with the console to render in the UI. Available capabilities are LightspeedButton and GettingStartedBanner. Each of the available capabilities may appear only once in the list.", + "capabilities": "capabilities defines an array of capabilities that can be interacted with in the console UI. Each capability defines a visual state that can be interacted with the console to render in the UI. Available capabilities are LightspeedButton, GettingStartedBanner, and GuidedTour. 
Each of the available capabilities may appear only once in the list.", "brand": "brand is the default branding of the web console which can be overridden by providing the brand field. There is a limited set of specific brand options. This field controls elements of the console such as the logo. Invalid value will prevent a console rollout.", "documentationBaseURL": "documentationBaseURL links to external documentation are shown in various sections of the web console. Providing documentationBaseURL will override the default documentation URL. Invalid value will prevent a console rollout.", "customProductName": "customProductName is the name that will be displayed in page titles, logo alt text, and the about dialog instead of the normal OpenShift product name.", @@ -1063,7 +1063,7 @@ func (IngressControllerSetHTTPHeader) SwaggerDoc() map[string]string { var map_IngressControllerSpec = map[string]string{ "": "IngressControllerSpec is the specification of the desired behavior of the IngressController.", - "domain": "domain is a DNS name serviced by the ingress controller and is used to configure multiple features:\n\n* For the LoadBalancerService endpoint publishing strategy, domain is\n used to configure DNS records. See endpointPublishingStrategy.\n\n* When using a generated default certificate, the certificate will be valid\n for domain and its subdomains. See defaultCertificate.\n\n* The value is published to individual Route statuses so that end-users\n know where to target external DNS records.\n\ndomain must be unique among all IngressControllers, and cannot be updated.\n\nIf empty, defaults to ingress.config.openshift.io/cluster .spec.domain.", + "domain": "domain is a DNS name serviced by the ingress controller and is used to configure multiple features:\n\n* For the LoadBalancerService endpoint publishing strategy, domain is\n used to configure DNS records. See endpointPublishingStrategy.\n\n* When using a generated default certificate, the certificate will be valid\n for domain and its subdomains. See defaultCertificate.\n\n* The value is published to individual Route statuses so that end-users\n know where to target external DNS records.\n\ndomain must be unique among all IngressControllers, and cannot be updated.\n\nIf empty, defaults to ingress.config.openshift.io/cluster .spec.domain.\n\nThe domain value must be a valid DNS name. It must consist of lowercase alphanumeric characters, '-' or '.', and each label must start and end with an alphanumeric character and not exceed 63 characters. Maximum length of a valid DNS domain is 253 characters.\n\nThe implementation may add a prefix such as \"router-default.\" to the domain when constructing the router canonical hostname. To ensure the resulting hostname does not exceed the DNS maximum length of 253 characters, the domain length is additionally validated at the IngressController object level. For the maximum length of the domain value itself, the shortest possible variant of the prefix and the ingress controller name was considered for example \"router-a.\"", "httpErrorCodePages": "httpErrorCodePages specifies a configmap with custom error pages. The administrator must create this configmap in the openshift-config namespace. This configmap should have keys in the format \"error-page-.http\", where is an HTTP error code. For example, \"error-page-503.http\" defines an error page for HTTP 503 responses. Currently only error pages for 503 and 404 responses can be customized. 
Each value in the configmap should be the full response, including HTTP headers. Eg- https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http If this field is empty, the ingress controller uses the default error pages.", "replicas": "replicas is the desired number of ingress controller replicas. If unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status.\n\nThe value of replicas is set based on the value of a chosen field in the Infrastructure CR. If defaultPlacement is set to ControlPlane, the chosen field will be controlPlaneTopology. If it is set to Workers the chosen field will be infrastructureTopology. Replicas will then be set to 1 or 2 based whether the chosen field's value is SingleReplica or HighlyAvailable, respectively.\n\nThese defaults are subject to change.", "endpointPublishingStrategy": "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc.\n\nIf unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform:\n\n AWS: LoadBalancerService (with External scope)\n Azure: LoadBalancerService (with External scope)\n GCP: LoadBalancerService (with External scope)\n IBMCloud: LoadBalancerService (with External scope)\n AlibabaCloud: LoadBalancerService (with External scope)\n Libvirt: HostNetwork\n\nAny other platform types (including None) default to HostNetwork.\n\nendpointPublishingStrategy cannot be updated.", @@ -1080,7 +1080,8 @@ var map_IngressControllerSpec = map[string]string{ "tuningOptions": "tuningOptions defines parameters for adjusting the performance of ingress controller pods. All fields are optional and will use their respective defaults if not set. See specific tuningOptions fields for more details.\n\nSetting fields within tuningOptions is generally not recommended. The default values are suitable for most configurations.", "unsupportedConfigOverrides": "unsupportedConfigOverrides allows specifying unsupported configuration options. Its use is unsupported.", "httpCompression": "httpCompression defines a policy for HTTP traffic compression. By default, there is no HTTP compression.", - "idleConnectionTerminationPolicy": "idleConnectionTerminationPolicy maps directly to HAProxy's idle-close-on-response option and controls whether HAProxy keeps idle frontend connections open during a soft stop (router reload).\n\nAllowed values for this field are \"Immediate\" and \"Deferred\". The default value is \"Immediate\".\n\nWhen set to \"Immediate\", idle connections are closed immediately during router reloads. This ensures immediate propagation of route changes but may impact clients sensitive to connection resets.\n\nWhen set to \"Deferred\", HAProxy will maintain idle connections during a soft reload instead of closing them immediately. These connections remain open until any of the following occurs:\n\n - A new request is received on the connection, in which\n case HAProxy handles it in the old process and closes\n the connection after sending the response.\n\n - HAProxy's `timeout http-keep-alive` duration expires\n (300 seconds in OpenShift's configuration, not\n configurable).\n\n - The client's keep-alive timeout expires, causing the\n client to close the connection.\n\nSetting Deferred can help prevent errors in clients or load balancers that do not properly handle connection resets. 
Additionally, this option allows you to retain the pre-2.4 HAProxy behaviour: in HAProxy version 2.2 (OpenShift versions < 4.14), maintaining idle connections during a soft reload was the default behaviour, but starting with HAProxy 2.4, the default changed to closing idle connections immediately.\n\nImportant Consideration:\n\n - Using Deferred will result in temporary inconsistencies\n for the first request on each persistent connection\n after a route update and router reload. This request\n will be processed by the old HAProxy process using its\n old configuration. Subsequent requests will use the\n updated configuration.\n\nOperational Considerations:\n\n - Keeping idle connections open during reloads may lead\n to an accumulation of old HAProxy processes if\n connections remain idle for extended periods,\n especially in environments where frequent reloads\n occur.\n\n - Consider monitoring the number of HAProxy processes in\n the router pods when Deferred is set.\n\n - You may need to enable or adjust the\n `ingress.operator.openshift.io/hard-stop-after`\n duration (configured via an annotation on the\n IngressController resource) in environments with\n frequent reloads to prevent resource exhaustion.", + "idleConnectionTerminationPolicy": "idleConnectionTerminationPolicy maps directly to HAProxy's idle-close-on-response option and controls whether HAProxy keeps idle frontend connections open during a soft stop (router reload).\n\nAllowed values for this field are \"Immediate\" and \"Deferred\". The default value is \"Immediate\".\n\nWhen set to \"Immediate\", idle connections are closed immediately during router reloads. This ensures immediate propagation of route changes but may impact clients sensitive to connection resets.\n\nWhen set to \"Deferred\", HAProxy will maintain idle connections during a soft reload instead of closing them immediately. These connections remain open until any of the following occurs:\n\n - A new request is received on the connection, in which\n case HAProxy handles it in the old process and closes\n the connection after sending the response.\n\n - HAProxy's `timeout http-keep-alive` duration expires.\n By default this is 300 seconds, but it can be changed\n using httpKeepAliveTimeout tuning option.\n\n - The client's keep-alive timeout expires, causing the\n client to close the connection.\n\nSetting Deferred can help prevent errors in clients or load balancers that do not properly handle connection resets. Additionally, this option allows you to retain the pre-2.4 HAProxy behaviour: in HAProxy version 2.2 (OpenShift versions < 4.14), maintaining idle connections during a soft reload was the default behaviour, but starting with HAProxy 2.4, the default changed to closing idle connections immediately.\n\nImportant Consideration:\n\n - Using Deferred will result in temporary inconsistencies\n for the first request on each persistent connection\n after a route update and router reload. This request\n will be processed by the old HAProxy process using its\n old configuration. 
Subsequent requests will use the\n updated configuration.\n\nOperational Considerations:\n\n - Keeping idle connections open during reloads may lead\n to an accumulation of old HAProxy processes if\n connections remain idle for extended periods,\n especially in environments where frequent reloads\n occur.\n\n - Consider monitoring the number of HAProxy processes in\n the router pods when Deferred is set.\n\n - You may need to enable or adjust the\n `ingress.operator.openshift.io/hard-stop-after`\n duration (configured via an annotation on the\n IngressController resource) in environments with\n frequent reloads to prevent resource exhaustion.", + "closedClientConnectionPolicy": "closedClientConnectionPolicy controls how the IngressController behaves when the client closes the TCP connection while the TLS handshake or HTTP request is in progress. This option maps directly to HAProxy’s \"abortonclose\" option.\n\nValid values are: \"Abort\" and \"Continue\". The default value is \"Continue\".\n\nWhen set to \"Abort\", the router will stop processing the TLS handshake if it is in progress, and it will not send an HTTP request to the backend server if the request has not yet been sent when the client closes the connection.\n\nWhen set to \"Continue\", the router will complete the TLS handshake if it is in progress, or send an HTTP request to the backend server and wait for the backend server's response, regardless of whether the client has closed the connection.\n\nSetting \"Abort\" can help free CPU resources otherwise spent on TLS computation for connections the client has already closed, and can reduce request queue size, thereby reducing the load on saturated backend servers.\n\nImportant Considerations:\n\n - The default policy (\"Continue\") is HTTP-compliant, and requests\n for aborted client connections will still be served.\n Use the \"Continue\" policy to allow a client to send a request\n and then immediately close its side of the connection while\n still receiving a response on the half-closed connection.\n\n - When clients use keep-alive connections, the most common case for premature\n closure is when the user wants to cancel the transfer or when a timeout\n occurs. In that case, the \"Abort\" policy may be used to reduce resource consumption.\n\n - Using RSA keys larger than 2048 bits can significantly slow down\n TLS computations. Consider using the \"Abort\" policy to reduce CPU usage.", } func (IngressControllerSpec) SwaggerDoc() map[string]string { @@ -1115,6 +1116,7 @@ var map_IngressControllerTuningOptions = map[string]string{ "serverFinTimeout": "serverFinTimeout defines how long a connection will be held open while waiting for the server/backend response to the client closing the connection.\n\nIf unset, the default timeout is 1s", "tunnelTimeout": "tunnelTimeout defines how long a tunnel connection (including websockets) will be held open while the tunnel is idle.\n\nIf unset, the default timeout is 1h", "connectTimeout": "connectTimeout defines the maximum time to wait for a connection attempt to a server/backend to succeed.\n\nThis field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\".\n\nWhen omitted, this means the user has no opinion and the platform is left to choose a reasonable default. This default is subject to change over time. 
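// --- Illustrative example (not part of the diff above) ----------------------
// A minimal sketch of how the two connection policies documented above might be
// set on the default IngressController via a JSON merge patch. The field names
// ("idleConnectionTerminationPolicy", "closedClientConnectionPolicy") and the
// values ("Deferred", "Abort") come from the godoc text above; the use of the
// generated operator clientset, the namespace, and the object name are
// assumptions for the sake of the sketch, not the operator's documented workflow.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/clientcmd"

	operatorclient "github.com/openshift/client-go/operator/clientset/versioned"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := operatorclient.NewForConfigOrDie(cfg)

	// Keep idle connections open across router reloads, and stop work for
	// connections the client has already closed.
	patch := []byte(`{"spec":{"idleConnectionTerminationPolicy":"Deferred","closedClientConnectionPolicy":"Abort"}}`)

	ic, err := client.OperatorV1().IngressControllers("openshift-ingress-operator").
		Patch(context.TODO(), "default", types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("patched IngressController", ic.Name)
}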
The current default is 5s.", + "httpKeepAliveTimeout": "httpKeepAliveTimeout defines the maximum allowed time to wait for a new HTTP request to appear on a connection from the client to the router.\n\nThis field expects an unsigned duration string of a decimal number, with optional fraction and a unit suffix, e.g. \"300ms\", \"1.5s\" or \"2m45s\". Valid time units are \"ms\", \"s\", \"m\". The allowed range is from 1 millisecond to 15 minutes.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose a reasonable default. This default is subject to change over time. The current default is 300s.\n\nLow values (tens of milliseconds or less) can cause clients to close and reopen connections for each request, leading to reduced connection sharing. For HTTP/2, special care should be taken with low values. A few seconds is a reasonable starting point to avoid holding idle connections open while still allowing subsequent requests to reuse the connection.\n\nHigh values (minutes or more) favor connection reuse but may cause idle connections to linger longer.", "tlsInspectDelay": "tlsInspectDelay defines how long the router can hold data to find a matching route.\n\nSetting this too short can cause the router to fall back to the default certificate for edge-terminated or reencrypt routes even when a better matching certificate could be used.\n\nIf unset, the default inspect delay is 5s", "healthCheckInterval": "healthCheckInterval defines how long the router waits between two consecutive health checks on its configured backends. This value is applied globally as a default for all routes, but may be overridden per-route by the route annotation \"router.openshift.io/haproxy.health.check.interval\".\n\nExpects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, eg \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\".\n\nSetting this to less than 5s can cause excess traffic due to too frequent TCP health checks and accompanying SYN packet storms. Alternatively, setting this too high can result in increased latency, due to backend servers that are no longer available, but haven't yet been detected as such.\n\nAn empty or zero healthCheckInterval means no opinion and IngressController chooses a default, which is subject to change over time. Currently the default healthCheckInterval value is 5s.\n\nCurrently the minimum allowed value is 1s and the maximum allowed value is 2147483647ms (24.85 days). Both are subject to change over time.", "maxConnections": "maxConnections defines the maximum number of simultaneous connections that can be established per HAProxy process. Increasing this value allows each ingress controller pod to handle more connections but at the cost of additional system resources being consumed.\n\nPermitted values are: empty, 0, -1, and the range 2000-2000000.\n\nIf this field is empty or 0, the IngressController will use the default value of 50000, but the default is subject to change in future releases.\n\nIf the value is -1 then HAProxy will dynamically compute a maximum value based on the available ulimits in the running container. 
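// --- Illustrative example (not part of the diff above) ----------------------
// The tuning options above (connectTimeout, httpKeepAliveTimeout,
// healthCheckInterval) all take Go-style duration strings. This is a small
// client-side validation sketch for httpKeepAliveTimeout based only on the
// constraints stated in its godoc (units ms/s/m, range 1ms to 15m); the actual
// server-side validation may differ.
package main

import (
	"fmt"
	"regexp"
	"time"
)

// validKeepAlive reports whether s is an acceptable httpKeepAliveTimeout value
// per the documented constraints.
func validKeepAlive(s string) error {
	// Only ms, s and m units are documented as allowed for this field.
	if !regexp.MustCompile(`^([0-9]+(\.[0-9]+)?(ms|s|m))+$`).MatchString(s) {
		return fmt.Errorf("%q: only ms, s and m units are allowed", s)
	}
	d, err := time.ParseDuration(s)
	if err != nil {
		return err
	}
	if d < time.Millisecond || d > 15*time.Minute {
		return fmt.Errorf("%q is outside the allowed 1ms-15m range", s)
	}
	return nil
}

func main() {
	for _, v := range []string{"300s", "2m45s", "500us", "20m"} {
		fmt.Println(v, "->", validKeepAlive(v))
	}
}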
Selecting -1 (i.e., auto) will result in a large value being computed (~520000 on OpenShift >=4.10 clusters) and therefore each HAProxy process will incur significant memory usage compared to the current default of 50000.\n\nSetting a value that is greater than the current operating system limit will prevent the HAProxy process from starting.\n\nIf you choose a discrete value (e.g., 750000) and the router pod is migrated to a new node, there's no guarantee that that new node has identical ulimits configured. In such a scenario the pod would fail to start. If you have nodes with different ulimits configured (e.g., different tuned profiles) and you choose a discrete value then the guidance is to use -1 and let the value be computed dynamically at runtime.\n\nYou can monitor memory usage for router containers with the following metric: 'container_memory_working_set_bytes{container=\"router\",namespace=\"openshift-ingress\"}'.\n\nYou can monitor memory usage of individual HAProxy processes in router containers with the following metric: 'container_memory_working_set_bytes{container=\"router\",namespace=\"openshift-ingress\"}/container_processes{container=\"router\",namespace=\"openshift-ingress\"}'.", diff --git a/vendor/github.com/openshift/api/route/v1/generated.proto b/vendor/github.com/openshift/api/route/v1/generated.proto index e055eb0d2687..85018b16b76f 100644 --- a/vendor/github.com/openshift/api/route/v1/generated.proto +++ b/vendor/github.com/openshift/api/route/v1/generated.proto @@ -413,10 +413,12 @@ message RouterShard { // +kubebuilder:validation:XValidation:rule="has(self.termination) && has(self.insecureEdgeTerminationPolicy) ? !((self.termination=='passthrough') && (self.insecureEdgeTerminationPolicy=='Allow')) : true", message="cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow" // +openshift:validation:FeatureGateAwareXValidation:featureGate=RouteExternalCertificate,rule="!(has(self.certificate) && has(self.externalCertificate))", message="cannot have both spec.tls.certificate and spec.tls.externalCertificate" message TLSConfig { - // termination indicates termination type. + // termination indicates the TLS termination type. // // * edge - TLS termination is done by the router and http is used to communicate with the backend (default) + // // * passthrough - Traffic is sent straight to the destination without the router providing TLS termination + // // * reencrypt - TLS termination is done by the router and https is used to communicate with the backend // // Note: passthrough termination is incompatible with httpHeader actions diff --git a/vendor/github.com/openshift/api/route/v1/types.go b/vendor/github.com/openshift/api/route/v1/types.go index 5a61f477e778..35c4064825cd 100644 --- a/vendor/github.com/openshift/api/route/v1/types.go +++ b/vendor/github.com/openshift/api/route/v1/types.go @@ -424,10 +424,12 @@ type RouterShard struct { // +kubebuilder:validation:XValidation:rule="has(self.termination) && has(self.insecureEdgeTerminationPolicy) ? 
!((self.termination=='passthrough') && (self.insecureEdgeTerminationPolicy=='Allow')) : true", message="cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow" // +openshift:validation:FeatureGateAwareXValidation:featureGate=RouteExternalCertificate,rule="!(has(self.certificate) && has(self.externalCertificate))", message="cannot have both spec.tls.certificate and spec.tls.externalCertificate" type TLSConfig struct { - // termination indicates termination type. + // termination indicates the TLS termination type. // // * edge - TLS termination is done by the router and http is used to communicate with the backend (default) + // // * passthrough - Traffic is sent straight to the destination without the router providing TLS termination + // // * reencrypt - TLS termination is done by the router and https is used to communicate with the backend // // Note: passthrough termination is incompatible with httpHeader actions diff --git a/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go index e6c44a6b0238..4c8f9eeddf3f 100644 --- a/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go @@ -173,7 +173,7 @@ func (RouterShard) SwaggerDoc() map[string]string { var map_TLSConfig = map[string]string{ "": "TLSConfig defines config used to secure a route and provide termination", - "termination": "termination indicates termination type.\n\n* edge - TLS termination is done by the router and http is used to communicate with the backend (default) * passthrough - Traffic is sent straight to the destination without the router providing TLS termination * reencrypt - TLS termination is done by the router and https is used to communicate with the backend\n\nNote: passthrough termination is incompatible with httpHeader actions", + "termination": "termination indicates the TLS termination type.\n\n* edge - TLS termination is done by the router and http is used to communicate with the backend (default)\n\n* passthrough - Traffic is sent straight to the destination without the router providing TLS termination\n\n* reencrypt - TLS termination is done by the router and https is used to communicate with the backend\n\nNote: passthrough termination is incompatible with httpHeader actions", "certificate": "certificate provides certificate contents. This should be a single serving certificate, not a certificate chain. Do not include a CA certificate.", "key": "key provides key file contents", "caCertificate": "caCertificate provides the cert authority certificate contents", diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go index b217e5bdcd44..53d86d2fdd9c 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go @@ -2,6 +2,10 @@ package v1 +import ( + configv1 "github.com/openshift/api/config/v1" +) + // AWSPlatformStatusApplyConfiguration represents a declarative configuration of the AWSPlatformStatus type for use // with apply. 
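// --- Illustrative example (not part of the diff above) ----------------------
// The reformatted termination godoc above lists the three TLS termination modes
// for a Route. A minimal sketch of a TLSConfig for each mode, using the
// long-standing route/v1 constants; note the validation rules quoted above:
// passthrough cannot be combined with insecureEdgeTerminationPolicy: Allow, and
// certificate/externalCertificate are mutually exclusive.
package main

import (
	"fmt"

	routev1 "github.com/openshift/api/route/v1"
)

func main() {
	edge := &routev1.TLSConfig{
		// TLS ends at the router; plain HTTP is used to reach the backend.
		Termination:                   routev1.TLSTerminationEdge,
		InsecureEdgeTerminationPolicy: routev1.InsecureEdgeTerminationPolicyRedirect,
	}
	passthrough := &routev1.TLSConfig{
		// The router does not terminate TLS; httpHeader actions do not apply.
		Termination: routev1.TLSTerminationPassthrough,
	}
	reencrypt := &routev1.TLSConfig{
		// TLS ends at the router and a new TLS connection is made to the backend;
		// DestinationCACertificate would normally be set to validate that backend.
		Termination: routev1.TLSTerminationReencrypt,
	}
	fmt.Println(edge.Termination, passthrough.Termination, reencrypt.Termination)
}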
type AWSPlatformStatusApplyConfiguration struct { @@ -9,6 +13,7 @@ type AWSPlatformStatusApplyConfiguration struct { ServiceEndpoints []AWSServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"` ResourceTags []AWSResourceTagApplyConfiguration `json:"resourceTags,omitempty"` CloudLoadBalancerConfig *CloudLoadBalancerConfigApplyConfiguration `json:"cloudLoadBalancerConfig,omitempty"` + IPFamily *configv1.IPFamilyType `json:"ipFamily,omitempty"` } // AWSPlatformStatusApplyConfiguration constructs a declarative configuration of the AWSPlatformStatus type for use with @@ -58,3 +63,11 @@ func (b *AWSPlatformStatusApplyConfiguration) WithCloudLoadBalancerConfig(value b.CloudLoadBalancerConfig = value return b } + +// WithIPFamily sets the IPFamily field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPFamily field is set to the value of the last call. +func (b *AWSPlatformStatusApplyConfiguration) WithIPFamily(value configv1.IPFamilyType) *AWSPlatformStatusApplyConfiguration { + b.IPFamily = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go index 3d1a83d28a16..774641c8290a 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go @@ -15,6 +15,7 @@ type AzurePlatformStatusApplyConfiguration struct { ARMEndpoint *string `json:"armEndpoint,omitempty"` ResourceTags []AzureResourceTagApplyConfiguration `json:"resourceTags,omitempty"` CloudLoadBalancerConfig *CloudLoadBalancerConfigApplyConfiguration `json:"cloudLoadBalancerConfig,omitempty"` + IPFamily *configv1.IPFamilyType `json:"ipFamily,omitempty"` } // AzurePlatformStatusApplyConfiguration constructs a declarative configuration of the AzurePlatformStatus type for use with @@ -75,3 +76,11 @@ func (b *AzurePlatformStatusApplyConfiguration) WithCloudLoadBalancerConfig(valu b.CloudLoadBalancerConfig = value return b } + +// WithIPFamily sets the IPFamily field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPFamily field is set to the value of the last call. 
+func (b *AzurePlatformStatusApplyConfiguration) WithIPFamily(value configv1.IPFamilyType) *AzurePlatformStatusApplyConfiguration { + b.IPFamily = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformstatus.go index 55b875c7c469..315dc309cab5 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformstatus.go @@ -15,6 +15,7 @@ type BareMetalPlatformStatusApplyConfiguration struct { IngressIPs []string `json:"ingressIPs,omitempty"` NodeDNSIP *string `json:"nodeDNSIP,omitempty"` LoadBalancer *BareMetalPlatformLoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"` + DNSRecordsType *configv1.DNSRecordsType `json:"dnsRecordsType,omitempty"` MachineNetworks []configv1.CIDR `json:"machineNetworks,omitempty"` } @@ -76,6 +77,14 @@ func (b *BareMetalPlatformStatusApplyConfiguration) WithLoadBalancer(value *Bare return b } +// WithDNSRecordsType sets the DNSRecordsType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DNSRecordsType field is set to the value of the last call. +func (b *BareMetalPlatformStatusApplyConfiguration) WithDNSRecordsType(value configv1.DNSRecordsType) *BareMetalPlatformStatusApplyConfiguration { + b.DNSRecordsType = &value + return b +} + // WithMachineNetworks adds the given value to the MachineNetworks field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the MachineNetworks field. diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicyspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicyspec.go index 6c86d66d472b..8cee680f2779 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicyspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicyspec.go @@ -9,8 +9,8 @@ import ( // ClusterImagePolicySpecApplyConfiguration represents a declarative configuration of the ClusterImagePolicySpec type for use // with apply. type ClusterImagePolicySpecApplyConfiguration struct { - Scopes []configv1.ImageScope `json:"scopes,omitempty"` - Policy *PolicyApplyConfiguration `json:"policy,omitempty"` + Scopes []configv1.ImageScope `json:"scopes,omitempty"` + Policy *ImageSigstoreVerificationPolicyApplyConfiguration `json:"policy,omitempty"` } // ClusterImagePolicySpecApplyConfiguration constructs a declarative configuration of the ClusterImagePolicySpec type for use with @@ -32,7 +32,7 @@ func (b *ClusterImagePolicySpecApplyConfiguration) WithScopes(values ...configv1 // WithPolicy sets the Policy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Policy field is set to the value of the last call. 
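// --- Illustrative example (not part of the diff above) ----------------------
// The new WithIPFamily and WithDNSRecordsType builder methods added above chain
// like any other apply-configuration setter. The constructors used here
// (AWSPlatformStatus, BareMetalPlatformStatus) are assumed to exist per the
// usual applyconfiguration-gen convention, and the string values passed to the
// typed conversions are placeholders, not a statement of the allowed enum
// constants.
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	aws := applyconfigv1.AWSPlatformStatus().
		WithIPFamily(configv1.IPFamilyType("DualStack")) // placeholder value

	metal := applyconfigv1.BareMetalPlatformStatus().
		WithDNSRecordsType(configv1.DNSRecordsType("External")) // placeholder value

	fmt.Println(*aws.IPFamily, *metal.DNSRecordsType)
}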
-func (b *ClusterImagePolicySpecApplyConfiguration) WithPolicy(value *PolicyApplyConfiguration) *ClusterImagePolicySpecApplyConfiguration { +func (b *ClusterImagePolicySpecApplyConfiguration) WithPolicy(value *ImageSigstoreVerificationPolicyApplyConfiguration) *ClusterImagePolicySpecApplyConfiguration { b.Policy = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpplatformstatus.go index 3f67e9e35969..9c28888cf9a4 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpplatformstatus.go @@ -10,7 +10,6 @@ type GCPPlatformStatusApplyConfiguration struct { ResourceLabels []GCPResourceLabelApplyConfiguration `json:"resourceLabels,omitempty"` ResourceTags []GCPResourceTagApplyConfiguration `json:"resourceTags,omitempty"` CloudLoadBalancerConfig *CloudLoadBalancerConfigApplyConfiguration `json:"cloudLoadBalancerConfig,omitempty"` - ServiceEndpoints []GCPServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"` } // GCPPlatformStatusApplyConfiguration constructs a declarative configuration of the GCPPlatformStatus type for use with @@ -68,16 +67,3 @@ func (b *GCPPlatformStatusApplyConfiguration) WithCloudLoadBalancerConfig(value b.CloudLoadBalancerConfig = value return b } - -// WithServiceEndpoints adds the given value to the ServiceEndpoints field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the ServiceEndpoints field. -func (b *GCPPlatformStatusApplyConfiguration) WithServiceEndpoints(values ...*GCPServiceEndpointApplyConfiguration) *GCPPlatformStatusApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithServiceEndpoints") - } - b.ServiceEndpoints = append(b.ServiceEndpoints, *values[i]) - } - return b -} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpserviceendpoint.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpserviceendpoint.go deleted file mode 100644 index 2cb9d0a7ca91..000000000000 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpserviceendpoint.go +++ /dev/null @@ -1,36 +0,0 @@ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -import ( - configv1 "github.com/openshift/api/config/v1" -) - -// GCPServiceEndpointApplyConfiguration represents a declarative configuration of the GCPServiceEndpoint type for use -// with apply. -type GCPServiceEndpointApplyConfiguration struct { - Name *configv1.GCPServiceEndpointName `json:"name,omitempty"` - URL *string `json:"url,omitempty"` -} - -// GCPServiceEndpointApplyConfiguration constructs a declarative configuration of the GCPServiceEndpoint type for use with -// apply. -func GCPServiceEndpoint() *GCPServiceEndpointApplyConfiguration { - return &GCPServiceEndpointApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. 
-func (b *GCPServiceEndpointApplyConfiguration) WithName(value configv1.GCPServiceEndpointName) *GCPServiceEndpointApplyConfiguration { - b.Name = &value - return b -} - -// WithURL sets the URL field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the URL field is set to the value of the last call. -func (b *GCPServiceEndpointApplyConfiguration) WithURL(value string) *GCPServiceEndpointApplyConfiguration { - b.URL = &value - return b -} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/fulciocawithrekor.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicyfulciocawithrekorrootoftrust.go similarity index 57% rename from vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/fulciocawithrekor.go rename to vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicyfulciocawithrekorrootoftrust.go index 48b553580d3e..a4c831fca01d 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/fulciocawithrekor.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicyfulciocawithrekorrootoftrust.go @@ -2,24 +2,24 @@ package v1 -// FulcioCAWithRekorApplyConfiguration represents a declarative configuration of the FulcioCAWithRekor type for use +// ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration represents a declarative configuration of the ImagePolicyFulcioCAWithRekorRootOfTrust type for use // with apply. -type FulcioCAWithRekorApplyConfiguration struct { +type ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration struct { FulcioCAData []byte `json:"fulcioCAData,omitempty"` RekorKeyData []byte `json:"rekorKeyData,omitempty"` FulcioSubject *PolicyFulcioSubjectApplyConfiguration `json:"fulcioSubject,omitempty"` } -// FulcioCAWithRekorApplyConfiguration constructs a declarative configuration of the FulcioCAWithRekor type for use with +// ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration constructs a declarative configuration of the ImagePolicyFulcioCAWithRekorRootOfTrust type for use with // apply. -func FulcioCAWithRekor() *FulcioCAWithRekorApplyConfiguration { - return &FulcioCAWithRekorApplyConfiguration{} +func ImagePolicyFulcioCAWithRekorRootOfTrust() *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration { + return &ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration{} } // WithFulcioCAData adds the given value to the FulcioCAData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the FulcioCAData field. -func (b *FulcioCAWithRekorApplyConfiguration) WithFulcioCAData(values ...byte) *FulcioCAWithRekorApplyConfiguration { +func (b *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration) WithFulcioCAData(values ...byte) *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration { for i := range values { b.FulcioCAData = append(b.FulcioCAData, values[i]) } @@ -29,7 +29,7 @@ func (b *FulcioCAWithRekorApplyConfiguration) WithFulcioCAData(values ...byte) * // WithRekorKeyData adds the given value to the RekorKeyData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. 
// If called multiple times, values provided by each call will be appended to the RekorKeyData field. -func (b *FulcioCAWithRekorApplyConfiguration) WithRekorKeyData(values ...byte) *FulcioCAWithRekorApplyConfiguration { +func (b *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration) WithRekorKeyData(values ...byte) *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration { for i := range values { b.RekorKeyData = append(b.RekorKeyData, values[i]) } @@ -39,7 +39,7 @@ func (b *FulcioCAWithRekorApplyConfiguration) WithRekorKeyData(values ...byte) * // WithFulcioSubject sets the FulcioSubject field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FulcioSubject field is set to the value of the last call. -func (b *FulcioCAWithRekorApplyConfiguration) WithFulcioSubject(value *PolicyFulcioSubjectApplyConfiguration) *FulcioCAWithRekorApplyConfiguration { +func (b *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration) WithFulcioSubject(value *PolicyFulcioSubjectApplyConfiguration) *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration { b.FulcioSubject = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/pki.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicypkirootoftrust.go similarity index 65% rename from vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/pki.go rename to vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicypkirootoftrust.go index 65f27edf8ed5..9a0c257b7f25 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/pki.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicypkirootoftrust.go @@ -2,24 +2,24 @@ package v1 -// PKIApplyConfiguration represents a declarative configuration of the PKI type for use +// ImagePolicyPKIRootOfTrustApplyConfiguration represents a declarative configuration of the ImagePolicyPKIRootOfTrust type for use // with apply. -type PKIApplyConfiguration struct { +type ImagePolicyPKIRootOfTrustApplyConfiguration struct { CertificateAuthorityRootsData []byte `json:"caRootsData,omitempty"` CertificateAuthorityIntermediatesData []byte `json:"caIntermediatesData,omitempty"` PKICertificateSubject *PKICertificateSubjectApplyConfiguration `json:"pkiCertificateSubject,omitempty"` } -// PKIApplyConfiguration constructs a declarative configuration of the PKI type for use with +// ImagePolicyPKIRootOfTrustApplyConfiguration constructs a declarative configuration of the ImagePolicyPKIRootOfTrust type for use with // apply. -func PKI() *PKIApplyConfiguration { - return &PKIApplyConfiguration{} +func ImagePolicyPKIRootOfTrust() *ImagePolicyPKIRootOfTrustApplyConfiguration { + return &ImagePolicyPKIRootOfTrustApplyConfiguration{} } // WithCertificateAuthorityRootsData adds the given value to the CertificateAuthorityRootsData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the CertificateAuthorityRootsData field. 
-func (b *PKIApplyConfiguration) WithCertificateAuthorityRootsData(values ...byte) *PKIApplyConfiguration { +func (b *ImagePolicyPKIRootOfTrustApplyConfiguration) WithCertificateAuthorityRootsData(values ...byte) *ImagePolicyPKIRootOfTrustApplyConfiguration { for i := range values { b.CertificateAuthorityRootsData = append(b.CertificateAuthorityRootsData, values[i]) } @@ -29,7 +29,7 @@ func (b *PKIApplyConfiguration) WithCertificateAuthorityRootsData(values ...byte // WithCertificateAuthorityIntermediatesData adds the given value to the CertificateAuthorityIntermediatesData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the CertificateAuthorityIntermediatesData field. -func (b *PKIApplyConfiguration) WithCertificateAuthorityIntermediatesData(values ...byte) *PKIApplyConfiguration { +func (b *ImagePolicyPKIRootOfTrustApplyConfiguration) WithCertificateAuthorityIntermediatesData(values ...byte) *ImagePolicyPKIRootOfTrustApplyConfiguration { for i := range values { b.CertificateAuthorityIntermediatesData = append(b.CertificateAuthorityIntermediatesData, values[i]) } @@ -39,7 +39,7 @@ func (b *PKIApplyConfiguration) WithCertificateAuthorityIntermediatesData(values // WithPKICertificateSubject sets the PKICertificateSubject field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PKICertificateSubject field is set to the value of the last call. -func (b *PKIApplyConfiguration) WithPKICertificateSubject(value *PKICertificateSubjectApplyConfiguration) *PKIApplyConfiguration { +func (b *ImagePolicyPKIRootOfTrustApplyConfiguration) WithPKICertificateSubject(value *PKICertificateSubjectApplyConfiguration) *ImagePolicyPKIRootOfTrustApplyConfiguration { b.PKICertificateSubject = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/publickey.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicypublickeyrootoftrust.go similarity index 54% rename from vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/publickey.go rename to vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicypublickeyrootoftrust.go index c1073e882f69..a14457309773 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/publickey.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicypublickeyrootoftrust.go @@ -2,23 +2,23 @@ package v1 -// PublicKeyApplyConfiguration represents a declarative configuration of the PublicKey type for use +// ImagePolicyPublicKeyRootOfTrustApplyConfiguration represents a declarative configuration of the ImagePolicyPublicKeyRootOfTrust type for use // with apply. -type PublicKeyApplyConfiguration struct { +type ImagePolicyPublicKeyRootOfTrustApplyConfiguration struct { KeyData []byte `json:"keyData,omitempty"` RekorKeyData []byte `json:"rekorKeyData,omitempty"` } -// PublicKeyApplyConfiguration constructs a declarative configuration of the PublicKey type for use with +// ImagePolicyPublicKeyRootOfTrustApplyConfiguration constructs a declarative configuration of the ImagePolicyPublicKeyRootOfTrust type for use with // apply. 
-func PublicKey() *PublicKeyApplyConfiguration { - return &PublicKeyApplyConfiguration{} +func ImagePolicyPublicKeyRootOfTrust() *ImagePolicyPublicKeyRootOfTrustApplyConfiguration { + return &ImagePolicyPublicKeyRootOfTrustApplyConfiguration{} } // WithKeyData adds the given value to the KeyData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the KeyData field. -func (b *PublicKeyApplyConfiguration) WithKeyData(values ...byte) *PublicKeyApplyConfiguration { +func (b *ImagePolicyPublicKeyRootOfTrustApplyConfiguration) WithKeyData(values ...byte) *ImagePolicyPublicKeyRootOfTrustApplyConfiguration { for i := range values { b.KeyData = append(b.KeyData, values[i]) } @@ -28,7 +28,7 @@ func (b *PublicKeyApplyConfiguration) WithKeyData(values ...byte) *PublicKeyAppl // WithRekorKeyData adds the given value to the RekorKeyData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the RekorKeyData field. -func (b *PublicKeyApplyConfiguration) WithRekorKeyData(values ...byte) *PublicKeyApplyConfiguration { +func (b *ImagePolicyPublicKeyRootOfTrustApplyConfiguration) WithRekorKeyData(values ...byte) *ImagePolicyPublicKeyRootOfTrustApplyConfiguration { for i := range values { b.RekorKeyData = append(b.RekorKeyData, values[i]) } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicyspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicyspec.go index b75165c8d05e..321196469072 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicyspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicyspec.go @@ -9,8 +9,8 @@ import ( // ImagePolicySpecApplyConfiguration represents a declarative configuration of the ImagePolicySpec type for use // with apply. type ImagePolicySpecApplyConfiguration struct { - Scopes []configv1.ImageScope `json:"scopes,omitempty"` - Policy *PolicyApplyConfiguration `json:"policy,omitempty"` + Scopes []configv1.ImageScope `json:"scopes,omitempty"` + Policy *ImageSigstoreVerificationPolicyApplyConfiguration `json:"policy,omitempty"` } // ImagePolicySpecApplyConfiguration constructs a declarative configuration of the ImagePolicySpec type for use with @@ -32,7 +32,7 @@ func (b *ImagePolicySpecApplyConfiguration) WithScopes(values ...configv1.ImageS // WithPolicy sets the Policy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Policy field is set to the value of the last call. 
-func (b *ImagePolicySpecApplyConfiguration) WithPolicy(value *PolicyApplyConfiguration) *ImagePolicySpecApplyConfiguration { +func (b *ImagePolicySpecApplyConfiguration) WithPolicy(value *ImageSigstoreVerificationPolicyApplyConfiguration) *ImagePolicySpecApplyConfiguration { b.Policy = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagesigstoreverificationpolicy.go similarity index 52% rename from vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policy.go rename to vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagesigstoreverificationpolicy.go index 3e29510bf148..6f0d5d2e7cc5 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policy.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagesigstoreverificationpolicy.go @@ -2,23 +2,23 @@ package v1 -// PolicyApplyConfiguration represents a declarative configuration of the Policy type for use +// ImageSigstoreVerificationPolicyApplyConfiguration represents a declarative configuration of the ImageSigstoreVerificationPolicy type for use // with apply. -type PolicyApplyConfiguration struct { +type ImageSigstoreVerificationPolicyApplyConfiguration struct { RootOfTrust *PolicyRootOfTrustApplyConfiguration `json:"rootOfTrust,omitempty"` SignedIdentity *PolicyIdentityApplyConfiguration `json:"signedIdentity,omitempty"` } -// PolicyApplyConfiguration constructs a declarative configuration of the Policy type for use with +// ImageSigstoreVerificationPolicyApplyConfiguration constructs a declarative configuration of the ImageSigstoreVerificationPolicy type for use with // apply. -func Policy() *PolicyApplyConfiguration { - return &PolicyApplyConfiguration{} +func ImageSigstoreVerificationPolicy() *ImageSigstoreVerificationPolicyApplyConfiguration { + return &ImageSigstoreVerificationPolicyApplyConfiguration{} } // WithRootOfTrust sets the RootOfTrust field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the RootOfTrust field is set to the value of the last call. -func (b *PolicyApplyConfiguration) WithRootOfTrust(value *PolicyRootOfTrustApplyConfiguration) *PolicyApplyConfiguration { +func (b *ImageSigstoreVerificationPolicyApplyConfiguration) WithRootOfTrust(value *PolicyRootOfTrustApplyConfiguration) *ImageSigstoreVerificationPolicyApplyConfiguration { b.RootOfTrust = value return b } @@ -26,7 +26,7 @@ func (b *PolicyApplyConfiguration) WithRootOfTrust(value *PolicyRootOfTrustApply // WithSignedIdentity sets the SignedIdentity field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the SignedIdentity field is set to the value of the last call. 
-func (b *PolicyApplyConfiguration) WithSignedIdentity(value *PolicyIdentityApplyConfiguration) *PolicyApplyConfiguration { +func (b *ImageSigstoreVerificationPolicyApplyConfiguration) WithSignedIdentity(value *PolicyIdentityApplyConfiguration) *ImageSigstoreVerificationPolicyApplyConfiguration { b.SignedIdentity = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformstatus.go index d7988e5115eb..5c61ef980181 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformstatus.go @@ -2,6 +2,10 @@ package v1 +import ( + configv1 "github.com/openshift/api/config/v1" +) + // NutanixPlatformStatusApplyConfiguration represents a declarative configuration of the NutanixPlatformStatus type for use // with apply. type NutanixPlatformStatusApplyConfiguration struct { @@ -10,6 +14,7 @@ type NutanixPlatformStatusApplyConfiguration struct { IngressIP *string `json:"ingressIP,omitempty"` IngressIPs []string `json:"ingressIPs,omitempty"` LoadBalancer *NutanixPlatformLoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"` + DNSRecordsType *configv1.DNSRecordsType `json:"dnsRecordsType,omitempty"` } // NutanixPlatformStatusApplyConfiguration constructs a declarative configuration of the NutanixPlatformStatus type for use with @@ -61,3 +66,11 @@ func (b *NutanixPlatformStatusApplyConfiguration) WithLoadBalancer(value *Nutani b.LoadBalancer = value return b } + +// WithDNSRecordsType sets the DNSRecordsType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DNSRecordsType field is set to the value of the last call. +func (b *NutanixPlatformStatusApplyConfiguration) WithDNSRecordsType(value configv1.DNSRecordsType) *NutanixPlatformStatusApplyConfiguration { + b.DNSRecordsType = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformstatus.go index f06c78e24336..405276948985 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformstatus.go @@ -16,6 +16,7 @@ type OpenStackPlatformStatusApplyConfiguration struct { IngressIPs []string `json:"ingressIPs,omitempty"` NodeDNSIP *string `json:"nodeDNSIP,omitempty"` LoadBalancer *OpenStackPlatformLoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"` + DNSRecordsType *configv1.DNSRecordsType `json:"dnsRecordsType,omitempty"` MachineNetworks []configv1.CIDR `json:"machineNetworks,omitempty"` } @@ -85,6 +86,14 @@ func (b *OpenStackPlatformStatusApplyConfiguration) WithLoadBalancer(value *Open return b } +// WithDNSRecordsType sets the DNSRecordsType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DNSRecordsType field is set to the value of the last call. 
+func (b *OpenStackPlatformStatusApplyConfiguration) WithDNSRecordsType(value configv1.DNSRecordsType) *OpenStackPlatformStatusApplyConfiguration { + b.DNSRecordsType = &value + return b +} + // WithMachineNetworks adds the given value to the MachineNetworks field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the MachineNetworks field. diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformstatus.go index 18ad5d849201..dab2c7a101f6 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformstatus.go @@ -2,6 +2,10 @@ package v1 +import ( + configv1 "github.com/openshift/api/config/v1" +) + // OvirtPlatformStatusApplyConfiguration represents a declarative configuration of the OvirtPlatformStatus type for use // with apply. type OvirtPlatformStatusApplyConfiguration struct { @@ -11,6 +15,7 @@ type OvirtPlatformStatusApplyConfiguration struct { IngressIPs []string `json:"ingressIPs,omitempty"` NodeDNSIP *string `json:"nodeDNSIP,omitempty"` LoadBalancer *OvirtPlatformLoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"` + DNSRecordsType *configv1.DNSRecordsType `json:"dnsRecordsType,omitempty"` } // OvirtPlatformStatusApplyConfiguration constructs a declarative configuration of the OvirtPlatformStatus type for use with @@ -70,3 +75,11 @@ func (b *OvirtPlatformStatusApplyConfiguration) WithLoadBalancer(value *OvirtPla b.LoadBalancer = value return b } + +// WithDNSRecordsType sets the DNSRecordsType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DNSRecordsType field is set to the value of the last call. +func (b *OvirtPlatformStatusApplyConfiguration) WithDNSRecordsType(value configv1.DNSRecordsType) *OvirtPlatformStatusApplyConfiguration { + b.DNSRecordsType = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policyrootoftrust.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policyrootoftrust.go index f1ff91ffbd67..6b3e46f473c8 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policyrootoftrust.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policyrootoftrust.go @@ -9,10 +9,10 @@ import ( // PolicyRootOfTrustApplyConfiguration represents a declarative configuration of the PolicyRootOfTrust type for use // with apply. 
type PolicyRootOfTrustApplyConfiguration struct { - PolicyType *configv1.PolicyType `json:"policyType,omitempty"` - PublicKey *PublicKeyApplyConfiguration `json:"publicKey,omitempty"` - FulcioCAWithRekor *FulcioCAWithRekorApplyConfiguration `json:"fulcioCAWithRekor,omitempty"` - PKI *PKIApplyConfiguration `json:"pki,omitempty"` + PolicyType *configv1.PolicyType `json:"policyType,omitempty"` + PublicKey *ImagePolicyPublicKeyRootOfTrustApplyConfiguration `json:"publicKey,omitempty"` + FulcioCAWithRekor *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration `json:"fulcioCAWithRekor,omitempty"` + PKI *ImagePolicyPKIRootOfTrustApplyConfiguration `json:"pki,omitempty"` } // PolicyRootOfTrustApplyConfiguration constructs a declarative configuration of the PolicyRootOfTrust type for use with @@ -32,7 +32,7 @@ func (b *PolicyRootOfTrustApplyConfiguration) WithPolicyType(value configv1.Poli // WithPublicKey sets the PublicKey field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PublicKey field is set to the value of the last call. -func (b *PolicyRootOfTrustApplyConfiguration) WithPublicKey(value *PublicKeyApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { +func (b *PolicyRootOfTrustApplyConfiguration) WithPublicKey(value *ImagePolicyPublicKeyRootOfTrustApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { b.PublicKey = value return b } @@ -40,7 +40,7 @@ func (b *PolicyRootOfTrustApplyConfiguration) WithPublicKey(value *PublicKeyAppl // WithFulcioCAWithRekor sets the FulcioCAWithRekor field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FulcioCAWithRekor field is set to the value of the last call. -func (b *PolicyRootOfTrustApplyConfiguration) WithFulcioCAWithRekor(value *FulcioCAWithRekorApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { +func (b *PolicyRootOfTrustApplyConfiguration) WithFulcioCAWithRekor(value *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { b.FulcioCAWithRekor = value return b } @@ -48,7 +48,7 @@ func (b *PolicyRootOfTrustApplyConfiguration) WithFulcioCAWithRekor(value *Fulci // WithPKI sets the PKI field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PKI field is set to the value of the last call. 
-func (b *PolicyRootOfTrustApplyConfiguration) WithPKI(value *PKIApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { +func (b *PolicyRootOfTrustApplyConfiguration) WithPKI(value *ImagePolicyPKIRootOfTrustApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { b.PKI = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformstatus.go index 36696df716d2..a3cfc9b1c7c3 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformstatus.go @@ -15,6 +15,7 @@ type VSpherePlatformStatusApplyConfiguration struct { IngressIPs []string `json:"ingressIPs,omitempty"` NodeDNSIP *string `json:"nodeDNSIP,omitempty"` LoadBalancer *VSpherePlatformLoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"` + DNSRecordsType *configv1.DNSRecordsType `json:"dnsRecordsType,omitempty"` MachineNetworks []configv1.CIDR `json:"machineNetworks,omitempty"` } @@ -76,6 +77,14 @@ func (b *VSpherePlatformStatusApplyConfiguration) WithLoadBalancer(value *VSpher return b } +// WithDNSRecordsType sets the DNSRecordsType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DNSRecordsType field is set to the value of the last call. +func (b *VSpherePlatformStatusApplyConfiguration) WithDNSRecordsType(value configv1.DNSRecordsType) *VSpherePlatformStatusApplyConfiguration { + b.DNSRecordsType = &value + return b +} + // WithMachineNetworks adds the given value to the MachineNetworks field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the MachineNetworks field. diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicyspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicyspec.go index e4a3470c4573..e1c4c630ea30 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicyspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicyspec.go @@ -9,8 +9,8 @@ import ( // ClusterImagePolicySpecApplyConfiguration represents a declarative configuration of the ClusterImagePolicySpec type for use // with apply. type ClusterImagePolicySpecApplyConfiguration struct { - Scopes []configv1alpha1.ImageScope `json:"scopes,omitempty"` - Policy *PolicyApplyConfiguration `json:"policy,omitempty"` + Scopes []configv1alpha1.ImageScope `json:"scopes,omitempty"` + Policy *ImageSigstoreVerificationPolicyApplyConfiguration `json:"policy,omitempty"` } // ClusterImagePolicySpecApplyConfiguration constructs a declarative configuration of the ClusterImagePolicySpec type for use with @@ -32,7 +32,7 @@ func (b *ClusterImagePolicySpecApplyConfiguration) WithScopes(values ...configv1 // WithPolicy sets the Policy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Policy field is set to the value of the last call. -func (b *ClusterImagePolicySpecApplyConfiguration) WithPolicy(value *PolicyApplyConfiguration) *ClusterImagePolicySpecApplyConfiguration { +func (b *ClusterImagePolicySpecApplyConfiguration) WithPolicy(value *ImageSigstoreVerificationPolicyApplyConfiguration) *ClusterImagePolicySpecApplyConfiguration { b.Policy = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/fulciocawithrekor.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyfulciocawithrekorrootoftrust.go similarity index 57% rename from vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/fulciocawithrekor.go rename to vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyfulciocawithrekorrootoftrust.go index 2a907a7e9729..2fcaa362153e 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/fulciocawithrekor.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyfulciocawithrekorrootoftrust.go @@ -2,24 +2,24 @@ package v1alpha1 -// FulcioCAWithRekorApplyConfiguration represents a declarative configuration of the FulcioCAWithRekor type for use +// ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration represents a declarative configuration of the ImagePolicyFulcioCAWithRekorRootOfTrust type for use // with apply. -type FulcioCAWithRekorApplyConfiguration struct { +type ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration struct { FulcioCAData []byte `json:"fulcioCAData,omitempty"` RekorKeyData []byte `json:"rekorKeyData,omitempty"` FulcioSubject *PolicyFulcioSubjectApplyConfiguration `json:"fulcioSubject,omitempty"` } -// FulcioCAWithRekorApplyConfiguration constructs a declarative configuration of the FulcioCAWithRekor type for use with +// ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration constructs a declarative configuration of the ImagePolicyFulcioCAWithRekorRootOfTrust type for use with // apply. -func FulcioCAWithRekor() *FulcioCAWithRekorApplyConfiguration { - return &FulcioCAWithRekorApplyConfiguration{} +func ImagePolicyFulcioCAWithRekorRootOfTrust() *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration { + return &ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration{} } // WithFulcioCAData adds the given value to the FulcioCAData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the FulcioCAData field. -func (b *FulcioCAWithRekorApplyConfiguration) WithFulcioCAData(values ...byte) *FulcioCAWithRekorApplyConfiguration { +func (b *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration) WithFulcioCAData(values ...byte) *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration { for i := range values { b.FulcioCAData = append(b.FulcioCAData, values[i]) } @@ -29,7 +29,7 @@ func (b *FulcioCAWithRekorApplyConfiguration) WithFulcioCAData(values ...byte) * // WithRekorKeyData adds the given value to the RekorKeyData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the RekorKeyData field. 
-func (b *FulcioCAWithRekorApplyConfiguration) WithRekorKeyData(values ...byte) *FulcioCAWithRekorApplyConfiguration { +func (b *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration) WithRekorKeyData(values ...byte) *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration { for i := range values { b.RekorKeyData = append(b.RekorKeyData, values[i]) } @@ -39,7 +39,7 @@ func (b *FulcioCAWithRekorApplyConfiguration) WithRekorKeyData(values ...byte) * // WithFulcioSubject sets the FulcioSubject field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FulcioSubject field is set to the value of the last call. -func (b *FulcioCAWithRekorApplyConfiguration) WithFulcioSubject(value *PolicyFulcioSubjectApplyConfiguration) *FulcioCAWithRekorApplyConfiguration { +func (b *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration) WithFulcioSubject(value *PolicyFulcioSubjectApplyConfiguration) *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration { b.FulcioSubject = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/pki.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicypkirootoftrust.go similarity index 65% rename from vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/pki.go rename to vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicypkirootoftrust.go index 455abe02a2d9..a218867ea918 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/pki.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicypkirootoftrust.go @@ -2,24 +2,24 @@ package v1alpha1 -// PKIApplyConfiguration represents a declarative configuration of the PKI type for use +// ImagePolicyPKIRootOfTrustApplyConfiguration represents a declarative configuration of the ImagePolicyPKIRootOfTrust type for use // with apply. -type PKIApplyConfiguration struct { +type ImagePolicyPKIRootOfTrustApplyConfiguration struct { CertificateAuthorityRootsData []byte `json:"caRootsData,omitempty"` CertificateAuthorityIntermediatesData []byte `json:"caIntermediatesData,omitempty"` PKICertificateSubject *PKICertificateSubjectApplyConfiguration `json:"pkiCertificateSubject,omitempty"` } -// PKIApplyConfiguration constructs a declarative configuration of the PKI type for use with +// ImagePolicyPKIRootOfTrustApplyConfiguration constructs a declarative configuration of the ImagePolicyPKIRootOfTrust type for use with // apply. -func PKI() *PKIApplyConfiguration { - return &PKIApplyConfiguration{} +func ImagePolicyPKIRootOfTrust() *ImagePolicyPKIRootOfTrustApplyConfiguration { + return &ImagePolicyPKIRootOfTrustApplyConfiguration{} } // WithCertificateAuthorityRootsData adds the given value to the CertificateAuthorityRootsData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the CertificateAuthorityRootsData field. 
-func (b *PKIApplyConfiguration) WithCertificateAuthorityRootsData(values ...byte) *PKIApplyConfiguration { +func (b *ImagePolicyPKIRootOfTrustApplyConfiguration) WithCertificateAuthorityRootsData(values ...byte) *ImagePolicyPKIRootOfTrustApplyConfiguration { for i := range values { b.CertificateAuthorityRootsData = append(b.CertificateAuthorityRootsData, values[i]) } @@ -29,7 +29,7 @@ func (b *PKIApplyConfiguration) WithCertificateAuthorityRootsData(values ...byte // WithCertificateAuthorityIntermediatesData adds the given value to the CertificateAuthorityIntermediatesData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the CertificateAuthorityIntermediatesData field. -func (b *PKIApplyConfiguration) WithCertificateAuthorityIntermediatesData(values ...byte) *PKIApplyConfiguration { +func (b *ImagePolicyPKIRootOfTrustApplyConfiguration) WithCertificateAuthorityIntermediatesData(values ...byte) *ImagePolicyPKIRootOfTrustApplyConfiguration { for i := range values { b.CertificateAuthorityIntermediatesData = append(b.CertificateAuthorityIntermediatesData, values[i]) } @@ -39,7 +39,7 @@ func (b *PKIApplyConfiguration) WithCertificateAuthorityIntermediatesData(values // WithPKICertificateSubject sets the PKICertificateSubject field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PKICertificateSubject field is set to the value of the last call. -func (b *PKIApplyConfiguration) WithPKICertificateSubject(value *PKICertificateSubjectApplyConfiguration) *PKIApplyConfiguration { +func (b *ImagePolicyPKIRootOfTrustApplyConfiguration) WithPKICertificateSubject(value *PKICertificateSubjectApplyConfiguration) *ImagePolicyPKIRootOfTrustApplyConfiguration { b.PKICertificateSubject = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/publickey.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicypublickeyrootoftrust.go similarity index 54% rename from vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/publickey.go rename to vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicypublickeyrootoftrust.go index 91665a90b740..22513de62844 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/publickey.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicypublickeyrootoftrust.go @@ -2,23 +2,23 @@ package v1alpha1 -// PublicKeyApplyConfiguration represents a declarative configuration of the PublicKey type for use +// ImagePolicyPublicKeyRootOfTrustApplyConfiguration represents a declarative configuration of the ImagePolicyPublicKeyRootOfTrust type for use // with apply. -type PublicKeyApplyConfiguration struct { +type ImagePolicyPublicKeyRootOfTrustApplyConfiguration struct { KeyData []byte `json:"keyData,omitempty"` RekorKeyData []byte `json:"rekorKeyData,omitempty"` } -// PublicKeyApplyConfiguration constructs a declarative configuration of the PublicKey type for use with +// ImagePolicyPublicKeyRootOfTrustApplyConfiguration constructs a declarative configuration of the ImagePolicyPublicKeyRootOfTrust type for use with // apply. 
-func PublicKey() *PublicKeyApplyConfiguration { - return &PublicKeyApplyConfiguration{} +func ImagePolicyPublicKeyRootOfTrust() *ImagePolicyPublicKeyRootOfTrustApplyConfiguration { + return &ImagePolicyPublicKeyRootOfTrustApplyConfiguration{} } // WithKeyData adds the given value to the KeyData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the KeyData field. -func (b *PublicKeyApplyConfiguration) WithKeyData(values ...byte) *PublicKeyApplyConfiguration { +func (b *ImagePolicyPublicKeyRootOfTrustApplyConfiguration) WithKeyData(values ...byte) *ImagePolicyPublicKeyRootOfTrustApplyConfiguration { for i := range values { b.KeyData = append(b.KeyData, values[i]) } @@ -28,7 +28,7 @@ func (b *PublicKeyApplyConfiguration) WithKeyData(values ...byte) *PublicKeyAppl // WithRekorKeyData adds the given value to the RekorKeyData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the RekorKeyData field. -func (b *PublicKeyApplyConfiguration) WithRekorKeyData(values ...byte) *PublicKeyApplyConfiguration { +func (b *ImagePolicyPublicKeyRootOfTrustApplyConfiguration) WithRekorKeyData(values ...byte) *ImagePolicyPublicKeyRootOfTrustApplyConfiguration { for i := range values { b.RekorKeyData = append(b.RekorKeyData, values[i]) } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyspec.go index ac08e9cf4e53..84969b600dde 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyspec.go @@ -9,8 +9,8 @@ import ( // ImagePolicySpecApplyConfiguration represents a declarative configuration of the ImagePolicySpec type for use // with apply. type ImagePolicySpecApplyConfiguration struct { - Scopes []configv1alpha1.ImageScope `json:"scopes,omitempty"` - Policy *PolicyApplyConfiguration `json:"policy,omitempty"` + Scopes []configv1alpha1.ImageScope `json:"scopes,omitempty"` + Policy *ImageSigstoreVerificationPolicyApplyConfiguration `json:"policy,omitempty"` } // ImagePolicySpecApplyConfiguration constructs a declarative configuration of the ImagePolicySpec type for use with @@ -32,7 +32,7 @@ func (b *ImagePolicySpecApplyConfiguration) WithScopes(values ...configv1alpha1. // WithPolicy sets the Policy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Policy field is set to the value of the last call. 
-func (b *ImagePolicySpecApplyConfiguration) WithPolicy(value *PolicyApplyConfiguration) *ImagePolicySpecApplyConfiguration { +func (b *ImagePolicySpecApplyConfiguration) WithPolicy(value *ImageSigstoreVerificationPolicyApplyConfiguration) *ImagePolicySpecApplyConfiguration { b.Policy = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagesigstoreverificationpolicy.go similarity index 52% rename from vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policy.go rename to vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagesigstoreverificationpolicy.go index 61e485664294..64f9760e8b62 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policy.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagesigstoreverificationpolicy.go @@ -2,23 +2,23 @@ package v1alpha1 -// PolicyApplyConfiguration represents a declarative configuration of the Policy type for use +// ImageSigstoreVerificationPolicyApplyConfiguration represents a declarative configuration of the ImageSigstoreVerificationPolicy type for use // with apply. -type PolicyApplyConfiguration struct { +type ImageSigstoreVerificationPolicyApplyConfiguration struct { RootOfTrust *PolicyRootOfTrustApplyConfiguration `json:"rootOfTrust,omitempty"` SignedIdentity *PolicyIdentityApplyConfiguration `json:"signedIdentity,omitempty"` } -// PolicyApplyConfiguration constructs a declarative configuration of the Policy type for use with +// ImageSigstoreVerificationPolicyApplyConfiguration constructs a declarative configuration of the ImageSigstoreVerificationPolicy type for use with // apply. -func Policy() *PolicyApplyConfiguration { - return &PolicyApplyConfiguration{} +func ImageSigstoreVerificationPolicy() *ImageSigstoreVerificationPolicyApplyConfiguration { + return &ImageSigstoreVerificationPolicyApplyConfiguration{} } // WithRootOfTrust sets the RootOfTrust field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the RootOfTrust field is set to the value of the last call. -func (b *PolicyApplyConfiguration) WithRootOfTrust(value *PolicyRootOfTrustApplyConfiguration) *PolicyApplyConfiguration { +func (b *ImageSigstoreVerificationPolicyApplyConfiguration) WithRootOfTrust(value *PolicyRootOfTrustApplyConfiguration) *ImageSigstoreVerificationPolicyApplyConfiguration { b.RootOfTrust = value return b } @@ -26,7 +26,7 @@ func (b *PolicyApplyConfiguration) WithRootOfTrust(value *PolicyRootOfTrustApply // WithSignedIdentity sets the SignedIdentity field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the SignedIdentity field is set to the value of the last call. 
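For callers migrating off the removed Policy/PublicKey builders, a minimal sketch of the renamed v1alpha1 apply-configuration builders is below. Only the constructors and With* signatures visible in this diff are relied on; the scope, key bytes, and the use of the raw "PublicKey" discriminator value (rather than a named PolicyType constant) are illustrative assumptions.

package example

import (
	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
	applyconfigv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1"
)

// imagePolicySpec builds an ImagePolicySpec apply configuration with a
// public-key root of trust, using the renamed builders from this change.
func imagePolicySpec(pubKeyPEM []byte) *applyconfigv1alpha1.ImagePolicySpecApplyConfiguration {
	rootOfTrust := applyconfigv1alpha1.PolicyRootOfTrust().
		WithPolicyType(configv1alpha1.PolicyType("PublicKey")). // union discriminator value, per the schema below
		WithPublicKey(applyconfigv1alpha1.ImagePolicyPublicKeyRootOfTrust().
			WithKeyData(pubKeyPEM...)) // WithKeyData is variadic over bytes

	// Formerly Policy(); WithPolicy on ImagePolicySpec now takes this type.
	policy := applyconfigv1alpha1.ImageSigstoreVerificationPolicy().
		WithRootOfTrust(rootOfTrust)

	return applyconfigv1alpha1.ImagePolicySpec().
		WithScopes(configv1alpha1.ImageScope("registry.example.com/myorg")).
		WithPolicy(policy)
}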
-func (b *PolicyApplyConfiguration) WithSignedIdentity(value *PolicyIdentityApplyConfiguration) *PolicyApplyConfiguration { +func (b *ImageSigstoreVerificationPolicyApplyConfiguration) WithSignedIdentity(value *PolicyIdentityApplyConfiguration) *ImageSigstoreVerificationPolicyApplyConfiguration { b.SignedIdentity = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyrootoftrust.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyrootoftrust.go index 5de792be6381..5122c82e0bae 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyrootoftrust.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyrootoftrust.go @@ -9,10 +9,10 @@ import ( // PolicyRootOfTrustApplyConfiguration represents a declarative configuration of the PolicyRootOfTrust type for use // with apply. type PolicyRootOfTrustApplyConfiguration struct { - PolicyType *configv1alpha1.PolicyType `json:"policyType,omitempty"` - PublicKey *PublicKeyApplyConfiguration `json:"publicKey,omitempty"` - FulcioCAWithRekor *FulcioCAWithRekorApplyConfiguration `json:"fulcioCAWithRekor,omitempty"` - PKI *PKIApplyConfiguration `json:"pki,omitempty"` + PolicyType *configv1alpha1.PolicyType `json:"policyType,omitempty"` + PublicKey *ImagePolicyPublicKeyRootOfTrustApplyConfiguration `json:"publicKey,omitempty"` + FulcioCAWithRekor *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration `json:"fulcioCAWithRekor,omitempty"` + PKI *ImagePolicyPKIRootOfTrustApplyConfiguration `json:"pki,omitempty"` } // PolicyRootOfTrustApplyConfiguration constructs a declarative configuration of the PolicyRootOfTrust type for use with @@ -32,7 +32,7 @@ func (b *PolicyRootOfTrustApplyConfiguration) WithPolicyType(value configv1alpha // WithPublicKey sets the PublicKey field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PublicKey field is set to the value of the last call. -func (b *PolicyRootOfTrustApplyConfiguration) WithPublicKey(value *PublicKeyApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { +func (b *PolicyRootOfTrustApplyConfiguration) WithPublicKey(value *ImagePolicyPublicKeyRootOfTrustApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { b.PublicKey = value return b } @@ -40,7 +40,7 @@ func (b *PolicyRootOfTrustApplyConfiguration) WithPublicKey(value *PublicKeyAppl // WithFulcioCAWithRekor sets the FulcioCAWithRekor field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FulcioCAWithRekor field is set to the value of the last call. -func (b *PolicyRootOfTrustApplyConfiguration) WithFulcioCAWithRekor(value *FulcioCAWithRekorApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { +func (b *PolicyRootOfTrustApplyConfiguration) WithFulcioCAWithRekor(value *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { b.FulcioCAWithRekor = value return b } @@ -48,7 +48,7 @@ func (b *PolicyRootOfTrustApplyConfiguration) WithFulcioCAWithRekor(value *Fulci // WithPKI sets the PKI field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the PKI field is set to the value of the last call. -func (b *PolicyRootOfTrustApplyConfiguration) WithPKI(value *PKIApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { +func (b *PolicyRootOfTrustApplyConfiguration) WithPKI(value *ImagePolicyPKIRootOfTrustApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { b.PKI = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go index de325e3678eb..f00417a5c6e5 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go @@ -164,6 +164,10 @@ var schemaYAML = typed.YAMLObject(`types: namedType: com.github.openshift.api.config.v1.CloudLoadBalancerConfig default: dnsType: PlatformDefault + - name: ipFamily + type: + scalar: string + default: IPv4 - name: region type: scalar: string @@ -363,6 +367,10 @@ var schemaYAML = typed.YAMLObject(`types: - name: cloudName type: scalar: string + - name: ipFamily + type: + scalar: string + default: IPv4 - name: networkResourceGroupName type: scalar: string @@ -429,6 +437,9 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic + - name: dnsRecordsType + type: + scalar: string - name: ingressIP type: scalar: string @@ -630,7 +641,7 @@ var schemaYAML = typed.YAMLObject(`types: fields: - name: policy type: - namedType: com.github.openshift.api.config.v1.Policy + namedType: com.github.openshift.api.config.v1.ImageSigstoreVerificationPolicy default: {} - name: scopes type: @@ -1329,19 +1340,6 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - version -- name: com.github.openshift.api.config.v1.FulcioCAWithRekor - map: - fields: - - name: fulcioCAData - type: - scalar: string - - name: fulcioSubject - type: - namedType: com.github.openshift.api.config.v1.PolicyFulcioSubject - default: {} - - name: rekorKeyData - type: - scalar: string - name: com.github.openshift.api.config.v1.GCPPlatformSpec map: elementType: @@ -1386,14 +1384,6 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - key - - name: serviceEndpoints - type: - list: - elementType: - namedType: com.github.openshift.api.config.v1.GCPServiceEndpoint - elementRelationship: associative - keys: - - name - name: com.github.openshift.api.config.v1.GCPResourceLabel map: fields: @@ -1420,17 +1410,6 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: com.github.openshift.api.config.v1.GCPServiceEndpoint - map: - fields: - - name: name - type: - scalar: string - default: "" - - name: url - type: - scalar: string - default: "" - name: com.github.openshift.api.config.v1.GatherConfig map: fields: @@ -1791,12 +1770,47 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: com.github.openshift.api.config.v1.ImagePolicyStatus default: {} +- name: com.github.openshift.api.config.v1.ImagePolicyFulcioCAWithRekorRootOfTrust + map: + fields: + - name: fulcioCAData + type: + scalar: string + - name: fulcioSubject + type: + namedType: com.github.openshift.api.config.v1.PolicyFulcioSubject + default: {} + - name: rekorKeyData + type: + scalar: string +- name: com.github.openshift.api.config.v1.ImagePolicyPKIRootOfTrust + map: + fields: + - name: caIntermediatesData + type: + scalar: string + - name: caRootsData + 
type: + scalar: string + - name: pkiCertificateSubject + type: + namedType: com.github.openshift.api.config.v1.PKICertificateSubject + default: {} +- name: com.github.openshift.api.config.v1.ImagePolicyPublicKeyRootOfTrust + map: + fields: + - name: keyData + type: + scalar: string + - name: rekorKeyData + type: + scalar: string - name: com.github.openshift.api.config.v1.ImagePolicySpec map: fields: - name: policy type: - namedType: com.github.openshift.api.config.v1.Policy + namedType: com.github.openshift.api.config.v1.ImageSigstoreVerificationPolicy default: {} - name: scopes type: @@ -1815,6 +1829,16 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - type +- name: com.github.openshift.api.config.v1.ImageSigstoreVerificationPolicy + map: + fields: + - name: rootOfTrust + type: + namedType: com.github.openshift.api.config.v1.PolicyRootOfTrust + default: {} + - name: signedIdentity + type: + namedType: com.github.openshift.api.config.v1.PolicyIdentity - name: com.github.openshift.api.config.v1.ImageSpec map: fields: @@ -2499,6 +2523,9 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative + - name: dnsRecordsType + type: + scalar: string - name: ingressIP type: scalar: string @@ -2860,6 +2887,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: cloudName type: scalar: string + - name: dnsRecordsType + type: + scalar: string - name: ingressIP type: scalar: string @@ -2969,6 +2999,9 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative + - name: dnsRecordsType + type: + scalar: string - name: ingressIP type: scalar: string @@ -2986,19 +3019,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: nodeDNSIP type: scalar: string -- name: com.github.openshift.api.config.v1.PKI - map: - fields: - - name: caIntermediatesData - type: - scalar: string - - name: caRootsData - type: - scalar: string - - name: pkiCertificateSubject - type: - namedType: com.github.openshift.api.config.v1.PKICertificateSubject - default: {} - name: com.github.openshift.api.config.v1.PKICertificateSubject map: fields: @@ -3122,16 +3142,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: vsphere type: namedType: com.github.openshift.api.config.v1.VSpherePlatformStatus -- name: com.github.openshift.api.config.v1.Policy - map: - fields: - - name: rootOfTrust - type: - namedType: com.github.openshift.api.config.v1.PolicyRootOfTrust - default: {} - - name: signedIdentity - type: - namedType: com.github.openshift.api.config.v1.PolicyIdentity - name: com.github.openshift.api.config.v1.PolicyFulcioSubject map: fields: @@ -3186,17 +3196,17 @@ var schemaYAML = typed.YAMLObject(`types: fields: - name: fulcioCAWithRekor type: - namedType: com.github.openshift.api.config.v1.FulcioCAWithRekor + namedType: com.github.openshift.api.config.v1.ImagePolicyFulcioCAWithRekorRootOfTrust - name: pki type: - namedType: com.github.openshift.api.config.v1.PKI + namedType: com.github.openshift.api.config.v1.ImagePolicyPKIRootOfTrust - name: policyType type: scalar: string default: "" - name: publicKey type: - namedType: com.github.openshift.api.config.v1.PublicKey + namedType: com.github.openshift.api.config.v1.ImagePolicyPublicKeyRootOfTrust unions: - discriminator: policyType fields: @@ -3381,15 +3391,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: noProxy type: scalar: string -- name: com.github.openshift.api.config.v1.PublicKey - map: - fields: - - name: keyData - type: - scalar: string - - name: 
rekorKeyData - type: - scalar: string - name: com.github.openshift.api.config.v1.RegistryLocation map: fields: @@ -3970,6 +3971,9 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic + - name: dnsRecordsType + type: + scalar: string - name: ingressIP type: scalar: string @@ -4171,7 +4175,7 @@ var schemaYAML = typed.YAMLObject(`types: fields: - name: policy type: - namedType: com.github.openshift.api.config.v1alpha1.Policy + namedType: com.github.openshift.api.config.v1alpha1.ImageSigstoreVerificationPolicy default: {} - name: scopes type: @@ -4269,19 +4273,6 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: com.github.openshift.api.config.v1alpha1.FulcioCAWithRekor - map: - fields: - - name: fulcioCAData - type: - scalar: string - - name: fulcioSubject - type: - namedType: com.github.openshift.api.config.v1alpha1.PolicyFulcioSubject - default: {} - - name: rekorKeyData - type: - scalar: string - name: com.github.openshift.api.config.v1alpha1.GatherConfig map: fields: @@ -4318,12 +4309,47 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: com.github.openshift.api.config.v1alpha1.ImagePolicyStatus default: {} +- name: com.github.openshift.api.config.v1alpha1.ImagePolicyFulcioCAWithRekorRootOfTrust + map: + fields: + - name: fulcioCAData + type: + scalar: string + - name: fulcioSubject + type: + namedType: com.github.openshift.api.config.v1alpha1.PolicyFulcioSubject + default: {} + - name: rekorKeyData + type: + scalar: string +- name: com.github.openshift.api.config.v1alpha1.ImagePolicyPKIRootOfTrust + map: + fields: + - name: caIntermediatesData + type: + scalar: string + - name: caRootsData + type: + scalar: string + - name: pkiCertificateSubject + type: + namedType: com.github.openshift.api.config.v1alpha1.PKICertificateSubject + default: {} +- name: com.github.openshift.api.config.v1alpha1.ImagePolicyPublicKeyRootOfTrust + map: + fields: + - name: keyData + type: + scalar: string + - name: rekorKeyData + type: + scalar: string - name: com.github.openshift.api.config.v1alpha1.ImagePolicySpec map: fields: - name: policy type: - namedType: com.github.openshift.api.config.v1alpha1.Policy + namedType: com.github.openshift.api.config.v1alpha1.ImageSigstoreVerificationPolicy default: {} - name: scopes type: @@ -4342,6 +4368,17 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - type +- name: com.github.openshift.api.config.v1alpha1.ImageSigstoreVerificationPolicy + map: + fields: + - name: rootOfTrust + type: + namedType: com.github.openshift.api.config.v1alpha1.PolicyRootOfTrust + default: {} + - name: signedIdentity + type: + namedType: com.github.openshift.api.config.v1alpha1.PolicyIdentity + default: {} - name: com.github.openshift.api.config.v1alpha1.InsightsDataGather map: fields: @@ -4420,19 +4457,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: verbosity type: scalar: string -- name: com.github.openshift.api.config.v1alpha1.PKI - map: - fields: - - name: caIntermediatesData - type: - scalar: string - - name: caRootsData - type: - scalar: string - - name: pkiCertificateSubject - type: - namedType: com.github.openshift.api.config.v1alpha1.PKICertificateSubject - default: {} - name: com.github.openshift.api.config.v1alpha1.PKICertificateSubject map: fields: @@ -4459,17 +4483,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: mountPath type: scalar: string -- name: com.github.openshift.api.config.v1alpha1.Policy - map: - fields: - - name: rootOfTrust - 
type: - namedType: com.github.openshift.api.config.v1alpha1.PolicyRootOfTrust - default: {} - - name: signedIdentity - type: - namedType: com.github.openshift.api.config.v1alpha1.PolicyIdentity - default: {} - name: com.github.openshift.api.config.v1alpha1.PolicyFulcioSubject map: fields: @@ -4524,17 +4537,17 @@ var schemaYAML = typed.YAMLObject(`types: fields: - name: fulcioCAWithRekor type: - namedType: com.github.openshift.api.config.v1alpha1.FulcioCAWithRekor + namedType: com.github.openshift.api.config.v1alpha1.ImagePolicyFulcioCAWithRekorRootOfTrust - name: pki type: - namedType: com.github.openshift.api.config.v1alpha1.PKI + namedType: com.github.openshift.api.config.v1alpha1.ImagePolicyPKIRootOfTrust - name: policyType type: scalar: string default: "" - name: publicKey type: - namedType: com.github.openshift.api.config.v1alpha1.PublicKey + namedType: com.github.openshift.api.config.v1alpha1.ImagePolicyPublicKeyRootOfTrust unions: - discriminator: policyType fields: @@ -4544,15 +4557,6 @@ var schemaYAML = typed.YAMLObject(`types: discriminatorValue: PKI - fieldName: publicKey discriminatorValue: PublicKey -- name: com.github.openshift.api.config.v1alpha1.PublicKey - map: - fields: - - name: keyData - type: - scalar: string - - name: rekorKeyData - type: - scalar: string - name: com.github.openshift.api.config.v1alpha1.RetentionNumberConfig map: fields: diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/utils.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/utils.go index a0fd1005ca70..99cad8e204e1 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/utils.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/utils.go @@ -172,8 +172,6 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1.FeatureGateSpecApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("FeatureGateStatus"): return &configv1.FeatureGateStatusApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("FulcioCAWithRekor"): - return &configv1.FulcioCAWithRekorApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("GatherConfig"): return &configv1.GatherConfigApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("GathererConfig"): @@ -186,8 +184,6 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1.GCPResourceLabelApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("GCPResourceTag"): return &configv1.GCPResourceTagApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("GCPServiceEndpoint"): - return &configv1.GCPServiceEndpointApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("GitHubIdentityProvider"): return &configv1.GitHubIdentityProviderApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("GitLabIdentityProvider"): @@ -226,10 +222,18 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1.ImageLabelApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("ImagePolicy"): return &configv1.ImagePolicyApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImagePolicyFulcioCAWithRekorRootOfTrust"): + return &configv1.ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImagePolicyPKIRootOfTrust"): + return &configv1.ImagePolicyPKIRootOfTrustApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImagePolicyPublicKeyRootOfTrust"): + return &configv1.ImagePolicyPublicKeyRootOfTrustApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("ImagePolicySpec"): return 
&configv1.ImagePolicySpecApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("ImagePolicyStatus"): return &configv1.ImagePolicyStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImageSigstoreVerificationPolicy"): + return &configv1.ImageSigstoreVerificationPolicyApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("ImageSpec"): return &configv1.ImageSpecApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("ImageStatus"): @@ -354,16 +358,12 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1.PersistentVolumeClaimReferenceApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("PersistentVolumeConfig"): return &configv1.PersistentVolumeConfigApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("PKI"): - return &configv1.PKIApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("PKICertificateSubject"): return &configv1.PKICertificateSubjectApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("PlatformSpec"): return &configv1.PlatformSpecApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("PlatformStatus"): return &configv1.PlatformStatusApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("Policy"): - return &configv1.PolicyApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("PolicyFulcioSubject"): return &configv1.PolicyFulcioSubjectApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("PolicyIdentity"): @@ -396,8 +396,6 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1.ProxySpecApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("ProxyStatus"): return &configv1.ProxyStatusApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("PublicKey"): - return &configv1.PublicKeyApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("RegistryLocation"): return &configv1.RegistryLocationApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("RegistrySources"): @@ -498,16 +496,22 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1alpha1.ContainerResourceApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("EtcdBackupSpec"): return &configv1alpha1.EtcdBackupSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("FulcioCAWithRekor"): - return &configv1alpha1.FulcioCAWithRekorApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("GatherConfig"): return &configv1alpha1.GatherConfigApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("ImagePolicy"): return &configv1alpha1.ImagePolicyApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ImagePolicyFulcioCAWithRekorRootOfTrust"): + return &configv1alpha1.ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ImagePolicyPKIRootOfTrust"): + return &configv1alpha1.ImagePolicyPKIRootOfTrustApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ImagePolicyPublicKeyRootOfTrust"): + return &configv1alpha1.ImagePolicyPublicKeyRootOfTrustApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("ImagePolicySpec"): return &configv1alpha1.ImagePolicySpecApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("ImagePolicyStatus"): return &configv1alpha1.ImagePolicyStatusApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ImageSigstoreVerificationPolicy"): + return &configv1alpha1.ImageSigstoreVerificationPolicyApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("InsightsDataGather"): return &configv1alpha1.InsightsDataGatherApplyConfiguration{} case 
v1alpha1.SchemeGroupVersion.WithKind("InsightsDataGatherSpec"): @@ -518,12 +522,8 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1alpha1.PersistentVolumeClaimReferenceApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("PersistentVolumeConfig"): return &configv1alpha1.PersistentVolumeConfigApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("PKI"): - return &configv1alpha1.PKIApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("PKICertificateSubject"): return &configv1alpha1.PKICertificateSubjectApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("Policy"): - return &configv1alpha1.PolicyApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("PolicyFulcioSubject"): return &configv1alpha1.PolicyFulcioSubjectApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("PolicyIdentity"): @@ -534,8 +534,6 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1alpha1.PolicyMatchRemapIdentityApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("PolicyRootOfTrust"): return &configv1alpha1.PolicyRootOfTrustApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("PublicKey"): - return &configv1alpha1.PublicKeyApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("RetentionNumberConfig"): return &configv1alpha1.RetentionNumberConfigApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("RetentionPolicy"): diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal/internal.go index dfbf8d8a961e..e33d9fa11e83 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal/internal.go @@ -113,6 +113,80 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: __untyped_deduced_ elementRelationship: separable +- name: com.github.openshift.api.machineconfiguration.v1alpha1.InternalReleaseImage + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.machineconfiguration.v1alpha1.InternalReleaseImageSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.machineconfiguration.v1alpha1.InternalReleaseImageStatus + default: {} +- name: com.github.openshift.api.machineconfiguration.v1alpha1.InternalReleaseImageBundleStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: image + type: + scalar: string + - name: name + type: + scalar: string +- name: com.github.openshift.api.machineconfiguration.v1alpha1.InternalReleaseImageRef + map: + fields: + - name: name + type: + scalar: string +- name: com.github.openshift.api.machineconfiguration.v1alpha1.InternalReleaseImageSpec + map: + fields: + - name: releases + type: + list: + elementType: + namedType: com.github.openshift.api.machineconfiguration.v1alpha1.InternalReleaseImageRef + elementRelationship: associative + keys: + - name +- name: com.github.openshift.api.machineconfiguration.v1alpha1.InternalReleaseImageStatus + map: + fields: + - name: conditions + 
type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: releases + type: + list: + elementType: + namedType: com.github.openshift.api.machineconfiguration.v1alpha1.InternalReleaseImageBundleStatus + elementRelationship: associative + keys: + - name - name: com.github.openshift.api.machineconfiguration.v1alpha1.MCOObjectReference map: fields: @@ -219,13 +293,70 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: com.github.openshift.api.machineconfiguration.v1alpha1.OSImageStream + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.machineconfiguration.v1alpha1.OSImageStreamSpec + - name: status + type: + namedType: com.github.openshift.api.machineconfiguration.v1alpha1.OSImageStreamStatus + default: {} +- name: com.github.openshift.api.machineconfiguration.v1alpha1.OSImageStreamSet + map: + fields: + - name: name + type: + scalar: string + - name: osExtensionsImage + type: + scalar: string + - name: osImage + type: + scalar: string +- name: com.github.openshift.api.machineconfiguration.v1alpha1.OSImageStreamSpec + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.machineconfiguration.v1alpha1.OSImageStreamStatus + map: + fields: + - name: availableStreams + type: + list: + elementType: + namedType: com.github.openshift.api.machineconfiguration.v1alpha1.OSImageStreamSet + elementRelationship: associative + keys: + - name + - name: defaultStream + type: + scalar: string - name: com.github.openshift.api.machineconfiguration.v1alpha1.PinnedImageRef map: fields: - name: name type: scalar: string - default: "" - name: com.github.openshift.api.machineconfiguration.v1alpha1.PinnedImageSet map: fields: diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodestatus.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodestatus.go index 0052db33ea53..a96fd0e2e526 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodestatus.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodestatus.go @@ -15,6 +15,7 @@ type MachineConfigNodeStatusApplyConfiguration struct { ConfigImage *MachineConfigNodeStatusConfigImageApplyConfiguration `json:"configImage,omitempty"` PinnedImageSets []MachineConfigNodeStatusPinnedImageSetApplyConfiguration `json:"pinnedImageSets,omitempty"` IrreconcilableChanges []IrreconcilableChangeDiffApplyConfiguration `json:"irreconcilableChanges,omitempty"` + InternalReleaseImage *MachineConfigNodeStatusInternalReleaseImageApplyConfiguration `json:"internalReleaseImage,omitempty"` } // MachineConfigNodeStatusApplyConfiguration constructs a declarative configuration of the MachineConfigNodeStatus type for use with @@ -85,3 +86,11 @@ func (b *MachineConfigNodeStatusApplyConfiguration) WithIrreconcilableChanges(va } return b } + +// WithInternalReleaseImage sets the 
InternalReleaseImage field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the InternalReleaseImage field is set to the value of the last call. +func (b *MachineConfigNodeStatusApplyConfiguration) WithInternalReleaseImage(value *MachineConfigNodeStatusInternalReleaseImageApplyConfiguration) *MachineConfigNodeStatusApplyConfiguration { + b.InternalReleaseImage = value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodestatusinternalreleaseimage.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodestatusinternalreleaseimage.go new file mode 100644 index 000000000000..e9f40f6d3bcc --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodestatusinternalreleaseimage.go @@ -0,0 +1,28 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// MachineConfigNodeStatusInternalReleaseImageApplyConfiguration represents a declarative configuration of the MachineConfigNodeStatusInternalReleaseImage type for use +// with apply. +type MachineConfigNodeStatusInternalReleaseImageApplyConfiguration struct { + Releases []MachineConfigNodeStatusInternalReleaseImageRefApplyConfiguration `json:"releases,omitempty"` +} + +// MachineConfigNodeStatusInternalReleaseImageApplyConfiguration constructs a declarative configuration of the MachineConfigNodeStatusInternalReleaseImage type for use with +// apply. +func MachineConfigNodeStatusInternalReleaseImage() *MachineConfigNodeStatusInternalReleaseImageApplyConfiguration { + return &MachineConfigNodeStatusInternalReleaseImageApplyConfiguration{} +} + +// WithReleases adds the given value to the Releases field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Releases field. +func (b *MachineConfigNodeStatusInternalReleaseImageApplyConfiguration) WithReleases(values ...*MachineConfigNodeStatusInternalReleaseImageRefApplyConfiguration) *MachineConfigNodeStatusInternalReleaseImageApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithReleases") + } + b.Releases = append(b.Releases, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodestatusinternalreleaseimageref.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodestatusinternalreleaseimageref.go new file mode 100644 index 000000000000..98003dce3de7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfignodestatusinternalreleaseimageref.go @@ -0,0 +1,50 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// MachineConfigNodeStatusInternalReleaseImageRefApplyConfiguration represents a declarative configuration of the MachineConfigNodeStatusInternalReleaseImageRef type for use +// with apply. 
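The new MachineConfigNode status builders added in this change compose in the usual way; a hedged sketch follows, assuming the pre-existing MachineConfigNodeStatus constructor in this package and using placeholder release values.

package example

import (
	mcov1 "github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1"
)

// nodeStatusWithInternalRelease populates the new internalReleaseImage status
// field via the generated builders introduced by this change.
func nodeStatusWithInternalRelease() *mcov1.MachineConfigNodeStatusApplyConfiguration {
	release := mcov1.MachineConfigNodeStatusInternalReleaseImageRef().
		WithName("4.20.0").                                   // placeholder release name
		WithImage("registry.example.com/release:placeholder") // placeholder pull spec

	return mcov1.MachineConfigNodeStatus().
		WithInternalReleaseImage(mcov1.MachineConfigNodeStatusInternalReleaseImage().
			WithReleases(release))
}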
+type MachineConfigNodeStatusInternalReleaseImageRefApplyConfiguration struct { + Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` + Name *string `json:"name,omitempty"` + Image *string `json:"image,omitempty"` +} + +// MachineConfigNodeStatusInternalReleaseImageRefApplyConfiguration constructs a declarative configuration of the MachineConfigNodeStatusInternalReleaseImageRef type for use with +// apply. +func MachineConfigNodeStatusInternalReleaseImageRef() *MachineConfigNodeStatusInternalReleaseImageRefApplyConfiguration { + return &MachineConfigNodeStatusInternalReleaseImageRefApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *MachineConfigNodeStatusInternalReleaseImageRefApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *MachineConfigNodeStatusInternalReleaseImageRefApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MachineConfigNodeStatusInternalReleaseImageRefApplyConfiguration) WithName(value string) *MachineConfigNodeStatusInternalReleaseImageRefApplyConfiguration { + b.Name = &value + return b +} + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. +func (b *MachineConfigNodeStatusInternalReleaseImageRefApplyConfiguration) WithImage(value string) *MachineConfigNodeStatusInternalReleaseImageRefApplyConfiguration { + b.Image = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfigpoolspec.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfigpoolspec.go index 1b3d4db608b5..3b44dbf9410a 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfigpoolspec.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfigpoolspec.go @@ -16,6 +16,7 @@ type MachineConfigPoolSpecApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` Configuration *MachineConfigPoolStatusConfigurationApplyConfiguration `json:"configuration,omitempty"` PinnedImageSets []PinnedImageSetRefApplyConfiguration `json:"pinnedImageSets,omitempty"` + OSImageStream *OSImageStreamReferenceApplyConfiguration `json:"osImageStream,omitempty"` } // MachineConfigPoolSpecApplyConfiguration constructs a declarative configuration of the MachineConfigPoolSpec type for use with @@ -76,3 +77,11 @@ func (b *MachineConfigPoolSpecApplyConfiguration) WithPinnedImageSets(values ... 
} return b } + +// WithOSImageStream sets the OSImageStream field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OSImageStream field is set to the value of the last call. +func (b *MachineConfigPoolSpecApplyConfiguration) WithOSImageStream(value *OSImageStreamReferenceApplyConfiguration) *MachineConfigPoolSpecApplyConfiguration { + b.OSImageStream = value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfigpoolstatus.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfigpoolstatus.go index d92c22c33c04..0df351e27882 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfigpoolstatus.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfigpoolstatus.go @@ -15,6 +15,7 @@ type MachineConfigPoolStatusApplyConfiguration struct { Conditions []MachineConfigPoolConditionApplyConfiguration `json:"conditions,omitempty"` CertExpirys []CertExpiryApplyConfiguration `json:"certExpirys,omitempty"` PoolSynchronizersStatus []PoolSynchronizerStatusApplyConfiguration `json:"poolSynchronizersStatus,omitempty"` + OSImageStream *OSImageStreamReferenceApplyConfiguration `json:"osImageStream,omitempty"` } // MachineConfigPoolStatusApplyConfiguration constructs a declarative configuration of the MachineConfigPoolStatus type for use with @@ -117,3 +118,11 @@ func (b *MachineConfigPoolStatusApplyConfiguration) WithPoolSynchronizersStatus( } return b } + +// WithOSImageStream sets the OSImageStream field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OSImageStream field is set to the value of the last call. +func (b *MachineConfigPoolStatusApplyConfiguration) WithOSImageStream(value *OSImageStreamReferenceApplyConfiguration) *MachineConfigPoolStatusApplyConfiguration { + b.OSImageStream = value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/osimagestreamreference.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/osimagestreamreference.go new file mode 100644 index 000000000000..f5e96a1edf85 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/osimagestreamreference.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// OSImageStreamReferenceApplyConfiguration represents a declarative configuration of the OSImageStreamReference type for use +// with apply. +type OSImageStreamReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// OSImageStreamReferenceApplyConfiguration constructs a declarative configuration of the OSImageStreamReference type for use with +// apply. 
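The corresponding pool-level wiring is sketched below; the MachineConfigPool and WithSpec builders are assumed from this package's existing generated code (they are not part of this diff), and the pool and stream names are illustrative.

package example

import (
	mcov1 "github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1"
)

// workerPoolWithOSImageStream opts the worker pool into a named OS image
// stream using the new OSImageStreamReference builder and WithOSImageStream.
func workerPoolWithOSImageStream() *mcov1.MachineConfigPoolApplyConfiguration {
	return mcov1.MachineConfigPool("worker").
		WithSpec(mcov1.MachineConfigPoolSpec().
			WithOSImageStream(mcov1.OSImageStreamReference().
				WithName("example-stream"))) // illustrative stream name
}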
+func OSImageStreamReference() *OSImageStreamReferenceApplyConfiguration { + return &OSImageStreamReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *OSImageStreamReferenceApplyConfiguration) WithName(value string) *OSImageStreamReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/internalreleaseimage.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/internalreleaseimage.go new file mode 100644 index 000000000000..0c512503db52 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/internalreleaseimage.go @@ -0,0 +1,263 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + machineconfigurationv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + internal "github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// InternalReleaseImageApplyConfiguration represents a declarative configuration of the InternalReleaseImage type for use +// with apply. +type InternalReleaseImageApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *InternalReleaseImageSpecApplyConfiguration `json:"spec,omitempty"` + Status *InternalReleaseImageStatusApplyConfiguration `json:"status,omitempty"` +} + +// InternalReleaseImage constructs a declarative configuration of the InternalReleaseImage type for use with +// apply. +func InternalReleaseImage(name string) *InternalReleaseImageApplyConfiguration { + b := &InternalReleaseImageApplyConfiguration{} + b.WithName(name) + b.WithKind("InternalReleaseImage") + b.WithAPIVersion("machineconfiguration.openshift.io/v1alpha1") + return b +} + +// ExtractInternalReleaseImage extracts the applied configuration owned by fieldManager from +// internalReleaseImage. If no managedFields are found in internalReleaseImage for fieldManager, a +// InternalReleaseImageApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// internalReleaseImage must be a unmodified InternalReleaseImage API object that was retrieved from the Kubernetes API. +// ExtractInternalReleaseImage provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! 
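The extract/modify-in-place/apply workflow described in the generated comment above can be sketched roughly as follows; the InternalReleaseImageSpec, InternalReleaseImageRef, and WithReleases builders are assumed from the generator's usual conventions rather than shown in this diff, and the release name is a placeholder. The returned configuration would then be passed to the typed client's Apply call under the same field manager.

package example

import (
	mcov1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1"
	applymcov1alpha1 "github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1"
)

// reapplyReleases extracts the fields previously applied by fieldManager from a
// live InternalReleaseImage, swaps in a new spec, and returns the apply
// configuration for the caller to re-apply.
func reapplyReleases(live *mcov1alpha1.InternalReleaseImage, fieldManager string) (*applymcov1alpha1.InternalReleaseImageApplyConfiguration, error) {
	ac, err := applymcov1alpha1.ExtractInternalReleaseImage(live, fieldManager)
	if err != nil {
		return nil, err
	}
	// Modify in place: replace the previously applied release list.
	ac.WithSpec(applymcov1alpha1.InternalReleaseImageSpec().
		WithReleases(applymcov1alpha1.InternalReleaseImageRef().
			WithName("4.20.0-rc.1"))) // placeholder release name
	return ac, nil
}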
+func ExtractInternalReleaseImage(internalReleaseImage *machineconfigurationv1alpha1.InternalReleaseImage, fieldManager string) (*InternalReleaseImageApplyConfiguration, error) { + return extractInternalReleaseImage(internalReleaseImage, fieldManager, "") +} + +// ExtractInternalReleaseImageStatus is the same as ExtractInternalReleaseImage except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractInternalReleaseImageStatus(internalReleaseImage *machineconfigurationv1alpha1.InternalReleaseImage, fieldManager string) (*InternalReleaseImageApplyConfiguration, error) { + return extractInternalReleaseImage(internalReleaseImage, fieldManager, "status") +} + +func extractInternalReleaseImage(internalReleaseImage *machineconfigurationv1alpha1.InternalReleaseImage, fieldManager string, subresource string) (*InternalReleaseImageApplyConfiguration, error) { + b := &InternalReleaseImageApplyConfiguration{} + err := managedfields.ExtractInto(internalReleaseImage, internal.Parser().Type("com.github.openshift.api.machineconfiguration.v1alpha1.InternalReleaseImage"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(internalReleaseImage.Name) + + b.WithKind("InternalReleaseImage") + b.WithAPIVersion("machineconfiguration.openshift.io/v1alpha1") + return b, nil +} +func (b InternalReleaseImageApplyConfiguration) IsApplyConfiguration() {} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *InternalReleaseImageApplyConfiguration) WithKind(value string) *InternalReleaseImageApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *InternalReleaseImageApplyConfiguration) WithAPIVersion(value string) *InternalReleaseImageApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *InternalReleaseImageApplyConfiguration) WithName(value string) *InternalReleaseImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. 
+func (b *InternalReleaseImageApplyConfiguration) WithGenerateName(value string) *InternalReleaseImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *InternalReleaseImageApplyConfiguration) WithNamespace(value string) *InternalReleaseImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *InternalReleaseImageApplyConfiguration) WithUID(value types.UID) *InternalReleaseImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *InternalReleaseImageApplyConfiguration) WithResourceVersion(value string) *InternalReleaseImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *InternalReleaseImageApplyConfiguration) WithGeneration(value int64) *InternalReleaseImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *InternalReleaseImageApplyConfiguration) WithCreationTimestamp(value metav1.Time) *InternalReleaseImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *InternalReleaseImageApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *InternalReleaseImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *InternalReleaseImageApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *InternalReleaseImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *InternalReleaseImageApplyConfiguration) WithLabels(entries map[string]string) *InternalReleaseImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *InternalReleaseImageApplyConfiguration) WithAnnotations(entries map[string]string) *InternalReleaseImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *InternalReleaseImageApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *InternalReleaseImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *InternalReleaseImageApplyConfiguration) WithFinalizers(values ...string) *InternalReleaseImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *InternalReleaseImageApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *InternalReleaseImageApplyConfiguration) WithSpec(value *InternalReleaseImageSpecApplyConfiguration) *InternalReleaseImageApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *InternalReleaseImageApplyConfiguration) WithStatus(value *InternalReleaseImageStatusApplyConfiguration) *InternalReleaseImageApplyConfiguration { + b.Status = value + return b +} + +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *InternalReleaseImageApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *InternalReleaseImageApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *InternalReleaseImageApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *InternalReleaseImageApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/internalreleaseimagebundlestatus.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/internalreleaseimagebundlestatus.go new file mode 100644 index 000000000000..acfda1ee8869 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/internalreleaseimagebundlestatus.go @@ -0,0 +1,50 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// InternalReleaseImageBundleStatusApplyConfiguration represents a declarative configuration of the InternalReleaseImageBundleStatus type for use +// with apply. 
+type InternalReleaseImageBundleStatusApplyConfiguration struct { + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` + Name *string `json:"name,omitempty"` + Image *string `json:"image,omitempty"` +} + +// InternalReleaseImageBundleStatusApplyConfiguration constructs a declarative configuration of the InternalReleaseImageBundleStatus type for use with +// apply. +func InternalReleaseImageBundleStatus() *InternalReleaseImageBundleStatusApplyConfiguration { + return &InternalReleaseImageBundleStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *InternalReleaseImageBundleStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *InternalReleaseImageBundleStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *InternalReleaseImageBundleStatusApplyConfiguration) WithName(value string) *InternalReleaseImageBundleStatusApplyConfiguration { + b.Name = &value + return b +} + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. +func (b *InternalReleaseImageBundleStatusApplyConfiguration) WithImage(value string) *InternalReleaseImageBundleStatusApplyConfiguration { + b.Image = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/internalreleaseimageref.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/internalreleaseimageref.go new file mode 100644 index 000000000000..8a04846a5413 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/internalreleaseimageref.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// InternalReleaseImageRefApplyConfiguration represents a declarative configuration of the InternalReleaseImageRef type for use +// with apply. +type InternalReleaseImageRefApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// InternalReleaseImageRefApplyConfiguration constructs a declarative configuration of the InternalReleaseImageRef type for use with +// apply. +func InternalReleaseImageRef() *InternalReleaseImageRefApplyConfiguration { + return &InternalReleaseImageRefApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *InternalReleaseImageRefApplyConfiguration) WithName(value string) *InternalReleaseImageRefApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/internalreleaseimagespec.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/internalreleaseimagespec.go new file mode 100644 index 000000000000..2ddb39c43fd9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/internalreleaseimagespec.go @@ -0,0 +1,28 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// InternalReleaseImageSpecApplyConfiguration represents a declarative configuration of the InternalReleaseImageSpec type for use +// with apply. +type InternalReleaseImageSpecApplyConfiguration struct { + Releases []InternalReleaseImageRefApplyConfiguration `json:"releases,omitempty"` +} + +// InternalReleaseImageSpecApplyConfiguration constructs a declarative configuration of the InternalReleaseImageSpec type for use with +// apply. +func InternalReleaseImageSpec() *InternalReleaseImageSpecApplyConfiguration { + return &InternalReleaseImageSpecApplyConfiguration{} +} + +// WithReleases adds the given value to the Releases field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Releases field. +func (b *InternalReleaseImageSpecApplyConfiguration) WithReleases(values ...*InternalReleaseImageRefApplyConfiguration) *InternalReleaseImageSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithReleases") + } + b.Releases = append(b.Releases, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/internalreleaseimagestatus.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/internalreleaseimagestatus.go new file mode 100644 index 000000000000..e5a3483a35ca --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/internalreleaseimagestatus.go @@ -0,0 +1,46 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// InternalReleaseImageStatusApplyConfiguration represents a declarative configuration of the InternalReleaseImageStatus type for use +// with apply. +type InternalReleaseImageStatusApplyConfiguration struct { + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` + Releases []InternalReleaseImageBundleStatusApplyConfiguration `json:"releases,omitempty"` +} + +// InternalReleaseImageStatusApplyConfiguration constructs a declarative configuration of the InternalReleaseImageStatus type for use with +// apply. +func InternalReleaseImageStatus() *InternalReleaseImageStatusApplyConfiguration { + return &InternalReleaseImageStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *InternalReleaseImageStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *InternalReleaseImageStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithReleases adds the given value to the Releases field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Releases field. +func (b *InternalReleaseImageStatusApplyConfiguration) WithReleases(values ...*InternalReleaseImageBundleStatusApplyConfiguration) *InternalReleaseImageStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithReleases") + } + b.Releases = append(b.Releases, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/osimagestream.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/osimagestream.go new file mode 100644 index 000000000000..d8cb0a9d3e26 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/osimagestream.go @@ -0,0 +1,263 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + machineconfigurationv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + internal "github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// OSImageStreamApplyConfiguration represents a declarative configuration of the OSImageStream type for use +// with apply. +type OSImageStreamApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *machineconfigurationv1alpha1.OSImageStreamSpec `json:"spec,omitempty"` + Status *OSImageStreamStatusApplyConfiguration `json:"status,omitempty"` +} + +// OSImageStream constructs a declarative configuration of the OSImageStream type for use with +// apply. +func OSImageStream(name string) *OSImageStreamApplyConfiguration { + b := &OSImageStreamApplyConfiguration{} + b.WithName(name) + b.WithKind("OSImageStream") + b.WithAPIVersion("machineconfiguration.openshift.io/v1alpha1") + return b +} + +// ExtractOSImageStream extracts the applied configuration owned by fieldManager from +// oSImageStream. If no managedFields are found in oSImageStream for fieldManager, a +// OSImageStreamApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// oSImageStream must be a unmodified OSImageStream API object that was retrieved from the Kubernetes API. +// ExtractOSImageStream provides a way to perform a extract/modify-in-place/apply workflow. 
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractOSImageStream(oSImageStream *machineconfigurationv1alpha1.OSImageStream, fieldManager string) (*OSImageStreamApplyConfiguration, error) { + return extractOSImageStream(oSImageStream, fieldManager, "") +} + +// ExtractOSImageStreamStatus is the same as ExtractOSImageStream except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractOSImageStreamStatus(oSImageStream *machineconfigurationv1alpha1.OSImageStream, fieldManager string) (*OSImageStreamApplyConfiguration, error) { + return extractOSImageStream(oSImageStream, fieldManager, "status") +} + +func extractOSImageStream(oSImageStream *machineconfigurationv1alpha1.OSImageStream, fieldManager string, subresource string) (*OSImageStreamApplyConfiguration, error) { + b := &OSImageStreamApplyConfiguration{} + err := managedfields.ExtractInto(oSImageStream, internal.Parser().Type("com.github.openshift.api.machineconfiguration.v1alpha1.OSImageStream"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(oSImageStream.Name) + + b.WithKind("OSImageStream") + b.WithAPIVersion("machineconfiguration.openshift.io/v1alpha1") + return b, nil +} +func (b OSImageStreamApplyConfiguration) IsApplyConfiguration() {} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *OSImageStreamApplyConfiguration) WithKind(value string) *OSImageStreamApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *OSImageStreamApplyConfiguration) WithAPIVersion(value string) *OSImageStreamApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *OSImageStreamApplyConfiguration) WithName(value string) *OSImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. 
+func (b *OSImageStreamApplyConfiguration) WithGenerateName(value string) *OSImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *OSImageStreamApplyConfiguration) WithNamespace(value string) *OSImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *OSImageStreamApplyConfiguration) WithUID(value types.UID) *OSImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *OSImageStreamApplyConfiguration) WithResourceVersion(value string) *OSImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *OSImageStreamApplyConfiguration) WithGeneration(value int64) *OSImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *OSImageStreamApplyConfiguration) WithCreationTimestamp(value metav1.Time) *OSImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *OSImageStreamApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *OSImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *OSImageStreamApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *OSImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *OSImageStreamApplyConfiguration) WithLabels(entries map[string]string) *OSImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *OSImageStreamApplyConfiguration) WithAnnotations(entries map[string]string) *OSImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *OSImageStreamApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *OSImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *OSImageStreamApplyConfiguration) WithFinalizers(values ...string) *OSImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *OSImageStreamApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *OSImageStreamApplyConfiguration) WithSpec(value machineconfigurationv1alpha1.OSImageStreamSpec) *OSImageStreamApplyConfiguration { + b.Spec = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *OSImageStreamApplyConfiguration) WithStatus(value *OSImageStreamStatusApplyConfiguration) *OSImageStreamApplyConfiguration { + b.Status = value + return b +} + +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *OSImageStreamApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *OSImageStreamApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *OSImageStreamApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *OSImageStreamApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/osimagestreamset.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/osimagestreamset.go new file mode 100644 index 000000000000..d87886a92052 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/osimagestreamset.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + machineconfigurationv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" +) + +// OSImageStreamSetApplyConfiguration represents a declarative configuration of the OSImageStreamSet type for use +// with apply. +type OSImageStreamSetApplyConfiguration struct { + Name *string `json:"name,omitempty"` + OSImage *machineconfigurationv1alpha1.ImageDigestFormat `json:"osImage,omitempty"` + OSExtensionsImage *machineconfigurationv1alpha1.ImageDigestFormat `json:"osExtensionsImage,omitempty"` +} + +// OSImageStreamSetApplyConfiguration constructs a declarative configuration of the OSImageStreamSet type for use with +// apply. 
+func OSImageStreamSet() *OSImageStreamSetApplyConfiguration { + return &OSImageStreamSetApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *OSImageStreamSetApplyConfiguration) WithName(value string) *OSImageStreamSetApplyConfiguration { + b.Name = &value + return b +} + +// WithOSImage sets the OSImage field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OSImage field is set to the value of the last call. +func (b *OSImageStreamSetApplyConfiguration) WithOSImage(value machineconfigurationv1alpha1.ImageDigestFormat) *OSImageStreamSetApplyConfiguration { + b.OSImage = &value + return b +} + +// WithOSExtensionsImage sets the OSExtensionsImage field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OSExtensionsImage field is set to the value of the last call. +func (b *OSImageStreamSetApplyConfiguration) WithOSExtensionsImage(value machineconfigurationv1alpha1.ImageDigestFormat) *OSImageStreamSetApplyConfiguration { + b.OSExtensionsImage = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/osimagestreamstatus.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/osimagestreamstatus.go new file mode 100644 index 000000000000..7a06cad58cd6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/osimagestreamstatus.go @@ -0,0 +1,37 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// OSImageStreamStatusApplyConfiguration represents a declarative configuration of the OSImageStreamStatus type for use +// with apply. +type OSImageStreamStatusApplyConfiguration struct { + AvailableStreams []OSImageStreamSetApplyConfiguration `json:"availableStreams,omitempty"` + DefaultStream *string `json:"defaultStream,omitempty"` +} + +// OSImageStreamStatusApplyConfiguration constructs a declarative configuration of the OSImageStreamStatus type for use with +// apply. +func OSImageStreamStatus() *OSImageStreamStatusApplyConfiguration { + return &OSImageStreamStatusApplyConfiguration{} +} + +// WithAvailableStreams adds the given value to the AvailableStreams field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AvailableStreams field. +func (b *OSImageStreamStatusApplyConfiguration) WithAvailableStreams(values ...*OSImageStreamSetApplyConfiguration) *OSImageStreamStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAvailableStreams") + } + b.AvailableStreams = append(b.AvailableStreams, *values[i]) + } + return b +} + +// WithDefaultStream sets the DefaultStream field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the DefaultStream field is set to the value of the last call. +func (b *OSImageStreamStatusApplyConfiguration) WithDefaultStream(value string) *OSImageStreamStatusApplyConfiguration { + b.DefaultStream = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/pinnedimageref.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/pinnedimageref.go index a3c7638db763..2cb17fb72d98 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/pinnedimageref.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1/pinnedimageref.go @@ -2,10 +2,14 @@ package v1alpha1 +import ( + machineconfigurationv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" +) + // PinnedImageRefApplyConfiguration represents a declarative configuration of the PinnedImageRef type for use // with apply. type PinnedImageRefApplyConfiguration struct { - Name *string `json:"name,omitempty"` + Name *machineconfigurationv1alpha1.ImageDigestFormat `json:"name,omitempty"` } // PinnedImageRefApplyConfiguration constructs a declarative configuration of the PinnedImageRef type for use with @@ -17,7 +21,7 @@ func PinnedImageRef() *PinnedImageRefApplyConfiguration { // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *PinnedImageRefApplyConfiguration) WithName(value string) *PinnedImageRefApplyConfiguration { +func (b *PinnedImageRefApplyConfiguration) WithName(value machineconfigurationv1alpha1.ImageDigestFormat) *PinnedImageRefApplyConfiguration { b.Name = &value return b } diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/generated_expansion.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/generated_expansion.go index f73dfece1fa0..33be9149147d 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/generated_expansion.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/generated_expansion.go @@ -2,6 +2,10 @@ package v1alpha1 +type InternalReleaseImageExpansion interface{} + type MachineConfigNodeExpansion interface{} +type OSImageStreamExpansion interface{} + type PinnedImageSetExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/internalreleaseimage.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/internalreleaseimage.go new file mode 100644 index 000000000000..b99b03685aeb --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/internalreleaseimage.go @@ -0,0 +1,62 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + context "context" + + machineconfigurationv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + applyconfigurationsmachineconfigurationv1alpha1 "github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1" + scheme "github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// InternalReleaseImagesGetter has a method to return a InternalReleaseImageInterface. +// A group's client should implement this interface. +type InternalReleaseImagesGetter interface { + InternalReleaseImages() InternalReleaseImageInterface +} + +// InternalReleaseImageInterface has methods to work with InternalReleaseImage resources. +type InternalReleaseImageInterface interface { + Create(ctx context.Context, internalReleaseImage *machineconfigurationv1alpha1.InternalReleaseImage, opts v1.CreateOptions) (*machineconfigurationv1alpha1.InternalReleaseImage, error) + Update(ctx context.Context, internalReleaseImage *machineconfigurationv1alpha1.InternalReleaseImage, opts v1.UpdateOptions) (*machineconfigurationv1alpha1.InternalReleaseImage, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, internalReleaseImage *machineconfigurationv1alpha1.InternalReleaseImage, opts v1.UpdateOptions) (*machineconfigurationv1alpha1.InternalReleaseImage, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*machineconfigurationv1alpha1.InternalReleaseImage, error) + List(ctx context.Context, opts v1.ListOptions) (*machineconfigurationv1alpha1.InternalReleaseImageList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *machineconfigurationv1alpha1.InternalReleaseImage, err error) + Apply(ctx context.Context, internalReleaseImage *applyconfigurationsmachineconfigurationv1alpha1.InternalReleaseImageApplyConfiguration, opts v1.ApplyOptions) (result *machineconfigurationv1alpha1.InternalReleaseImage, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, internalReleaseImage *applyconfigurationsmachineconfigurationv1alpha1.InternalReleaseImageApplyConfiguration, opts v1.ApplyOptions) (result *machineconfigurationv1alpha1.InternalReleaseImage, err error) + InternalReleaseImageExpansion +} + +// internalReleaseImages implements InternalReleaseImageInterface +type internalReleaseImages struct { + *gentype.ClientWithListAndApply[*machineconfigurationv1alpha1.InternalReleaseImage, *machineconfigurationv1alpha1.InternalReleaseImageList, *applyconfigurationsmachineconfigurationv1alpha1.InternalReleaseImageApplyConfiguration] +} + +// newInternalReleaseImages returns a InternalReleaseImages +func newInternalReleaseImages(c *MachineconfigurationV1alpha1Client) *internalReleaseImages { + return &internalReleaseImages{ + gentype.NewClientWithListAndApply[*machineconfigurationv1alpha1.InternalReleaseImage, *machineconfigurationv1alpha1.InternalReleaseImageList, *applyconfigurationsmachineconfigurationv1alpha1.InternalReleaseImageApplyConfiguration]( + "internalreleaseimages", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *machineconfigurationv1alpha1.InternalReleaseImage { + return &machineconfigurationv1alpha1.InternalReleaseImage{} + }, + func() *machineconfigurationv1alpha1.InternalReleaseImageList { + return &machineconfigurationv1alpha1.InternalReleaseImageList{} + }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/machineconfiguration_client.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/machineconfiguration_client.go index ad6305471c72..70682ef2109e 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/machineconfiguration_client.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/machineconfiguration_client.go @@ -12,7 +12,9 @@ import ( type MachineconfigurationV1alpha1Interface interface { RESTClient() rest.Interface + InternalReleaseImagesGetter MachineConfigNodesGetter + OSImageStreamsGetter PinnedImageSetsGetter } @@ -21,10 +23,18 @@ type MachineconfigurationV1alpha1Client struct { restClient rest.Interface } +func (c *MachineconfigurationV1alpha1Client) InternalReleaseImages() InternalReleaseImageInterface { + return newInternalReleaseImages(c) +} + func (c *MachineconfigurationV1alpha1Client) MachineConfigNodes() MachineConfigNodeInterface { return newMachineConfigNodes(c) } +func (c *MachineconfigurationV1alpha1Client) OSImageStreams() OSImageStreamInterface { + return newOSImageStreams(c) +} + func (c *MachineconfigurationV1alpha1Client) PinnedImageSets() PinnedImageSetInterface { return newPinnedImageSets(c) } diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/osimagestream.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/osimagestream.go new file mode 100644 index 000000000000..386ecb9bb0c2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1alpha1/osimagestream.go @@ -0,0 +1,62 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + context "context" + + machineconfigurationv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + applyconfigurationsmachineconfigurationv1alpha1 "github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1" + scheme "github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// OSImageStreamsGetter has a method to return a OSImageStreamInterface. +// A group's client should implement this interface. +type OSImageStreamsGetter interface { + OSImageStreams() OSImageStreamInterface +} + +// OSImageStreamInterface has methods to work with OSImageStream resources. +type OSImageStreamInterface interface { + Create(ctx context.Context, oSImageStream *machineconfigurationv1alpha1.OSImageStream, opts v1.CreateOptions) (*machineconfigurationv1alpha1.OSImageStream, error) + Update(ctx context.Context, oSImageStream *machineconfigurationv1alpha1.OSImageStream, opts v1.UpdateOptions) (*machineconfigurationv1alpha1.OSImageStream, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, oSImageStream *machineconfigurationv1alpha1.OSImageStream, opts v1.UpdateOptions) (*machineconfigurationv1alpha1.OSImageStream, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*machineconfigurationv1alpha1.OSImageStream, error) + List(ctx context.Context, opts v1.ListOptions) (*machineconfigurationv1alpha1.OSImageStreamList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *machineconfigurationv1alpha1.OSImageStream, err error) + Apply(ctx context.Context, oSImageStream *applyconfigurationsmachineconfigurationv1alpha1.OSImageStreamApplyConfiguration, opts v1.ApplyOptions) (result *machineconfigurationv1alpha1.OSImageStream, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, oSImageStream *applyconfigurationsmachineconfigurationv1alpha1.OSImageStreamApplyConfiguration, opts v1.ApplyOptions) (result *machineconfigurationv1alpha1.OSImageStream, err error) + OSImageStreamExpansion +} + +// oSImageStreams implements OSImageStreamInterface +type oSImageStreams struct { + *gentype.ClientWithListAndApply[*machineconfigurationv1alpha1.OSImageStream, *machineconfigurationv1alpha1.OSImageStreamList, *applyconfigurationsmachineconfigurationv1alpha1.OSImageStreamApplyConfiguration] +} + +// newOSImageStreams returns a OSImageStreams +func newOSImageStreams(c *MachineconfigurationV1alpha1Client) *oSImageStreams { + return &oSImageStreams{ + gentype.NewClientWithListAndApply[*machineconfigurationv1alpha1.OSImageStream, *machineconfigurationv1alpha1.OSImageStreamList, *applyconfigurationsmachineconfigurationv1alpha1.OSImageStreamApplyConfiguration]( + "osimagestreams", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *machineconfigurationv1alpha1.OSImageStream { + return &machineconfigurationv1alpha1.OSImageStream{} + }, + func() *machineconfigurationv1alpha1.OSImageStreamList { + return &machineconfigurationv1alpha1.OSImageStreamList{} + }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go index fa2661459e24..d2451c5723e8 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go @@ -1860,6 +1860,10 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: com.github.openshift.api.operator.v1.ClientTLS default: {} + - name: closedClientConnectionPolicy + type: + scalar: string + default: Continue - name: defaultCertificate type: namedType: io.k8s.api.core.v1.LocalObjectReference @@ -1974,6 +1978,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: healthCheckInterval type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Duration + - name: httpKeepAliveTimeout + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Duration - name: maxConnections type: scalar: numeric diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerspec.go index ae23fe636a81..ff82e0ed1800 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerspec.go @@ -13,24 +13,25 @@ import ( // IngressControllerSpecApplyConfiguration represents a declarative configuration of the IngressControllerSpec type for use // with apply. 
type IngressControllerSpecApplyConfiguration struct { - Domain *string `json:"domain,omitempty"` - HttpErrorCodePages *configv1.ConfigMapNameReference `json:"httpErrorCodePages,omitempty"` - Replicas *int32 `json:"replicas,omitempty"` - EndpointPublishingStrategy *EndpointPublishingStrategyApplyConfiguration `json:"endpointPublishingStrategy,omitempty"` - DefaultCertificate *corev1.LocalObjectReference `json:"defaultCertificate,omitempty"` - NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` - RouteSelector *metav1.LabelSelectorApplyConfiguration `json:"routeSelector,omitempty"` - NodePlacement *NodePlacementApplyConfiguration `json:"nodePlacement,omitempty"` - TLSSecurityProfile *configv1.TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` - ClientTLS *ClientTLSApplyConfiguration `json:"clientTLS,omitempty"` - RouteAdmission *RouteAdmissionPolicyApplyConfiguration `json:"routeAdmission,omitempty"` - Logging *IngressControllerLoggingApplyConfiguration `json:"logging,omitempty"` - HTTPHeaders *IngressControllerHTTPHeadersApplyConfiguration `json:"httpHeaders,omitempty"` - HTTPEmptyRequestsPolicy *operatorv1.HTTPEmptyRequestsPolicy `json:"httpEmptyRequestsPolicy,omitempty"` - TuningOptions *IngressControllerTuningOptionsApplyConfiguration `json:"tuningOptions,omitempty"` - UnsupportedConfigOverrides *runtime.RawExtension `json:"unsupportedConfigOverrides,omitempty"` - HTTPCompression *HTTPCompressionPolicyApplyConfiguration `json:"httpCompression,omitempty"` - IdleConnectionTerminationPolicy *operatorv1.IngressControllerConnectionTerminationPolicy `json:"idleConnectionTerminationPolicy,omitempty"` + Domain *string `json:"domain,omitempty"` + HttpErrorCodePages *configv1.ConfigMapNameReference `json:"httpErrorCodePages,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` + EndpointPublishingStrategy *EndpointPublishingStrategyApplyConfiguration `json:"endpointPublishingStrategy,omitempty"` + DefaultCertificate *corev1.LocalObjectReference `json:"defaultCertificate,omitempty"` + NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + RouteSelector *metav1.LabelSelectorApplyConfiguration `json:"routeSelector,omitempty"` + NodePlacement *NodePlacementApplyConfiguration `json:"nodePlacement,omitempty"` + TLSSecurityProfile *configv1.TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` + ClientTLS *ClientTLSApplyConfiguration `json:"clientTLS,omitempty"` + RouteAdmission *RouteAdmissionPolicyApplyConfiguration `json:"routeAdmission,omitempty"` + Logging *IngressControllerLoggingApplyConfiguration `json:"logging,omitempty"` + HTTPHeaders *IngressControllerHTTPHeadersApplyConfiguration `json:"httpHeaders,omitempty"` + HTTPEmptyRequestsPolicy *operatorv1.HTTPEmptyRequestsPolicy `json:"httpEmptyRequestsPolicy,omitempty"` + TuningOptions *IngressControllerTuningOptionsApplyConfiguration `json:"tuningOptions,omitempty"` + UnsupportedConfigOverrides *runtime.RawExtension `json:"unsupportedConfigOverrides,omitempty"` + HTTPCompression *HTTPCompressionPolicyApplyConfiguration `json:"httpCompression,omitempty"` + IdleConnectionTerminationPolicy *operatorv1.IngressControllerConnectionTerminationPolicy `json:"idleConnectionTerminationPolicy,omitempty"` + ClosedClientConnectionPolicy *operatorv1.IngressControllerClosedClientConnectionPolicy `json:"closedClientConnectionPolicy,omitempty"` } // IngressControllerSpecApplyConfiguration constructs a declarative configuration of the IngressControllerSpec type for use 
with @@ -182,3 +183,11 @@ func (b *IngressControllerSpecApplyConfiguration) WithIdleConnectionTerminationP b.IdleConnectionTerminationPolicy = &value return b } + +// WithClosedClientConnectionPolicy sets the ClosedClientConnectionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClosedClientConnectionPolicy field is set to the value of the last call. +func (b *IngressControllerSpecApplyConfiguration) WithClosedClientConnectionPolicy(value operatorv1.IngressControllerClosedClientConnectionPolicy) *IngressControllerSpecApplyConfiguration { + b.ClosedClientConnectionPolicy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollertuningoptions.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollertuningoptions.go index 122801cf10e5..e7cc8c5ee5bd 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollertuningoptions.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollertuningoptions.go @@ -18,6 +18,7 @@ type IngressControllerTuningOptionsApplyConfiguration struct { ServerFinTimeout *metav1.Duration `json:"serverFinTimeout,omitempty"` TunnelTimeout *metav1.Duration `json:"tunnelTimeout,omitempty"` ConnectTimeout *metav1.Duration `json:"connectTimeout,omitempty"` + HTTPKeepAliveTimeout *metav1.Duration `json:"httpKeepAliveTimeout,omitempty"` TLSInspectDelay *metav1.Duration `json:"tlsInspectDelay,omitempty"` HealthCheckInterval *metav1.Duration `json:"healthCheckInterval,omitempty"` MaxConnections *int32 `json:"maxConnections,omitempty"` @@ -102,6 +103,14 @@ func (b *IngressControllerTuningOptionsApplyConfiguration) WithConnectTimeout(va return b } +// WithHTTPKeepAliveTimeout sets the HTTPKeepAliveTimeout field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPKeepAliveTimeout field is set to the value of the last call. +func (b *IngressControllerTuningOptionsApplyConfiguration) WithHTTPKeepAliveTimeout(value metav1.Duration) *IngressControllerTuningOptionsApplyConfiguration { + b.HTTPKeepAliveTimeout = &value + return b +} + // WithTLSInspectDelay sets the TLSInspectDelay field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the TLSInspectDelay field is set to the value of the last call. 
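The ingress controller part of this bump surfaces two new knobs on `IngressControllerSpec`: `tuningOptions.httpKeepAliveTimeout` and `closedClientConnectionPolicy`. Below is a minimal sketch (not part of this change) of how a consumer might set both via server-side apply once these generated helpers are available. The namespace, field-manager name, and 65s timeout are illustrative assumptions, and the `"Continue"` policy string simply mirrors the schema default registered in `internal.go` above; the root `IngressController(name, namespace)` constructor and the `OperatorV1().IngressControllers(...).Apply` call follow the standard generated client pattern.

```go
// Sketch only: exercises the new HTTPKeepAliveTimeout and
// ClosedClientConnectionPolicy builders added by this client-go bump.
package main

import (
	"context"
	"time"

	operatorv1 "github.com/openshift/api/operator/v1"
	operatorapply "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
	operatorclient "github.com/openshift/client-go/operator/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client, err := operatorclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Keep idle HTTP connections open longer and keep serving an in-flight
	// request even if the client closes its side of the connection.
	// The 65s value and "Continue" policy are illustrative; "Continue" matches
	// the schema default shown in the internal.go hunk above.
	spec := operatorapply.IngressControllerSpec().
		WithTuningOptions(operatorapply.IngressControllerTuningOptions().
			WithHTTPKeepAliveTimeout(metav1.Duration{Duration: 65 * time.Second})).
		WithClosedClientConnectionPolicy(operatorv1.IngressControllerClosedClientConnectionPolicy("Continue"))

	// Hypothetical target: the "default" IngressController in its usual namespace.
	ac := operatorapply.IngressController("default", "openshift-ingress-operator").WithSpec(spec)

	if _, err := client.OperatorV1().IngressControllers("openshift-ingress-operator").Apply(
		context.TODO(), ac, metav1.ApplyOptions{FieldManager: "example-manager", Force: true}); err != nil {
		panic(err)
	}
}
```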
diff --git a/vendor/modules.txt b/vendor/modules.txt index bf211d78397b..bef6d052a9c2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1449,10 +1449,12 @@ github.com/openshift-kni/commatrix/pkg/matrix-diff github.com/openshift-kni/commatrix/pkg/mcp github.com/openshift-kni/commatrix/pkg/types github.com/openshift-kni/commatrix/pkg/utils -# github.com/openshift/api v0.0.0-20251015095338-264e80a2b6e7 +# github.com/openshift/api v0.0.0-20260116192047-6fb7fdae95fd ## explicit; go 1.24.0 github.com/openshift/api github.com/openshift/api/annotations +github.com/openshift/api/apiextensions +github.com/openshift/api/apiextensions/v1alpha1 github.com/openshift/api/apiserver github.com/openshift/api/apiserver/v1 github.com/openshift/api/apps @@ -1557,7 +1559,7 @@ github.com/openshift/build-machinery-go/make/targets/golang github.com/openshift/build-machinery-go/make/targets/openshift github.com/openshift/build-machinery-go/make/targets/openshift/operator github.com/openshift/build-machinery-go/scripts -# github.com/openshift/client-go v0.0.0-20251015124057-db0dee36e235 +# github.com/openshift/client-go v0.0.0-20260108185524-48f4ccfc4e13 ## explicit; go 1.24.0 github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1 github.com/openshift/client-go/apiserver/applyconfigurations/internal
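For reference, here is a hedged sketch of how the new machineconfiguration `v1alpha1` surface from this bump could be driven. It only uses builders and client methods that appear in the generated code above (`InternalReleaseImage`, `InternalReleaseImageSpec`, `InternalReleaseImageRef`, the `WithSpec`/`WithReleases`/`WithName` chain, and the new `InternalReleaseImages()` typed client with `Apply`); the singleton object name `"cluster"`, the release name, and the field-manager string are illustrative assumptions, not values taken from this diff.

```go
// Sketch only: server-side apply of an InternalReleaseImage using the newly
// generated apply configurations and typed client from this vendor bump.
package main

import (
	"context"
	"fmt"

	mcfgapply "github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1"
	mcfgclient "github.com/openshift/client-go/machineconfiguration/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client, err := mcfgclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Build the declarative configuration with the generated "With" helpers.
	// "cluster" and the release name are hypothetical values for illustration.
	ac := mcfgapply.InternalReleaseImage("cluster").
		WithSpec(mcfgapply.InternalReleaseImageSpec().
			WithReleases(mcfgapply.InternalReleaseImageRef().
				WithName("example-release")))

	// Server-side apply through the new cluster-scoped InternalReleaseImages() client.
	obj, err := client.MachineconfigurationV1alpha1().InternalReleaseImages().Apply(
		context.TODO(), ac, metav1.ApplyOptions{FieldManager: "example-controller", Force: true})
	if err != nil {
		panic(err)
	}
	fmt.Println("applied", obj.Name)
}
```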