From 247e2e9d71810e869fc7912a6de80524a1b52ac1 Mon Sep 17 00:00:00 2001
From: Miciah Masters
Date: Fri, 28 Sep 2018 16:03:55 -0400
Subject: [PATCH 1/2] Add openshift/cluster-version-operator dependency

* Gopkg.lock:
* Gopkg.toml:
* vendor/github.com/openshift/cluster-version-operator: Import package in order to have the necessary API types for updating operator status.
---
 Gopkg.lock | 11 +
 Gopkg.toml | 4 +
 .../cluster-version-operator/.gitignore | 4 +
 .../cluster-version-operator/Dockerfile | 15 +
 .../cluster-version-operator/Gopkg.lock | 697 ++++++++++++++++++
 .../cluster-version-operator/Gopkg.toml | 69 ++
 .../cluster-version-operator/LICENSE | 202 +++++
 .../openshift/cluster-version-operator/OWNERS | 8 +
 .../cluster-version-operator/README.md | 45 ++
 .../bootstrap/bootstrap-pod.yaml | 44 ++
 .../cluster-version-operator/cmd/image.go | 39 +
 .../cluster-version-operator/cmd/main.go | 36 +
 .../cluster-version-operator/cmd/render.go | 43 ++
 .../cluster-version-operator/cmd/start.go | 255 +++++++
 .../cluster-version-operator/cmd/version.go | 29 +
 .../docs/dev/operators.md | 108 +++
 .../cluster-version-operator/hack/build-go.sh | 33 +
 .../hack/build-image.sh | 25 +
 .../hack/push-image.sh | 19 +
 .../hack/update-codegen.sh | 10 +
 .../hack/update-vendor.sh | 13 +
 .../hack/verify-codegen.sh | 34 +
 .../hack/verify-style.sh | 51 ++
 .../hack/yamllint-config.yaml | 31 +
 ...cluster-version-operator_00_namespace.yaml | 7 +
 ...ter-version-operator_01_cvoconfig.crd.yaml | 27 +
 ..._00_cluster-version-operator_02_roles.yaml | 11 +
 ...cluster-version-operator_03_daemonset.yaml | 54 ++
 .../cluster-version-operator/lib/manifest.go | 109 +++
 .../lib/manifest_test.go | 317 ++++++++
 .../lib/resourceapply/apiext.go | 52 ++
 .../lib/resourceapply/apireg.go | 30 +
 .../lib/resourceapply/apps.go | 97 +++
 .../lib/resourceapply/batch.go | 31 +
 .../lib/resourceapply/core.go | 102 +++
 .../lib/resourceapply/cv.go | 107 +++
 .../lib/resourceapply/rbac.go | 94 +++
 .../lib/resourceapply/security.go | 31 +
 .../lib/resourcebuilder/apiext.go | 77 ++
 .../lib/resourcebuilder/apireg.go | 36 +
 .../lib/resourcebuilder/apps.go | 146 ++++
 .../lib/resourcebuilder/batch.go | 83 +++
 .../lib/resourcebuilder/core.go | 117 +++
 .../lib/resourcebuilder/interface.go | 77 ++
 .../lib/resourcebuilder/rbac.go | 117 +++
 .../lib/resourcebuilder/register.go | 38 +
 .../lib/resourcebuilder/security.go | 36 +
 .../lib/resourcemerge/apiext.go | 18 +
 .../lib/resourcemerge/apireg.go | 18 +
 .../lib/resourcemerge/apps.go | 40 +
 .../lib/resourcemerge/batch.go | 28 +
 .../lib/resourcemerge/core.go | 503 +++++++++++++
 .../lib/resourcemerge/cv.go | 32 +
 .../lib/resourcemerge/meta.go | 58 ++
 .../lib/resourcemerge/meta_test.go | 110 +++
 .../lib/resourcemerge/os.go | 95 +++
 .../lib/resourcemerge/rbac.go | 54 ++
 .../lib/resourcemerge/security.go | 108 +++
 .../lib/resourceread/apiext.go | 27 +
 .../lib/resourceread/apireg.go | 31 +
 .../lib/resourceread/apps.go | 36 +
 .../lib/resourceread/batch.go | 27 +
 .../lib/resourceread/core.go | 52 ++
 .../lib/resourceread/image.go | 27 +
 .../lib/resourceread/rbac.go | 58 ++
 .../lib/resourceread/security.go | 27 +
 .../cluster-version-operator/pkg/apis/apis.go | 7 +
 .../v1/cluster_id.go | 30 +
 .../v1/cvoconfig.go | 28 +
 .../v1/register.go | 45 ++
 .../clusterversion.openshift.io/v1/types.go | 73 ++
 .../clusterversion.openshift.io/v1/url.go | 22 +
 .../v1/zz_generated.deepcopy.go | 89 +++
 .../v1/register.go | 45 ++
 .../operatorstatus.openshift.io/v1/types.go | 106 +++
 .../v1/zz_generated.deepcopy.go | 127 ++++
.../pkg/autoupdate/autoupdate.go | 229 ++++++ .../pkg/autoupdate/autoupdate_test.go | 43 ++ .../pkg/cincinnati/cincinnati.go | 127 ++++ .../cluster-version-operator/pkg/cvo/cvo.go | 273 +++++++ .../cluster-version-operator/pkg/cvo/image.go | 23 + .../pkg/cvo/internal/dynamicclient/client.go | 103 +++ .../pkg/cvo/internal/generic.go | 106 +++ .../pkg/cvo/internal/operatorstatus.go | 115 +++ .../pkg/cvo/render.go | 114 +++ .../pkg/cvo/status.go | 131 ++++ .../cluster-version-operator/pkg/cvo/sync.go | 162 ++++ .../pkg/cvo/updatepayload.go | 272 +++++++ .../clientset/versioned/clientset.go | 120 +++ .../pkg/generated/clientset/versioned/doc.go | 20 + .../versioned/fake/clientset_generated.go | 94 +++ .../generated/clientset/versioned/fake/doc.go | 20 + .../clientset/versioned/fake/register.go | 56 ++ .../clientset/versioned/scheme/doc.go | 20 + .../clientset/versioned/scheme/register.go | 56 ++ .../v1/clusterversion.openshift.io_client.go | 90 +++ .../v1/cvoconfig.go | 157 ++++ .../clusterversion.openshift.io/v1/doc.go | 20 + .../v1/fake/doc.go | 20 + ...fake_clusterversion.openshift.io_client.go | 40 + .../v1/fake/fake_cvoconfig.go | 128 ++++ .../v1/generated_expansion.go | 21 + .../v1/clusteroperator.go | 174 +++++ .../operatorstatus.openshift.io/v1/doc.go | 20 + .../v1/fake/doc.go | 20 + .../v1/fake/fake_clusteroperator.go | 140 ++++ ...fake_operatorstatus.openshift.io_client.go | 40 + .../v1/generated_expansion.go | 21 + .../v1/operatorstatus.openshift.io_client.go | 90 +++ .../clusterversion.openshift.io/interface.go | 46 ++ .../v1/cvoconfig.go | 89 +++ .../v1/interface.go | 45 ++ .../informers/externalversions/factory.go | 186 +++++ .../informers/externalversions/generic.go | 67 ++ .../internalinterfaces/factory_interfaces.go | 38 + .../operatorstatus.openshift.io/interface.go | 46 ++ .../v1/clusteroperator.go | 89 +++ .../v1/interface.go | 45 ++ .../v1/cvoconfig.go | 94 +++ .../v1/expansion_generated.go | 27 + .../v1/clusteroperator.go | 94 +++ .../v1/expansion_generated.go | 27 + .../pkg/version/version.go | 20 + 123 files changed, 9520 insertions(+) create mode 100644 vendor/github.com/openshift/cluster-version-operator/.gitignore create mode 100644 vendor/github.com/openshift/cluster-version-operator/Dockerfile create mode 100644 vendor/github.com/openshift/cluster-version-operator/Gopkg.lock create mode 100644 vendor/github.com/openshift/cluster-version-operator/Gopkg.toml create mode 100644 vendor/github.com/openshift/cluster-version-operator/LICENSE create mode 100644 vendor/github.com/openshift/cluster-version-operator/OWNERS create mode 100644 vendor/github.com/openshift/cluster-version-operator/README.md create mode 100644 vendor/github.com/openshift/cluster-version-operator/bootstrap/bootstrap-pod.yaml create mode 100644 vendor/github.com/openshift/cluster-version-operator/cmd/image.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/cmd/main.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/cmd/render.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/cmd/start.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/cmd/version.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/docs/dev/operators.md create mode 100755 vendor/github.com/openshift/cluster-version-operator/hack/build-go.sh create mode 100755 vendor/github.com/openshift/cluster-version-operator/hack/build-image.sh create mode 100755 
vendor/github.com/openshift/cluster-version-operator/hack/push-image.sh create mode 100755 vendor/github.com/openshift/cluster-version-operator/hack/update-codegen.sh create mode 100755 vendor/github.com/openshift/cluster-version-operator/hack/update-vendor.sh create mode 100755 vendor/github.com/openshift/cluster-version-operator/hack/verify-codegen.sh create mode 100755 vendor/github.com/openshift/cluster-version-operator/hack/verify-style.sh create mode 100644 vendor/github.com/openshift/cluster-version-operator/hack/yamllint-config.yaml create mode 100644 vendor/github.com/openshift/cluster-version-operator/install/0000_00_cluster-version-operator_00_namespace.yaml create mode 100644 vendor/github.com/openshift/cluster-version-operator/install/0000_00_cluster-version-operator_01_cvoconfig.crd.yaml create mode 100644 vendor/github.com/openshift/cluster-version-operator/install/0000_00_cluster-version-operator_02_roles.yaml create mode 100644 vendor/github.com/openshift/cluster-version-operator/install/0000_00_cluster-version-operator_03_daemonset.yaml create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/manifest.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/manifest_test.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/apiext.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/apireg.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/apps.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/batch.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/core.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/cv.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/rbac.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/security.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/apiext.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/apireg.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/apps.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/batch.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/core.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/interface.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/rbac.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/register.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/security.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/apiext.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/apireg.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/apps.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/batch.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/core.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/cv.go 
create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/meta.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/meta_test.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/os.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/rbac.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/security.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourceread/apiext.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourceread/apireg.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourceread/apps.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourceread/batch.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourceread/core.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourceread/image.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourceread/rbac.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/lib/resourceread/security.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/apis/apis.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/cluster_id.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/cvoconfig.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/register.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/types.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/url.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1/register.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1/types.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/autoupdate/autoupdate.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/autoupdate/autoupdate_test.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/cincinnati/cincinnati.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/cvo/cvo.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/cvo/image.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/cvo/internal/dynamicclient/client.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/cvo/internal/generic.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/cvo/internal/operatorstatus.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/cvo/render.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/cvo/status.go create mode 100644 
vendor/github.com/openshift/cluster-version-operator/pkg/cvo/sync.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/cvo/updatepayload.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/clientset.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/doc.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/fake/clientset_generated.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/fake/doc.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/fake/register.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/scheme/doc.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/scheme/register.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/clusterversion.openshift.io_client.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/cvoconfig.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/doc.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/fake/doc.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/fake/fake_clusterversion.openshift.io_client.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/fake/fake_cvoconfig.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/generated_expansion.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/clusteroperator.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/doc.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/fake/doc.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/fake/fake_clusteroperator.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/fake/fake_operatorstatus.openshift.io_client.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/generated_expansion.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/operatorstatus.openshift.io_client.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/clusterversion.openshift.io/interface.go create mode 100644 
vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/clusterversion.openshift.io/v1/cvoconfig.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/clusterversion.openshift.io/v1/interface.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/factory.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/generic.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/operatorstatus.openshift.io/interface.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/operatorstatus.openshift.io/v1/clusteroperator.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/operatorstatus.openshift.io/v1/interface.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/listers/clusterversion.openshift.io/v1/cvoconfig.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/listers/clusterversion.openshift.io/v1/expansion_generated.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/listers/operatorstatus.openshift.io/v1/clusteroperator.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/generated/listers/operatorstatus.openshift.io/v1/expansion_generated.go create mode 100644 vendor/github.com/openshift/cluster-version-operator/pkg/version/version.go diff --git a/Gopkg.lock b/Gopkg.lock index 3366a4ffda..7bf8911654 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -232,6 +232,16 @@ pruneopts = "" revision = "739bd301126c341c6caddf2cb3e3935ae9e9fff9" +[[projects]] + digest = "1:5c8f9bc5d91660fb91ef7c9b10dab30492f0fec48b9f6e26e9a1eafdf67a29ce" + name = "github.com/openshift/cluster-version-operator" + packages = [ + "pkg/apis", + "pkg/apis/operatorstatus.openshift.io/v1", + ] + pruneopts = "" + revision = "fe673cb712fa5e27001488fc088ac91bb553353d" + [[projects]] branch = "master" digest = "1:70a46a231f6514e8de976fcfe9c9de2277eecc487ae7bc024fe430602c6f49bd" @@ -667,6 +677,7 @@ "github.com/ghodss/yaml", "github.com/kevinburke/go-bindata", "github.com/openshift/api/route/v1", + "github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1", "github.com/operator-framework/operator-sdk/pkg/k8sclient", "github.com/operator-framework/operator-sdk/pkg/sdk", "github.com/operator-framework/operator-sdk/pkg/util/k8sutil", diff --git a/Gopkg.toml b/Gopkg.toml index 3b3c23c46c..c8669d1148 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -41,3 +41,7 @@ required = [ [[constraint]] name = "github.com/kevinburke/go-bindata" version = "v3.11.0" + +[[constraint]] + name = "github.com/openshift/cluster-version-operator" + revision = "fe673cb712fa5e27001488fc088ac91bb553353d" diff --git a/vendor/github.com/openshift/cluster-version-operator/.gitignore b/vendor/github.com/openshift/cluster-version-operator/.gitignore new file mode 100644 index 0000000000..0817f5c937 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/.gitignore @@ -0,0 +1,4 @@ +*.swp +bin/ +.DS_Store +_output diff --git 
a/vendor/github.com/openshift/cluster-version-operator/Dockerfile b/vendor/github.com/openshift/cluster-version-operator/Dockerfile new file mode 100644 index 0000000000..095b88aee2 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/Dockerfile @@ -0,0 +1,15 @@ +FROM golang:1.10.3 AS build-env + +COPY . /go/src/github.com/openshift/cluster-version-operator +WORKDIR /go/src/github.com/openshift/cluster-version-operator +RUN ./hack/build-go.sh + +# Using alpine instead of scratch because the Job +# used to extract updatepayload from CVO image uses +# cp command. +FROM alpine +COPY --from=build-env /go/src/github.com/openshift/cluster-version-operator/_output/linux/amd64/cluster-version-operator /bin/cluster-version-operator +COPY install /manifests +COPY bootstrap /bootstrap + +ENTRYPOINT ["/bin/cluster-version-operator"] diff --git a/vendor/github.com/openshift/cluster-version-operator/Gopkg.lock b/vendor/github.com/openshift/cluster-version-operator/Gopkg.lock new file mode 100644 index 0000000000..402a83d979 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/Gopkg.lock @@ -0,0 +1,697 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + digest = "1:aba270497eb2d49f5cba6f4162d524b9a1195a24cbce8be20bf56a0051f47deb" + name = "github.com/blang/semver" + packages = ["."] + pruneopts = "NUT" + revision = "2ee87856327ba09384cabd113bc6b5d174e9ec0f" + version = "v3.5.1" + +[[projects]] + digest = "1:a2c1d0e43bd3baaa071d1b9ed72c27d78169b2b269f71c105ac4ba34b1be4a39" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "NUT" + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756" + name = "github.com/ghodss/yaml" + packages = ["."] + pruneopts = "NUT" + revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" + version = "v1.0.0" + +[[projects]] + digest = "1:8679b8a64f3613e9749c5640c3535c83399b8e69f67ce54d91dc73f6d77373af" + name = "github.com/gogo/protobuf" + packages = [ + "proto", + "sortkeys", + ] + pruneopts = "NUT" + revision = "636bf0302bc95575d69441b25a2603156ffdddf1" + version = "v1.1.1" + +[[projects]] + branch = "master" + digest = "1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a" + name = "github.com/golang/glog" + packages = ["."] + pruneopts = "NUT" + revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" + +[[projects]] + branch = "master" + digest = "1:3fb07f8e222402962fa190eb060608b34eddfb64562a18e2167df2de0ece85d8" + name = "github.com/golang/groupcache" + packages = ["lru"] + pruneopts = "NUT" + revision = "24b0969c4cb722950103eed87108c8d291a8df00" + +[[projects]] + digest = "1:03e14cff610a8a58b774e36bd337fa979482be86aab01be81fb8bbd6d0f07fc8" + name = "github.com/golang/protobuf" + packages = [ + "proto", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp", + ] + pruneopts = "NUT" + revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" + version = "v1.1.0" + +[[projects]] + branch = "master" + digest = "1:245bd4eb633039cd66106a5d340ae826d87f4e36a8602fcc940e14176fd26ea7" + name = "github.com/google/btree" + packages = ["."] + pruneopts = "NUT" + revision = "e89373fe6b4a7413d7acd6da1725b83ef713e6e4" + +[[projects]] + branch = "master" + digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc" + name = "github.com/google/gofuzz" + packages = ["."] + pruneopts = "NUT" + revision = 
"24818f796faf91cd76ec7bddd72458fbced7a6c1" + +[[projects]] + digest = "1:1bb197a3b5db4e06e00b7560f8e89836c486627f2a0338332ed37daa003d259e" + name = "github.com/google/uuid" + packages = ["."] + pruneopts = "NUT" + revision = "064e2069ce9c359c118179501254f67d7d37ba24" + version = "0.2" + +[[projects]] + digest = "1:06a7dadb7b760767341ffb6c8d377238d68a1226f2b21b5d497d2e3f6ecf6b4e" + name = "github.com/googleapis/gnostic" + packages = [ + "OpenAPIv2", + "compiler", + "extensions", + ] + pruneopts = "NUT" + revision = "7c663266750e7d82587642f65e60bc4083f1f84e" + version = "v0.2.0" + +[[projects]] + branch = "master" + digest = "1:7fdf3223c7372d1ced0b98bf53457c5e89d89aecbad9a77ba9fcc6e01f9e5621" + name = "github.com/gregjones/httpcache" + packages = [ + ".", + "diskcache", + ] + pruneopts = "NUT" + revision = "9cad4c3443a7200dd6400aef47183728de563a38" + +[[projects]] + branch = "master" + digest = "1:13e2fa5735a82a5fb044f290cfd0dba633d1c5e516b27da0509e0dbb3515a18e" + name = "github.com/hashicorp/golang-lru" + packages = [ + ".", + "simplelru", + ] + pruneopts = "NUT" + revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3" + +[[projects]] + digest = "1:65300ccc4bcb38b107b868155c303312978981e56bca707c81efec57575b5e06" + name = "github.com/imdario/mergo" + packages = ["."] + pruneopts = "NUT" + revision = "9316a62528ac99aaecb4e47eadd6dc8aa6533d58" + version = "v0.3.5" + +[[projects]] + digest = "1:406338ad39ab2e37b7f4452906442a3dbf0eb3379dd1f06aafb5c07e769a5fbb" + name = "github.com/inconshreveable/mousetrap" + packages = ["."] + pruneopts = "NUT" + revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" + version = "v1.0" + +[[projects]] + digest = "1:0243cffa4a3410f161ee613dfdd903a636d07e838a42d341da95d81f42cd1d41" + name = "github.com/json-iterator/go" + packages = ["."] + pruneopts = "NUT" + revision = "ab8a2e0c74be9d3be70b3184d9acc634935ded82" + version = "1.1.4" + +[[projects]] + digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f" + name = "github.com/modern-go/concurrent" + packages = ["."] + pruneopts = "NUT" + revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" + version = "1.0.3" + +[[projects]] + digest = "1:c6aca19413b13dc59c220ad7430329e2ec454cc310bc6d8de2c7e2b93c18a0f6" + name = "github.com/modern-go/reflect2" + packages = ["."] + pruneopts = "NUT" + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[[projects]] + branch = "master" + digest = "1:d19bfb7d7a7ea78247347809f9a1a8c41deab3bc81331a537fa657d590a9f802" + name = "github.com/openshift/api" + packages = [ + "image/docker10", + "image/dockerpre012", + "image/v1", + "security/v1", + ] + pruneopts = "NUT" + revision = "4507dcb6a81f6593aa336c5b47927cf0ae297b72" + +[[projects]] + branch = "master" + digest = "1:31493a29ce9941368a58428e797b24764b6faf801aad841ac2311d4496b6114a" + name = "github.com/openshift/client-go" + packages = [ + "security/clientset/versioned/scheme", + "security/clientset/versioned/typed/security/v1", + ] + pruneopts = "NUT" + revision = "431ec9a26e5021f35fa41ee9a89842db9bfdb370" + +[[projects]] + branch = "master" + digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2" + name = "github.com/petar/GoLLRB" + packages = ["llrb"] + pruneopts = "NUT" + revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" + +[[projects]] + digest = "1:6c6d91dc326ed6778783cff869c49fb2f61303cdd2ebbcf90abe53505793f3b6" + name = "github.com/peterbourgon/diskv" + packages = ["."] + pruneopts = "NUT" + revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" + 
version = "v2.0.1" + +[[projects]] + digest = "1:343d44e06621142ab09ae0c76c1799104cdfddd3ffb445d78b1adf8dc3ffaf3d" + name = "github.com/spf13/cobra" + packages = ["."] + pruneopts = "NUT" + revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385" + version = "v0.0.3" + +[[projects]] + digest = "1:15e5c398fbd9d2c439b635a08ac161b13d04f0c2aa587fe256b65dc0c3efe8b7" + name = "github.com/spf13/pflag" + packages = ["."] + pruneopts = "NUT" + revision = "583c0c0531f06d5278b7d917446061adc344b5cd" + version = "v1.0.1" + +[[projects]] + branch = "master" + digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8" + name = "golang.org/x/crypto" + packages = ["ssh/terminal"] + pruneopts = "NUT" + revision = "c126467f60eb25f8f27e5a981f32a87e3965053f" + +[[projects]] + branch = "master" + digest = "1:7c335f12c7a49ee399afb1af500c6acd86857507848740836564e047a2bbd1b2" + name = "golang.org/x/net" + packages = [ + "context", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + ] + pruneopts = "NUT" + revision = "3673e40ba22529d22c3fd7c93e97b0ce50fa7bdd" + +[[projects]] + branch = "master" + digest = "1:43a352083eca9cd2a8e74419460d5767885501265cfca9c7204cc085aead1361" + name = "golang.org/x/sys" + packages = [ + "unix", + "windows", + ] + pruneopts = "NUT" + revision = "bd9dbc187b6e1dacfdd2722a87e83093c2d7bd6e" + +[[projects]] + digest = "1:e7071ed636b5422cc51c0e3a6cebc229d6c9fffc528814b519a980641422d619" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "NUT" + revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" + version = "v0.3.0" + +[[projects]] + branch = "master" + digest = "1:c9e7a4b4d47c0ed205d257648b0e5b0440880cb728506e318f8ac7cd36270bc4" + name = "golang.org/x/time" + packages = ["rate"] + pruneopts = "NUT" + revision = "fbb02b2291d28baffd63558aa44b4b56f178d650" + +[[projects]] + branch = "master" + digest = "1:e1c96c8c8ce0af57da9dccb008e540b3d13b55ea04b530fb4fceb81706082bdd" + name = "golang.org/x/tools" + packages = [ + "go/ast/astutil", + "imports", + "internal/fastwalk", + ] + pruneopts = "NUT" + revision = "8cc4e8a6f4841aa92a8683fca47bc5d64b58875b" + +[[projects]] + digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" + name = "gopkg.in/inf.v0" + packages = ["."] + pruneopts = "NUT" + revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" + version = "v0.9.1" + +[[projects]] + digest = "1:7c95b35057a0ff2e19f707173cc1a947fa43a6eb5c4d300d196ece0334046082" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "NUT" + revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" + version = "v2.2.1" + +[[projects]] + digest = "1:ef716a2116d8a040e16fbcd7fca71d3354915a94720de6af22c7a09970234296" + name = "k8s.io/api" + packages = [ + "admissionregistration/v1alpha1", + "admissionregistration/v1beta1", + "apps/v1", + "apps/v1beta1", + "apps/v1beta2", + "authentication/v1", + "authentication/v1beta1", + "authorization/v1", + "authorization/v1beta1", + "autoscaling/v1", + "autoscaling/v2beta1", + "batch/v1", + "batch/v1beta1", + "batch/v2alpha1", + "certificates/v1beta1", + "core/v1", + "events/v1beta1", + "extensions/v1beta1", + "networking/v1", + "policy/v1beta1", + "rbac/v1", + "rbac/v1alpha1", + "rbac/v1beta1", + "scheduling/v1alpha1", + "scheduling/v1beta1", + "settings/v1alpha1", + 
"storage/v1", + "storage/v1alpha1", + "storage/v1beta1", + ] + pruneopts = "NUT" + revision = "2d6f90ab1293a1fb871cf149423ebb72aa7423aa" + version = "kubernetes-1.11.1" + +[[projects]] + digest = "1:82327daaffa8cd2a1f3e4d4c2c98288bbcfa7a46a508c0ee93158eac5f7e3a76" + name = "k8s.io/apiextensions-apiserver" + packages = [ + "pkg/apis/apiextensions", + "pkg/apis/apiextensions/v1beta1", + "pkg/client/clientset/clientset", + "pkg/client/clientset/clientset/scheme", + "pkg/client/clientset/clientset/typed/apiextensions/v1beta1", + "pkg/client/informers/externalversions", + "pkg/client/informers/externalversions/apiextensions", + "pkg/client/informers/externalversions/apiextensions/v1beta1", + "pkg/client/informers/externalversions/internalinterfaces", + "pkg/client/listers/apiextensions/v1beta1", + ] + pruneopts = "NUT" + revision = "06dfdaae5c2bd89e1243151ff65b9bf8ee050f28" + version = "kubernetes-1.11.1" + +[[projects]] + digest = "1:159c095dfb7597e624380203dc6151fe870f7c52b4950a3459ecf1890f6500e2" + name = "k8s.io/apimachinery" + packages = [ + "pkg/api/equality", + "pkg/api/errors", + "pkg/api/meta", + "pkg/api/resource", + "pkg/apis/meta/internalversion", + "pkg/apis/meta/v1", + "pkg/apis/meta/v1/unstructured", + "pkg/apis/meta/v1beta1", + "pkg/conversion", + "pkg/conversion/queryparams", + "pkg/fields", + "pkg/labels", + "pkg/runtime", + "pkg/runtime/schema", + "pkg/runtime/serializer", + "pkg/runtime/serializer/json", + "pkg/runtime/serializer/protobuf", + "pkg/runtime/serializer/recognizer", + "pkg/runtime/serializer/streaming", + "pkg/runtime/serializer/versioning", + "pkg/selection", + "pkg/types", + "pkg/util/cache", + "pkg/util/clock", + "pkg/util/diff", + "pkg/util/errors", + "pkg/util/framer", + "pkg/util/intstr", + "pkg/util/json", + "pkg/util/mergepatch", + "pkg/util/net", + "pkg/util/rand", + "pkg/util/runtime", + "pkg/util/sets", + "pkg/util/strategicpatch", + "pkg/util/validation", + "pkg/util/validation/field", + "pkg/util/wait", + "pkg/util/yaml", + "pkg/version", + "pkg/watch", + "third_party/forked/golang/json", + "third_party/forked/golang/reflect", + ] + pruneopts = "NUT" + revision = "103fd098999dc9c0c88536f5c9ad2e5da39373ae" + version = "kubernetes-1.11.1" + +[[projects]] + digest = "1:e9232c127196055e966ae56877363f82fb494dd8c7fda0112b477e1339082d05" + name = "k8s.io/client-go" + packages = [ + "discovery", + "discovery/cached", + "discovery/fake", + "dynamic", + "informers", + "informers/admissionregistration", + "informers/admissionregistration/v1alpha1", + "informers/admissionregistration/v1beta1", + "informers/apps", + "informers/apps/v1", + "informers/apps/v1beta1", + "informers/apps/v1beta2", + "informers/autoscaling", + "informers/autoscaling/v1", + "informers/autoscaling/v2beta1", + "informers/batch", + "informers/batch/v1", + "informers/batch/v1beta1", + "informers/batch/v2alpha1", + "informers/certificates", + "informers/certificates/v1beta1", + "informers/core", + "informers/core/v1", + "informers/events", + "informers/events/v1beta1", + "informers/extensions", + "informers/extensions/v1beta1", + "informers/internalinterfaces", + "informers/networking", + "informers/networking/v1", + "informers/policy", + "informers/policy/v1beta1", + "informers/rbac", + "informers/rbac/v1", + "informers/rbac/v1alpha1", + "informers/rbac/v1beta1", + "informers/scheduling", + "informers/scheduling/v1alpha1", + "informers/scheduling/v1beta1", + "informers/settings", + "informers/settings/v1alpha1", + "informers/storage", + "informers/storage/v1", + 
"informers/storage/v1alpha1", + "informers/storage/v1beta1", + "kubernetes", + "kubernetes/scheme", + "kubernetes/typed/admissionregistration/v1alpha1", + "kubernetes/typed/admissionregistration/v1beta1", + "kubernetes/typed/apps/v1", + "kubernetes/typed/apps/v1beta1", + "kubernetes/typed/apps/v1beta2", + "kubernetes/typed/authentication/v1", + "kubernetes/typed/authentication/v1beta1", + "kubernetes/typed/authorization/v1", + "kubernetes/typed/authorization/v1beta1", + "kubernetes/typed/autoscaling/v1", + "kubernetes/typed/autoscaling/v2beta1", + "kubernetes/typed/batch/v1", + "kubernetes/typed/batch/v1beta1", + "kubernetes/typed/batch/v2alpha1", + "kubernetes/typed/certificates/v1beta1", + "kubernetes/typed/core/v1", + "kubernetes/typed/events/v1beta1", + "kubernetes/typed/extensions/v1beta1", + "kubernetes/typed/networking/v1", + "kubernetes/typed/policy/v1beta1", + "kubernetes/typed/rbac/v1", + "kubernetes/typed/rbac/v1alpha1", + "kubernetes/typed/rbac/v1beta1", + "kubernetes/typed/scheduling/v1alpha1", + "kubernetes/typed/scheduling/v1beta1", + "kubernetes/typed/settings/v1alpha1", + "kubernetes/typed/storage/v1", + "kubernetes/typed/storage/v1alpha1", + "kubernetes/typed/storage/v1beta1", + "listers/admissionregistration/v1alpha1", + "listers/admissionregistration/v1beta1", + "listers/apps/v1", + "listers/apps/v1beta1", + "listers/apps/v1beta2", + "listers/autoscaling/v1", + "listers/autoscaling/v2beta1", + "listers/batch/v1", + "listers/batch/v1beta1", + "listers/batch/v2alpha1", + "listers/certificates/v1beta1", + "listers/core/v1", + "listers/events/v1beta1", + "listers/extensions/v1beta1", + "listers/networking/v1", + "listers/policy/v1beta1", + "listers/rbac/v1", + "listers/rbac/v1alpha1", + "listers/rbac/v1beta1", + "listers/scheduling/v1alpha1", + "listers/scheduling/v1beta1", + "listers/settings/v1alpha1", + "listers/storage/v1", + "listers/storage/v1alpha1", + "listers/storage/v1beta1", + "pkg/apis/clientauthentication", + "pkg/apis/clientauthentication/v1alpha1", + "pkg/apis/clientauthentication/v1beta1", + "pkg/version", + "plugin/pkg/client/auth/exec", + "rest", + "rest/watch", + "restmapper", + "testing", + "tools/auth", + "tools/cache", + "tools/clientcmd", + "tools/clientcmd/api", + "tools/clientcmd/api/latest", + "tools/clientcmd/api/v1", + "tools/leaderelection", + "tools/leaderelection/resourcelock", + "tools/metrics", + "tools/pager", + "tools/record", + "tools/reference", + "transport", + "util/buffer", + "util/cert", + "util/connrotation", + "util/flowcontrol", + "util/homedir", + "util/integer", + "util/retry", + "util/workqueue", + ] + pruneopts = "NUT" + revision = "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + version = "v8.0.0" + +[[projects]] + digest = "1:8ab487a323486c8bbbaa3b689850487fdccc6cbea8690620e083b2d230a4447e" + name = "k8s.io/code-generator" + packages = [ + "cmd/client-gen", + "cmd/client-gen/args", + "cmd/client-gen/generators", + "cmd/client-gen/generators/fake", + "cmd/client-gen/generators/scheme", + "cmd/client-gen/generators/util", + "cmd/client-gen/path", + "cmd/client-gen/types", + "pkg/util", + ] + pruneopts = "T" + revision = "6702109cc68eb6fe6350b83e14407c8d7309fd1a" + version = "kubernetes-1.11.1" + +[[projects]] + digest = "1:9550042274e1e489c65f87f76edda9f651f2bbf41f3ad2d41acef5133a403b79" + name = "k8s.io/gengo" + packages = [ + "args", + "generator", + "namer", + "parser", + "types", + ] + pruneopts = "T" + revision = "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2" + +[[projects]] + digest = 
"1:03ab6a5fd79eb77eff18da6eb498ea0d68010b5af0cfa195230db43708459520" + name = "k8s.io/kube-aggregator" + packages = [ + "pkg/apis/apiregistration", + "pkg/apis/apiregistration/v1", + "pkg/apis/apiregistration/v1beta1", + "pkg/client/clientset_generated/clientset/scheme", + "pkg/client/clientset_generated/clientset/typed/apiregistration/v1", + ] + pruneopts = "NUT" + revision = "89cd614e9090a2f1e78316ed459857c49c55d276" + version = "kubernetes-1.11.1" + +[[projects]] + branch = "master" + digest = "1:a2c842a1e0aed96fd732b535514556323a6f5edfded3b63e5e0ab1bce188aa54" + name = "k8s.io/kube-openapi" + packages = ["pkg/util/proto"] + pruneopts = "NUT" + revision = "d8ea2fe547a448256204cfc68dfee7b26c720acb" + +[[projects]] + digest = "1:ff54706d46de40c865b5fcfc4bde1087c02510cd12e0150de8e405ab427d9907" + name = "k8s.io/utils" + packages = ["pointer"] + pruneopts = "NUT" + revision = "045dc31ee5c40e8240241ce28dc24d7b56130373" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/blang/semver", + "github.com/davecgh/go-spew/spew", + "github.com/golang/glog", + "github.com/google/uuid", + "github.com/openshift/api/image/v1", + "github.com/openshift/api/security/v1", + "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1", + "github.com/spf13/cobra", + "k8s.io/api/apps/v1", + "k8s.io/api/batch/v1", + "k8s.io/api/batch/v1beta1", + "k8s.io/api/core/v1", + "k8s.io/api/rbac/v1", + "k8s.io/api/rbac/v1beta1", + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset", + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1", + "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions", + "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1", + "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1", + "k8s.io/apimachinery/pkg/api/equality", + "k8s.io/apimachinery/pkg/api/errors", + "k8s.io/apimachinery/pkg/api/meta", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", + "k8s.io/apimachinery/pkg/labels", + "k8s.io/apimachinery/pkg/runtime", + "k8s.io/apimachinery/pkg/runtime/schema", + "k8s.io/apimachinery/pkg/runtime/serializer", + "k8s.io/apimachinery/pkg/types", + "k8s.io/apimachinery/pkg/util/errors", + "k8s.io/apimachinery/pkg/util/rand", + "k8s.io/apimachinery/pkg/util/runtime", + "k8s.io/apimachinery/pkg/util/sets", + "k8s.io/apimachinery/pkg/util/strategicpatch", + "k8s.io/apimachinery/pkg/util/wait", + "k8s.io/apimachinery/pkg/util/yaml", + "k8s.io/apimachinery/pkg/watch", + "k8s.io/client-go/discovery", + "k8s.io/client-go/discovery/cached", + "k8s.io/client-go/discovery/fake", + "k8s.io/client-go/dynamic", + "k8s.io/client-go/informers", + "k8s.io/client-go/informers/apps/v1", + "k8s.io/client-go/kubernetes", + "k8s.io/client-go/kubernetes/scheme", + "k8s.io/client-go/kubernetes/typed/apps/v1", + "k8s.io/client-go/kubernetes/typed/batch/v1", + "k8s.io/client-go/kubernetes/typed/core/v1", + "k8s.io/client-go/kubernetes/typed/rbac/v1", + "k8s.io/client-go/listers/apps/v1", + "k8s.io/client-go/rest", + "k8s.io/client-go/restmapper", + "k8s.io/client-go/testing", + "k8s.io/client-go/tools/cache", + "k8s.io/client-go/tools/clientcmd", + "k8s.io/client-go/tools/leaderelection", + "k8s.io/client-go/tools/leaderelection/resourcelock", + "k8s.io/client-go/tools/record", + "k8s.io/client-go/util/flowcontrol", + 
"k8s.io/client-go/util/workqueue", + "k8s.io/code-generator/cmd/client-gen", + "k8s.io/gengo/types", + "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1", + "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1", + "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1", + "k8s.io/utils/pointer", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/openshift/cluster-version-operator/Gopkg.toml b/vendor/github.com/openshift/cluster-version-operator/Gopkg.toml new file mode 100644 index 0000000000..62af94ba4d --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/Gopkg.toml @@ -0,0 +1,69 @@ +required = [ "k8s.io/code-generator/cmd/client-gen", "k8s.io/gengo/types" ] + +[prune] + non-go = true + go-tests = true + unused-packages = true + +[[prune.project]] + name = "k8s.io/code-generator" + non-go = false + unused-packages = false + +[[prune.project]] + name = "k8s.io/gengo" + non-go = false + unused-packages = false + +[[constraint]] + branch = "master" + name = "github.com/golang/glog" + +[[constraint]] + name = "github.com/google/uuid" + version = "0.2.0" + +[[constraint]] + name = "k8s.io/api" + version = "kubernetes-1.11.1" + +[[constraint]] + name = "k8s.io/client-go" + version = "8.0.0" + +[[constraint]] + name = "k8s.io/apiextensions-apiserver" + version = "kubernetes-1.11.1" + +[[constraint]] + name = "k8s.io/apimachinery" + version = "kubernetes-1.11.1" + +[[constraint]] + name = "k8s.io/code-generator" + version = "kubernetes-1.11.1" + +[[constraint]] + name = "k8s.io/kube-aggregator" + version = "kubernetes-1.11.1" + +[[constraint]] + name = "k8s.io/utils" + revision = "045dc31ee5c40e8240241ce28dc24d7b56130373" + +[[override]] + name = "k8s.io/gengo" + revision = "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2" + +[[constraint]] + name = "github.com/spf13/cobra" + version = "v0.0.3" + +[[constraint]] + name = "github.com/openshift/api" + branch = "master" + +[[constraint]] + name = "github.com/openshift/client-go" + branch = "master" + diff --git a/vendor/github.com/openshift/cluster-version-operator/LICENSE b/vendor/github.com/openshift/cluster-version-operator/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/openshift/cluster-version-operator/OWNERS b/vendor/github.com/openshift/cluster-version-operator/OWNERS new file mode 100644 index 0000000000..5056625f20 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md + +approvers: + - crawford + - smarterclayton + - yifan-gu + - abhinavdahiya + - wking diff --git a/vendor/github.com/openshift/cluster-version-operator/README.md b/vendor/github.com/openshift/cluster-version-operator/README.md new file mode 100644 index 0000000000..0c7b627ea1 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/README.md @@ -0,0 +1,45 @@ +# Cluster Version Operator (CVO) + +## Building and Publishing CVO + +```sh +./hack/build-image.sh && REPO= ./hack/push-image.go +``` + +1. This builds image locally and then pushes `${VERSION}` and `latest` tags to `${REPO}/origin-cluster-version-operator`. + +2. 
`${VERSION}` encodes the Git commit used to build the images. + +## Building release image using local CVO + +1. Make sure you have `oc` binary from https://github.com/openshift/origin master as it requires `adm release` subcommand. + +2. Run the following command to create release-image at `docker.io/abhinavdahiya/origin-release:latest`: + +```sh +oc adm release new -n openshift --server https://api.ci.openshift.org \ + --from-image-stream=origin-v4.0 \ + --to-image-base=docker.io/abhinavdahiya/origin-cluster-version-operator:latest \ + --to-image docker.io/abhinavdahiya/origin-release:latest +``` + +## Using CVO to render the release-payload locally + +1. Run the following command to get render the release-payload contents to `/tmp/cvo` + +```sh +podman run --rm -ti \ + -v /tmp/cvo:/tmp/cvo:z \ + \ + render \ + --output-dir=/tmp/cvo \ + --release-image="" +``` + +`` can be personal release image generated using [this](#building-release-image-using-local-cvo) or Origin's release image like `registry.svc.ci.openshift.org/openshift/origin-release:v4.0`. + +## Installing CVO and operators in cluster. + +1. Use CVO `render` to render all the manifests from release-payload to a directory. [here](#using-cvo-to-render-the-release-payload-locally) + +2. Create the operators from the manifests by using `oc create -f `. diff --git a/vendor/github.com/openshift/cluster-version-operator/bootstrap/bootstrap-pod.yaml b/vendor/github.com/openshift/cluster-version-operator/bootstrap/bootstrap-pod.yaml new file mode 100644 index 0000000000..1a93b6fecc --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/bootstrap/bootstrap-pod.yaml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: Pod +metadata: + name: bootstrap-cluster-version-operator + namespace: openshift-cluster-version + labels: + k8s-app: cluster-version-operator +spec: + containers: + - name: cluster-version-operator + image: {{.ReleaseImage}} + imagePullPolicy: Always + args: + - "start" + - "--release-image={{.ReleaseImage}}" + - "--enable-auto-update=false" + - "--v=4" + - "--kubeconfig=/etc/kubernetes/kubeconfig" + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/ssl/certs + name: etc-ssl-certs + readOnly: true + - mountPath: /etc/kubernetes/kubeconfig + name: kubeconfig + readOnly: true + env: + - name: KUBERNETES_SERVICE_PORT # allows CVO to communicate with apiserver directly on same host. + value: "6443" + - name: KUBERNETES_SERVICE_HOST # allows CVO to communicate with apiserver directly on same host. 
+ value: "127.0.0.1" + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + hostNetwork: true + volumes: + - name: kubeconfig + hostPath: + path: /etc/kubernetes/kubeconfig + - name: etc-ssl-certs + hostPath: + path: /etc/ssl/certs diff --git a/vendor/github.com/openshift/cluster-version-operator/cmd/image.go b/vendor/github.com/openshift/cluster-version-operator/cmd/image.go new file mode 100644 index 0000000000..3798e8702b --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/cmd/image.go @@ -0,0 +1,39 @@ +package main + +import ( + "flag" + "fmt" + + "github.com/golang/glog" + "github.com/spf13/cobra" + + "github.com/openshift/cluster-version-operator/pkg/cvo" +) + +var ( + imageCmd = &cobra.Command{ + Use: "image", + Short: "Returns image for requested short-name from UpdatePayload", + Long: "", + Example: "%[1] image ", + Run: runImageCmd, + } +) + +func init() { + rootCmd.AddCommand(imageCmd) +} + +func runImageCmd(cmd *cobra.Command, args []string) { + flag.Set("logtostderr", "true") + flag.Parse() + + if len(args) == 0 { + glog.Fatalf("missing command line argument short-name") + } + image, err := cvo.ImageForShortName(args[0]) + if err != nil { + glog.Fatalf("error: %v", err) + } + fmt.Printf(image) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/cmd/main.go b/vendor/github.com/openshift/cluster-version-operator/cmd/main.go new file mode 100644 index 0000000000..e9203aa682 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/cmd/main.go @@ -0,0 +1,36 @@ +package main + +import ( + "flag" + + "github.com/golang/glog" + "github.com/spf13/cobra" +) + +const ( + componentName = "cluster-version-operator" + componentNamespace = "openshift-cluster-version" +) + +var ( + rootCmd = &cobra.Command{ + Use: componentName, + Short: "Run Cluster Version Controller", + Long: "", + } + + rootOpts struct { + releaseImage string + } +) + +func init() { + rootCmd.PersistentFlags().AddGoFlagSet(flag.CommandLine) + rootCmd.PersistentFlags().StringVar(&rootOpts.releaseImage, "release-image", "", "The Openshift release image url.") +} + +func main() { + if err := rootCmd.Execute(); err != nil { + glog.Exitf("Error executing mcc: %v", err) + } +} diff --git a/vendor/github.com/openshift/cluster-version-operator/cmd/render.go b/vendor/github.com/openshift/cluster-version-operator/cmd/render.go new file mode 100644 index 0000000000..4f994be9a7 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/cmd/render.go @@ -0,0 +1,43 @@ +package main + +import ( + "flag" + + "github.com/golang/glog" + "github.com/spf13/cobra" + + "github.com/openshift/cluster-version-operator/pkg/cvo" +) + +var ( + renderCmd = &cobra.Command{ + Use: "render", + Short: "Renders the UpdatePayload to disk.", + Long: "", + Run: runRenderCmd, + } + + renderOpts struct { + outputDir string + } +) + +func init() { + rootCmd.AddCommand(renderCmd) + renderCmd.PersistentFlags().StringVar(&renderOpts.outputDir, "output-dir", "", "The output directory where the manifests will be rendered.") +} + +func runRenderCmd(cmd *cobra.Command, args []string) { + flag.Set("logtostderr", "true") + flag.Parse() + + if renderOpts.outputDir == "" { + glog.Fatalf("missing --output-dir flag, it is required") + } + if rootOpts.releaseImage == "" { + glog.Fatalf("missing --release-image flag, it is required") + } + if err := cvo.Render(renderOpts.outputDir, rootOpts.releaseImage); err != nil { + glog.Fatalf("Render command failed: %v", err) + } +} diff --git 
a/vendor/github.com/openshift/cluster-version-operator/cmd/start.go b/vendor/github.com/openshift/cluster-version-operator/cmd/start.go new file mode 100644 index 0000000000..cc7e6d19f0 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/cmd/start.go @@ -0,0 +1,255 @@ +package main + +import ( + "flag" + "math/rand" + "os" + "time" + + "github.com/golang/glog" + "github.com/google/uuid" + "github.com/openshift/cluster-version-operator/pkg/autoupdate" + "github.com/openshift/cluster-version-operator/pkg/cvo" + clientset "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned" + informers "github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions" + "github.com/openshift/cluster-version-operator/pkg/version" + "github.com/spf13/cobra" + "k8s.io/api/core/v1" + apiext "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + apiextinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" + "k8s.io/client-go/tools/record" +) + +const ( + minResyncPeriod = 10 * time.Second + + leaseDuration = 90 * time.Second + renewDeadline = 45 * time.Second + retryPeriod = 30 * time.Second +) + +var ( + startCmd = &cobra.Command{ + Use: "start", + Short: "Starts Cluster Version Operator", + Long: "", + Run: runStartCmd, + } + + startOpts struct { + kubeconfig string + nodeName string + + enableAutoUpdate bool + } +) + +func init() { + rootCmd.AddCommand(startCmd) + startCmd.PersistentFlags().StringVar(&startOpts.kubeconfig, "kubeconfig", "", "Kubeconfig file to access a remote cluster (testing only)") + startCmd.PersistentFlags().StringVar(&startOpts.nodeName, "node-name", "", "kubernetes node name CVO is scheduled on.") + startCmd.PersistentFlags().BoolVar(&startOpts.enableAutoUpdate, "enable-auto-update", true, "Enables the autoupdate controller.") +} + +func runStartCmd(cmd *cobra.Command, args []string) { + flag.Set("logtostderr", "true") + flag.Parse() + + // To help debugging, immediately log version + glog.Infof("%s", version.String) + + if startOpts.nodeName == "" { + name, ok := os.LookupEnv("NODE_NAME") + if !ok || name == "" { + glog.Fatalf("node-name is required") + } + startOpts.nodeName = name + } + + if rootOpts.releaseImage == "" { + glog.Fatalf("missing --release-image flag, it is required") + } + + cb, err := newClientBuilder(startOpts.kubeconfig) + if err != nil { + glog.Fatalf("error creating clients: %v", err) + } + stopCh := make(chan struct{}) + run := func(stop <-chan struct{}) { + + ctx := createControllerContext(cb, stopCh) + if err := startControllers(ctx); err != nil { + glog.Fatalf("error starting controllers: %v", err) + } + + ctx.InformerFactory.Start(ctx.Stop) + ctx.KubeInformerFactory.Start(ctx.Stop) + ctx.APIExtInformerFactory.Start(ctx.Stop) + close(ctx.InformersStarted) + + select {} + } + + leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{ + Lock: createResourceLock(cb), + LeaseDuration: leaseDuration, + RenewDeadline: renewDeadline, + RetryPeriod: retryPeriod, + Callbacks: leaderelection.LeaderCallbacks{ + OnStartedLeading: run, + OnStoppedLeading: func() { + glog.Fatalf("leaderelection lost") + }, + }, + }) + panic("unreachable") +} + +func 
createResourceLock(cb *clientBuilder) resourcelock.Interface { + recorder := record. + NewBroadcaster(). + NewRecorder(runtime.NewScheme(), v1.EventSource{Component: componentName}) + + id, err := os.Hostname() + if err != nil { + glog.Fatalf("error creating lock: %v", err) + } + + uuid, err := uuid.NewRandom() + if err != nil { + glog.Fatalf("Failed to generate UUID: %v", err) + } + + // add a uniquifier so that two processes on the same host don't accidentally both become active + id = id + "_" + uuid.String() + + return &resourcelock.ConfigMapLock{ + ConfigMapMeta: metav1.ObjectMeta{ + Namespace: componentNamespace, + Name: componentName, + }, + Client: cb.KubeClientOrDie("leader-election").CoreV1(), + LockConfig: resourcelock.ResourceLockConfig{ + Identity: id, + EventRecorder: recorder, + }, + } +} + +func resyncPeriod() func() time.Duration { + return func() time.Duration { + factor := rand.Float64() + 1 + return time.Duration(float64(minResyncPeriod.Nanoseconds()) * factor) + } +} + +type clientBuilder struct { + config *rest.Config +} + +func (cb *clientBuilder) RestConfig() *rest.Config { + c := rest.CopyConfig(cb.config) + return c +} + +func (cb *clientBuilder) ClientOrDie(name string) clientset.Interface { + return clientset.NewForConfigOrDie(rest.AddUserAgent(cb.config, name)) +} + +func (cb *clientBuilder) KubeClientOrDie(name string) kubernetes.Interface { + return kubernetes.NewForConfigOrDie(rest.AddUserAgent(cb.config, name)) +} + +func (cb *clientBuilder) APIExtClientOrDie(name string) apiext.Interface { + return apiext.NewForConfigOrDie(rest.AddUserAgent(cb.config, name)) +} + +func newClientBuilder(kubeconfig string) (*clientBuilder, error) { + var config *rest.Config + var err error + + if kubeconfig != "" { + glog.V(4).Infof("Loading kube client config from path %q", kubeconfig) + config, err = clientcmd.BuildConfigFromFlags("", kubeconfig) + } else { + glog.V(4).Infof("Using in-cluster kube client config") + config, err = rest.InClusterConfig() + } + if err != nil { + return nil, err + } + + return &clientBuilder{ + config: config, + }, nil +} + +type controllerContext struct { + ClientBuilder *clientBuilder + + InformerFactory informers.SharedInformerFactory + KubeInformerFactory kubeinformers.SharedInformerFactory + APIExtInformerFactory apiextinformers.SharedInformerFactory + + Stop <-chan struct{} + + InformersStarted chan struct{} + + ResyncPeriod func() time.Duration +} + +func createControllerContext(cb *clientBuilder, stop <-chan struct{}) *controllerContext { + client := cb.ClientOrDie("shared-informer") + kubeClient := cb.KubeClientOrDie("kube-shared-informer") + apiExtClient := cb.APIExtClientOrDie("apiext-shared-informer") + + sharedInformers := informers.NewSharedInformerFactory(client, resyncPeriod()()) + kubeSharedInformer := kubeinformers.NewSharedInformerFactory(kubeClient, resyncPeriod()()) + apiExtSharedInformer := apiextinformers.NewSharedInformerFactory(apiExtClient, resyncPeriod()()) + + return &controllerContext{ + ClientBuilder: cb, + InformerFactory: sharedInformers, + KubeInformerFactory: kubeSharedInformer, + APIExtInformerFactory: apiExtSharedInformer, + Stop: stop, + InformersStarted: make(chan struct{}), + ResyncPeriod: resyncPeriod(), + } +} + +func startControllers(ctx *controllerContext) error { + go cvo.New( + startOpts.nodeName, + componentNamespace, componentName, + rootOpts.releaseImage, + ctx.InformerFactory.Clusterversion().V1().CVOConfigs(), + ctx.InformerFactory.Operatorstatus().V1().ClusterOperators(), + 
ctx.APIExtInformerFactory.Apiextensions().V1beta1().CustomResourceDefinitions(), + ctx.KubeInformerFactory.Apps().V1().Deployments(), + ctx.ClientBuilder.RestConfig(), + ctx.ClientBuilder.ClientOrDie(componentName), + ctx.ClientBuilder.KubeClientOrDie(componentName), + ctx.ClientBuilder.APIExtClientOrDie(componentName), + ).Run(2, ctx.Stop) + + if startOpts.enableAutoUpdate { + go autoupdate.New( + componentNamespace, componentName, + ctx.InformerFactory.Clusterversion().V1().CVOConfigs(), + ctx.InformerFactory.Operatorstatus().V1().ClusterOperators(), + ctx.ClientBuilder.ClientOrDie(componentName), + ctx.ClientBuilder.KubeClientOrDie(componentName), + ).Run(2, ctx.Stop) + } + + return nil +} diff --git a/vendor/github.com/openshift/cluster-version-operator/cmd/version.go b/vendor/github.com/openshift/cluster-version-operator/cmd/version.go new file mode 100644 index 0000000000..be28dc2a3b --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/cmd/version.go @@ -0,0 +1,29 @@ +package main + +import ( + "flag" + "fmt" + + "github.com/openshift/cluster-version-operator/pkg/version" + "github.com/spf13/cobra" +) + +var ( + versionCmd = &cobra.Command{ + Use: "version", + Short: "Print the version number of Cluster Version Operator", + Long: `All software has versions. This is Cluster Version Operator's.`, + Run: runVersionCmd, + } +) + +func init() { + rootCmd.AddCommand(versionCmd) +} + +func runVersionCmd(cmd *cobra.Command, args []string) { + flag.Set("logtostderr", "true") + flag.Parse() + + fmt.Println(version.String) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/docs/dev/operators.md b/vendor/github.com/openshift/cluster-version-operator/docs/dev/operators.md new file mode 100644 index 0000000000..786ad09e33 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/docs/dev/operators.md @@ -0,0 +1,108 @@ +# Second Level Operator integration with CVO + +## How do I get added to the release payload? +Add the following to your Dockerfile +``` +FROM … + +ADD manifests-for-operator/ /manifests +LABEL io.openshift.release.operator=true +``` + +Ensure your image is published into the cluster release tag by ci-operator +Wait for a new release payload to be created (usually once you push to master in your operator). + +## What do I put in /manifests? + +You need the following: + +1..N manifest yaml or JSON files (preferably YAML for readability) that deploy your operator, including: + + - Namespace for your operator + - Roles your operator needs + - A service account and a service account role binding + - Deployment for your operator + - An OperatorStatus CR + - Any other config objects your operator might need + - An image-references file (See below) + +In your deployment you can reference the latest development version of your operator image (quay.io/openshift/origin-machine-api-operator:latest). If you have other hard-coded image strings, try to put them as environment variables on your deployment or as a config map. + +### Names of manifest files + +Your manifests will be applied in alphabetical order by the CVO, so name your files in the order you want them run. 
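As noted above, the ordering is nothing more than a lexicographic sort of the manifest file names, which is why a zero-padded numeric prefix is enough to control it. The sketch below only illustrates that sorting behaviour and is not the CVO's real apply loop; `applyManifest` and the `/release-manifests` glob are assumptions made for the example:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"sort"
)

// applyManifest is a hypothetical stand-in for whatever creates or updates
// the object described by a single manifest file.
func applyManifest(path string) error {
	fmt.Println("applying", path)
	return nil
}

func main() {
	// Collect every manifest shipped in the release payload.
	paths, err := filepath.Glob("/release-manifests/*.yaml")
	if err != nil {
		panic(err)
	}

	// Plain lexicographic sort: "0000_07_..." runs before "0000_08_...",
	// and "01_roles.yaml" before "02_deployment.yaml".
	sort.Strings(paths)

	for _, p := range paths {
		if err := applyManifest(p); err != nil {
			fmt.Fprintf(os.Stderr, "failed to apply %s: %v\n", p, err)
			os.Exit(1)
		}
	}
}
```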
+If you are a normal operator (don’t need to run before the kube apiserver), you should name your manifest files in a way that feels easy: + +``` +/manifests/ + deployment.yaml + roles.yaml +``` + +If you’d like to ensure your manifests are applied in order to the cluster add a numeric prefix to sort in the directory: + +``` +/manifests/ + 01_roles.yaml + 02_deployment.yaml +``` + +When your manifests are added to the release payload, they’ll be given a prefix that corresponds to the name of your repo/image: + +``` +/release-manifests/ + 99_ingress-operator_01_roles.yaml + 99_ingress-operator_02_deployment.yaml +``` + +### How do I get added as a special run level? +Some operators need to run at a specific time in the release process (OLM, kube, openshift core operators, network, service CA). These components can ensure they run in a specific order across operators by prefixing their manifests with: + + 0000__- + +For example, the Kube core operators run in runlevel 10-19 and have filenames like + + 0000_13_cluster-kube-scheduler-operator_03_crd.yaml + +Assigned runlevels + + - 00-04 - CVO + - 07 - Network operator + - 08 - DNS operator + - 09 - Service signer CA + - 10-19 - Kube operators (master team) + - 20-29 - OpenShift core operators (master team) + - 30-39 - OLM + +## How do I ensure the right images get used by my manifests? +Your manifests can contain a tag to the latest development image published by Origin. You’ll annotate your manifests by creating a file that identifies those images. + +Assume you have two images in your manifests - `quay.io/openshift/origin-ingress-operator:latest` and `quay.io/openshift/origin-haproxy-router:latest`. Those correspond to the following tags `ingress-operator` and `haproxy-router` when the CI runs. + +Create a file `image-references` in the /manifests dir with the following contents: + +``` +kind: ImageStream +apiVersion: image.openshift.io/v1 +spec: + tags: + - name: ingress-operator + from: + kind: DockerImage + Name: quay.io/openshift/origin-ingress-operator + - name: haproxy-router + from: + kind: DockerImage + Name: quay.io/openshift/origin-haproxy-router +``` + +The release tooling will read image-references and do the following operations: + +Verify that the tags `ingress-operator` and `haproxy-router` exist from the release / CI tooling (in the image stream `openshift/origin-v4.0` on api.ci). If they don’t exist, you’ll get a build error. +Do a find and replace in your manifests (effectively a sed) that replaces `quay.io/openshift/origin-haproxy-router(:.*|@:.*)` with `registry.svc.ci.openshift.org/openshift/origin-v4.0@sha256:` +Store the fact that operator ingress-operator uses both of those images in a metadata file alongside the manifests +Bundle up your manifests and the metadata file as a docker image and push them to a registry + +Later on, when someone wants to mirror a particular release, there will be tooling that can take the list of all images used by operators and mirror them to a new repo. + +This pattern tries to balance between having the manifests in your source repo be able to deploy your latest upstream code *and* allowing us to get a full listing of all images used by various operators. 
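To make the find-and-replace step concrete, here is a minimal sketch of the kind of substitution described above. The regular expression, the example pull specs, and the `rewriteManifest` helper are illustrative assumptions, not the actual release tooling:

```go
package main

import (
	"fmt"
	"regexp"
)

// rewriteManifest replaces a development image reference (with an optional
// :tag or @digest suffix) by a digest-pinned pull spec, mirroring the
// "effectively a sed" step described above. Both the source reference and
// the replacement are assumed inputs for this example.
func rewriteManifest(manifest, devImage, pinnedPullSpec string) string {
	re := regexp.MustCompile(regexp.QuoteMeta(devImage) + `(:[^\s"]+|@[^\s"]+)?`)
	return re.ReplaceAllString(manifest, pinnedPullSpec)
}

func main() {
	deployment := `image: quay.io/openshift/origin-haproxy-router:latest`
	out := rewriteManifest(
		deployment,
		"quay.io/openshift/origin-haproxy-router",
		"registry.svc.ci.openshift.org/openshift/origin-v4.0@sha256:<digest>",
	)
	fmt.Println(out)
	// image: registry.svc.ci.openshift.org/openshift/origin-v4.0@sha256:<digest>
}
```

The real tooling derives both the source reference and the digest-pinned replacement from the `image-references` file and the built payload; the point here is only that manifests can keep referencing the development tags while the published release pins digests.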
diff --git a/vendor/github.com/openshift/cluster-version-operator/hack/build-go.sh b/vendor/github.com/openshift/cluster-version-operator/hack/build-go.sh new file mode 100755 index 0000000000..116b43bdce --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/hack/build-go.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +set -eu + +REPO=github.com/openshift/cluster-version-operator +GOFLAGS=${GOFLAGS:-} +GLDFLAGS=${GLDFLAGS:-} + +eval $(go env | grep -e "GOHOSTOS" -e "GOHOSTARCH") + +GOOS=${GOOS:-${GOHOSTOS}} +GOARCH=${GOACH:-${GOHOSTARCH}} + +# Go to the root of the repo +cd "$(git rev-parse --show-cdup)" + +if [ -z ${VERSION+a} ]; then + echo "Using version from git..." + VERSION=$(git describe --abbrev=8 --dirty --always) +fi + +GLDFLAGS+="-X ${REPO}/pkg/version.Raw=${VERSION}" + +eval $(go env) + +if [ -z ${BIN_PATH+a} ]; then + export BIN_PATH=_output/${GOOS}/${GOARCH} +fi + +mkdir -p ${BIN_PATH} + +echo "Building ${REPO} (${VERSION})" +CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} go build ${GOFLAGS} -ldflags "${GLDFLAGS}" -o ${BIN_PATH}/cluster-version-operator ${REPO}/cmd/... diff --git a/vendor/github.com/openshift/cluster-version-operator/hack/build-image.sh b/vendor/github.com/openshift/cluster-version-operator/hack/build-image.sh new file mode 100755 index 0000000000..5fb942b549 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/hack/build-image.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +set -eu + +# Print errors to stderr +function print_error { + echo "ERROR: $1" >&2 +} + +function print_info { + echo "INFO: $1" >&2 +} + +# Warn when unprivileged +if [ `id --user` -ne 0 ]; then + print_error "Note: Building unprivileged may fail due to permissions" +fi + +if [ -z ${VERSION+a} ]; then + print_info "Using version from git..." + VERSION=$(git describe --abbrev=8 --dirty --always) +fi + +set -x +podman build -t "cluster-version-operator:${VERSION}" -f Dockerfile --no-cache \ No newline at end of file diff --git a/vendor/github.com/openshift/cluster-version-operator/hack/push-image.sh b/vendor/github.com/openshift/cluster-version-operator/hack/push-image.sh new file mode 100755 index 0000000000..a2bc711524 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/hack/push-image.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +set -eu + + +function print_info { + echo "INFO: $1" >&2 +} + +REPO=${REPO:-"openshift"} + +if [ -z ${VERSION+a} ]; then + print_info "Using version from git..." + VERSION=$(git describe --abbrev=8 --dirty --always) +fi + +set -x +podman push "cluster-version-operator:${VERSION}" "${REPO}/origin-cluster-version-operator:${VERSION}" +podman push "cluster-version-operator:${VERSION}" "${REPO}/origin-cluster-version-operator:latest" diff --git a/vendor/github.com/openshift/cluster-version-operator/hack/update-codegen.sh b/vendor/github.com/openshift/cluster-version-operator/hack/update-codegen.sh new file mode 100755 index 0000000000..8586c51405 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/hack/update-codegen.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +PROJECT_ROOT="$(dirname "${BASH_SOURCE[0]}")/.." 
+ +${PROJECT_ROOT}/vendor/k8s.io/code-generator/generate-groups.sh \ + all \ + github.com/openshift/cluster-version-operator/pkg/generated \ + github.com/openshift/cluster-version-operator/pkg/apis \ + "clusterversion.openshift.io:v1 operatorstatus.openshift.io:v1" \ + $@ \ diff --git a/vendor/github.com/openshift/cluster-version-operator/hack/update-vendor.sh b/vendor/github.com/openshift/cluster-version-operator/hack/update-vendor.sh new file mode 100755 index 0000000000..e71fc5eb72 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/hack/update-vendor.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +# +# This script updates Go vendoring using dep. + +set -euo pipefail + +# Go to the root of the repo +cd "$(git rev-parse --show-cdup)" + +# Run dep. +dep ensure + +(cd hack && ./update-codegen.sh) diff --git a/vendor/github.com/openshift/cluster-version-operator/hack/verify-codegen.sh b/vendor/github.com/openshift/cluster-version-operator/hack/verify-codegen.sh new file mode 100755 index 0000000000..f86b5ea0a2 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/hack/verify-codegen.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +SCRIPT_ROOT=$(dirname "${BASH_SOURCE}")/.. + +DIFFROOT="${SCRIPT_ROOT}/pkg" +TMP_DIFFROOT="${SCRIPT_ROOT}/_tmp/pkg" +_tmp="${SCRIPT_ROOT}/_tmp" + +cleanup() { + rm -rf "${_tmp}" +} +trap "cleanup" EXIT SIGINT + +cleanup + +mkdir -p "${TMP_DIFFROOT}" +cp -a "${DIFFROOT}"/* "${TMP_DIFFROOT}" + +"${SCRIPT_ROOT}/hack/update-codegen.sh" +echo "diffing ${DIFFROOT} against freshly generated codegen" +ret=0 +diff -Naupr "${DIFFROOT}" "${TMP_DIFFROOT}" || ret=$? +cp -a "${TMP_DIFFROOT}"/* "${DIFFROOT}" +if [[ $ret -eq 0 ]] +then + echo "${DIFFROOT} up to date." +else + echo "${DIFFROOT} is out of date. Please run hack/update-codegen.sh" + exit 1 +fi diff --git a/vendor/github.com/openshift/cluster-version-operator/hack/verify-style.sh b/vendor/github.com/openshift/cluster-version-operator/hack/verify-style.sh new file mode 100755 index 0000000000..28fb134166 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/hack/verify-style.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +# +# This script invokes tools that should be run prior to pushing +# a repo, such as linters. This is designed to prevent running +# CI on code that will have to be changed. + +set -uo pipefail + +if [[ ! $(which go) ]]; then + echo "go not found on PATH. To install:" + echo "https://golang.org/dl/" + exit 1 +fi +if [[ ! $(which golint) ]]; then + echo "golint not found on PATH. To install:" + echo "go get -u github.com/golang/lint/golint" + exit 1 +fi +if [[ ! $(which yamllint) ]]; then + echo "yamllint not found on PATH. To install:" + echo "https://github.com/adrienverge/yamllint" + exit 1 +fi + +rc=0 +trap 'rc=$?' ERR + +# Go to the root of the repo +cd "$(git rev-parse --show-cdup)" + +GOFILES=$(find . -path ./vendor -prune -o -name '*.go' | grep -v vendor) +GOPKGS=$(go list ./... | grep -v '/vendor/' | grep -v '/generated/') + +echo "Running gofmt..." +gofmt -s -d $GOFILES + +echo "Running go vet..." +go vet $GOPKGS + +echo "Running golint..." +golint -set_exit_status $GOPKGS + +echo "Running yamllint..." +YAMLS=$(find . -path ./vendor -prune -o -name '*.yaml' | grep -v vendor) +yamllint -c hack/yamllint-config.yaml -s $YAMLS + +echo "Running verify code-generators" +(cd hack && ./verify-codegen.sh) + +echo "Done!" 
+exit ${rc} diff --git a/vendor/github.com/openshift/cluster-version-operator/hack/yamllint-config.yaml b/vendor/github.com/openshift/cluster-version-operator/hack/yamllint-config.yaml new file mode 100644 index 0000000000..489d922fca --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/hack/yamllint-config.yaml @@ -0,0 +1,31 @@ +--- + +extends: default + +rules: + braces: + level: warning + max-spaces-inside: 1 + brackets: + level: warning + max-spaces-inside: 1 + colons: + level: warning + commas: + level: warning + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + level: warning + hyphens: + level: warning + indentation: + level: warning + indent-sequences: consistent + line-length: disable + truthy: disable + +ignore: | + # The following have yaml-syntax-breaking templating, but are covered + # by other yaml files in the unit tests. diff --git a/vendor/github.com/openshift/cluster-version-operator/install/0000_00_cluster-version-operator_00_namespace.yaml b/vendor/github.com/openshift/cluster-version-operator/install/0000_00_cluster-version-operator_00_namespace.yaml new file mode 100644 index 0000000000..b2bc35743b --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/install/0000_00_cluster-version-operator_00_namespace.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: openshift-cluster-version + labels: + name: openshift-cluster-version + openshift.io/run-level: "1" diff --git a/vendor/github.com/openshift/cluster-version-operator/install/0000_00_cluster-version-operator_01_cvoconfig.crd.yaml b/vendor/github.com/openshift/cluster-version-operator/install/0000_00_cluster-version-operator_01_cvoconfig.crd.yaml new file mode 100644 index 0000000000..c9f65274ea --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/install/0000_00_cluster-version-operator_01_cvoconfig.crd.yaml @@ -0,0 +1,27 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + # name must match the spec fields below, and be in the form: . + name: cvoconfigs.clusterversion.openshift.io +spec: + # group name to use for REST API: /apis// + group: clusterversion.openshift.io + # list of versions supported by this CustomResourceDefinition + versions: + - name: v1 + # Each version can be enabled/disabled by Served flag. + served: true + # One and only one version must be marked as the storage version. + storage: true + # either Namespaced or Cluster + scope: Namespaced + subresources: + # enable spec/status + status: {} + names: + # plural name to be used in the URL: /apis/// + plural: cvoconfigs + # singular name to be used as an alias on the CLI and for display + singular: cvoconfig + # kind is normally the CamelCased singular type. Your resource manifests use this. 
+ kind: CVOConfig diff --git a/vendor/github.com/openshift/cluster-version-operator/install/0000_00_cluster-version-operator_02_roles.yaml b/vendor/github.com/openshift/cluster-version-operator/install/0000_00_cluster-version-operator_02_roles.yaml new file mode 100644 index 0000000000..d5c743678c --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/install/0000_00_cluster-version-operator_02_roles.yaml @@ -0,0 +1,11 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cluster-version-operator +roleRef: + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + namespace: openshift-cluster-version + name: default diff --git a/vendor/github.com/openshift/cluster-version-operator/install/0000_00_cluster-version-operator_03_daemonset.yaml b/vendor/github.com/openshift/cluster-version-operator/install/0000_00_cluster-version-operator_03_daemonset.yaml new file mode 100644 index 0000000000..2712895544 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/install/0000_00_cluster-version-operator_03_daemonset.yaml @@ -0,0 +1,54 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cluster-version-operator + namespace: openshift-cluster-version +spec: + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + k8s-app: cluster-version-operator + template: + metadata: + name: cluster-version-operator + labels: + k8s-app: cluster-version-operator + spec: + containers: + - name: cluster-version-operator + image: {{.ReleaseImage}} + imagePullPolicy: Always + args: + - "start" + - "--release-image={{.ReleaseImage}}" + - "--enable-auto-update=false" + - "--v=4" + volumeMounts: + - mountPath: /etc/ssl/certs + name: etc-ssl-certs + readOnly: true + - mountPath: /etc/cvo/updatepayloads + name: etc-cvo-updatepayloads + readOnly: true + env: + - name: KUBERNETES_SERVICE_PORT # allows CVO to communicate with apiserver directly on same host. + value: "6443" + - name: KUBERNETES_SERVICE_HOST # allows CVO to communicate with apiserver directly on same host. + value: "127.0.0.1" + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + hostNetwork: true + nodeSelector: + node-role.kubernetes.io/master: "" + tolerations: + - operator: Exists + volumes: + - name: etc-ssl-certs + hostPath: + path: /etc/ssl/certs + - name: etc-cvo-updatepayloads + hostPath: + path: /etc/cvo/updatepayloads diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/manifest.go b/vendor/github.com/openshift/cluster-version-operator/lib/manifest.go new file mode 100644 index 0000000000..412d1f6a4c --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/manifest.go @@ -0,0 +1,109 @@ +package lib + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/kubernetes/scheme" +) + +// Manifest stores Kubernetes object in Raw from a file. +// It stores the GroupVersionKind for the manifest. +type Manifest struct { + Raw []byte + GVK schema.GroupVersionKind + + obj *unstructured.Unstructured +} + +// UnmarshalJSON unmarshals bytes of single kubernetes object to Manifest. 
+func (m *Manifest) UnmarshalJSON(in []byte) error { + if m == nil { + return errors.New("Manifest: UnmarshalJSON on nil pointer") + } + + // This happens when marshalling + // + // --- (this between two `---`) + // --- + // + if bytes.Equal(in, []byte("null")) { + m.Raw = nil + return nil + } + + m.Raw = append(m.Raw[0:0], in...) + udi, _, err := scheme.Codecs.UniversalDecoder().Decode(in, nil, &unstructured.Unstructured{}) + if err != nil { + return fmt.Errorf("unable to decode manifest: %v", err) + } + ud, ok := udi.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("expected manifest to decode into *unstructured.Unstructured, got %T", ud) + } + + m.GVK = ud.GroupVersionKind() + m.obj = ud.DeepCopy() + return nil +} + +// Object returns underlying metav1.Object +func (m *Manifest) Object() metav1.Object { return m.obj } + +// ManifestsFromFiles reads files and returns Manifests in the same order. +// files should be list of absolute paths for the manifests on disk. +func ManifestsFromFiles(files []string) ([]Manifest, error) { + var manifests []Manifest + var errs []error + for _, file := range files { + file, err := os.Open(file) + if err != nil { + errs = append(errs, fmt.Errorf("error opening %s: %v", file.Name(), err)) + continue + } + defer file.Close() + + ms, err := ParseManifests(file) + if err != nil { + errs = append(errs, fmt.Errorf("error parsing %s: %v", file.Name(), err)) + continue + } + manifests = append(manifests, ms...) + } + + agg := utilerrors.NewAggregate(errs) + if agg != nil { + return nil, fmt.Errorf("error loading manifests: %v", agg.Error()) + } + + return manifests, nil +} + +// ParseManifests parses a YAML or JSON document that may contain one or more +// kubernetes resources. +func ParseManifests(r io.Reader) ([]Manifest, error) { + d := yaml.NewYAMLOrJSONDecoder(r, 1024) + var manifests []Manifest + for { + m := Manifest{} + if err := d.Decode(&m); err != nil { + if err == io.EOF { + return manifests, nil + } + return manifests, fmt.Errorf("error parsing: %v", err) + } + m.Raw = bytes.TrimSpace(m.Raw) + if len(m.Raw) == 0 || bytes.Equal(m.Raw, []byte("null")) { + continue + } + manifests = append(manifests, m) + } +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/manifest_test.go b/vendor/github.com/openshift/cluster-version-operator/lib/manifest_test.go new file mode 100644 index 0000000000..2822031754 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/manifest_test.go @@ -0,0 +1,317 @@ +package lib + +import ( + "io/ioutil" + "os" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func TestParseManifests(t *testing.T) { + tests := []struct { + name string + raw string + want []Manifest + }{{ + name: "ingress", + raw: ` +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: test-ingress + namespace: test-namespace +spec: + rules: + - http: + paths: + - path: /testpath + backend: + serviceName: test + servicePort: 80 +`, + want: []Manifest{{ + Raw: []byte(`{"apiVersion":"extensions/v1beta1","kind":"Ingress","metadata":{"name":"test-ingress","namespace":"test-namespace"},"spec":{"rules":[{"http":{"paths":[{"backend":{"serviceName":"test","servicePort":80},"path":"/testpath"}]}}]}}`), + GVK: schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Ingress"}, + }}, + }, { + name: "configmap", + raw: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: a-config + namespace: default +data: 
+ color: "red" + multi-line: | + hello world + how are you? +`, + want: []Manifest{{ + Raw: []byte(`{"apiVersion":"v1","data":{"color":"red","multi-line":"hello world\nhow are you?\n"},"kind":"ConfigMap","metadata":{"name":"a-config","namespace":"default"}}`), + GVK: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}, + }}, + }, { + name: "two-resources", + raw: ` +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: test-ingress + namespace: test-namespace +spec: + rules: + - http: + paths: + - path: /testpath + backend: + serviceName: test + servicePort: 80 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: a-config + namespace: default +data: + color: "red" + multi-line: | + hello world + how are you? +`, + want: []Manifest{{ + Raw: []byte(`{"apiVersion":"extensions/v1beta1","kind":"Ingress","metadata":{"name":"test-ingress","namespace":"test-namespace"},"spec":{"rules":[{"http":{"paths":[{"backend":{"serviceName":"test","servicePort":80},"path":"/testpath"}]}}]}}`), + GVK: schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Ingress"}, + }, { + Raw: []byte(`{"apiVersion":"v1","data":{"color":"red","multi-line":"hello world\nhow are you?\n"},"kind":"ConfigMap","metadata":{"name":"a-config","namespace":"default"}}`), + GVK: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}, + }}, + }, { + name: "two-resources-with-empty", + raw: ` +--- +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: test-ingress + namespace: test-namespace +spec: + rules: + - http: + paths: + - path: /testpath + backend: + serviceName: test + servicePort: 80 +--- +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: a-config + namespace: default +data: + color: "red" + multi-line: | + hello world + how are you? +--- +`, + want: []Manifest{{ + Raw: []byte(`{"apiVersion":"extensions/v1beta1","kind":"Ingress","metadata":{"name":"test-ingress","namespace":"test-namespace"},"spec":{"rules":[{"http":{"paths":[{"backend":{"serviceName":"test","servicePort":80},"path":"/testpath"}]}}]}}`), + GVK: schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Ingress"}, + }, { + Raw: []byte(`{"apiVersion":"v1","data":{"color":"red","multi-line":"hello world\nhow are you?\n"},"kind":"ConfigMap","metadata":{"name":"a-config","namespace":"default"}}`), + GVK: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}, + }}, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got, err := ParseManifests(strings.NewReader(test.raw)) + if err != nil { + t.Fatalf("failed to parse manifest: %v", err) + } + + for i := range got { + got[i].obj = nil + } + + if !reflect.DeepEqual(got, test.want) { + t.Fatalf("mismatch found") + } + }) + } + +} + +func TestManifestsFromFiles(t *testing.T) { + tests := []struct { + name string + fs dir + want []Manifest + }{{ + name: "no-files", + fs: dir{ + name: "a", + }, + want: nil, + }, { + name: "all-files", + fs: dir{ + name: "a", + files: []file{{ + name: "f0", + contents: ` +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: test-ingress + namespace: test-namespace +spec: + rules: + - http: + paths: + - path: /testpath + backend: + serviceName: test + servicePort: 80 +`, + }, { + name: "f1", + contents: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: a-config + namespace: default +data: + color: "red" + multi-line: | + hello world + how are you? 
+`, + }}, + }, + want: []Manifest{{ + GVK: schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Ingress"}, + }, { + GVK: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}, + }}, + }, { + name: "files-with-multiple-manifests", + fs: dir{ + name: "a", + files: []file{{ + name: "f0", + contents: ` +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: test-ingress + namespace: test-namespace +spec: + rules: + - http: + paths: + - path: /testpath + backend: + serviceName: test + servicePort: 80 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: a-config + namespace: default +data: + color: "red" + multi-line: | + hello world + how are you? +`, + }, { + name: "f1", + contents: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: a-config + namespace: default +data: + color: "red" + multi-line: | + hello world + how are you? +`, + }}, + }, + want: []Manifest{{ + GVK: schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Ingress"}, + }, { + GVK: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}, + }, { + GVK: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}, + }}, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + tmpdir, cleanup := setupTestFS(t, test.fs) + defer func() { + if err := cleanup(); err != nil { + t.Logf("error cleaning %q", tmpdir) + } + }() + + files := []string{} + for _, f := range test.fs.files { + files = append(files, filepath.Join(tmpdir, test.fs.name, f.name)) + } + got, err := ManifestsFromFiles(files) + if err != nil { + t.Fatal(err) + } + for i := range got { + got[i].Raw = nil + got[i].obj = nil + } + if !reflect.DeepEqual(got, test.want) { + t.Fatalf("mismatch \ngot: %s \nwant: %s", spew.Sdump(got), spew.Sdump(test.want)) + } + }) + } +} + +type file struct { + name string + contents string +} + +type dir struct { + name string + files []file +} + +// setupTestFS returns path of the tmp d created and cleanup function. 
+func setupTestFS(t *testing.T, d dir) (string, func() error) { + root, err := ioutil.TempDir("", "test") + if err != nil { + t.Fatal(err) + } + dpath := filepath.Join(root, d.name) + if err := os.MkdirAll(dpath, 0755); err != nil { + t.Fatal(err) + } + for _, file := range d.files { + path := filepath.Join(dpath, file.name) + ioutil.WriteFile(path, []byte(file.contents), 0755) + } + cleanup := func() error { + return os.RemoveAll(root) + } + return root, cleanup +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/apiext.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/apiext.go new file mode 100644 index 0000000000..49ee566301 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/apiext.go @@ -0,0 +1,52 @@ +package resourceapply + +import ( + "github.com/openshift/cluster-version-operator/lib/resourcemerge" + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextclientv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + apiextlistersv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" +) + +func ApplyCustomResourceDefinition(client apiextclientv1beta1.CustomResourceDefinitionsGetter, required *apiextv1beta1.CustomResourceDefinition) (*apiextv1beta1.CustomResourceDefinition, bool, error) { + existing, err := client.CustomResourceDefinitions().Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.CustomResourceDefinitions().Create(required) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := pointer.BoolPtr(false) + resourcemerge.EnsureCustomResourceDefinition(modified, existing, *required) + if !*modified { + return existing, false, nil + } + + actual, err := client.CustomResourceDefinitions().Update(existing) + return actual, true, err +} + +func ApplyCustomResourceDefinitionFromCache(lister apiextlistersv1beta1.CustomResourceDefinitionLister, client apiextclientv1beta1.CustomResourceDefinitionsGetter, required *apiextv1beta1.CustomResourceDefinition) (*apiextv1beta1.CustomResourceDefinition, bool, error) { + existing, err := lister.Get(required.Name) + if apierrors.IsNotFound(err) { + actual, err := client.CustomResourceDefinitions().Create(required) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + existing = existing.DeepCopy() + modified := pointer.BoolPtr(false) + resourcemerge.EnsureCustomResourceDefinition(modified, existing, *required) + if !*modified { + return existing, false, nil + } + + actual, err := client.CustomResourceDefinitions().Update(existing) + return actual, true, err +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/apireg.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/apireg.go new file mode 100644 index 0000000000..eaa115d041 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/apireg.go @@ -0,0 +1,30 @@ +package resourceapply + +import ( + "github.com/openshift/cluster-version-operator/lib/resourcemerge" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apiregv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + apiregclientv1 
"k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1" + "k8s.io/utils/pointer" +) + +func ApplyAPIService(client apiregclientv1.APIServicesGetter, required *apiregv1.APIService) (*apiregv1.APIService, bool, error) { + existing, err := client.APIServices().Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.APIServices().Create(required) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := pointer.BoolPtr(false) + resourcemerge.EnsureAPIService(modified, existing, *required) + if !*modified { + return existing, false, nil + } + + actual, err := client.APIServices().Update(existing) + return actual, true, err +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/apps.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/apps.go new file mode 100644 index 0000000000..d366fd9787 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/apps.go @@ -0,0 +1,97 @@ +package resourceapply + +import ( + "github.com/openshift/cluster-version-operator/lib/resourcemerge" + appsv1 "k8s.io/api/apps/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + appsclientv1 "k8s.io/client-go/kubernetes/typed/apps/v1" + appslisterv1 "k8s.io/client-go/listers/apps/v1" + "k8s.io/utils/pointer" +) + +// ApplyDeployment applies the required deployment to the cluster. +func ApplyDeployment(client appsclientv1.DeploymentsGetter, required *appsv1.Deployment) (*appsv1.Deployment, bool, error) { + existing, err := client.Deployments(required.Namespace).Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.Deployments(required.Namespace).Create(required) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := pointer.BoolPtr(false) + resourcemerge.EnsureDeployment(modified, existing, *required) + if !*modified { + return existing, false, nil + } + + actual, err := client.Deployments(required.Namespace).Update(existing) + return actual, true, err +} + +// ApplyDeploymentFromCache applies the required deployment to the cluster. +func ApplyDeploymentFromCache(lister appslisterv1.DeploymentLister, client appsclientv1.DeploymentsGetter, required *appsv1.Deployment) (*appsv1.Deployment, bool, error) { + existing, err := lister.Deployments(required.Namespace).Get(required.Name) + if apierrors.IsNotFound(err) { + actual, err := client.Deployments(required.Namespace).Create(required) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + existing = existing.DeepCopy() + modified := pointer.BoolPtr(false) + resourcemerge.EnsureDeployment(modified, existing, *required) + if !*modified { + return existing, false, nil + } + + actual, err := client.Deployments(required.Namespace).Update(existing) + return actual, true, err +} + +// ApplyDaemonSet applies the required daemonset to the cluster. 
+func ApplyDaemonSet(client appsclientv1.DaemonSetsGetter, required *appsv1.DaemonSet) (*appsv1.DaemonSet, bool, error) { + existing, err := client.DaemonSets(required.Namespace).Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.DaemonSets(required.Namespace).Create(required) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := pointer.BoolPtr(false) + resourcemerge.EnsureDaemonSet(modified, existing, *required) + if !*modified { + return existing, false, nil + } + + actual, err := client.DaemonSets(required.Namespace).Update(existing) + return actual, true, err +} + +// ApplyDaemonSetFromCache applies the required deployment to the cluster. +func ApplyDaemonSetFromCache(lister appslisterv1.DaemonSetLister, client appsclientv1.DaemonSetsGetter, required *appsv1.DaemonSet) (*appsv1.DaemonSet, bool, error) { + existing, err := lister.DaemonSets(required.Namespace).Get(required.Name) + if apierrors.IsNotFound(err) { + actual, err := client.DaemonSets(required.Namespace).Create(required) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + existing = existing.DeepCopy() + modified := pointer.BoolPtr(false) + resourcemerge.EnsureDaemonSet(modified, existing, *required) + if !*modified { + return existing, false, nil + } + + actual, err := client.DaemonSets(required.Namespace).Update(existing) + return actual, true, err +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/batch.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/batch.go new file mode 100644 index 0000000000..95adba2a7c --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/batch.go @@ -0,0 +1,31 @@ +package resourceapply + +import ( + "github.com/openshift/cluster-version-operator/lib/resourcemerge" + batchv1 "k8s.io/api/batch/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + batchclientv1 "k8s.io/client-go/kubernetes/typed/batch/v1" + "k8s.io/utils/pointer" +) + +// ApplyJob applies the required Job to the cluster. 
+func ApplyJob(client batchclientv1.JobsGetter, required *batchv1.Job) (*batchv1.Job, bool, error) { + existing, err := client.Jobs(required.Namespace).Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.Jobs(required.Namespace).Create(required) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := pointer.BoolPtr(false) + resourcemerge.EnsureJob(modified, existing, *required) + if !*modified { + return existing, false, nil + } + + actual, err := client.Jobs(required.Namespace).Update(existing) + return actual, true, err +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/core.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/core.go new file mode 100644 index 0000000000..e465556b6a --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/core.go @@ -0,0 +1,102 @@ +package resourceapply + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + coreclientv1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/utils/pointer" + + "github.com/openshift/cluster-version-operator/lib/resourcemerge" +) + +// ApplyNamespace merges objectmeta, does not worry about anything else +func ApplyNamespace(client coreclientv1.NamespacesGetter, required *corev1.Namespace) (*corev1.Namespace, bool, error) { + existing, err := client.Namespaces().Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.Namespaces().Create(required) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := pointer.BoolPtr(false) + resourcemerge.EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) + if !*modified { + return existing, false, nil + } + + actual, err := client.Namespaces().Update(existing) + return actual, true, err +} + +// ApplyService merges objectmeta and requires +// TODO, since this cannot determine whether changes are due to legitimate actors (api server) or illegitimate ones (users), we cannot update +// TODO I've special cased the selector for now +func ApplyService(client coreclientv1.ServicesGetter, required *corev1.Service) (*corev1.Service, bool, error) { + existing, err := client.Services(required.Namespace).Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.Services(required.Namespace).Create(required) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := pointer.BoolPtr(false) + resourcemerge.EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) + selectorSame := equality.Semantic.DeepEqual(existing.Spec.Selector, required.Spec.Selector) + typeSame := equality.Semantic.DeepEqual(existing.Spec.Type, required.Spec.Type) + if selectorSame && typeSame && !*modified { + return nil, false, nil + } + existing.Spec.Selector = required.Spec.Selector + existing.Spec.Type = required.Spec.Type // if this is different, the update will fail. Status will indicate it. + + actual, err := client.Services(required.Namespace).Update(existing) + return actual, true, err +} + +// ApplyServiceAccount applies the required serviceaccount to the cluster. 
+func ApplyServiceAccount(client coreclientv1.ServiceAccountsGetter, required *corev1.ServiceAccount) (*corev1.ServiceAccount, bool, error) { + existing, err := client.ServiceAccounts(required.Namespace).Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.ServiceAccounts(required.Namespace).Create(required) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := pointer.BoolPtr(false) + resourcemerge.EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) + if !*modified { + return existing, false, nil + } + + actual, err := client.ServiceAccounts(required.Namespace).Update(existing) + return actual, true, err +} + +// ApplyConfigMap applies the required serviceaccount to the cluster. +func ApplyConfigMap(client coreclientv1.ConfigMapsGetter, required *corev1.ConfigMap) (*corev1.ConfigMap, bool, error) { + existing, err := client.ConfigMaps(required.Namespace).Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.ConfigMaps(required.Namespace).Create(required) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := pointer.BoolPtr(false) + resourcemerge.EnsureConfigMap(modified, existing, *required) + if !*modified { + return existing, false, nil + } + + actual, err := client.ConfigMaps(required.Namespace).Update(existing) + return actual, true, err +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/cv.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/cv.go new file mode 100644 index 0000000000..f4168c71ca --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/cv.go @@ -0,0 +1,107 @@ +package resourceapply + +import ( + "fmt" + + "github.com/openshift/cluster-version-operator/lib/resourcemerge" + cvv1 "github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1" + osv1 "github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1" + cvclientv1 "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1" + osclientv1 "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1" + cvlistersv1 "github.com/openshift/cluster-version-operator/pkg/generated/listers/clusterversion.openshift.io/v1" + oslistersv1 "github.com/openshift/cluster-version-operator/pkg/generated/listers/operatorstatus.openshift.io/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" +) + +func ApplyOperatorStatus(client osclientv1.ClusterOperatorsGetter, required *osv1.ClusterOperator) (*osv1.ClusterOperator, bool, error) { + if required.Status.Extension.Raw != nil && required.Status.Extension.Object != nil { + return nil, false, fmt.Errorf("both extension.Raw and extension.Object should not be set") + } + existing, err := client.ClusterOperators(required.Namespace).Get(required.Name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + actual, err := client.ClusterOperators(required.Namespace).Create(required) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := pointer.BoolPtr(false) + resourcemerge.EnsureOperatorStatus(modified, existing, *required) + if !*modified { + return existing, false, nil + } + + actual, err := client.ClusterOperators(required.Namespace).Update(existing) + 
return actual, true, err +} + +func ApplyOperatorStatusFromCache(lister oslistersv1.ClusterOperatorLister, client osclientv1.ClusterOperatorsGetter, required *osv1.ClusterOperator) (*osv1.ClusterOperator, bool, error) { + if required.Status.Extension.Raw != nil && required.Status.Extension.Object != nil { + return nil, false, fmt.Errorf("both extension.Raw and extension.Object should not be set") + } + existing, err := lister.ClusterOperators(required.Namespace).Get(required.Name) + if errors.IsNotFound(err) { + actual, err := client.ClusterOperators(required.Namespace).Create(required) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + // Don't want to mutate cache. + existing = existing.DeepCopy() + modified := pointer.BoolPtr(false) + resourcemerge.EnsureOperatorStatus(modified, existing, *required) + if !*modified { + return existing, false, nil + } + + actual, err := client.ClusterOperators(required.Namespace).Update(existing) + return actual, true, err +} + +func ApplyCVOConfig(client cvclientv1.CVOConfigsGetter, required *cvv1.CVOConfig) (*cvv1.CVOConfig, bool, error) { + existing, err := client.CVOConfigs(required.Namespace).Get(required.Name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + actual, err := client.CVOConfigs(required.Namespace).Create(required) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := pointer.BoolPtr(false) + resourcemerge.EnsureCVOConfig(modified, existing, *required) + if !*modified { + return existing, false, nil + } + + actual, err := client.CVOConfigs(required.Namespace).Update(existing) + return actual, true, err +} + +func ApplyCVOConfigFromCache(lister cvlistersv1.CVOConfigLister, client cvclientv1.CVOConfigsGetter, required *cvv1.CVOConfig) (*cvv1.CVOConfig, bool, error) { + obj, err := lister.CVOConfigs(required.Namespace).Get(required.Name) + if errors.IsNotFound(err) { + actual, err := client.CVOConfigs(required.Namespace).Create(required) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + // Don't want to mutate cache. + existing := new(cvv1.CVOConfig) + obj.DeepCopyInto(existing) + modified := pointer.BoolPtr(false) + resourcemerge.EnsureCVOConfig(modified, existing, *required) + if !*modified { + return existing, false, nil + } + + actual, err := client.CVOConfigs(required.Namespace).Update(existing) + return actual, true, err +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/rbac.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/rbac.go new file mode 100644 index 0000000000..26a534e12e --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/rbac.go @@ -0,0 +1,94 @@ +package resourceapply + +import ( + "github.com/openshift/cluster-version-operator/lib/resourcemerge" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rbacclientv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" + "k8s.io/utils/pointer" +) + +// ApplyClusterRoleBinding applies the required clusterrolebinding to the cluster. 
+func ApplyClusterRoleBinding(client rbacclientv1.ClusterRoleBindingsGetter, required *rbacv1.ClusterRoleBinding) (*rbacv1.ClusterRoleBinding, bool, error) {
+	existing, err := client.ClusterRoleBindings().Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.ClusterRoleBindings().Create(required)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := pointer.BoolPtr(false)
+	resourcemerge.EnsureClusterRoleBinding(modified, existing, *required)
+	if !*modified {
+		return existing, false, nil
+	}
+
+	actual, err := client.ClusterRoleBindings().Update(existing)
+	return actual, true, err
+}
+
+// ApplyClusterRole applies the required clusterrole to the cluster.
+func ApplyClusterRole(client rbacclientv1.ClusterRolesGetter, required *rbacv1.ClusterRole) (*rbacv1.ClusterRole, bool, error) {
+	existing, err := client.ClusterRoles().Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.ClusterRoles().Create(required)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := pointer.BoolPtr(false)
+	resourcemerge.EnsureClusterRole(modified, existing, *required)
+	if !*modified {
+		return existing, false, nil
+	}
+
+	actual, err := client.ClusterRoles().Update(existing)
+	return actual, true, err
+}
+
+// ApplyRoleBinding applies the required rolebinding to the cluster.
+func ApplyRoleBinding(client rbacclientv1.RoleBindingsGetter, required *rbacv1.RoleBinding) (*rbacv1.RoleBinding, bool, error) {
+	existing, err := client.RoleBindings(required.Namespace).Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.RoleBindings(required.Namespace).Create(required)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := pointer.BoolPtr(false)
+	resourcemerge.EnsureRoleBinding(modified, existing, *required)
+	if !*modified {
+		return existing, false, nil
+	}
+
+	actual, err := client.RoleBindings(required.Namespace).Update(existing)
+	return actual, true, err
+}
+
+// ApplyRole applies the required role to the cluster.
+func ApplyRole(client rbacclientv1.RolesGetter, required *rbacv1.Role) (*rbacv1.Role, bool, error) { + existing, err := client.Roles(required.Namespace).Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.Roles(required.Namespace).Create(required) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := pointer.BoolPtr(false) + resourcemerge.EnsureRole(modified, existing, *required) + if !*modified { + return existing, false, nil + } + + actual, err := client.Roles(required.Namespace).Update(existing) + return actual, true, err +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/security.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/security.go new file mode 100644 index 0000000000..a82e0a7fa4 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourceapply/security.go @@ -0,0 +1,31 @@ +package resourceapply + +import ( + securityv1 "github.com/openshift/api/security/v1" + securityclientv1 "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1" + "github.com/openshift/cluster-version-operator/lib/resourcemerge" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" +) + +// ApplySecurityContextConstraints applies the required SecurityContextConstraints to the cluster. +func ApplySecurityContextConstraints(client securityclientv1.SecurityContextConstraintsGetter, required *securityv1.SecurityContextConstraints) (*securityv1.SecurityContextConstraints, bool, error) { + existing, err := client.SecurityContextConstraints().Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.SecurityContextConstraints().Create(required) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := pointer.BoolPtr(false) + resourcemerge.EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) + if !*modified { + return existing, false, nil + } + + actual, err := client.SecurityContextConstraints().Update(existing) + return actual, true, err +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/apiext.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/apiext.go new file mode 100644 index 0000000000..c25646bb4b --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/apiext.go @@ -0,0 +1,77 @@ +package resourcebuilder + +import ( + "time" + + "github.com/golang/glog" + + "github.com/openshift/cluster-version-operator/lib" + "github.com/openshift/cluster-version-operator/lib/resourceapply" + "github.com/openshift/cluster-version-operator/lib/resourceread" + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextclientv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/rest" +) + +type crdBuilder struct { + client *apiextclientv1beta1.ApiextensionsV1beta1Client + raw []byte + modifier MetaV1ObjectModifierFunc +} + +func newCRDBuilder(config *rest.Config, m lib.Manifest) Interface { + return &crdBuilder{ + client: apiextclientv1beta1.NewForConfigOrDie(config), + raw: m.Raw, + } +} + +func (b *crdBuilder) WithModifier(f MetaV1ObjectModifierFunc) 
Interface { + b.modifier = f + return b +} + +func (b *crdBuilder) Do() error { + crd := resourceread.ReadCustomResourceDefinitionV1Beta1OrDie(b.raw) + if b.modifier != nil { + b.modifier(crd) + } + _, updated, err := resourceapply.ApplyCustomResourceDefinition(b.client, crd) + if err != nil { + return err + } + if updated { + return waitForCustomResourceDefinitionCompletion(b.client, crd) + } + return nil +} + +const ( + crdPollInterval = 1 * time.Second + crdPollTimeout = 1 * time.Minute +) + +func waitForCustomResourceDefinitionCompletion(client apiextclientv1beta1.CustomResourceDefinitionsGetter, crd *apiextv1beta1.CustomResourceDefinition) error { + return wait.Poll(crdPollInterval, crdPollTimeout, func() (bool, error) { + c, err := client.CustomResourceDefinitions().Get(crd.Name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + // exit early to recreate the crd. + return false, err + } + if err != nil { + glog.Errorf("error getting CustomResourceDefinition %s: %v", crd.Name, err) + return false, nil + } + + for _, condition := range c.Status.Conditions { + if condition.Type == apiextv1beta1.Established && condition.Status == apiextv1beta1.ConditionTrue { + return true, nil + } + } + glog.V(4).Infof("CustomResourceDefinition %s is not ready. conditions: %v", c.Name, c.Status.Conditions) + return false, nil + }) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/apireg.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/apireg.go new file mode 100644 index 0000000000..461f214fcb --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/apireg.go @@ -0,0 +1,36 @@ +package resourcebuilder + +import ( + "github.com/openshift/cluster-version-operator/lib" + "github.com/openshift/cluster-version-operator/lib/resourceapply" + "github.com/openshift/cluster-version-operator/lib/resourceread" + "k8s.io/client-go/rest" + apiregclientv1 "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1" +) + +type apiServiceBuilder struct { + client *apiregclientv1.ApiregistrationV1Client + raw []byte + modifier MetaV1ObjectModifierFunc +} + +func newAPIServiceBuilder(config *rest.Config, m lib.Manifest) Interface { + return &apiServiceBuilder{ + client: apiregclientv1.NewForConfigOrDie(config), + raw: m.Raw, + } +} + +func (b *apiServiceBuilder) WithModifier(f MetaV1ObjectModifierFunc) Interface { + b.modifier = f + return b +} + +func (b *apiServiceBuilder) Do() error { + apiService := resourceread.ReadAPIServiceV1OrDie(b.raw) + if b.modifier != nil { + b.modifier(apiService) + } + _, _, err := resourceapply.ApplyAPIService(b.client, apiService) + return err +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/apps.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/apps.go new file mode 100644 index 0000000000..bc5cccfcac --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/apps.go @@ -0,0 +1,146 @@ +package resourcebuilder + +import ( + "fmt" + "time" + + "github.com/golang/glog" + + "github.com/openshift/cluster-version-operator/lib" + "github.com/openshift/cluster-version-operator/lib/resourceapply" + "github.com/openshift/cluster-version-operator/lib/resourceread" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + appsclientv1 
"k8s.io/client-go/kubernetes/typed/apps/v1" + "k8s.io/client-go/rest" +) + +type deploymentBuilder struct { + client *appsclientv1.AppsV1Client + raw []byte + modifier MetaV1ObjectModifierFunc +} + +func newDeploymentBuilder(config *rest.Config, m lib.Manifest) Interface { + return &deploymentBuilder{ + client: appsclientv1.NewForConfigOrDie(config), + raw: m.Raw, + } +} + +func (b *deploymentBuilder) WithModifier(f MetaV1ObjectModifierFunc) Interface { + b.modifier = f + return b +} + +func (b *deploymentBuilder) Do() error { + deployment := resourceread.ReadDeploymentV1OrDie(b.raw) + if b.modifier != nil { + b.modifier(deployment) + } + actual, updated, err := resourceapply.ApplyDeployment(b.client, deployment) + if err != nil { + return err + } + if updated && actual.Generation > 1 { + return waitForDeploymentCompletion(b.client, deployment) + } + return nil +} + +const ( + deploymentPollInterval = 1 * time.Second + deploymentPollTimeout = 5 * time.Minute +) + +func waitForDeploymentCompletion(client appsclientv1.DeploymentsGetter, deployment *appsv1.Deployment) error { + return wait.Poll(deploymentPollInterval, deploymentPollTimeout, func() (bool, error) { + d, err := client.Deployments(deployment.Namespace).Get(deployment.Name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + // exit early to recreate the deployment. + return false, err + } + if err != nil { + // Do not return error here, as we could be updating the API Server itself, in which case we + // want to continue waiting. + glog.Errorf("error getting Deployment %s during rollout: %v", deployment.Name, err) + return false, nil + } + + if d.DeletionTimestamp != nil { + return false, fmt.Errorf("Deployment %s is being deleted", deployment.Name) + } + + if d.Generation <= d.Status.ObservedGeneration && d.Status.UpdatedReplicas == d.Status.Replicas && d.Status.UnavailableReplicas == 0 { + return true, nil + } + glog.V(4).Infof("Deployment %s is not ready. status: (replicas: %d, updated: %d, ready: %d, unavailable: %d)", d.Name, d.Status.Replicas, d.Status.UpdatedReplicas, d.Status.ReadyReplicas, d.Status.UnavailableReplicas) + return false, nil + }) +} + +type daemonsetBuilder struct { + client *appsclientv1.AppsV1Client + raw []byte + modifier MetaV1ObjectModifierFunc +} + +func newDaemonsetBuilder(config *rest.Config, m lib.Manifest) Interface { + return &daemonsetBuilder{ + client: appsclientv1.NewForConfigOrDie(config), + raw: m.Raw, + } +} + +func (b *daemonsetBuilder) WithModifier(f MetaV1ObjectModifierFunc) Interface { + b.modifier = f + return b +} + +func (b *daemonsetBuilder) Do() error { + daemonset := resourceread.ReadDaemonSetV1OrDie(b.raw) + if b.modifier != nil { + b.modifier(daemonset) + } + actual, updated, err := resourceapply.ApplyDaemonSet(b.client, daemonset) + if err != nil { + return err + } + if updated && actual.Generation > 1 { + return waitForDaemonsetRollout(b.client, daemonset) + } + return nil +} + +const ( + daemonsetPollInterval = 1 * time.Second + daemonsetPollTimeout = 5 * time.Minute +) + +func waitForDaemonsetRollout(client appsclientv1.DaemonSetsGetter, daemonset *appsv1.DaemonSet) error { + return wait.Poll(daemonsetPollInterval, daemonsetPollTimeout, func() (bool, error) { + d, err := client.DaemonSets(daemonset.Namespace).Get(daemonset.Name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + // exit early to recreate the daemonset. 
+			return false, err
+		}
+		if err != nil {
+			// Do not return error here, as we could be updating the API Server itself, in which case we
+			// want to continue waiting.
+			glog.Errorf("error getting Daemonset %s during rollout: %v", daemonset.Name, err)
+			return false, nil
+		}
+
+		if d.DeletionTimestamp != nil {
+			return false, fmt.Errorf("Daemonset %s is being deleted", daemonset.Name)
+		}
+
+		if d.Generation <= d.Status.ObservedGeneration && d.Status.UpdatedNumberScheduled == d.Status.DesiredNumberScheduled && d.Status.NumberUnavailable == 0 {
+			return true, nil
+		}
+		glog.V(4).Infof("Daemonset %s is not ready. status: (desired: %d, updated: %d, ready: %d, unavailable: %d)", d.Name, d.Status.DesiredNumberScheduled, d.Status.UpdatedNumberScheduled, d.Status.NumberReady, d.Status.NumberUnavailable)
+		return false, nil
+	})
+}
diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/batch.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/batch.go
new file mode 100644
index 0000000000..e57953e98a
--- /dev/null
+++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/batch.go
@@ -0,0 +1,83 @@
+package resourcebuilder
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/golang/glog"
+
+	"github.com/openshift/cluster-version-operator/lib"
+	"github.com/openshift/cluster-version-operator/lib/resourceapply"
+	"github.com/openshift/cluster-version-operator/lib/resourceread"
+	batchv1 "k8s.io/api/batch/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	batchclientv1 "k8s.io/client-go/kubernetes/typed/batch/v1"
+	"k8s.io/client-go/rest"
+)
+
+type jobBuilder struct {
+	client   *batchclientv1.BatchV1Client
+	raw      []byte
+	modifier MetaV1ObjectModifierFunc
+}
+
+func newJobBuilder(config *rest.Config, m lib.Manifest) Interface {
+	return &jobBuilder{
+		client: batchclientv1.NewForConfigOrDie(config),
+		raw:    m.Raw,
+	}
+}
+
+func (b *jobBuilder) WithModifier(f MetaV1ObjectModifierFunc) Interface {
+	b.modifier = f
+	return b
+}
+
+func (b *jobBuilder) Do() error {
+	job := resourceread.ReadJobV1OrDie(b.raw)
+	if b.modifier != nil {
+		b.modifier(job)
+	}
+	_, updated, err := resourceapply.ApplyJob(b.client, job)
+	if err != nil {
+		return err
+	}
+	if updated {
+		return WaitForJobCompletion(b.client, job)
+	}
+	return nil
+}
+
+const (
+	jobPollInterval = 1 * time.Second
+	jobPollTimeout  = 5 * time.Minute
+)
+
+// WaitForJobCompletion waits for job to complete.
+func WaitForJobCompletion(client batchclientv1.JobsGetter, job *batchv1.Job) error {
+	return wait.Poll(jobPollInterval, jobPollTimeout, func() (bool, error) {
+		j, err := client.Jobs(job.Namespace).Get(job.Name, metav1.GetOptions{})
+		if err != nil {
+			glog.Errorf("error getting Job %s: %v", job.Name, err)
+			return false, nil
+		}
+
+		if j.Status.Succeeded > 0 {
+			return true, nil
+		}
+
+		// Since we have filled in "activeDeadlineSeconds",
+		// the Job will 'Active == 0' iff it exceeds the deadline.
+		// Failed jobs will be recreated in the next run.
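+		// Returning an error here stops the poll and surfaces the failure to the caller.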
+ if j.Status.Active == 0 && j.Status.Failed > 0 { + reason := "DeadlineExceeded" + message := "Job was active longer than specified deadline" + if len(j.Status.Conditions) > 0 { + reason, message = j.Status.Conditions[0].Reason, j.Status.Conditions[0].Message + } + return false, fmt.Errorf("deadline exceeded, reason: %q, message: %q", reason, message) + } + return false, nil + }) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/core.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/core.go new file mode 100644 index 0000000000..fedab3e666 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/core.go @@ -0,0 +1,117 @@ +package resourcebuilder + +import ( + "github.com/openshift/cluster-version-operator/lib" + "github.com/openshift/cluster-version-operator/lib/resourceapply" + "github.com/openshift/cluster-version-operator/lib/resourceread" + coreclientv1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" +) + +type serviceAccountBuilder struct { + client *coreclientv1.CoreV1Client + raw []byte + modifier MetaV1ObjectModifierFunc +} + +func newServiceAccountBuilder(config *rest.Config, m lib.Manifest) Interface { + return &serviceAccountBuilder{ + client: coreclientv1.NewForConfigOrDie(config), + raw: m.Raw, + } +} + +func (b *serviceAccountBuilder) WithModifier(f MetaV1ObjectModifierFunc) Interface { + b.modifier = f + return b +} + +func (b *serviceAccountBuilder) Do() error { + serviceAccount := resourceread.ReadServiceAccountV1OrDie(b.raw) + if b.modifier != nil { + b.modifier(serviceAccount) + } + _, _, err := resourceapply.ApplyServiceAccount(b.client, serviceAccount) + return err +} + +type configMapBuilder struct { + client *coreclientv1.CoreV1Client + raw []byte + modifier MetaV1ObjectModifierFunc +} + +func newConfigMapBuilder(config *rest.Config, m lib.Manifest) Interface { + return &configMapBuilder{ + client: coreclientv1.NewForConfigOrDie(config), + raw: m.Raw, + } +} + +func (b *configMapBuilder) WithModifier(f MetaV1ObjectModifierFunc) Interface { + b.modifier = f + return b +} + +func (b *configMapBuilder) Do() error { + configMap := resourceread.ReadConfigMapV1OrDie(b.raw) + if b.modifier != nil { + b.modifier(configMap) + } + _, _, err := resourceapply.ApplyConfigMap(b.client, configMap) + return err +} + +type namespaceBuilder struct { + client *coreclientv1.CoreV1Client + raw []byte + modifier MetaV1ObjectModifierFunc +} + +func newNamespaceBuilder(config *rest.Config, m lib.Manifest) Interface { + return &namespaceBuilder{ + client: coreclientv1.NewForConfigOrDie(config), + raw: m.Raw, + } +} + +func (b *namespaceBuilder) WithModifier(f MetaV1ObjectModifierFunc) Interface { + b.modifier = f + return b +} + +func (b *namespaceBuilder) Do() error { + namespace := resourceread.ReadNamespaceV1OrDie(b.raw) + if b.modifier != nil { + b.modifier(namespace) + } + _, _, err := resourceapply.ApplyNamespace(b.client, namespace) + return err +} + +type serviceBuilder struct { + client *coreclientv1.CoreV1Client + raw []byte + modifier MetaV1ObjectModifierFunc +} + +func newServiceBuilder(config *rest.Config, m lib.Manifest) Interface { + return &serviceBuilder{ + client: coreclientv1.NewForConfigOrDie(config), + raw: m.Raw, + } +} + +func (b *serviceBuilder) WithModifier(f MetaV1ObjectModifierFunc) Interface { + b.modifier = f + return b +} + +func (b *serviceBuilder) Do() error { + service := resourceread.ReadServiceV1OrDie(b.raw) + if b.modifier != nil { 
+		b.modifier(service)
+	}
+	_, _, err := resourceapply.ApplyService(b.client, service)
+	return err
+}
diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/interface.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/interface.go
new file mode 100644
index 0000000000..d32bba19ed
--- /dev/null
+++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/interface.go
@@ -0,0 +1,77 @@
+package resourcebuilder
+
+import (
+	"fmt"
+	"sync"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/rest"
+
+	"github.com/openshift/cluster-version-operator/lib"
+)
+
+var (
+	// Mapper is the default ResourceMapper.
+	Mapper = NewResourceMapper()
+)
+
+// ResourceMapper maps a GroupVersionKind to a function that returns an Interface for that kind.
+type ResourceMapper struct {
+	l *sync.Mutex
+
+	gvkToNew map[schema.GroupVersionKind]NewInteraceFunc
+}
+
+// AddToMap copies all of rm's mappings into irm.
+// It locks irm before adding the keys.
+func (rm *ResourceMapper) AddToMap(irm *ResourceMapper) {
+	irm.l.Lock()
+	defer irm.l.Unlock()
+	for k, v := range rm.gvkToNew {
+		irm.gvkToNew[k] = v
+	}
+}
+
+// Exists returns true when gvk is known.
+func (rm *ResourceMapper) Exists(gvk schema.GroupVersionKind) bool {
+	_, ok := rm.gvkToNew[gvk]
+	return ok
+}
+
+// RegisterGVK adds a GVK to the NewInteraceFunc mapping.
+// It does not lock before adding the mapping.
+func (rm *ResourceMapper) RegisterGVK(gvk schema.GroupVersionKind, f NewInteraceFunc) {
+	rm.gvkToNew[gvk] = f
+}
+
+// NewResourceMapper returns a new ResourceMapper.
+// This is required as we cannot push to an uninitialized map.
+func NewResourceMapper() *ResourceMapper {
+	m := map[schema.GroupVersionKind]NewInteraceFunc{}
+	return &ResourceMapper{
+		l:        &sync.Mutex{},
+		gvkToNew: m,
+	}
+}
+
+type MetaV1ObjectModifierFunc func(metav1.Object)
+
+// NewInteraceFunc returns an Interface.
+// It requires a rest.Config that can be used to create a client
+// and the Manifest.
+type NewInteraceFunc func(rest *rest.Config, m lib.Manifest) Interface
+
+type Interface interface {
+	WithModifier(MetaV1ObjectModifierFunc) Interface
+	Do() error
+}
+
+// New returns an Interface for the manifest m using the mapping stored in mapper.
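+// It returns an error when no builder is registered for the manifest's GroupVersionKind.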
+func New(mapper *ResourceMapper, rest *rest.Config, m lib.Manifest) (Interface, error) { + f, ok := mapper.gvkToNew[m.GVK] + if !ok { + return nil, fmt.Errorf("No mapping found for gvk: %v", m.GVK) + } + return f(rest, m), nil +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/rbac.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/rbac.go new file mode 100644 index 0000000000..9189b55b55 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/rbac.go @@ -0,0 +1,117 @@ +package resourcebuilder + +import ( + "github.com/openshift/cluster-version-operator/lib" + "github.com/openshift/cluster-version-operator/lib/resourceapply" + "github.com/openshift/cluster-version-operator/lib/resourceread" + rbacclientv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" + "k8s.io/client-go/rest" +) + +type clusterRoleBuilder struct { + client *rbacclientv1.RbacV1Client + raw []byte + modifier MetaV1ObjectModifierFunc +} + +func newClusterRoleBuilder(config *rest.Config, m lib.Manifest) Interface { + return &clusterRoleBuilder{ + client: rbacclientv1.NewForConfigOrDie(config), + raw: m.Raw, + } +} + +func (b *clusterRoleBuilder) WithModifier(f MetaV1ObjectModifierFunc) Interface { + b.modifier = f + return b +} + +func (b *clusterRoleBuilder) Do() error { + clusterRole := resourceread.ReadClusterRoleV1OrDie(b.raw) + if b.modifier != nil { + b.modifier(clusterRole) + } + _, _, err := resourceapply.ApplyClusterRole(b.client, clusterRole) + return err +} + +type clusterRoleBindingBuilder struct { + client *rbacclientv1.RbacV1Client + raw []byte + modifier MetaV1ObjectModifierFunc +} + +func newClusterRoleBindingBuilder(config *rest.Config, m lib.Manifest) Interface { + return &clusterRoleBindingBuilder{ + client: rbacclientv1.NewForConfigOrDie(config), + raw: m.Raw, + } +} + +func (b *clusterRoleBindingBuilder) WithModifier(f MetaV1ObjectModifierFunc) Interface { + b.modifier = f + return b +} + +func (b *clusterRoleBindingBuilder) Do() error { + clusterRoleBinding := resourceread.ReadClusterRoleBindingV1OrDie(b.raw) + if b.modifier != nil { + b.modifier(clusterRoleBinding) + } + _, _, err := resourceapply.ApplyClusterRoleBinding(b.client, clusterRoleBinding) + return err +} + +type roleBuilder struct { + client *rbacclientv1.RbacV1Client + raw []byte + modifier MetaV1ObjectModifierFunc +} + +func newRoleBuilder(config *rest.Config, m lib.Manifest) Interface { + return &roleBuilder{ + client: rbacclientv1.NewForConfigOrDie(config), + raw: m.Raw, + } +} + +func (b *roleBuilder) WithModifier(f MetaV1ObjectModifierFunc) Interface { + b.modifier = f + return b +} + +func (b *roleBuilder) Do() error { + role := resourceread.ReadRoleV1OrDie(b.raw) + if b.modifier != nil { + b.modifier(role) + } + _, _, err := resourceapply.ApplyRole(b.client, role) + return err +} + +type roleBindingBuilder struct { + client *rbacclientv1.RbacV1Client + raw []byte + modifier MetaV1ObjectModifierFunc +} + +func newRoleBindingBuilder(config *rest.Config, m lib.Manifest) Interface { + return &roleBindingBuilder{ + client: rbacclientv1.NewForConfigOrDie(config), + raw: m.Raw, + } +} + +func (b *roleBindingBuilder) WithModifier(f MetaV1ObjectModifierFunc) Interface { + b.modifier = f + return b +} + +func (b *roleBindingBuilder) Do() error { + roleBinding := resourceread.ReadRoleBindingV1OrDie(b.raw) + if b.modifier != nil { + b.modifier(roleBinding) + } + _, _, err := resourceapply.ApplyRoleBinding(b.client, roleBinding) + return err +} 
diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/register.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/register.go new file mode 100644 index 0000000000..5806e540c1 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/register.go @@ -0,0 +1,38 @@ +package resourcebuilder + +import ( + securityv1 "github.com/openshift/api/security/v1" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiregv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + apiregv1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1" +) + +func init() { + rm := NewResourceMapper() + rm.RegisterGVK(apiextv1beta1.SchemeGroupVersion.WithKind("CustomResourceDefinition"), newCRDBuilder) + rm.RegisterGVK(apiregv1.SchemeGroupVersion.WithKind("APIService"), newAPIServiceBuilder) + rm.RegisterGVK(apiregv1beta1.SchemeGroupVersion.WithKind("APIService"), newAPIServiceBuilder) + rm.RegisterGVK(appsv1.SchemeGroupVersion.WithKind("Deployment"), newDeploymentBuilder) + rm.RegisterGVK(appsv1.SchemeGroupVersion.WithKind("DaemonSet"), newDaemonsetBuilder) + rm.RegisterGVK(batchv1.SchemeGroupVersion.WithKind("Job"), newJobBuilder) + rm.RegisterGVK(corev1.SchemeGroupVersion.WithKind("ServiceAccount"), newServiceAccountBuilder) + rm.RegisterGVK(corev1.SchemeGroupVersion.WithKind("ConfigMap"), newConfigMapBuilder) + rm.RegisterGVK(corev1.SchemeGroupVersion.WithKind("Namespace"), newNamespaceBuilder) + rm.RegisterGVK(corev1.SchemeGroupVersion.WithKind("Service"), newServiceBuilder) + rm.RegisterGVK(rbacv1.SchemeGroupVersion.WithKind("ClusterRole"), newClusterRoleBuilder) + rm.RegisterGVK(rbacv1.SchemeGroupVersion.WithKind("ClusterRoleBinding"), newClusterRoleBindingBuilder) + rm.RegisterGVK(rbacv1.SchemeGroupVersion.WithKind("Role"), newRoleBuilder) + rm.RegisterGVK(rbacv1.SchemeGroupVersion.WithKind("RoleBinding"), newRoleBindingBuilder) + rm.RegisterGVK(rbacv1beta1.SchemeGroupVersion.WithKind("ClusterRole"), newClusterRoleBuilder) + rm.RegisterGVK(rbacv1beta1.SchemeGroupVersion.WithKind("ClusterRoleBinding"), newClusterRoleBindingBuilder) + rm.RegisterGVK(rbacv1beta1.SchemeGroupVersion.WithKind("Role"), newRoleBuilder) + rm.RegisterGVK(rbacv1beta1.SchemeGroupVersion.WithKind("RoleBinding"), newRoleBindingBuilder) + rm.RegisterGVK(securityv1.SchemeGroupVersion.WithKind("SecurityContextConstraints"), newSecurityBuilder) + + rm.AddToMap(Mapper) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/security.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/security.go new file mode 100644 index 0000000000..d331dd2ab9 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourcebuilder/security.go @@ -0,0 +1,36 @@ +package resourcebuilder + +import ( + securityclientv1 "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1" + "github.com/openshift/cluster-version-operator/lib" + "github.com/openshift/cluster-version-operator/lib/resourceapply" + "github.com/openshift/cluster-version-operator/lib/resourceread" + "k8s.io/client-go/rest" +) + +type securityBuilder struct { + client *securityclientv1.SecurityV1Client + raw []byte + modifier MetaV1ObjectModifierFunc +} + +func newSecurityBuilder(config *rest.Config, m 
lib.Manifest) Interface { + return &securityBuilder{ + client: securityclientv1.NewForConfigOrDie(config), + raw: m.Raw, + } +} + +func (b *securityBuilder) WithModifier(f MetaV1ObjectModifierFunc) Interface { + b.modifier = f + return b +} + +func (b *securityBuilder) Do() error { + scc := resourceread.ReadSecurityContextConstraintsV1OrDie(b.raw) + if b.modifier != nil { + b.modifier(scc) + } + _, _, err := resourceapply.ApplySecurityContextConstraints(b.client, scc) + return err +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/apiext.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/apiext.go new file mode 100644 index 0000000000..32e4043f62 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/apiext.go @@ -0,0 +1,18 @@ +package resourcemerge + +import ( + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/api/equality" +) + +// EnsureCustomResourceDefinition ensures that the existing matches the required. +// modified is set to true when existing had to be updated with required. +func EnsureCustomResourceDefinition(modified *bool, existing *apiextv1beta1.CustomResourceDefinition, required apiextv1beta1.CustomResourceDefinition) { + EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) + + // we stomp everything + if !equality.Semantic.DeepEqual(existing.Spec, required.Spec) { + *modified = true + existing.Spec = required.Spec + } +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/apireg.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/apireg.go new file mode 100644 index 0000000000..d1c58e8b5d --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/apireg.go @@ -0,0 +1,18 @@ +package resourcemerge + +import ( + "k8s.io/apimachinery/pkg/api/equality" + apiregv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" +) + +// EnsureAPIService ensures that the existing matches the required. +// modified is set to true when existing had to be updated with required. +func EnsureAPIService(modified *bool, existing *apiregv1.APIService, required apiregv1.APIService) { + EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) + + // we stomp everything + if !equality.Semantic.DeepEqual(existing.Spec, required.Spec) { + *modified = true + existing.Spec = required.Spec + } +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/apps.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/apps.go new file mode 100644 index 0000000000..16b2dd5ae8 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/apps.go @@ -0,0 +1,40 @@ +package resourcemerge + +import ( + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/equality" +) + +// EnsureDeployment ensures that the existing matches the required. +// modified is set to true when existing had to be updated with required. 
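+// Only the object meta, the selector, and the pod template are reconciled; other spec fields are left untouched.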
+func EnsureDeployment(modified *bool, existing *appsv1.Deployment, required appsv1.Deployment) { + EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) + + if existing.Spec.Selector == nil { + *modified = true + existing.Spec.Selector = required.Spec.Selector + } + if !equality.Semantic.DeepEqual(existing.Spec.Selector, required.Spec.Selector) { + *modified = true + existing.Spec.Selector = required.Spec.Selector + } + + ensurePodTemplateSpec(modified, &existing.Spec.Template, required.Spec.Template) +} + +// EnsureDaemonSet ensures that the existing matches the required. +// modified is set to true when existing had to be updated with required. +func EnsureDaemonSet(modified *bool, existing *appsv1.DaemonSet, required appsv1.DaemonSet) { + EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) + + if existing.Spec.Selector == nil { + *modified = true + existing.Spec.Selector = required.Spec.Selector + } + if !equality.Semantic.DeepEqual(existing.Spec.Selector, required.Spec.Selector) { + *modified = true + existing.Spec.Selector = required.Spec.Selector + } + + ensurePodTemplateSpec(modified, &existing.Spec.Template, required.Spec.Template) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/batch.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/batch.go new file mode 100644 index 0000000000..81ab7bb98a --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/batch.go @@ -0,0 +1,28 @@ +package resourcemerge + +import ( + batchv1 "k8s.io/api/batch/v1" + "k8s.io/apimachinery/pkg/api/equality" +) + +// EnsureJob ensures that the existing matches the required. +// modified is set to true when existing had to be updated with required. +func EnsureJob(modified *bool, existing *batchv1.Job, required batchv1.Job) { + EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) + + if existing.Spec.Selector == nil { + *modified = true + existing.Spec.Selector = required.Spec.Selector + } + if !equality.Semantic.DeepEqual(existing.Spec.Selector, required.Spec.Selector) { + *modified = true + existing.Spec.Selector = required.Spec.Selector + } + setInt32Ptr(modified, &existing.Spec.Parallelism, required.Spec.Parallelism) + setInt32Ptr(modified, &existing.Spec.Completions, required.Spec.Completions) + setInt64Ptr(modified, &existing.Spec.ActiveDeadlineSeconds, required.Spec.ActiveDeadlineSeconds) + setInt32Ptr(modified, &existing.Spec.BackoffLimit, required.Spec.BackoffLimit) + setBoolPtr(modified, &existing.Spec.ManualSelector, required.Spec.ManualSelector) + + ensurePodTemplateSpec(modified, &existing.Spec.Template, required.Spec.Template) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/core.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/core.go new file mode 100644 index 0000000000..9299429c3e --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/core.go @@ -0,0 +1,503 @@ +package resourcemerge + +import ( + "reflect" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" +) + +// EnsureConfigMap ensures that the existing matches the required. +// modified is set to true when existing had to be updated with required. 
+func EnsureConfigMap(modified *bool, existing *corev1.ConfigMap, required corev1.ConfigMap) {
+	EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta)
+
+	mergeMap(modified, &existing.Data, required.Data)
+}
+
+// ensurePodTemplateSpec ensures that the existing matches the required.
+// modified is set to true when existing had to be updated with required.
+func ensurePodTemplateSpec(modified *bool, existing *corev1.PodTemplateSpec, required corev1.PodTemplateSpec) {
+	EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta)
+
+	ensurePodSpec(modified, &existing.Spec, required.Spec)
+}
+
+func ensurePodSpec(modified *bool, existing *corev1.PodSpec, required corev1.PodSpec) {
+	// any container we specify, we require.
+	for _, required := range required.InitContainers {
+		var existingCurr *corev1.Container
+		for j, curr := range existing.InitContainers {
+			if curr.Name == required.Name {
+				existingCurr = &existing.InitContainers[j]
+				break
+			}
+		}
+		if existingCurr == nil {
+			*modified = true
+			existing.InitContainers = append(existing.InitContainers, corev1.Container{})
+			existingCurr = &existing.InitContainers[len(existing.InitContainers)-1]
+		}
+		ensureContainer(modified, existingCurr, required)
+	}
+
+	for _, required := range required.Containers {
+		var existingCurr *corev1.Container
+		for j, curr := range existing.Containers {
+			if curr.Name == required.Name {
+				existingCurr = &existing.Containers[j]
+				break
+			}
+		}
+		if existingCurr == nil {
+			*modified = true
+			existing.Containers = append(existing.Containers, corev1.Container{})
+			existingCurr = &existing.Containers[len(existing.Containers)-1]
+		}
+		ensureContainer(modified, existingCurr, required)
+	}
+
+	// any volume we specify, we require.
+	for _, required := range required.Volumes {
+		var existingCurr *corev1.Volume
+		for j, curr := range existing.Volumes {
+			if curr.Name == required.Name {
+				existingCurr = &existing.Volumes[j]
+				break
+			}
+		}
+		if existingCurr == nil {
+			*modified = true
+			existing.Volumes = append(existing.Volumes, corev1.Volume{})
+			existingCurr = &existing.Volumes[len(existing.Volumes)-1]
+		}
+		ensureVolume(modified, existingCurr, required)
+	}
+
+	if len(required.RestartPolicy) > 0 {
+		if existing.RestartPolicy != required.RestartPolicy {
+			*modified = true
+			existing.RestartPolicy = required.RestartPolicy
+		}
+	}
+
+	setStringIfSet(modified, &existing.ServiceAccountName, required.ServiceAccountName)
+	setBool(modified, &existing.HostNetwork, required.HostNetwork)
+	mergeMap(modified, &existing.NodeSelector, required.NodeSelector)
+	ensurePodSecurityContextPtr(modified, &existing.SecurityContext, required.SecurityContext)
+	ensureAffinityPtr(modified, &existing.Affinity, required.Affinity)
+	ensureTolerations(modified, &existing.Tolerations, required.Tolerations)
+	setStringIfSet(modified, &existing.PriorityClassName, required.PriorityClassName)
+	setInt32Ptr(modified, &existing.Priority, required.Priority)
+}
+
+func ensureContainer(modified *bool, existing *corev1.Container, required corev1.Container) {
+	setStringIfSet(modified, &existing.Name, required.Name)
+	setStringIfSet(modified, &existing.Image, required.Image)
+
+	// if you want to modify the launch, you need to modify it in the config, not in the launch args
+	setStringSlice(modified, &existing.Command, required.Command)
+	setStringSlice(modified, &existing.Args, required.Args)
+	ensureEnvVar(modified, &existing.Env, required.Env)
+	ensureEnvFromSource(modified, &existing.EnvFrom, required.EnvFrom)
+
setStringIfSet(modified, &existing.WorkingDir, required.WorkingDir) + + // any port we specify, we require + for _, required := range required.Ports { + var existingCurr *corev1.ContainerPort + for j, curr := range existing.Ports { + if curr.Name == required.Name { + existingCurr = &existing.Ports[j] + break + } + } + if existingCurr == nil { + *modified = true + existing.Ports = append(existing.Ports, corev1.ContainerPort{}) + existingCurr = &existing.Ports[len(existing.Ports)-1] + } + ensureContainerPort(modified, existingCurr, required) + } + + // any volume mount we specify, we require + for _, required := range required.VolumeMounts { + var existingCurr *corev1.VolumeMount + for j, curr := range existing.VolumeMounts { + if curr.Name == required.Name { + existingCurr = &existing.VolumeMounts[j] + break + } + } + if existingCurr == nil { + *modified = true + existing.VolumeMounts = append(existing.VolumeMounts, corev1.VolumeMount{}) + existingCurr = &existing.VolumeMounts[len(existing.VolumeMounts)-1] + } + ensureVolumeMount(modified, existingCurr, required) + } + + if required.LivenessProbe != nil { + ensureProbePtr(modified, &existing.LivenessProbe, required.LivenessProbe) + } + if required.ReadinessProbe != nil { + ensureProbePtr(modified, &existing.ReadinessProbe, required.ReadinessProbe) + } + + // our security context should always win + ensureSecurityContextPtr(modified, &existing.SecurityContext, required.SecurityContext) +} + +func ensureEnvVar(modified *bool, existing *[]corev1.EnvVar, required []corev1.EnvVar) { + if required == nil { + return + } + if !equality.Semantic.DeepEqual(required, *existing) { + *existing = required + *modified = true + } +} + +func ensureEnvFromSource(modified *bool, existing *[]corev1.EnvFromSource, required []corev1.EnvFromSource) { + if required == nil { + return + } + if !equality.Semantic.DeepEqual(required, *existing) { + *existing = required + *modified = true + } +} + +func ensureProbePtr(modified *bool, existing **corev1.Probe, required *corev1.Probe) { + // if we have no required, then we don't care what someone else has set + if required == nil { + return + } + if *existing == nil { + *modified = true + *existing = required + return + } + ensureProbe(modified, *existing, *required) +} + +func ensureProbe(modified *bool, existing *corev1.Probe, required corev1.Probe) { + setInt32(modified, &existing.InitialDelaySeconds, required.InitialDelaySeconds) + + ensureProbeHandler(modified, &existing.Handler, required.Handler) +} + +func ensureProbeHandler(modified *bool, existing *corev1.Handler, required corev1.Handler) { + if !equality.Semantic.DeepEqual(required, *existing) { + *modified = true + *existing = required + } +} + +func ensureContainerPort(modified *bool, existing *corev1.ContainerPort, required corev1.ContainerPort) { + if !equality.Semantic.DeepEqual(required, *existing) { + *modified = true + *existing = required + } +} + +func ensureVolumeMount(modified *bool, existing *corev1.VolumeMount, required corev1.VolumeMount) { + if !equality.Semantic.DeepEqual(required, *existing) { + *modified = true + *existing = required + } +} + +func ensureVolume(modified *bool, existing *corev1.Volume, required corev1.Volume) { + if !equality.Semantic.DeepEqual(required, *existing) { + *modified = true + *existing = required + } +} + +func ensureSecurityContextPtr(modified *bool, existing **corev1.SecurityContext, required *corev1.SecurityContext) { + // if we have no required, then we don't care what someone else has set + if required == nil { 
+ return + } + + if *existing == nil { + *modified = true + *existing = required + return + } + ensureSecurityContext(modified, *existing, *required) +} + +func ensureSecurityContext(modified *bool, existing *corev1.SecurityContext, required corev1.SecurityContext) { + ensureCapabilitiesPtr(modified, &existing.Capabilities, required.Capabilities) + ensureSELinuxOptionsPtr(modified, &existing.SELinuxOptions, required.SELinuxOptions) + setBoolPtr(modified, &existing.Privileged, required.Privileged) + setInt64Ptr(modified, &existing.RunAsUser, required.RunAsUser) + setBoolPtr(modified, &existing.RunAsNonRoot, required.RunAsNonRoot) + setBoolPtr(modified, &existing.ReadOnlyRootFilesystem, required.ReadOnlyRootFilesystem) + setBoolPtr(modified, &existing.AllowPrivilegeEscalation, required.AllowPrivilegeEscalation) +} + +func ensureCapabilitiesPtr(modified *bool, existing **corev1.Capabilities, required *corev1.Capabilities) { + // if we have no required, then we don't care what someone else has set + if required == nil { + return + } + + if *existing == nil { + *modified = true + *existing = required + return + } + ensureCapabilities(modified, *existing, *required) +} + +func ensureCapabilities(modified *bool, existing *corev1.Capabilities, required corev1.Capabilities) { + // any Add we specify, we require. + for _, required := range required.Add { + found := false + for _, curr := range existing.Add { + if equality.Semantic.DeepEqual(curr, required) { + found = true + break + } + } + if !found { + *modified = true + existing.Add = append(existing.Add, required) + } + } + + // any Drop we specify, we require. + for _, required := range required.Drop { + found := false + for _, curr := range existing.Drop { + if equality.Semantic.DeepEqual(curr, required) { + found = true + break + } + } + if !found { + *modified = true + existing.Drop = append(existing.Drop, required) + } + } +} + +func setStringSliceIfSet(modified *bool, existing *[]string, required []string) { + if required == nil { + return + } + if !equality.Semantic.DeepEqual(required, *existing) { + *existing = required + *modified = true + } +} + +func setStringSlice(modified *bool, existing *[]string, required []string) { + if !reflect.DeepEqual(required, *existing) { + *existing = required + *modified = true + } +} + +func mergeStringSlice(modified *bool, existing *[]string, required []string) { + for _, required := range required { + found := false + for _, curr := range *existing { + if required == curr { + found = true + break + } + } + if !found { + *modified = true + *existing = append(*existing, required) + } + } +} + +func ensureTolerations(modified *bool, existing *[]corev1.Toleration, required []corev1.Toleration) { + for ridx := range required { + found := false + for eidx := range *existing { + if required[ridx].Key == (*existing)[eidx].Key { + found = true + if !equality.Semantic.DeepEqual((*existing)[eidx], required[ridx]) { + *modified = true + (*existing)[eidx] = required[ridx] + } + break + } + } + if !found { + *modified = true + *existing = append(*existing, required[ridx]) + } + } +} + +func ensureAffinityPtr(modified *bool, existing **corev1.Affinity, required *corev1.Affinity) { + // if we have no required, then we don't care what someone else has set + if required == nil { + return + } + + if *existing == nil { + *modified = true + *existing = required + return + } + ensureAffinity(modified, *existing, *required) +} + +func ensureAffinity(modified *bool, existing *corev1.Affinity, required corev1.Affinity) { + if 
!equality.Semantic.DeepEqual(existing.NodeAffinity, required.NodeAffinity) {
+		*modified = true
+		(*existing).NodeAffinity = required.NodeAffinity
+	}
+	if !equality.Semantic.DeepEqual(existing.PodAffinity, required.PodAffinity) {
+		*modified = true
+		(*existing).PodAffinity = required.PodAffinity
+	}
+	if !equality.Semantic.DeepEqual(existing.PodAntiAffinity, required.PodAntiAffinity) {
+		*modified = true
+		(*existing).PodAntiAffinity = required.PodAntiAffinity
+	}
+}
+
+func ensurePodSecurityContextPtr(modified *bool, existing **corev1.PodSecurityContext, required *corev1.PodSecurityContext) {
+	// if we have no required, then we don't care what someone else has set
+	if required == nil {
+		return
+	}
+
+	if *existing == nil {
+		*modified = true
+		*existing = required
+		return
+	}
+	ensurePodSecurityContext(modified, *existing, *required)
+}
+
+func ensurePodSecurityContext(modified *bool, existing *corev1.PodSecurityContext, required corev1.PodSecurityContext) {
+	ensureSELinuxOptionsPtr(modified, &existing.SELinuxOptions, required.SELinuxOptions)
+	setInt64Ptr(modified, &existing.RunAsUser, required.RunAsUser)
+	setInt64Ptr(modified, &existing.RunAsGroup, required.RunAsGroup)
+	setBoolPtr(modified, &existing.RunAsNonRoot, required.RunAsNonRoot)
+
+	// any SupplementalGroups we specify, we require.
+	for _, required := range required.SupplementalGroups {
+		found := false
+		for _, curr := range existing.SupplementalGroups {
+			if curr == required {
+				found = true
+				break
+			}
+		}
+		if !found {
+			*modified = true
+			existing.SupplementalGroups = append(existing.SupplementalGroups, required)
+		}
+	}
+
+	setInt64Ptr(modified, &existing.FSGroup, required.FSGroup)
+
+	// any Sysctls we specify, we require.
+	for _, required := range required.Sysctls {
+		found := false
+		for j, curr := range existing.Sysctls {
+			if curr.Name == required.Name {
+				found = true
+				if curr.Value != required.Value {
+					*modified = true
+					existing.Sysctls[j] = required
+				}
+				break
+			}
+		}
+		if !found {
+			*modified = true
+			existing.Sysctls = append(existing.Sysctls, required)
+		}
+	}
+}
+
+func ensureSELinuxOptionsPtr(modified *bool, existing **corev1.SELinuxOptions, required *corev1.SELinuxOptions) {
+	// if we have no required, then we don't care what someone else has set
+	if required == nil {
+		return
+	}
+
+	if *existing == nil {
+		*modified = true
+		*existing = required
+		return
+	}
+	ensureSELinuxOptions(modified, *existing, *required)
+}
+
+func ensureSELinuxOptions(modified *bool, existing *corev1.SELinuxOptions, required corev1.SELinuxOptions) {
+	setStringIfSet(modified, &existing.User, required.User)
+	setStringIfSet(modified, &existing.Role, required.Role)
+	setStringIfSet(modified, &existing.Type, required.Type)
+	setStringIfSet(modified, &existing.Level, required.Level)
+}
+
+func setBool(modified *bool, existing *bool, required bool) {
+	if required != *existing {
+		*existing = required
+		*modified = true
+	}
+}
+
+func setBoolPtr(modified *bool, existing **bool, required *bool) {
+	// if we have no required, then we don't care what someone else has set
+	if required == nil {
+		return
+	}
+
+	if *existing == nil {
+		*modified = true
+		*existing = required
+		return
+	}
+	setBool(modified, *existing, *required)
+}
+
+func setInt32(modified *bool, existing *int32, required int32) {
+	if required != *existing {
+		*existing = required
+		*modified = true
+	}
+}
+
+func setInt32Ptr(modified *bool, existing **int32, required *int32) {
+	if *existing == nil || (required == nil && *existing != nil) {
+
*modified = true + *existing = required + return + } + setInt32(modified, *existing, *required) +} + +func setInt64(modified *bool, existing *int64, required int64) { + if required != *existing { + *existing = required + *modified = true + } +} + +func setInt64Ptr(modified *bool, existing **int64, required *int64) { + // if we have no required, then we don't care what someone else has set + if required == nil { + return + } + + if *existing == nil { + *modified = true + *existing = required + return + } + setInt64(modified, *existing, *required) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/cv.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/cv.go new file mode 100644 index 0000000000..a5cf7a59d9 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/cv.go @@ -0,0 +1,32 @@ +package resourcemerge + +import ( + cvv1 "github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1" +) + +func EnsureCVOConfig(modified *bool, existing *cvv1.CVOConfig, required cvv1.CVOConfig) { + EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) + if existing.Upstream != required.Upstream { + *modified = true + existing.Upstream = required.Upstream + } + if existing.Channel != required.Channel { + *modified = true + existing.Channel = required.Channel + } + if existing.ClusterID != required.ClusterID { + *modified = true + existing.ClusterID = required.ClusterID + } + + if required.DesiredUpdate.Payload != "" && + existing.DesiredUpdate.Payload != required.DesiredUpdate.Payload { + *modified = true + existing.DesiredUpdate.Payload = required.DesiredUpdate.Payload + } + if required.DesiredUpdate.Version != "" && + existing.DesiredUpdate.Version != required.DesiredUpdate.Version { + *modified = true + existing.DesiredUpdate.Version = required.DesiredUpdate.Version + } +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/meta.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/meta.go new file mode 100644 index 0000000000..748a37f48b --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/meta.go @@ -0,0 +1,58 @@ +package resourcemerge + +import ( + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EnsureObjectMeta ensures that the existing matches the required. +// modified is set to true when existing had to be updated with required. 
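+// Namespace and name are overwritten when set in required; labels, annotations, and owner references are merged.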
+func EnsureObjectMeta(modified *bool, existing *metav1.ObjectMeta, required metav1.ObjectMeta) { + setStringIfSet(modified, &existing.Namespace, required.Namespace) + setStringIfSet(modified, &existing.Name, required.Name) + mergeMap(modified, &existing.Labels, required.Labels) + mergeMap(modified, &existing.Annotations, required.Annotations) + mergeOwnerRefs(modified, &existing.OwnerReferences, required.OwnerReferences) +} + +func setStringIfSet(modified *bool, existing *string, required string) { + if len(required) == 0 { + return + } + if required != *existing { + *existing = required + *modified = true + } +} + +func mergeMap(modified *bool, existing *map[string]string, required map[string]string) { + if *existing == nil { + *existing = map[string]string{} + } + for k, v := range required { + if existingV, ok := (*existing)[k]; !ok || v != existingV { + *modified = true + (*existing)[k] = v + } + } +} + +func mergeOwnerRefs(modified *bool, existing *[]metav1.OwnerReference, required []metav1.OwnerReference) { + for ridx := range required { + found := false + for eidx := range *existing { + if required[ridx].UID == (*existing)[eidx].UID { + found = true + if !equality.Semantic.DeepEqual((*existing)[eidx], required[ridx]) { + *modified = true + (*existing)[eidx] = required[ridx] + } + break + } + } + if !found { + *modified = true + *existing = append(*existing, required[ridx]) + } + } +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/meta_test.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/meta_test.go new file mode 100644 index 0000000000..f19fa4661d --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/meta_test.go @@ -0,0 +1,110 @@ +package resourcemerge + +import ( + "fmt" + "testing" + + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/pointer" +) + +func TestMergeOwnerRefs(t *testing.T) { + tests := []struct { + existing []metav1.OwnerReference + input []metav1.OwnerReference + + expectedModified bool + expected []metav1.OwnerReference + }{{ + existing: []metav1.OwnerReference{}, + input: []metav1.OwnerReference{}, + + expectedModified: false, + expected: []metav1.OwnerReference{}, + }, { + existing: []metav1.OwnerReference{}, + input: []metav1.OwnerReference{{ + UID: types.UID("uid-1"), + }}, + + expectedModified: true, + expected: []metav1.OwnerReference{{ + UID: types.UID("uid-1"), + }}, + }, { + existing: []metav1.OwnerReference{{ + UID: types.UID("uid-2"), + }, { + UID: types.UID("uid-3"), + }}, + input: []metav1.OwnerReference{{ + UID: types.UID("uid-1"), + }}, + + expectedModified: true, + expected: []metav1.OwnerReference{{ + UID: types.UID("uid-2"), + }, { + UID: types.UID("uid-3"), + }, { + UID: types.UID("uid-1"), + }}, + }, { + existing: []metav1.OwnerReference{{ + UID: types.UID("uid-1"), + }}, + input: []metav1.OwnerReference{{ + UID: types.UID("uid-1"), + }}, + + expectedModified: false, + expected: []metav1.OwnerReference{{ + UID: types.UID("uid-1"), + }}, + }, { + existing: []metav1.OwnerReference{{ + UID: types.UID("uid-1"), + }}, + input: []metav1.OwnerReference{{ + Controller: pointer.BoolPtr(true), + UID: types.UID("uid-1"), + }}, + + expectedModified: true, + expected: []metav1.OwnerReference{{ + Controller: pointer.BoolPtr(true), + UID: types.UID("uid-1"), + }}, + }, { + existing: []metav1.OwnerReference{{ + Controller: pointer.BoolPtr(false), + UID: types.UID("uid-1"), 
+ }}, + input: []metav1.OwnerReference{{ + Controller: pointer.BoolPtr(true), + UID: types.UID("uid-1"), + }}, + + expectedModified: true, + expected: []metav1.OwnerReference{{ + Controller: pointer.BoolPtr(true), + UID: types.UID("uid-1"), + }}, + }} + + for idx, test := range tests { + t.Run(fmt.Sprintf("test#%d", idx), func(t *testing.T) { + modified := pointer.BoolPtr(false) + mergeOwnerRefs(modified, &test.existing, test.input) + if *modified != test.expectedModified { + t.Fatalf("mismatch modified got: %v want: %v", *modified, test.expectedModified) + } + + if !equality.Semantic.DeepEqual(test.existing, test.expected) { + t.Fatalf("mismatch ownerefs got: %v want: %v", test.existing, test.expected) + } + }) + } +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/os.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/os.go new file mode 100644 index 0000000000..fa9efac3ba --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/os.go @@ -0,0 +1,95 @@ +package resourcemerge + +import ( + "time" + + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + osv1 "github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1" +) + +func EnsureOperatorStatus(modified *bool, existing *osv1.ClusterOperator, required osv1.ClusterOperator) { + EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) + ensureOperatorStatusStatus(modified, &existing.Status, required.Status) +} + +func ensureOperatorStatusStatus(modified *bool, existing *osv1.ClusterOperatorStatus, required osv1.ClusterOperatorStatus) { + if !equality.Semantic.DeepEqual(existing.Conditions, required.Conditions) { + *modified = true + existing.Conditions = required.Conditions + } + if existing.Version != required.Version { + *modified = true + existing.Version = required.Version + } + if !equality.Semantic.DeepEqual(existing.Extension.Raw, required.Extension.Raw) { + *modified = true + existing.Extension.Raw = required.Extension.Raw + } + if !equality.Semantic.DeepEqual(existing.Extension.Object, required.Extension.Object) { + *modified = true + existing.Extension.Object = required.Extension.Object + } +} + +func SetOperatorStatusCondition(conditions *[]osv1.ClusterOperatorStatusCondition, newCondition osv1.ClusterOperatorStatusCondition) { + if conditions == nil { + conditions = &[]osv1.ClusterOperatorStatusCondition{} + } + existingCondition := FindOperatorStatusCondition(*conditions, newCondition.Type) + if existingCondition == nil { + newCondition.LastTransitionTime = metav1.NewTime(time.Now()) + *conditions = append(*conditions, newCondition) + return + } + + if existingCondition.Status != newCondition.Status { + existingCondition.Status = newCondition.Status + existingCondition.LastTransitionTime = newCondition.LastTransitionTime + } + + existingCondition.Reason = newCondition.Reason + existingCondition.Message = newCondition.Message +} + +func RemoveOperatorStatusCondition(conditions *[]osv1.ClusterOperatorStatusCondition, conditionType osv1.ClusterStatusConditionType) { + if conditions == nil { + conditions = &[]osv1.ClusterOperatorStatusCondition{} + } + newConditions := []osv1.ClusterOperatorStatusCondition{} + for _, condition := range *conditions { + if condition.Type != conditionType { + newConditions = append(newConditions, condition) + } + } + + *conditions = newConditions +} + +func FindOperatorStatusCondition(conditions 
[]osv1.ClusterOperatorStatusCondition, conditionType osv1.ClusterStatusConditionType) *osv1.ClusterOperatorStatusCondition { + for i := range conditions { + if conditions[i].Type == conditionType { + return &conditions[i] + } + } + + return nil +} + +func IsOperatorStatusConditionTrue(conditions []osv1.ClusterOperatorStatusCondition, conditionType osv1.ClusterStatusConditionType) bool { + return IsOperatorStatusConditionPresentAndEqual(conditions, conditionType, osv1.ConditionTrue) +} + +func IsOperatorStatusConditionFalse(conditions []osv1.ClusterOperatorStatusCondition, conditionType osv1.ClusterStatusConditionType) bool { + return IsOperatorStatusConditionPresentAndEqual(conditions, conditionType, osv1.ConditionFalse) +} + +func IsOperatorStatusConditionPresentAndEqual(conditions []osv1.ClusterOperatorStatusCondition, conditionType osv1.ClusterStatusConditionType, status osv1.ConditionStatus) bool { + for _, condition := range conditions { + if condition.Type == conditionType { + return condition.Status == status + } + } + return false +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/rbac.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/rbac.go new file mode 100644 index 0000000000..e39e843add --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/rbac.go @@ -0,0 +1,54 @@ +package resourcemerge + +import ( + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/equality" +) + +// EnsureClusterRoleBinding ensures that the existing matches the required. +// modified is set to true when existing had to be updated with required. +func EnsureClusterRoleBinding(modified *bool, existing *rbacv1.ClusterRoleBinding, required rbacv1.ClusterRoleBinding) { + EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) + if !equality.Semantic.DeepEqual(existing.Subjects, required.Subjects) { + *modified = true + existing.Subjects = required.Subjects + } + if !equality.Semantic.DeepEqual(existing.RoleRef, required.RoleRef) { + *modified = true + existing.RoleRef = required.RoleRef + } +} + +// EnsureClusterRole ensures that the existing matches the required. +// modified is set to true when existing had to be updated with required. +func EnsureClusterRole(modified *bool, existing *rbacv1.ClusterRole, required rbacv1.ClusterRole) { + EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) + if !equality.Semantic.DeepEqual(existing.Rules, required.Rules) { + *modified = true + existing.Rules = required.Rules + } +} + +// EnsureRoleBinding ensures that the existing matches the required. +// modified is set to true when existing had to be updated with required. +func EnsureRoleBinding(modified *bool, existing *rbacv1.RoleBinding, required rbacv1.RoleBinding) { + EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) + if !equality.Semantic.DeepEqual(existing.Subjects, required.Subjects) { + *modified = true + existing.Subjects = required.Subjects + } + if !equality.Semantic.DeepEqual(existing.RoleRef, required.RoleRef) { + *modified = true + existing.RoleRef = required.RoleRef + } +} + +// EnsureRole ensures that the existing matches the required. +// modified is set to true when existing had to be updated with required. 
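As a quick aside on the condition helpers in resourcemerge/os.go above: a minimal usage sketch, not part of the vendored sources, showing how an operator might drive SetOperatorStatusCondition and IsOperatorStatusConditionTrue; the module wiring against this vendor tree is assumed.

package main

import (
	"fmt"

	"github.com/openshift/cluster-version-operator/lib/resourcemerge"
	osv1 "github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1"
)

func main() {
	// Conditions as last reported by the operator; empty on the first sync.
	conditions := []osv1.ClusterOperatorStatusCondition{}

	// SetOperatorStatusCondition appends a new condition (stamping
	// LastTransitionTime) or updates the matching entry in place, carrying
	// the new LastTransitionTime over only when Status actually changes.
	resourcemerge.SetOperatorStatusCondition(&conditions, osv1.ClusterOperatorStatusCondition{
		Type:    osv1.OperatorAvailable,
		Status:  osv1.ConditionTrue,
		Message: "operator is available",
	})

	fmt.Println(resourcemerge.IsOperatorStatusConditionTrue(conditions, osv1.OperatorAvailable)) // true
}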
+func EnsureRole(modified *bool, existing *rbacv1.Role, required rbacv1.Role) { + EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) + if !equality.Semantic.DeepEqual(existing.Rules, required.Rules) { + *modified = true + existing.Rules = required.Rules + } +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/security.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/security.go new file mode 100644 index 0000000000..9ad63dddfc --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourcemerge/security.go @@ -0,0 +1,108 @@ +package resourcemerge + +import ( + securityv1 "github.com/openshift/api/security/v1" + "k8s.io/apimachinery/pkg/api/equality" +) + +// EnsureSecurityContextConstraints ensures that the existing matches the required. +// modified is set to true when existing had to be updated with required. +func EnsureSecurityContextConstraints(modified *bool, existing *securityv1.SecurityContextConstraints, required securityv1.SecurityContextConstraints) { + EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) + setInt32Ptr(modified, &existing.Priority, required.Priority) + setBool(modified, &existing.AllowPrivilegedContainer, required.AllowPrivilegedContainer) + for _, required := range required.DefaultAddCapabilities { + found := false + for _, curr := range existing.DefaultAddCapabilities { + if equality.Semantic.DeepEqual(required, curr) { + found = true + break + } + } + if !found { + *modified = true + existing.DefaultAddCapabilities = append(existing.DefaultAddCapabilities, required) + } + } + for _, required := range required.RequiredDropCapabilities { + found := false + for _, curr := range existing.RequiredDropCapabilities { + if equality.Semantic.DeepEqual(required, curr) { + found = true + break + } + } + if !found { + *modified = true + existing.RequiredDropCapabilities = append(existing.RequiredDropCapabilities, required) + } + } + for _, required := range required.AllowedCapabilities { + found := false + for _, curr := range existing.AllowedCapabilities { + if equality.Semantic.DeepEqual(required, curr) { + found = true + break + } + } + if !found { + *modified = true + existing.AllowedCapabilities = append(existing.AllowedCapabilities, required) + } + } + setBool(modified, &existing.AllowHostDirVolumePlugin, required.AllowHostDirVolumePlugin) + for _, required := range required.Volumes { + found := false + for _, curr := range existing.Volumes { + if equality.Semantic.DeepEqual(required, curr) { + found = true + break + } + } + if !found { + *modified = true + existing.Volumes = append(existing.Volumes, required) + } + } + for _, required := range required.AllowedFlexVolumes { + found := false + for _, curr := range existing.AllowedFlexVolumes { + if equality.Semantic.DeepEqual(required.Driver, curr.Driver) { + found = true + break + } + } + if !found { + *modified = true + existing.AllowedFlexVolumes = append(existing.AllowedFlexVolumes, required) + } + } + setBool(modified, &existing.AllowHostNetwork, required.AllowHostNetwork) + setBool(modified, &existing.AllowHostPorts, required.AllowHostPorts) + setBool(modified, &existing.AllowHostPID, required.AllowHostPID) + setBool(modified, &existing.AllowHostIPC, required.AllowHostIPC) + setBoolPtr(modified, &existing.DefaultAllowPrivilegeEscalation, required.DefaultAllowPrivilegeEscalation) + setBoolPtr(modified, &existing.AllowPrivilegeEscalation, required.AllowPrivilegeEscalation) + if 
!equality.Semantic.DeepEqual(existing.SELinuxContext, required.SELinuxContext) { + *modified = true + existing.SELinuxContext = required.SELinuxContext + } + if !equality.Semantic.DeepEqual(existing.RunAsUser, required.RunAsUser) { + *modified = true + existing.RunAsUser = required.RunAsUser + } + if !equality.Semantic.DeepEqual(existing.FSGroup, required.FSGroup) { + *modified = true + existing.FSGroup = required.FSGroup + } + if !equality.Semantic.DeepEqual(existing.SupplementalGroups, required.SupplementalGroups) { + *modified = true + existing.SupplementalGroups = required.SupplementalGroups + } + setBool(modified, &existing.ReadOnlyRootFilesystem, required.ReadOnlyRootFilesystem) + mergeStringSlice(modified, &existing.Users, required.Users) + mergeStringSlice(modified, &existing.Groups, required.Groups) + mergeStringSlice(modified, &existing.SeccompProfiles, required.SeccompProfiles) + mergeStringSlice(modified, &existing.AllowedUnsafeSysctls, required.AllowedUnsafeSysctls) + mergeStringSlice(modified, &existing.ForbiddenSysctls, required.ForbiddenSysctls) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/apiext.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/apiext.go new file mode 100644 index 0000000000..ed6fbeb169 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/apiext.go @@ -0,0 +1,27 @@ +package resourceread + +import ( + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + apiExtensionsScheme = runtime.NewScheme() + apiExtensionsCodecs = serializer.NewCodecFactory(apiExtensionsScheme) +) + +func init() { + if err := apiextv1beta1.AddToScheme(apiExtensionsScheme); err != nil { + panic(err) + } +} + +// ReadCustomResourceDefinitionV1Beta1OrDie reads crd object from bytes. Panics on error. +func ReadCustomResourceDefinitionV1Beta1OrDie(objBytes []byte) *apiextv1beta1.CustomResourceDefinition { + requiredObj, err := runtime.Decode(apiExtensionsCodecs.UniversalDecoder(apiextv1beta1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*apiextv1beta1.CustomResourceDefinition) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/apireg.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/apireg.go new file mode 100644 index 0000000000..f8915b50ef --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/apireg.go @@ -0,0 +1,31 @@ +package resourceread + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + apiregv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + apiregv1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1" +) + +var ( + apiRegScheme = runtime.NewScheme() + apiRegCodecs = serializer.NewCodecFactory(apiRegScheme) +) + +func init() { + if err := apiregv1beta1.AddToScheme(apiRegScheme); err != nil { + panic(err) + } + if err := apiregv1.AddToScheme(apiRegScheme); err != nil { + panic(err) + } +} + +// ReadAPIServiceV1OrDie reads aiservice object from bytes. Panics on error. 
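For orientation on the resourceread helpers, a small sketch, not from the vendored sources, of decoding a manifest with ReadCustomResourceDefinitionV1Beta1OrDie; the CRD content below is illustrative, and the helper panics on anything whose apiVersion/kind is not registered in its scheme.

package main

import (
	"fmt"

	"github.com/openshift/cluster-version-operator/lib/resourceread"
)

// A minimal, illustrative CRD manifest in JSON form.
const crdManifest = `{
  "apiVersion": "apiextensions.k8s.io/v1beta1",
  "kind": "CustomResourceDefinition",
  "metadata": {"name": "cvoconfigs.clusterversion.openshift.io"},
  "spec": {
    "group": "clusterversion.openshift.io",
    "version": "v1",
    "scope": "Namespaced",
    "names": {"plural": "cvoconfigs", "kind": "CVOConfig"}
  }
}`

func main() {
	crd := resourceread.ReadCustomResourceDefinitionV1Beta1OrDie([]byte(crdManifest))
	fmt.Println(crd.Name, crd.Spec.Group)
}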
+func ReadAPIServiceV1OrDie(objBytes []byte) *apiregv1.APIService { + requiredObj, err := runtime.Decode(apiRegCodecs.UniversalDecoder(apiregv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*apiregv1.APIService) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/apps.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/apps.go new file mode 100644 index 0000000000..e62c12e317 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/apps.go @@ -0,0 +1,36 @@ +package resourceread + +import ( + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + appsScheme = runtime.NewScheme() + appsCodecs = serializer.NewCodecFactory(appsScheme) +) + +func init() { + if err := appsv1.AddToScheme(appsScheme); err != nil { + panic(err) + } +} + +// ReadDeploymentV1OrDie reads deployment object from bytes. Panics on error. +func ReadDeploymentV1OrDie(objBytes []byte) *appsv1.Deployment { + requiredObj, err := runtime.Decode(appsCodecs.UniversalDecoder(appsv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*appsv1.Deployment) +} + +// ReadDaemonSetV1OrDie reads daemonset object from bytes. Panics on error. +func ReadDaemonSetV1OrDie(objBytes []byte) *appsv1.DaemonSet { + requiredObj, err := runtime.Decode(appsCodecs.UniversalDecoder(appsv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*appsv1.DaemonSet) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/batch.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/batch.go new file mode 100644 index 0000000000..e5f5a4a581 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/batch.go @@ -0,0 +1,27 @@ +package resourceread + +import ( + batchv1 "k8s.io/api/batch/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + batchScheme = runtime.NewScheme() + batchCodecs = serializer.NewCodecFactory(batchScheme) +) + +func init() { + if err := batchv1.AddToScheme(batchScheme); err != nil { + panic(err) + } +} + +// ReadJobV1OrDie reads Job object from bytes. Panics on error. +func ReadJobV1OrDie(objBytes []byte) *batchv1.Job { + requiredObj, err := runtime.Decode(batchCodecs.UniversalDecoder(batchv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*batchv1.Job) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/core.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/core.go new file mode 100644 index 0000000000..644b5c3512 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/core.go @@ -0,0 +1,52 @@ +package resourceread + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + coreScheme = runtime.NewScheme() + coreCodecs = serializer.NewCodecFactory(coreScheme) +) + +func init() { + if err := corev1.AddToScheme(coreScheme); err != nil { + panic(err) + } +} + +// ReadConfigMapV1OrDie reads configmap object from bytes. Panics on error. 
+func ReadConfigMapV1OrDie(objBytes []byte) *corev1.ConfigMap { + requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*corev1.ConfigMap) +} + +// ReadServiceAccountV1OrDie reads serviceaccount object from bytes. Panics on error. +func ReadServiceAccountV1OrDie(objBytes []byte) *corev1.ServiceAccount { + requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*corev1.ServiceAccount) +} + +func ReadNamespaceV1OrDie(objBytes []byte) *corev1.Namespace { + requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*corev1.Namespace) +} + +func ReadServiceV1OrDie(objBytes []byte) *corev1.Service { + requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*corev1.Service) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/image.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/image.go new file mode 100644 index 0000000000..1812b3b6f7 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/image.go @@ -0,0 +1,27 @@ +package resourceread + +import ( + imagev1 "github.com/openshift/api/image/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + imageScheme = runtime.NewScheme() + imageCodecs = serializer.NewCodecFactory(imageScheme) +) + +func init() { + if err := imagev1.AddToScheme(imageScheme); err != nil { + panic(err) + } +} + +// ReadImageStreamV1OrDie reads imagestream object from bytes. Panics on error. +func ReadImageStreamV1OrDie(objBytes []byte) *imagev1.ImageStream { + requiredObj, err := runtime.Decode(imageCodecs.UniversalDecoder(imagev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*imagev1.ImageStream) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/rbac.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/rbac.go new file mode 100644 index 0000000000..6eb4e4a3a0 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/rbac.go @@ -0,0 +1,58 @@ +package resourceread + +import ( + rbacv1 "k8s.io/api/rbac/v1" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + rbacScheme = runtime.NewScheme() + rbacCodecs = serializer.NewCodecFactory(rbacScheme) +) + +func init() { + if err := rbacv1.AddToScheme(rbacScheme); err != nil { + panic(err) + } + if err := rbacv1beta1.AddToScheme(rbacScheme); err != nil { + panic(err) + } +} + +// ReadClusterRoleBindingV1OrDie reads clusterrolebinding object from bytes. Panics on error. +func ReadClusterRoleBindingV1OrDie(objBytes []byte) *rbacv1.ClusterRoleBinding { + requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*rbacv1.ClusterRoleBinding) +} + +// ReadClusterRoleV1OrDie reads clusterole object from bytes. Panics on error. 
+func ReadClusterRoleV1OrDie(objBytes []byte) *rbacv1.ClusterRole { + requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*rbacv1.ClusterRole) +} + +// ReadRoleBindingV1OrDie reads clusterrolebinding object from bytes. Panics on error. +func ReadRoleBindingV1OrDie(objBytes []byte) *rbacv1.RoleBinding { + requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*rbacv1.RoleBinding) +} + +// ReadRoleV1OrDie reads clusterole object from bytes. Panics on error. +func ReadRoleV1OrDie(objBytes []byte) *rbacv1.Role { + requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*rbacv1.Role) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/security.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/security.go new file mode 100644 index 0000000000..d2e9dbd121 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/security.go @@ -0,0 +1,27 @@ +package resourceread + +import ( + securityv1 "github.com/openshift/api/security/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + securityScheme = runtime.NewScheme() + securityCodecs = serializer.NewCodecFactory(securityScheme) +) + +func init() { + if err := securityv1.AddToScheme(securityScheme); err != nil { + panic(err) + } +} + +// ReadSecurityContextConstraintsV1OrDie reads clusterrolebinding object from bytes. Panics on error. +func ReadSecurityContextConstraintsV1OrDie(objBytes []byte) *securityv1.SecurityContextConstraints { + requiredObj, err := runtime.Decode(securityCodecs.UniversalDecoder(securityv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*securityv1.SecurityContextConstraints) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/apis/apis.go b/vendor/github.com/openshift/cluster-version-operator/pkg/apis/apis.go new file mode 100644 index 0000000000..01ba749f35 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/apis/apis.go @@ -0,0 +1,7 @@ +package apis + +// ClusterVersionGroupName defines the API group for clusterversion. +const ClusterVersionGroupName = "clusterversion.openshift.io" + +// OperatorStatusGroupName defines the API group for operatorstatus. +const OperatorStatusGroupName = "operatorstatus.openshift.io" diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/cluster_id.go b/vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/cluster_id.go new file mode 100644 index 0000000000..33fe9cf07a --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/cluster_id.go @@ -0,0 +1,30 @@ +package v1 + +import ( + "encoding/json" + "fmt" + + "github.com/google/uuid" +) + +// UnmarshalJSON unmarshals RFC4122 uuid from string. 
+func (cid *ClusterID) UnmarshalJSON(b []byte) error { + var strid string + if err := json.Unmarshal(b, &strid); err != nil { + return err + } + + uid, err := uuid.Parse(strid) + if err != nil { + return err + } + if uid.Variant() != uuid.RFC4122 { + return fmt.Errorf("invalid ClusterID %q, must be an RFC4122-variant UUID: found %s", strid, uid.Variant()) + } + if uid.Version() != 4 { + return fmt.Errorf("Invalid ClusterID %q, must be a version-4 UUID: found %s", strid, uid.Version()) + } + + *cid = ClusterID(uid.String()) + return nil +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/cvoconfig.go b/vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/cvoconfig.go new file mode 100644 index 0000000000..a3461ebd22 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/cvoconfig.go @@ -0,0 +1,28 @@ +package v1 + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyObject copies the CVOConfig into an Object. This doesn't actually +// require a deep copy, but the code generator (and Go itself) isn't advanced +// enough to determine that. +func (c *CVOConfig) DeepCopyObject() runtime.Object { + out := *c + c.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return &out +} + +// DeepCopyInto copies the CVOConfig into another CVOConfig. This doesn't +// actually require a deep copy, but the code generator (and Go itself) isn't +// advanced enough to determine that. +func (c *CVOConfig) DeepCopyInto(out *CVOConfig) { + *out = *c + c.ObjectMeta.DeepCopyInto(&out.ObjectMeta) +} + +func (c CVOConfig) String() string { + return fmt.Sprintf("{ Upstream: %s Channel: %s ClusterID: %s }", c.Upstream, c.Channel, c.ClusterID) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/register.go b/vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/register.go new file mode 100644 index 0000000000..28b3fc5e8f --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/register.go @@ -0,0 +1,45 @@ +package v1 + +import ( + "github.com/openshift/cluster-version-operator/pkg/apis" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects. +var SchemeGroupVersion = schema.GroupVersion{Group: apis.ClusterVersionGroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified +// GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder for ClusterVersionOperator's types. + SchemeBuilder runtime.SchemeBuilder + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + localSchemeBuilder = &SchemeBuilder + // AddToScheme is the function alias for AddtoScheme. + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of + // the generated functions takes place in the generated files. The + // separation makes the code compile even when the generated files are + // missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to api.Scheme. 
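A small usage sketch, not part of the patch, for the registration plumbing above: building a fresh scheme and registering the clusterversion.openshift.io/v1 types through the exported AddToScheme alias.

package main

import (
	"fmt"

	cvv1 "github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := cvv1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// After registration the scheme recognizes the group's kinds.
	fmt.Println(scheme.Recognizes(cvv1.SchemeGroupVersion.WithKind("CVOConfig"))) // true
}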
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &CVOConfig{}, + &CVOStatus{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/types.go b/vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/types.go new file mode 100644 index 0000000000..8722423f82 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/types.go @@ -0,0 +1,73 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// CVOConfigList is a list of CVOConfig resources. +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type CVOConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []CVOConfig `json:"items"` +} + +// CVOConfig is the configuration for the ClusterVersionOperator. This is where +// parameters related to automatic updates can be set. +// +genclient +type CVOConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Upstream URL `json:"upstream"` + Channel string `json:"channel"` + ClusterID ClusterID `json:"clusterID"` + + DesiredUpdate Update `json:"desiredUpdate"` + + // Overrides is list of overides for components that are managed by + // cluster version operator + Overrides []ComponentOverride `json:"overrides,omitempty"` +} + +// ClusterID is string RFC4122 uuid. +type ClusterID string + +// ComponentOverride allows overriding cluster version operator's behavior +// for a component. +type ComponentOverride struct { + // Kind should match the TypeMeta.Kind for object. + Kind string `json:"kind"` + + // The Namespace and Name for the component. + Namespace string `json:"namespace"` + Name string `json:"name"` + + // Unmanaged controls if cluster version operator should stop managing. + // Default: false + Unmanaged bool `json:"unmanaged"` +} + +// URL is a thin wrapper around string that ensures the string is a valid URL. +type URL string + +// CVOStatus contains information specific to the ClusterVersionOperator. This +// object is inserted into the Extension attribute of the generic +// OperatorStatus object. +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type CVOStatus struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + AvailableUpdates []Update `json:"availableUpdates"` +} + +// Update represents a release of the ClusterVersionOperator, referenced by the +// Payload member. +type Update struct { + Version string `json:"version"` + Payload string `json:"payload"` +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/url.go b/vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/url.go new file mode 100644 index 0000000000..82bc804e25 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/url.go @@ -0,0 +1,22 @@ +package v1 + +import ( + "encoding/json" + "net/url" +) + +// UnmarshalJSON unmarshals a URL, ensuring that it is valid. 
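To make the serialized shape of CVOConfig concrete, a hedged example, not from the vendored sources, of decoding one from JSON. The field names follow the struct tags above, the clusterID is an arbitrary but well-formed version-4 RFC4122 UUID (anything else is rejected by ClusterID.UnmarshalJSON), and the payload reference is made up.

package main

import (
	"encoding/json"
	"fmt"

	cvv1 "github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1"
)

const doc = `{
  "upstream": "http://localhost:8080/graph",
  "channel": "fast",
  "clusterID": "9bf1fb4c-8a7d-4b05-9a4c-3a8d2e6a1f42",
  "desiredUpdate": {"version": "0.0.1", "payload": "registry.example.com/payload:0.0.1"}
}`

func main() {
	var config cvv1.CVOConfig
	if err := json.Unmarshal([]byte(doc), &config); err != nil {
		panic(err)
	}
	fmt.Println(config.Channel, config.DesiredUpdate.Payload)
}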
+func (u *URL) UnmarshalJSON(data []byte) error { + var raw string + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + + if _, err := url.Parse(raw); err != nil { + return err + } + + *u = URL(raw) + + return nil +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..528644fb7c --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1/zz_generated.deepcopy.go @@ -0,0 +1,89 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CVOConfigList) DeepCopyInto(out *CVOConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CVOConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CVOConfigList. +func (in *CVOConfigList) DeepCopy() *CVOConfigList { + if in == nil { + return nil + } + out := new(CVOConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CVOConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CVOStatus) DeepCopyInto(out *CVOStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.AvailableUpdates != nil { + in, out := &in.AvailableUpdates, &out.AvailableUpdates + *out = make([]Update, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CVOStatus. +func (in *CVOStatus) DeepCopy() *CVOStatus { + if in == nil { + return nil + } + out := new(CVOStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CVOStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1/register.go b/vendor/github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1/register.go new file mode 100644 index 0000000000..c3d1c82e00 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1/register.go @@ -0,0 +1,45 @@ +package v1 + +import ( + "github.com/openshift/cluster-version-operator/pkg/apis" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects. +var SchemeGroupVersion = schema.GroupVersion{Group: apis.OperatorStatusGroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified +// GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder for ClusterVersionOperator's types. + SchemeBuilder runtime.SchemeBuilder + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + localSchemeBuilder = &SchemeBuilder + // AddToScheme is the function alias for AddtoScheme. + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of + // the generated functions takes place in the generated files. The + // separation makes the code compile even when the generated files are + // missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ClusterOperator{}, + &ClusterOperatorList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1/types.go b/vendor/github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1/types.go new file mode 100644 index 0000000000..75083bfad2 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1/types.go @@ -0,0 +1,106 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// ClusterOperatorList is a list of OperatorStatus resources. +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ClusterOperatorList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []ClusterOperator `json:"items"` +} + +// ClusterOperator is the Custom Resource object which holds the current state +// of an operator. This object is used by operators to convey their state to +// the rest of the cluster. +// +genclient +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ClusterOperator struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // Spec hold the intent of how this operator should behave. + Spec ClusterOperatorSpec `json:"spec"` + + // status holds the information about the state of an operator. It is consistent with status information across + // the kube ecosystem. 
+ Status ClusterOperatorStatus `json:"status"` +} + +// ClusterOperatorSpec is empty for now, but you could imagine holding information like "pause". +type ClusterOperatorSpec struct { +} + +// ClusterOperatorStatus provides information about the status of the operator. +// +k8s:deepcopy-gen=true +type ClusterOperatorStatus struct { + // conditions describes the state of the operator's reconciliation functionality. + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []ClusterOperatorStatusCondition `json:"conditions"` + + // version indicates which version of the operator updated the current + // status object. + Version string `json:"version"` + + // extension contains any additional status information specific to the + // operator which owns this status object. + Extension runtime.RawExtension `json:"extension,omitempty"` +} + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// ClusterOperatorStatusCondition represents the state of the operator's +// reconciliation functionality. +// +k8s:deepcopy-gen=true +type ClusterOperatorStatusCondition struct { + // type specifies the state of the operator's reconciliation functionality. + Type ClusterStatusConditionType `json:"type"` + + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status"` + + // LastTransitionTime is the time of the last update to the current status object. + LastTransitionTime metav1.Time `json:"lastTransitionTime"` + + // reason is the reason for the condition's last transition. Reasons are CamelCase + Reason string `json:"reason,omitempty"` + + // message provides additional information about the current condition. + // This is only to be consumed by humans. + Message string `json:"message,omitempty"` +} + +// ClusterStatusConditionType is the state of the operator's reconciliation functionality. +type ClusterStatusConditionType string + +const ( + // OperatorAvailable indicates that the binary maintained by the operator (eg: openshift-apiserver for the + // openshift-apiserver-operator), is functional and available in the cluster. + OperatorAvailable ClusterStatusConditionType = "Available" + + // OperatorProgressing indicates that the operator is actively making changes to the binary maintained by the + // operator (eg: openshift-apiserver for the openshift-apiserver-operator). + OperatorProgressing ClusterStatusConditionType = "Progressing" + + // OperatorFailing indicates that the operator has encountered an error that is preventing it from working properly. + // The binary maintained by the operator (eg: openshift-apiserver for the openshift-apiserver-operator) may still be + // available, but the user intent cannot be fulfilled. 
+ OperatorFailing ClusterStatusConditionType = "Failing" +) diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..400e0e54b6 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1/zz_generated.deepcopy.go @@ -0,0 +1,127 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOperator) DeepCopyInto(out *ClusterOperator) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperator. +func (in *ClusterOperator) DeepCopy() *ClusterOperator { + if in == nil { + return nil + } + out := new(ClusterOperator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterOperator) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOperatorList) DeepCopyInto(out *ClusterOperatorList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterOperator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorList. +func (in *ClusterOperatorList) DeepCopy() *ClusterOperatorList { + if in == nil { + return nil + } + out := new(ClusterOperatorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterOperatorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
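For orientation, a rough sketch of how a CVOStatus might be packed into ClusterOperatorStatus.Extension, which is the field the autoupdate controller later decodes its available updates from. Plain JSON encoding is an assumption here; the operator's own status code may use the generated codecs instead, and the payload reference is made up.

package main

import (
	"encoding/json"
	"fmt"

	cvv1 "github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1"
	osv1 "github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1"
)

func main() {
	cvoStatus := cvv1.CVOStatus{
		AvailableUpdates: []cvv1.Update{{Version: "0.0.2", Payload: "registry.example.com/payload:0.0.2"}},
	}

	raw, err := json.Marshal(cvoStatus)
	if err != nil {
		panic(err)
	}

	status := osv1.ClusterOperatorStatus{Version: "0.0.1"}
	status.Extension.Raw = raw // the opaque, operator-specific portion of the status

	fmt.Println(string(status.Extension.Raw))
}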
+func (in *ClusterOperatorStatus) DeepCopyInto(out *ClusterOperatorStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterOperatorStatusCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Extension.DeepCopyInto(&out.Extension) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorStatus. +func (in *ClusterOperatorStatus) DeepCopy() *ClusterOperatorStatus { + if in == nil { + return nil + } + out := new(ClusterOperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOperatorStatusCondition) DeepCopyInto(out *ClusterOperatorStatusCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorStatusCondition. +func (in *ClusterOperatorStatusCondition) DeepCopy() *ClusterOperatorStatusCondition { + if in == nil { + return nil + } + out := new(ClusterOperatorStatusCondition) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/autoupdate/autoupdate.go b/vendor/github.com/openshift/cluster-version-operator/pkg/autoupdate/autoupdate.go new file mode 100644 index 0000000000..8689bbdf96 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/autoupdate/autoupdate.go @@ -0,0 +1,229 @@ +package autoupdate + +import ( + "fmt" + "sort" + "time" + + "github.com/blang/semver" + + "github.com/golang/glog" + "github.com/openshift/cluster-version-operator/lib/resourceapply" + "github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1" + clientset "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned" + "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/scheme" + cvinformersv1 "github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/clusterversion.openshift.io/v1" + osinformersv1 "github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/operatorstatus.openshift.io/v1" + cvlistersv1 "github.com/openshift/cluster-version-operator/pkg/generated/listers/clusterversion.openshift.io/v1" + oslistersv1 "github.com/openshift/cluster-version-operator/pkg/generated/listers/operatorstatus.openshift.io/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + coreclientsetv1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" +) + +const ( + // maxRetries is the number of times a machineconfig pool will be retried before it is dropped out of the queue. + // With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the times + // a machineconfig pool is going to be requeued: + // + // 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s + maxRetries = 15 +) + +// Controller defines autoupdate controller. +type Controller struct { + // namespace and name are used to find the CVOConfig, ClusterOperator. 
+ namespace, name string + + client clientset.Interface + eventRecorder record.EventRecorder + + syncHandler func(key string) error + + cvoConfigLister cvlistersv1.CVOConfigLister + clusterOperatorLister oslistersv1.ClusterOperatorLister + + cvoConfigListerSynced cache.InformerSynced + operatorStatusSynced cache.InformerSynced + + // queue only ever has one item, but it has nice error handling backoff/retry semantics + queue workqueue.RateLimitingInterface +} + +// New returns a new autoupdate controller. +func New( + namespace, name string, + cvoConfigInformer cvinformersv1.CVOConfigInformer, + clusterOperatorInformer osinformersv1.ClusterOperatorInformer, + client clientset.Interface, + kubeClient kubernetes.Interface, +) *Controller { + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartRecordingToSink(&coreclientsetv1.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) + + ctrl := &Controller{ + namespace: namespace, + name: name, + client: client, + eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "autoupdater"}), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "autoupdater"), + } + + cvoConfigInformer.Informer().AddEventHandler(ctrl.eventHandler()) + clusterOperatorInformer.Informer().AddEventHandler(ctrl.eventHandler()) + + ctrl.syncHandler = ctrl.sync + + ctrl.cvoConfigLister = cvoConfigInformer.Lister() + ctrl.clusterOperatorLister = clusterOperatorInformer.Lister() + + ctrl.cvoConfigListerSynced = cvoConfigInformer.Informer().HasSynced + ctrl.operatorStatusSynced = clusterOperatorInformer.Informer().HasSynced + + return ctrl +} + +// Run runs the autoupdate controller. +func (ctrl *Controller) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer ctrl.queue.ShutDown() + + glog.Info("Starting AutoUpdateController") + defer glog.Info("Shutting down AutoUpdateController") + + if !cache.WaitForCacheSync(stopCh, + ctrl.cvoConfigListerSynced, + ctrl.operatorStatusSynced, + ) { + return + } + + for i := 0; i < workers; i++ { + go wait.Until(ctrl.worker, time.Second, stopCh) + } + + <-stopCh +} + +func (ctrl *Controller) eventHandler() cache.ResourceEventHandler { + key := fmt.Sprintf("%s/%s", ctrl.namespace, ctrl.name) + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { ctrl.queue.Add(key) }, + UpdateFunc: func(old, new interface{}) { ctrl.queue.Add(key) }, + DeleteFunc: func(obj interface{}) { ctrl.queue.Add(key) }, + } +} + +func (ctrl *Controller) worker() { + for ctrl.processNextWorkItem() { + } +} + +func (ctrl *Controller) processNextWorkItem() bool { + key, quit := ctrl.queue.Get() + if quit { + return false + } + defer ctrl.queue.Done(key) + + err := ctrl.syncHandler(key.(string)) + ctrl.handleErr(err, key) + + return true +} + +func (ctrl *Controller) handleErr(err error, key interface{}) { + if err == nil { + ctrl.queue.Forget(key) + return + } + + if ctrl.queue.NumRequeues(key) < maxRetries { + glog.V(2).Infof("Error syncing controller %v: %v", key, err) + ctrl.queue.AddRateLimited(key) + return + } + + utilruntime.HandleError(err) + glog.V(2).Infof("Dropping controller %q out of the queue: %v", key, err) + ctrl.queue.Forget(key) +} + +func (ctrl *Controller) sync(key string) error { + startTime := time.Now() + glog.V(4).Infof("Started syncing controller %q (%v)", key, startTime) + defer func() { + glog.V(4).Infof("Finished syncing controller %q (%v)", key, 
time.Since(startTime)) + }() + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return err + } + + operatorstatus, err := ctrl.clusterOperatorLister.ClusterOperators(namespace).Get(name) + if errors.IsNotFound(err) { + glog.V(2).Infof("ClusterOperator %v has been deleted", key) + return nil + } + if err != nil { + return err + } + + cvoconfig, err := ctrl.cvoConfigLister.CVOConfigs(namespace).Get(name) + if errors.IsNotFound(err) { + glog.V(2).Infof("CVOConfig %v has been deleted", key) + return nil + } + if err != nil { + return err + } + + // Deep-copy otherwise we are mutating our cache. + // TODO: Deep-copy only when needed. + ops := operatorstatus.DeepCopy() + config := new(v1.CVOConfig) + cvoconfig.DeepCopyInto(config) + + obji, _, err := scheme.Codecs.UniversalDecoder().Decode(ops.Status.Extension.Raw, nil, &v1.CVOStatus{}) + if err != nil { + return fmt.Errorf("unable to decode CVOStatus from extension.Raw: %v", err) + } + cvoststatus, ok := obji.(*v1.CVOStatus) + if !ok { + return fmt.Errorf("expected *v1.CVOStatus found %T", obji) + } + + if !updateAvail(cvoststatus.AvailableUpdates) { + return nil + } + up := nextUpdate(cvoststatus.AvailableUpdates) + config.DesiredUpdate = up + + _, updated, err := resourceapply.ApplyCVOConfigFromCache(ctrl.cvoConfigLister, ctrl.client.ClusterversionV1(), config) + if updated { + glog.Info("Auto Update set to %s", up) + } + return err +} + +func updateAvail(ups []v1.Update) bool { + return len(ups) > 0 +} + +func nextUpdate(ups []v1.Update) v1.Update { + sorted := ups + sort.Slice(sorted, func(i, j int) bool { + vi := semver.MustParse(sorted[i].Version) + vj := semver.MustParse(sorted[j].Version) + return vi.GTE(vj) + }) + return sorted[0] +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/autoupdate/autoupdate_test.go b/vendor/github.com/openshift/cluster-version-operator/pkg/autoupdate/autoupdate_test.go new file mode 100644 index 0000000000..06df2178b7 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/autoupdate/autoupdate_test.go @@ -0,0 +1,43 @@ +package autoupdate + +import ( + "fmt" + "testing" + + "github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1" +) + +func TestNextUpdate(t *testing.T) { + tests := []struct { + avail []string + want string + }{{ + avail: []string{"0.0.0", "0.0.1", "0.0.2"}, + want: "0.0.2", + }, { + avail: []string{"0.0.2", "0.0.0", "0.0.1"}, + want: "0.0.2", + }, { + avail: []string{"0.0.1", "0.0.0", "0.0.2"}, + want: "0.0.2", + }, { + avail: []string{"0.0.0", "0.0.0+new.2", "0.0.0+new.3"}, + want: "0.0.0+new.3", + }, { + avail: []string{"0.0.0", "0.0.0-new.2", "0.0.0-new.3"}, + want: "0.0.0", + }} + for idx, test := range tests { + t.Run(fmt.Sprintf("test: #%d", idx), func(t *testing.T) { + ups := []v1.Update{} + for _, v := range test.avail { + ups = append(ups, v1.Update{Version: v}) + } + + got := nextUpdate(ups) + if got.Version != test.want { + t.Fatalf("mismatch: got %s want: %s", got, test.want) + } + }) + } +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/cincinnati/cincinnati.go b/vendor/github.com/openshift/cluster-version-operator/pkg/cincinnati/cincinnati.go new file mode 100644 index 0000000000..4d5219084e --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/cincinnati/cincinnati.go @@ -0,0 +1,127 @@ +package cincinnati + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + + "github.com/blang/semver" + "github.com/google/uuid" +) 
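Before the client implementation continues, a rough usage sketch, not part of the vendored code: fetching the next-applicable updates from an upstream Cincinnati endpoint. The upstream URL and channel mirror the defaults used elsewhere in this patch, and a reachable endpoint is assumed for the call to succeed.

package main

import (
	"fmt"

	"github.com/blang/semver"
	"github.com/google/uuid"

	"github.com/openshift/cluster-version-operator/pkg/cincinnati"
)

func main() {
	// Each client carries a random identifier for the requesting cluster.
	client := cincinnati.NewClient(uuid.New())

	updates, err := client.GetUpdates("http://localhost:8080/graph", "fast", semver.MustParse("0.0.1"))
	if err != nil {
		fmt.Println("fetching updates:", err)
		return
	}
	for _, up := range updates {
		fmt.Println(up.Version, up.Payload)
	}
}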
+ +const ( + // ContentTypeGraphV1 is the MIME type specified in the HTTP Accept header + // of requests sent to the Cincinnati Graph API. + ContentTypeGraphV1 = "application/vnd.redhat.cincinnati.graph+json; version=1.0" +) + +// Client is a Cincinnati client which can be used to fetch update graphs from +// an upstream Cincinnati stack. +type Client struct { + id uuid.UUID +} + +// NewClient creates a new Cincinnati client with the given client identifier. +func NewClient(id uuid.UUID) Client { + return Client{id: id} +} + +// Update is a single node from the update graph. +type Update node + +// GetUpdates fetches the next-applicable update payloads from the specified +// upstream Cincinnati stack given the current version and channel. The next- +// applicable updates are determined by downloading the update graph, finding +// the current version within that graph (typically the root node), and then +// finding all of the children. These children are the available updates for +// the current version and their payloads indicate from where the actual update +// image can be downloaded. +func (c Client) GetUpdates(upstream string, channel string, version semver.Version) ([]Update, error) { + // Download the update graph. + req, err := http.NewRequest("GET", upstream, nil) + if err != nil { + return nil, err + } + req.Header.Add("Accept", ContentTypeGraphV1) + + client := http.Client{} + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("unexpected HTTP status: %s", resp.Status) + } + + // Parse the graph. + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + var graph graph + if err = json.Unmarshal(body, &graph); err != nil { + return nil, err + } + + // Find the current version within the graph. + var currentIdx int + for i, node := range graph.Nodes { + if version.EQ(node.Version) { + currentIdx = i + break + } + } + + // Find the children of the current version. + var nextIdxs []int + for _, edge := range graph.Edges { + if edge.Origin == currentIdx { + nextIdxs = append(nextIdxs, edge.Destination) + } + } + + var updates []Update + for _, i := range nextIdxs { + updates = append(updates, Update(graph.Nodes[i])) + } + + return updates, nil +} + +type graph struct { + Nodes []node + Edges []edge +} + +type node struct { + Version semver.Version + Payload string +} + +type edge struct { + Origin int + Destination int +} + +// UnmarshalJSON unmarshals an edge in the update graph. The edge's JSON +// representation is a two-element array of indices, but Go's representation is +// a struct with two elements so this custom unmarshal method is required. 
+func (e *edge) UnmarshalJSON(data []byte) error { + var fields []int + if err := json.Unmarshal(data, &fields); err != nil { + return err + } + + if len(fields) != 2 { + return fmt.Errorf("expected 2 fields, found %d", len(fields)) + } + + e.Origin = fields[0] + e.Destination = fields[1] + + return nil +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/cvo.go b/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/cvo.go new file mode 100644 index 0000000000..e41da786b1 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/cvo.go @@ -0,0 +1,273 @@ +package cvo + +import ( + "fmt" + "time" + + "github.com/golang/glog" + "github.com/google/uuid" + corev1 "k8s.io/api/core/v1" + apiextclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + apiextinformersv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1" + apiextlistersv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + appsinformersv1 "k8s.io/client-go/informers/apps/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + coreclientsetv1 "k8s.io/client-go/kubernetes/typed/core/v1" + appslisterv1 "k8s.io/client-go/listers/apps/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + + "github.com/openshift/cluster-version-operator/lib/resourceapply" + cvv1 "github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1" + clientset "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned" + cvinformersv1 "github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/clusterversion.openshift.io/v1" + osinformersv1 "github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/operatorstatus.openshift.io/v1" + cvlistersv1 "github.com/openshift/cluster-version-operator/pkg/generated/listers/clusterversion.openshift.io/v1" + oslistersv1 "github.com/openshift/cluster-version-operator/pkg/generated/listers/operatorstatus.openshift.io/v1" +) + +const ( + // maxRetries is the number of times a machineconfig pool will be retried before it is dropped out of the queue. + // With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the times + // a machineconfig pool is going to be requeued: + // + // 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s + maxRetries = 15 +) + +// ownerKind contains the schema.GroupVersionKind for type that owns objects managed by CVO. +var ownerKind = cvv1.SchemeGroupVersion.WithKind("CVOConfig") + +// Operator defines cluster version operator. +type Operator struct { + // nodename allows CVO to sync fetchPayload to same node as itself. + nodename string + // namespace and name are used to find the CVOConfig, OperatorStatus. + namespace, name string + // releaseImage allows templating CVO deployment manifest. + releaseImage string + + // restConfig is used to create resourcebuilder. 
+ restConfig *rest.Config + + client clientset.Interface + kubeClient kubernetes.Interface + apiExtClient apiextclientset.Interface + eventRecorder record.EventRecorder + + syncHandler func(key string) error + + clusterOperatorLister oslistersv1.ClusterOperatorLister + + crdLister apiextlistersv1beta1.CustomResourceDefinitionLister + deployLister appslisterv1.DeploymentLister + cvoConfigLister cvlistersv1.CVOConfigLister + crdListerSynced cache.InformerSynced + deployListerSynced cache.InformerSynced + cvoConfigListerSynced cache.InformerSynced + + // queue only ever has one item, but it has nice error handling backoff/retry semantics + queue workqueue.RateLimitingInterface +} + +// New returns a new cluster version operator. +func New( + nodename string, + namespace, name string, + releaseImage string, + cvoConfigInformer cvinformersv1.CVOConfigInformer, + clusterOperatorInformer osinformersv1.ClusterOperatorInformer, + crdInformer apiextinformersv1beta1.CustomResourceDefinitionInformer, + deployInformer appsinformersv1.DeploymentInformer, + restConfig *rest.Config, + client clientset.Interface, + kubeClient kubernetes.Interface, + apiExtClient apiextclientset.Interface, +) *Operator { + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartRecordingToSink(&coreclientsetv1.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) + + optr := &Operator{ + nodename: nodename, + namespace: namespace, + name: name, + releaseImage: releaseImage, + restConfig: restConfig, + client: client, + kubeClient: kubeClient, + apiExtClient: apiExtClient, + eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "clusterversionoperator"}), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "clusterversionoperator"), + } + + cvoConfigInformer.Informer().AddEventHandler(optr.eventHandler()) + crdInformer.Informer().AddEventHandler(optr.eventHandler()) + + optr.syncHandler = optr.sync + + optr.clusterOperatorLister = clusterOperatorInformer.Lister() + + optr.crdLister = crdInformer.Lister() + optr.crdListerSynced = crdInformer.Informer().HasSynced + optr.deployLister = deployInformer.Lister() + optr.deployListerSynced = deployInformer.Informer().HasSynced + optr.cvoConfigLister = cvoConfigInformer.Lister() + optr.cvoConfigListerSynced = cvoConfigInformer.Informer().HasSynced + + return optr +} + +// Run runs the cluster version operator. 
+func (optr *Operator) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer optr.queue.ShutDown() + + glog.Info("Starting ClusterVersionOperator") + defer glog.Info("Shutting down ClusterVersionOperator") + + if !cache.WaitForCacheSync(stopCh, + optr.crdListerSynced, + optr.deployListerSynced, + optr.cvoConfigListerSynced, + ) { + return + } + + for i := 0; i < workers; i++ { + go wait.Until(optr.worker, time.Second, stopCh) + } + + <-stopCh +} + +func (optr *Operator) eventHandler() cache.ResourceEventHandler { + workQueueKey := fmt.Sprintf("%s/%s", optr.namespace, optr.name) + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { optr.queue.Add(workQueueKey) }, + UpdateFunc: func(old, new interface{}) { optr.queue.Add(workQueueKey) }, + DeleteFunc: func(obj interface{}) { optr.queue.Add(workQueueKey) }, + } +} + +func (optr *Operator) worker() { + for optr.processNextWorkItem() { + } +} + +func (optr *Operator) processNextWorkItem() bool { + key, quit := optr.queue.Get() + if quit { + return false + } + defer optr.queue.Done(key) + + err := optr.syncHandler(key.(string)) + optr.handleErr(err, key) + + return true +} + +func (optr *Operator) handleErr(err error, key interface{}) { + if err == nil { + optr.queue.Forget(key) + return + } + + if optr.queue.NumRequeues(key) < maxRetries { + glog.V(2).Infof("Error syncing operator %v: %v", key, err) + optr.queue.AddRateLimited(key) + return + } + + err = optr.syncDegradedStatus(err) + utilruntime.HandleError(err) + glog.V(2).Infof("Dropping operator %q out of the queue: %v", key, err) + optr.queue.Forget(key) +} + +func (optr *Operator) sync(key string) error { + startTime := time.Now() + glog.V(4).Infof("Started syncing operator %q (%v)", key, startTime) + defer func() { + glog.V(4).Infof("Finished syncing operator %q (%v)", key, time.Since(startTime)) + }() + + // We always run this to make sure CVOConfig can be synced. + if err := optr.syncCustomResourceDefinitions(); err != nil { + return err + } + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return err + } + + var obj *cvv1.CVOConfig + obj, err = optr.cvoConfigLister.CVOConfigs(namespace).Get(name) + if apierrors.IsNotFound(err) { + obj, err = optr.getConfig() + } + if err != nil { + return err + } + + config := &cvv1.CVOConfig{} + obj.DeepCopyInto(config) + + if err := optr.syncProgressingStatus(config); err != nil { + return err + } + + payloadDir, err := optr.updatePayloadDir(config) + if err != nil { + return err + } + releaseImage := optr.releaseImage + if config.DesiredUpdate.Payload != "" { + releaseImage = config.DesiredUpdate.Payload + } + payload, err := loadUpdatePayload(payloadDir, releaseImage) + if err != nil { + return err + } + + if err := optr.syncUpdatePayload(config, payload); err != nil { + return err + } + + return optr.syncAvailableStatus(config) +} + +func (optr *Operator) getConfig() (*cvv1.CVOConfig, error) { + upstream := cvv1.URL("http://localhost:8080/graph") + channel := "fast" + id, _ := uuid.NewRandom() + if id.Variant() != uuid.RFC4122 { + return nil, fmt.Errorf("invalid %q, must be an RFC4122-variant UUID: found %s", id, id.Variant()) + } + if id.Version() != 4 { + return nil, fmt.Errorf("Invalid %q, must be a version-4 UUID: found %s", id, id.Version()) + } + + // XXX: generate CVOConfig from options calculated above. 
+ config := &cvv1.CVOConfig{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: optr.namespace, + Name: optr.name, + }, + Upstream: upstream, + Channel: channel, + ClusterID: cvv1.ClusterID(id.String()), + } + + actual, _, err := resourceapply.ApplyCVOConfigFromCache(optr.cvoConfigLister, optr.client.ClusterversionV1(), config) + return actual, err +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/image.go b/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/image.go new file mode 100644 index 0000000000..90fa27414a --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/image.go @@ -0,0 +1,23 @@ +package cvo + +import "fmt" + +// ImageForShortName returns the image using the updatepayload embedded in +// the Operator. +func ImageForShortName(name string) (string, error) { + up, err := loadUpdatePayload(defaultUpdatePayloadDir, "") + if err != nil { + return "", fmt.Errorf("error loading update payload from %q: %v", defaultUpdatePayloadDir, err) + } + + for _, tag := range up.imageRef.Spec.Tags { + if tag.Name == name { + // we found the short name in ImageStream + if tag.From != nil && tag.From.Kind == "DockerImage" { + return tag.From.Name, nil + } + } + } + + return "", fmt.Errorf("error: Unknown name requested, could not find %s in UpdatePayload", name) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/internal/dynamicclient/client.go b/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/internal/dynamicclient/client.go new file mode 100644 index 0000000000..c154b1c844 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/internal/dynamicclient/client.go @@ -0,0 +1,103 @@ +package dynamicclient + +import ( + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery/cached" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +type resourceClientFactory struct { + dynamicClient dynamic.Interface + restMapper *restmapper.DeferredDiscoveryRESTMapper +} + +var ( + // this stores the singleton in a package local + singletonFactory *resourceClientFactory + once sync.Once +) + +// Private constructor for once.Do +func newSingletonFactory(config *rest.Config) func() { + return func() { + cachedDiscoveryClient := cached.NewMemCacheClient(kubernetes.NewForConfigOrDie(config).Discovery()) + restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoveryClient) + restMapper.Reset() + + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + panic(err) + } + + singletonFactory = &resourceClientFactory{ + dynamicClient: dynamicClient, + restMapper: restMapper, + } + singletonFactory.runBackgroundCacheReset(1 * time.Minute) + } +} + +// New returns the resource client using a singleton factory +func New(config *rest.Config, gvk schema.GroupVersionKind, namespace string) (dynamic.ResourceInterface, error) { + once.Do(newSingletonFactory(config)) + return singletonFactory.getResourceClient(gvk, namespace) +} + +// getResourceClient returns the dynamic client for the resource specified by the gvk. 
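[Editor's note: illustrative sketch, not part of the patch.] The call pattern for dynamicclient.New above: a GroupVersionKind plus a namespace yields a ready-to-use dynamic.ResourceInterface, with GVK-to-GVR resolution and scope handling done by the singleton factory. Because the package lives under pkg/cvo/internal, this hypothetical helper is written as if it sat in package cvo itself; the Deployment kind and parameter names are assumptions for illustration only.

// Illustrative sketch only (not vendored code). dynamicclient is an internal package,
// so this hypothetical helper pretends to live inside package cvo.
package cvo

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/rest"

	"github.com/openshift/cluster-version-operator/pkg/cvo/internal/dynamicclient"
)

// getDeploymentDynamic fetches a Deployment through the dynamic client: New resolves
// the GVK to a GVR (and its scope) and returns a namespaced ResourceInterface.
func getDeploymentDynamic(cfg *rest.Config, namespace, name string) (*unstructured.Unstructured, error) {
	gvk := schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}
	client, err := dynamicclient.New(cfg, gvk, namespace)
	if err != nil {
		return nil, err
	}
	return client.Get(name, metav1.GetOptions{})
}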
+func (c *resourceClientFactory) getResourceClient(gvk schema.GroupVersionKind, namespace string) (dynamic.ResourceInterface, error) { + var ( + gvr *schema.GroupVersionResource + namespaced bool + err error + ) + gvr, namespaced, err = gvkToGVR(gvk, c.restMapper) + if meta.IsNoMatchError(err) { + // refresh the restMapperCache and try once more. + c.restMapper.Reset() + gvr, namespaced, err = gvkToGVR(gvk, c.restMapper) + } + if err != nil { + return nil, fmt.Errorf("failed to get resource type: %v", err) + } + + // sometimes manifests of non-namespaced resources + // might have namespace set. + // preventing such cases. + ns := namespace + if !namespaced { + ns = "" + } + return c.dynamicClient.Resource(*gvr).Namespace(ns), nil +} + +func gvkToGVR(gvk schema.GroupVersionKind, restMapper *restmapper.DeferredDiscoveryRESTMapper) (*schema.GroupVersionResource, bool, error) { + mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if meta.IsNoMatchError(err) { + return nil, false, err + } + if err != nil { + return nil, false, fmt.Errorf("failed to get the resource REST mapping for GroupVersionKind(%s): %v", gvk.String(), err) + } + + return &mapping.Resource, mapping.Scope.Name() == meta.RESTScopeNameNamespace, nil +} + +// runBackgroundCacheReset - Starts the rest mapper cache reseting +// at a duration given. +func (c *resourceClientFactory) runBackgroundCacheReset(duration time.Duration) { + ticker := time.NewTicker(duration) + go func() { + for range ticker.C { + c.restMapper.Reset() + } + }() +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/internal/generic.go b/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/internal/generic.go new file mode 100644 index 0000000000..f015c4f112 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/internal/generic.go @@ -0,0 +1,106 @@ +package internal + +import ( + "encoding/json" + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + + "github.com/openshift/cluster-version-operator/lib" + "github.com/openshift/cluster-version-operator/lib/resourcebuilder" + "github.com/openshift/cluster-version-operator/pkg/cvo/internal/dynamicclient" + "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/scheme" +) + +// readUnstructuredV1OrDie reads operatorstatus object from bytes. Panics on error. 
+func readUnstructuredV1OrDie(objBytes []byte) *unstructured.Unstructured { + udi, _, err := scheme.Codecs.UniversalDecoder().Decode(objBytes, nil, &unstructured.Unstructured{}) + if err != nil { + panic(err) + } + return udi.(*unstructured.Unstructured) +} + +func applyUnstructured(client dynamic.ResourceInterface, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { + if required.GetName() == "" { + return nil, false, fmt.Errorf("invalid object: name cannot be empty") + } + existing, err := client.Get(required.GetName(), metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.Create(required) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + existing.SetAnnotations(required.GetAnnotations()) + existing.SetLabels(required.GetLabels()) + existing.SetOwnerReferences(required.GetOwnerReferences()) + skipKeys := sets.NewString("apiVersion", "kind", "metadata", "status") + for k, v := range required.Object { + if skipKeys.Has(k) { + continue + } + existing.Object[k] = v + } + + actual, err := client.Update(existing) + if err != nil { + return nil, false, err + } + return actual, existing.GetResourceVersion() != actual.GetResourceVersion(), nil +} + +type genericBuilder struct { + client dynamic.ResourceInterface + raw []byte + modifier resourcebuilder.MetaV1ObjectModifierFunc +} + +// NewGenericBuilder returns an implentation of resourcebuilder.Interface that +// uses dynamic clients for applying. +func NewGenericBuilder(config *rest.Config, m lib.Manifest) (resourcebuilder.Interface, error) { + client, err := dynamicclient.New(config, m.GVK, m.Object().GetNamespace()) + if err != nil { + return nil, err + } + return &genericBuilder{ + client: client, + raw: m.Raw, + }, nil +} + +func (b *genericBuilder) WithModifier(f resourcebuilder.MetaV1ObjectModifierFunc) resourcebuilder.Interface { + b.modifier = f + return b +} + +func (b *genericBuilder) Do() error { + ud := readUnstructuredV1OrDie(b.raw) + if b.modifier != nil { + b.modifier(ud) + } + + _, _, err := applyUnstructured(b.client, ud) + return err +} + +func createPatch(original, modified runtime.Object) ([]byte, error) { + originalData, err := json.Marshal(original) + if err != nil { + return nil, err + } + modifiedData, err := json.Marshal(modified) + if err != nil { + return nil, err + } + return strategicpatch.CreateTwoWayMergePatch(originalData, modifiedData, original) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/internal/operatorstatus.go b/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/internal/operatorstatus.go new file mode 100644 index 0000000000..81d6704b0b --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/internal/operatorstatus.go @@ -0,0 +1,115 @@ +package internal + +import ( + "time" + + "github.com/davecgh/go-spew/spew" + + "github.com/golang/glog" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/rest" + + "github.com/openshift/cluster-version-operator/lib" + "github.com/openshift/cluster-version-operator/lib/resourcebuilder" + osv1 "github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1" + osclientv1 "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1" +) + +var ( + osScheme = runtime.NewScheme() + osCodecs = 
serializer.NewCodecFactory(osScheme) + + osMapper = resourcebuilder.NewResourceMapper() +) + +func init() { + if err := osv1.AddToScheme(osScheme); err != nil { + panic(err) + } + + osMapper.RegisterGVK(osv1.SchemeGroupVersion.WithKind("OperatorStatus"), newOperatorStatusBuilder) + osMapper.AddToMap(resourcebuilder.Mapper) +} + +// readOperatorStatusV1OrDie reads operatorstatus object from bytes. Panics on error. +func readOperatorStatusV1OrDie(objBytes []byte) *osv1.ClusterOperator { + requiredObj, err := runtime.Decode(osCodecs.UniversalDecoder(osv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*osv1.ClusterOperator) +} + +type operatorStatusBuilder struct { + client *osclientv1.OperatorstatusV1Client + raw []byte + modifier resourcebuilder.MetaV1ObjectModifierFunc +} + +func newOperatorStatusBuilder(config *rest.Config, m lib.Manifest) resourcebuilder.Interface { + return &operatorStatusBuilder{ + client: osclientv1.NewForConfigOrDie(config), + raw: m.Raw, + } +} + +func (b *operatorStatusBuilder) WithModifier(f resourcebuilder.MetaV1ObjectModifierFunc) resourcebuilder.Interface { + b.modifier = f + return b +} + +func (b *operatorStatusBuilder) Do() error { + os := readOperatorStatusV1OrDie(b.raw) + if b.modifier != nil { + b.modifier(os) + } + + return waitForOperatorStatusToBeDone(b.client, os) +} + +const ( + osPollInternal = 1 * time.Second + osPollTimeout = 1 * time.Minute +) + +func waitForOperatorStatusToBeDone(client osclientv1.ClusterOperatorsGetter, os *osv1.ClusterOperator) error { + return wait.Poll(osPollInternal, osPollTimeout, func() (bool, error) { + eos, err := client.ClusterOperators(os.Namespace).Get(os.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + glog.V(4).Infof("OperatorStatus %s/%s is reporting %v", + eos.Namespace, eos.Name, spew.Sdump(eos.Status)) + + if eos.Status.Version != os.Status.Version { + return false, nil + } + + available := false + progressing := true + failing := true + for _, condition := range eos.Status.Conditions { + switch { + case condition.Type == osv1.OperatorAvailable && condition.Status == osv1.ConditionTrue: + available = true + case condition.Type == osv1.OperatorProgressing && condition.Status == osv1.ConditionFalse: + progressing = false + case condition.Type == osv1.OperatorFailing && condition.Status == osv1.ConditionFalse: + failing = false + } + } + + // if we're at the correct version, and available, not progressing, and not failing, we are done + if available && !progressing && !failing { + return true, nil + } + glog.V(3).Infof("OperatorStatus %s/%s is not done for version %s; it is version=%v, available=%v, progressing=%v, failing=%v", + eos.Namespace, eos.Name, os.Status.Version, + eos.Status.Version, available, progressing, failing) + + return false, nil + }) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/render.go b/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/render.go new file mode 100644 index 0000000000..7fa24859d4 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/render.go @@ -0,0 +1,114 @@ +package cvo + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "text/template" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Render renders all the manifests from /manifests to outputDir. 
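[Editor's note: illustrative sketch, not part of the patch.] The sketch below shows the ClusterOperator status shape that waitForOperatorStatusToBeDone above treats as "done": the reported version matches the expected one and the conditions read Available=True, Progressing=False, Failing=False. The namespace, name, and version strings are hypothetical.

// Illustrative sketch only (not vendored code): a ClusterOperator status that satisfies
// the "done" check in waitForOperatorStatusToBeDone above.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	osv1 "github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1"
)

func main() {
	done := &osv1.ClusterOperator{
		ObjectMeta: metav1.ObjectMeta{Namespace: "openshift-cluster-version", Name: "my-operator"}, // hypothetical
		Status: osv1.ClusterOperatorStatus{
			Version: "1.0.0", // must equal the version the manifest in the payload asked for
			Conditions: []osv1.ClusterOperatorStatusCondition{
				{Type: osv1.OperatorAvailable, Status: osv1.ConditionTrue},
				{Type: osv1.OperatorProgressing, Status: osv1.ConditionFalse},
				{Type: osv1.OperatorFailing, Status: osv1.ConditionFalse},
			},
		},
	}
	fmt.Printf("%+v\n", done.Status)
}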
+func Render(outputDir, releaseImage string) error { + var ( + manifestsDir = filepath.Join(defaultUpdatePayloadDir, cvoManifestDir) + oManifestsDir = filepath.Join(outputDir, "manifests") + bootstrapDir = "/bootstrap" + oBootstrapDir = filepath.Join(outputDir, "bootstrap") + + renderConfig = manifestRenderConfig{ReleaseImage: releaseImage} + ) + + tasks := []struct { + idir string + odir string + skipFiles sets.String + }{{ + idir: manifestsDir, + odir: oManifestsDir, + skipFiles: sets.NewString("image-references"), + }, { + idir: bootstrapDir, + odir: oBootstrapDir, + skipFiles: sets.NewString(), + }} + var errs []error + for _, task := range tasks { + if err := renderDir(renderConfig, task.idir, task.odir, task.skipFiles); err != nil { + errs = append(errs, err) + } + } + + agg := utilerrors.NewAggregate(errs) + if agg != nil { + return fmt.Errorf("error rendering manifests: %v", agg.Error()) + } + return nil +} + +func renderDir(renderConfig manifestRenderConfig, idir, odir string, skipFiles sets.String) error { + if err := os.MkdirAll(odir, 0666); err != nil { + return err + } + files, err := ioutil.ReadDir(idir) + if err != nil { + return err + } + var errs []error + for _, file := range files { + if file.IsDir() { + continue + } + if skipFiles.Has(file.Name()) { + continue + } + + ipath := filepath.Join(idir, file.Name()) + iraw, err := ioutil.ReadFile(ipath) + if err != nil { + errs = append(errs, err) + continue + } + + rraw, err := renderManifest(renderConfig, iraw) + if err != nil { + errs = append(errs, err) + continue + } + + opath := filepath.Join(odir, file.Name()) + if err := ioutil.WriteFile(opath, rraw, 0666); err != nil { + errs = append(errs, err) + continue + } + } + + agg := utilerrors.NewAggregate(errs) + if agg != nil { + return fmt.Errorf("error rendering manifests: %v", agg.Error()) + } + return nil +} + +type manifestRenderConfig struct { + ReleaseImage string +} + +// renderManifest Executes go text template from `manifestBytes` with `config`. 
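[Editor's note: illustrative sketch, not part of the patch.] renderManifest, defined next, simply runs text/template over the manifest bytes with manifestRenderConfig as the data, so any {{.ReleaseImage}} reference is replaced with the configured release image. A self-contained reproduction of that substitution (the YAML manifest and image name are made up for illustration):

// Illustrative sketch only (not vendored code): the substitution renderManifest performs.
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

func main() {
	manifest := []byte(`apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-operator # hypothetical manifest
spec:
  template:
    spec:
      containers:
      - name: operator
        image: {{.ReleaseImage}}
`)

	tmpl := template.Must(template.New("manifest").Parse(string(manifest)))
	buf := new(bytes.Buffer)
	if err := tmpl.Execute(buf, struct{ ReleaseImage string }{"registry.example.com/release:latest"}); err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // the image: line now carries the release image
}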
+func renderManifest(config manifestRenderConfig, manifestBytes []byte) ([]byte, error) { + tmpl, err := template.New("manifest").Parse(string(manifestBytes)) + if err != nil { + return nil, fmt.Errorf("failed to parse manifest: %v", err) + } + + buf := new(bytes.Buffer) + if err := tmpl.Execute(buf, config); err != nil { + return nil, fmt.Errorf("failed to execute template: %v", err) + } + + return buf.Bytes(), nil +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/status.go b/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/status.go new file mode 100644 index 0000000000..c10d15ee80 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/status.go @@ -0,0 +1,131 @@ +package cvo + +import ( + "fmt" + + "github.com/google/uuid" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/openshift/cluster-version-operator/lib/resourceapply" + "github.com/openshift/cluster-version-operator/lib/resourcemerge" + cvv1 "github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1" + osv1 "github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1" + "github.com/openshift/cluster-version-operator/pkg/cincinnati" + "github.com/openshift/cluster-version-operator/pkg/version" +) + +func (optr *Operator) syncProgressingStatus(config *cvv1.CVOConfig) error { + var cvoUpdates []cvv1.Update + if updates, err := checkForUpdate(*config); err == nil { + for _, update := range updates { + cvoUpdates = append(cvoUpdates, cvv1.Update{ + Version: update.Version.String(), + Payload: update.Payload, + }) + } + } + + status := &osv1.ClusterOperator{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: optr.namespace, + Name: optr.name, + }, + Status: osv1.ClusterOperatorStatus{ + Version: version.Raw, + Extension: runtime.RawExtension{ + Raw: nil, + Object: &cvv1.CVOStatus{ + AvailableUpdates: cvoUpdates, + }, + }, + }, + } + resourcemerge.SetOperatorStatusCondition(&status.Status.Conditions, osv1.ClusterOperatorStatusCondition{Type: osv1.OperatorAvailable, Status: osv1.ConditionFalse}) + resourcemerge.SetOperatorStatusCondition(&status.Status.Conditions, osv1.ClusterOperatorStatusCondition{ + Type: osv1.OperatorProgressing, Status: osv1.ConditionTrue, + Message: fmt.Sprintf("Working towards %s", config), + }) + resourcemerge.SetOperatorStatusCondition(&status.Status.Conditions, osv1.ClusterOperatorStatusCondition{Type: osv1.OperatorFailing, Status: osv1.ConditionFalse}) + + _, _, err := resourceapply.ApplyOperatorStatusFromCache(optr.clusterOperatorLister, optr.client.OperatorstatusV1(), status) + return err +} + +func (optr *Operator) syncAvailableStatus(config *cvv1.CVOConfig) error { + var cvoUpdates []cvv1.Update + if updates, err := checkForUpdate(*config); err == nil { + for _, update := range updates { + cvoUpdates = append(cvoUpdates, cvv1.Update{ + Version: update.Version.String(), + Payload: update.Payload, + }) + } + } + + status := &osv1.ClusterOperator{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: optr.namespace, + Name: optr.name, + }, + Status: osv1.ClusterOperatorStatus{ + Version: version.Raw, + Extension: runtime.RawExtension{ + Raw: nil, + Object: &cvv1.CVOStatus{ + AvailableUpdates: cvoUpdates, + }, + }, + }, + } + resourcemerge.SetOperatorStatusCondition(&status.Status.Conditions, osv1.ClusterOperatorStatusCondition{ + Type: osv1.OperatorAvailable, Status: osv1.ConditionTrue, + Message: fmt.Sprintf("Done applying %s", config), + }) + 
resourcemerge.SetOperatorStatusCondition(&status.Status.Conditions, osv1.ClusterOperatorStatusCondition{Type: osv1.OperatorProgressing, Status: osv1.ConditionFalse}) + resourcemerge.SetOperatorStatusCondition(&status.Status.Conditions, osv1.ClusterOperatorStatusCondition{Type: osv1.OperatorFailing, Status: osv1.ConditionFalse}) + + _, _, err := resourceapply.ApplyOperatorStatusFromCache(optr.clusterOperatorLister, optr.client.OperatorstatusV1(), status) + return err +} + +// syncDegradedStatus updates the OperatorStatus to Degraded. +// if ierr is nil, return nil +// if ierr is not nil, update OperatorStatus as Degraded and return ierr +func (optr *Operator) syncDegradedStatus(ierr error) error { + if ierr == nil { + return nil + } + + status := &osv1.ClusterOperator{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: optr.namespace, + Name: optr.name, + }, + Status: osv1.ClusterOperatorStatus{ + Version: version.Raw, + Extension: runtime.RawExtension{}, + }, + } + resourcemerge.SetOperatorStatusCondition(&status.Status.Conditions, osv1.ClusterOperatorStatusCondition{Type: osv1.OperatorAvailable, Status: osv1.ConditionFalse}) + resourcemerge.SetOperatorStatusCondition(&status.Status.Conditions, osv1.ClusterOperatorStatusCondition{Type: osv1.OperatorProgressing, Status: osv1.ConditionFalse}) + resourcemerge.SetOperatorStatusCondition(&status.Status.Conditions, osv1.ClusterOperatorStatusCondition{ + Type: osv1.OperatorFailing, Status: osv1.ConditionTrue, + Message: fmt.Sprintf("error syncing: %v", ierr), + }) + + _, _, err := resourceapply.ApplyOperatorStatusFromCache(optr.clusterOperatorLister, optr.client.OperatorstatusV1(), status) + if err != nil { + return err + } + return ierr +} + +func checkForUpdate(config cvv1.CVOConfig) ([]cincinnati.Update, error) { + uuid, err := uuid.Parse(string(config.ClusterID)) + if err != nil { + return nil, err + } + return cincinnati.NewClient(uuid).GetUpdates(string(config.Upstream), config.Channel, version.Version) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/sync.go b/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/sync.go new file mode 100644 index 0000000000..dfbf68ddfa --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/sync.go @@ -0,0 +1,162 @@ +package cvo + +import ( + "fmt" + "time" + + "github.com/golang/glog" + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + + "github.com/openshift/cluster-version-operator/lib" + "github.com/openshift/cluster-version-operator/lib/resourceapply" + "github.com/openshift/cluster-version-operator/lib/resourcebuilder" + "github.com/openshift/cluster-version-operator/pkg/apis" + cvv1 "github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1" + "github.com/openshift/cluster-version-operator/pkg/cvo/internal" +) + +func (optr *Operator) syncUpdatePayload(config *cvv1.CVOConfig, payload *updatePayload) error { + for _, manifest := range payload.manifests { + taskName := fmt.Sprintf("(%s) %s/%s", manifest.GVK.String(), manifest.Object().GetNamespace(), manifest.Object().GetName()) + glog.V(4).Infof("Running sync for %s", taskName) + glog.V(6).Infof("Manifest: %s", string(manifest.Raw)) + + ov, ok := getOverrideForManifest(config.Overrides, manifest) + if ok && ov.Unmanaged { + glog.V(4).Infof("Skipping %s as unmanaged", taskName) + continue + } + + if err := 
wait.ExponentialBackoff(wait.Backoff{ + Duration: time.Second * 10, + Factor: 1.3, + Steps: 3, + }, func() (bool, error) { + // build resource builder for manifest + var b resourcebuilder.Interface + var err error + if resourcebuilder.Mapper.Exists(manifest.GVK) { + b, err = resourcebuilder.New(resourcebuilder.Mapper, optr.restConfig, manifest) + } else { + b, err = internal.NewGenericBuilder(optr.restConfig, manifest) + } + if err != nil { + glog.Errorf("error creating resourcebuilder for %s: %v", taskName, err) + return false, nil + } + // run builder for the manifest + if err := b.Do(); err != nil { + glog.Errorf("error running apply for %s: %v", taskName, err) + return false, nil + } + return true, nil + }); err != nil { + return fmt.Errorf("timed out trying to apply %s", taskName) + } + + glog.V(4).Infof("Done syncing for %s", taskName) + } + return nil +} + +// getOverrideForManifest returns the override and true when override exists for manifest. +func getOverrideForManifest(overrides []cvv1.ComponentOverride, manifest lib.Manifest) (cvv1.ComponentOverride, bool) { + for idx, ov := range overrides { + kind, namespace, name := manifest.GVK.Kind, manifest.Object().GetNamespace(), manifest.Object().GetName() + if ov.Kind == kind && + (namespace == "" || ov.Namespace == namespace) && // cluster-scoped objects don't have namespace. + ov.Name == name { + return overrides[idx], true + } + } + return cvv1.ComponentOverride{}, false +} + +func ownerRefModifier(config *cvv1.CVOConfig) resourcebuilder.MetaV1ObjectModifierFunc { + oref := metav1.NewControllerRef(config, ownerKind) + return func(obj metav1.Object) { + obj.SetOwnerReferences([]metav1.OwnerReference{*oref}) + } +} + +func (optr *Operator) syncCustomResourceDefinitions() error { + crds := []*apiextv1beta1.CustomResourceDefinition{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("operatorstatuses.%s", apis.OperatorStatusGroupName), + Namespace: metav1.NamespaceDefault, + }, + Spec: apiextv1beta1.CustomResourceDefinitionSpec{ + Group: apis.OperatorStatusGroupName, + Version: "v1", + Scope: "Namespaced", + Names: apiextv1beta1.CustomResourceDefinitionNames{ + Plural: "operatorstatuses", + Singular: "operatorstatus", + Kind: "OperatorStatus", + ListKind: "OperatorStatusList", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("clusteroperators.%s", apis.OperatorStatusGroupName), + Namespace: metav1.NamespaceDefault, + }, + Spec: apiextv1beta1.CustomResourceDefinitionSpec{ + Group: apis.OperatorStatusGroupName, + Version: "v1", + Scope: "Namespaced", + Names: apiextv1beta1.CustomResourceDefinitionNames{ + Plural: "clusteroperators", + Singular: "clusteroperator", + Kind: "ClusterOperator", + ListKind: "ClusterOperatorList", + }, + }, + }, + } + + for _, crd := range crds { + _, updated, err := resourceapply.ApplyCustomResourceDefinitionFromCache(optr.crdLister, optr.apiExtClient.ApiextensionsV1beta1(), crd) + if err != nil { + return err + } + if updated { + if err := optr.waitForCustomResourceDefinition(crd); err != nil { + return err + } + } + } + return nil +} + +const ( + customResourceReadyInterval = time.Second + customResourceReadyTimeout = 1 * time.Minute +) + +func (optr *Operator) waitForCustomResourceDefinition(resource *apiextv1beta1.CustomResourceDefinition) error { + return wait.Poll(customResourceReadyInterval, customResourceReadyTimeout, func() (bool, error) { + crd, err := optr.crdLister.Get(resource.Name) + if errors.IsNotFound(err) { + // exit early to recreate the crd. 
+ return false, err + } + if err != nil { + glog.Errorf("error getting CustomResourceDefinition %s: %v", resource.Name, err) + return false, nil + } + + for _, condition := range crd.Status.Conditions { + if condition.Type == apiextv1beta1.Established && condition.Status == apiextv1beta1.ConditionTrue { + return true, nil + } + } + glog.V(4).Infof("CustomResourceDefinition %s is not ready. conditions: %v", crd.Name, crd.Status.Conditions) + return false, nil + }) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/updatepayload.go b/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/updatepayload.go new file mode 100644 index 0000000000..ff2f0371ec --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/cvo/updatepayload.go @@ -0,0 +1,272 @@ +package cvo + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + imagev1 "github.com/openshift/api/image/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + randutil "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/utils/pointer" + + "github.com/golang/glog" + "github.com/openshift/cluster-version-operator/lib" + "github.com/openshift/cluster-version-operator/lib/resourcebuilder" + "github.com/openshift/cluster-version-operator/lib/resourceread" + cvv1 "github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1" +) + +type updatePayload struct { + // XXX: cincinatti.json struct + + imageRef *imagev1.ImageStream + + manifests []lib.Manifest +} + +const ( + defaultUpdatePayloadDir = "/" + targetUpdatePayloadsDir = "/etc/cvo/updatepayloads" + + cvoManifestDir = "manifests" + releaseManifestDir = "release-manifests" + + cincinnatiJSONFile = "cincinnati.json" + imageReferencesFile = "image-references" +) + +func loadUpdatePayload(dir, releaseImage string) (*updatePayload, error) { + glog.V(4).Infof("Loading updatepayload from %q", dir) + if err := validateUpdatePayload(dir); err != nil { + return nil, err + } + var ( + cvoDir = filepath.Join(dir, cvoManifestDir) + releaseDir = filepath.Join(dir, releaseManifestDir) + ) + + // XXX: load cincinnatiJSONFile + cjf := filepath.Join(releaseDir, cincinnatiJSONFile) + // XXX: load imageReferencesFile + irf := filepath.Join(releaseDir, imageReferencesFile) + imageRefData, err := ioutil.ReadFile(irf) + if err != nil { + return nil, err + } + + imageRef := resourceread.ReadImageStreamV1OrDie(imageRefData) + mrc := manifestRenderConfig{ReleaseImage: releaseImage} + + var manifests []lib.Manifest + var errs []error + tasks := []struct { + idir string + preprocess func([]byte) ([]byte, error) + skipFiles sets.String + }{{ + idir: cvoDir, + preprocess: func(ib []byte) ([]byte, error) { return renderManifest(mrc, ib) }, + skipFiles: sets.NewString(), + }, { + idir: releaseDir, + preprocess: nil, + skipFiles: sets.NewString(cjf, irf), + }} + for _, task := range tasks { + files, err := ioutil.ReadDir(task.idir) + if err != nil { + return nil, err + } + + for _, file := range files { + if file.IsDir() { + continue + } + + p := filepath.Join(task.idir, file.Name()) + if task.skipFiles.Has(p) { + continue + } + + raw, err := ioutil.ReadFile(p) + if err != nil { + errs = append(errs, fmt.Errorf("error reading file %s: %v", file.Name(), err)) + continue + } + if task.preprocess != nil { + raw, err = task.preprocess(raw) + if err != nil { + errs = append(errs, fmt.Errorf("error 
running preprocess on %s: %v", file.Name(), err)) + continue + } + } + ms, err := lib.ParseManifests(bytes.NewReader(raw)) + if err != nil { + errs = append(errs, fmt.Errorf("error parsing %s: %v", file.Name(), err)) + continue + } + manifests = append(manifests, ms...) + } + } + + agg := utilerrors.NewAggregate(errs) + if agg != nil { + return nil, fmt.Errorf("error loading manifests from %s: %v", dir, agg.Error()) + } + return &updatePayload{ + imageRef: imageRef, + manifests: manifests, + }, nil +} + +func (optr *Operator) updatePayloadDir(config *cvv1.CVOConfig) (string, error) { + ret := defaultUpdatePayloadDir + tdir, err := optr.targetUpdatePayloadDir(config) + if err != nil { + return "", fmt.Errorf("error fetching targetUpdatePayloadDir: %v", err) + } + if len(tdir) > 0 { + ret = tdir + } + return ret, nil +} + +func (optr *Operator) targetUpdatePayloadDir(config *cvv1.CVOConfig) (string, error) { + if !isTargetSet(config.DesiredUpdate) { + return "", nil + } + + tdir := filepath.Join(targetUpdatePayloadsDir, config.DesiredUpdate.Version) + err := validateUpdatePayload(tdir) + if os.IsNotExist(err) { + // the dirs don't exist, try fetching the payload to tdir. + if err := optr.fetchUpdatePayloadToDir(tdir, config); err != nil { + return "", err + } + } + if err != nil { + return "", err + } + + // now that payload has been loaded check validation. + if err := validateUpdatePayload(tdir); err != nil { + return "", err + } + return tdir, nil +} + +func validateUpdatePayload(dir string) error { + // XXX: validate that cincinnati.json is correct + // validate image-references files is correct. + + // make sure cvo and release manifests dirs exist. + _, err := os.Stat(filepath.Join(dir, cvoManifestDir)) + if err != nil { + return err + } + releaseDir := filepath.Join(dir, releaseManifestDir) + _, err = os.Stat(releaseDir) + if err != nil { + return err + } + + // make sure image-references file exists in releaseDir + _, err = os.Stat(filepath.Join(releaseDir, imageReferencesFile)) + if err != nil { + return err + } + return nil +} + +func (optr *Operator) fetchUpdatePayloadToDir(dir string, config *cvv1.CVOConfig) error { + var ( + version = config.DesiredUpdate.Version + payload = config.DesiredUpdate.Payload + name = fmt.Sprintf("%s-%s-%s", optr.name, version, randutil.String(5)) + namespace = optr.namespace + deadline = pointer.Int64Ptr(2 * 60) + nodeSelectorKey = "node-role.kubernetes.io/master" + nodename = optr.nodename + cmd = []string{"/bin/sh"} + args = []string{"-c", copyPayloadCmd(dir)} + ) + + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: batchv1.JobSpec{ + ActiveDeadlineSeconds: deadline, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "payload", + Image: payload, + Command: cmd, + Args: args, + VolumeMounts: []corev1.VolumeMount{{ + MountPath: targetUpdatePayloadsDir, + Name: "payloads", + }}, + SecurityContext: &corev1.SecurityContext{ + Privileged: pointer.BoolPtr(true), + }, + }}, + Volumes: []corev1.Volume{{ + Name: "payloads", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: targetUpdatePayloadsDir, + }, + }, + }}, + NodeName: nodename, + NodeSelector: map[string]string{ + nodeSelectorKey: "", + }, + Tolerations: []corev1.Toleration{{ + Key: nodeSelectorKey, + }}, + RestartPolicy: corev1.RestartPolicyOnFailure, + }, + }, + }, + } + + _, err := optr.kubeClient.BatchV1().Jobs(job.Namespace).Create(job) + if err != nil { 
+ return err + } + return resourcebuilder.WaitForJobCompletion(optr.kubeClient.BatchV1(), job) +} + +// copyPayloadCmd returns command that copies cvo and release manifests from deafult location +// to the target dir. +// It is made up of 2 commands: +// `mkdir -p && mv ` +// `mkdir -p && mv ` +func copyPayloadCmd(tdir string) string { + var ( + fromCVOPath = filepath.Join(defaultUpdatePayloadDir, cvoManifestDir) + toCVOPath = filepath.Join(tdir, cvoManifestDir) + cvoCmd = fmt.Sprintf("mkdir -p %s && mv %s %s", tdir, fromCVOPath, toCVOPath) + + fromReleasePath = filepath.Join(defaultUpdatePayloadDir, releaseManifestDir) + toReleasePath = filepath.Join(tdir, releaseManifestDir) + releaseCmd = fmt.Sprintf("mkdir -p %s && mv %s %s", tdir, fromReleasePath, toReleasePath) + ) + return fmt.Sprintf("%s && %s", cvoCmd, releaseCmd) +} + +func isTargetSet(desired cvv1.Update) bool { + return desired.Payload != "" && + desired.Version != "" +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/clientset.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/clientset.go new file mode 100644 index 0000000000..aa1c984bed --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/clientset.go @@ -0,0 +1,120 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + clusterversionv1 "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1" + operatorstatusv1 "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + ClusterversionV1() clusterversionv1.ClusterversionV1Interface + // Deprecated: please explicitly pick a version if possible. + Clusterversion() clusterversionv1.ClusterversionV1Interface + OperatorstatusV1() operatorstatusv1.OperatorstatusV1Interface + // Deprecated: please explicitly pick a version if possible. + Operatorstatus() operatorstatusv1.OperatorstatusV1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + clusterversionV1 *clusterversionv1.ClusterversionV1Client + operatorstatusV1 *operatorstatusv1.OperatorstatusV1Client +} + +// ClusterversionV1 retrieves the ClusterversionV1Client +func (c *Clientset) ClusterversionV1() clusterversionv1.ClusterversionV1Interface { + return c.clusterversionV1 +} + +// Deprecated: Clusterversion retrieves the default version of ClusterversionClient. +// Please explicitly pick a version. 
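[Editor's note: illustrative sketch, not part of the patch.] A worked example for copyPayloadCmd in updatepayload.go above: for a hypothetical desired version 1.0.0 the target directory is /etc/cvo/updatepayloads/1.0.0, and the command string is the two mkdir/mv pairs joined by "&&". Since the helper is unexported, this sketch re-derives the string instead of calling it:

// Illustrative sketch only (not vendored code): the shell command copyPayloadCmd above
// produces for the hypothetical target dir /etc/cvo/updatepayloads/1.0.0.
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	const (
		defaultUpdatePayloadDir = "/"
		cvoManifestDir          = "manifests"
		releaseManifestDir      = "release-manifests"
	)
	tdir := "/etc/cvo/updatepayloads/1.0.0" // hypothetical DesiredUpdate.Version

	cvoCmd := fmt.Sprintf("mkdir -p %s && mv %s %s", tdir,
		filepath.Join(defaultUpdatePayloadDir, cvoManifestDir), filepath.Join(tdir, cvoManifestDir))
	releaseCmd := fmt.Sprintf("mkdir -p %s && mv %s %s", tdir,
		filepath.Join(defaultUpdatePayloadDir, releaseManifestDir), filepath.Join(tdir, releaseManifestDir))

	// Prints:
	// mkdir -p /etc/cvo/updatepayloads/1.0.0 && mv /manifests /etc/cvo/updatepayloads/1.0.0/manifests && mkdir -p /etc/cvo/updatepayloads/1.0.0 && mv /release-manifests /etc/cvo/updatepayloads/1.0.0/release-manifests
	fmt.Println(fmt.Sprintf("%s && %s", cvoCmd, releaseCmd))
}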
+func (c *Clientset) Clusterversion() clusterversionv1.ClusterversionV1Interface { + return c.clusterversionV1 +} + +// OperatorstatusV1 retrieves the OperatorstatusV1Client +func (c *Clientset) OperatorstatusV1() operatorstatusv1.OperatorstatusV1Interface { + return c.operatorstatusV1 +} + +// Deprecated: Operatorstatus retrieves the default version of OperatorstatusClient. +// Please explicitly pick a version. +func (c *Clientset) Operatorstatus() operatorstatusv1.OperatorstatusV1Interface { + return c.operatorstatusV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.clusterversionV1, err = clusterversionv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.operatorstatusV1, err = operatorstatusv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.clusterversionV1 = clusterversionv1.NewForConfigOrDie(c) + cs.operatorstatusV1 = operatorstatusv1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.clusterversionV1 = clusterversionv1.New(c) + cs.operatorstatusV1 = operatorstatusv1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/doc.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/doc.go new file mode 100644 index 0000000000..41721ca52d --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. 
+package versioned diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 0000000000..6711c2983d --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + clientset "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned" + clusterversionv1 "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1" + fakeclusterversionv1 "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/fake" + operatorstatusv1 "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1" + fakeoperatorstatusv1 "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. 
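[Editor's note: illustrative sketch, not part of the patch.] NewSimpleClientset above is the usual entry point for unit tests: seed it with typed objects and exercise the generated clients against the in-memory tracker. A hypothetical test (namespace and resource names are made up):

// Illustrative sketch only (not vendored code): seeding the fake clientset in a test.
package fake_test

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	cvv1 "github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1"
	"github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/fake"
)

func TestSeededCVOConfig(t *testing.T) {
	seed := &cvv1.CVOConfig{
		ObjectMeta: metav1.ObjectMeta{Namespace: "openshift-cluster-version", Name: "cluster-version-operator"}, // hypothetical
		Channel:    "fast",
	}

	cs := fake.NewSimpleClientset(seed)

	got, err := cs.ClusterversionV1().CVOConfigs("openshift-cluster-version").Get("cluster-version-operator", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if got.Channel != "fast" {
		t.Fatalf("unexpected channel %q", got.Channel)
	}
}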
+type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +var _ clientset.Interface = &Clientset{} + +// ClusterversionV1 retrieves the ClusterversionV1Client +func (c *Clientset) ClusterversionV1() clusterversionv1.ClusterversionV1Interface { + return &fakeclusterversionv1.FakeClusterversionV1{Fake: &c.Fake} +} + +// Clusterversion retrieves the ClusterversionV1Client +func (c *Clientset) Clusterversion() clusterversionv1.ClusterversionV1Interface { + return &fakeclusterversionv1.FakeClusterversionV1{Fake: &c.Fake} +} + +// OperatorstatusV1 retrieves the OperatorstatusV1Client +func (c *Clientset) OperatorstatusV1() operatorstatusv1.OperatorstatusV1Interface { + return &fakeoperatorstatusv1.FakeOperatorstatusV1{Fake: &c.Fake} +} + +// Operatorstatus retrieves the OperatorstatusV1Client +func (c *Clientset) Operatorstatus() operatorstatusv1.OperatorstatusV1Interface { + return &fakeoperatorstatusv1.FakeOperatorstatusV1{Fake: &c.Fake} +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/fake/doc.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/fake/doc.go new file mode 100644 index 0000000000..9b99e71670 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/fake/register.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/fake/register.go new file mode 100644 index 0000000000..0d0ca9258a --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/fake/register.go @@ -0,0 +1,56 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + clusterversionv1 "github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1" + operatorstatusv1 "github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) +var parameterCodec = runtime.NewParameterCodec(scheme) + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + AddToScheme(scheme) +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +func AddToScheme(scheme *runtime.Scheme) { + clusterversionv1.AddToScheme(scheme) + operatorstatusv1.AddToScheme(scheme) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000..7dc3756168 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000..5eac8f0ea4 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/scheme/register.go @@ -0,0 +1,56 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. 
DO NOT EDIT. + +package scheme + +import ( + clusterversionv1 "github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1" + operatorstatusv1 "github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + AddToScheme(Scheme) +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +func AddToScheme(scheme *runtime.Scheme) { + clusterversionv1.AddToScheme(scheme) + operatorstatusv1.AddToScheme(scheme) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/clusterversion.openshift.io_client.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/clusterversion.openshift.io_client.go new file mode 100644 index 0000000000..688c49229e --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/clusterversion.openshift.io_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1" + "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/scheme" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" +) + +type ClusterversionV1Interface interface { + RESTClient() rest.Interface + CVOConfigsGetter +} + +// ClusterversionV1Client is used to interact with features provided by the clusterversion.openshift.io group. +type ClusterversionV1Client struct { + restClient rest.Interface +} + +func (c *ClusterversionV1Client) CVOConfigs(namespace string) CVOConfigInterface { + return newCVOConfigs(c, namespace) +} + +// NewForConfig creates a new ClusterversionV1Client for the given config. 
+func NewForConfig(c *rest.Config) (*ClusterversionV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &ClusterversionV1Client{client}, nil +} + +// NewForConfigOrDie creates a new ClusterversionV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ClusterversionV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ClusterversionV1Client for the given RESTClient. +func New(c rest.Interface) *ClusterversionV1Client { + return &ClusterversionV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ClusterversionV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/cvoconfig.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/cvoconfig.go new file mode 100644 index 0000000000..09341398dd --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/cvoconfig.go @@ -0,0 +1,157 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1" + scheme "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CVOConfigsGetter has a method to return a CVOConfigInterface. +// A group's client should implement this interface. +type CVOConfigsGetter interface { + CVOConfigs(namespace string) CVOConfigInterface +} + +// CVOConfigInterface has methods to work with CVOConfig resources. 
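[Editor's note: illustrative sketch, not part of the patch.] A get/modify/update round trip using the typed client constructed by NewForConfig above and the CVOConfig methods defined below; the kubeconfig path, namespace, object name, and channel value are hypothetical.

// Illustrative sketch only (not vendored code): switching the update channel via the typed client.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	clusterversionv1 "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client, err := clusterversionv1.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	cvoConfigs := client.CVOConfigs("openshift-cluster-version")
	config, err := cvoConfigs.Get("cluster-version-operator", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}

	config.Channel = "stable" // illustrative channel value
	if _, err := cvoConfigs.Update(config); err != nil {
		panic(err)
	}
	fmt.Println("channel updated")
}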
+type CVOConfigInterface interface { + Create(*v1.CVOConfig) (*v1.CVOConfig, error) + Update(*v1.CVOConfig) (*v1.CVOConfig, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.CVOConfig, error) + List(opts metav1.ListOptions) (*v1.CVOConfigList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.CVOConfig, err error) + CVOConfigExpansion +} + +// cVOConfigs implements CVOConfigInterface +type cVOConfigs struct { + client rest.Interface + ns string +} + +// newCVOConfigs returns a CVOConfigs +func newCVOConfigs(c *ClusterversionV1Client, namespace string) *cVOConfigs { + return &cVOConfigs{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cVOConfig, and returns the corresponding cVOConfig object, and an error if there is any. +func (c *cVOConfigs) Get(name string, options metav1.GetOptions) (result *v1.CVOConfig, err error) { + result = &v1.CVOConfig{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cvoconfigs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CVOConfigs that match those selectors. +func (c *cVOConfigs) List(opts metav1.ListOptions) (result *v1.CVOConfigList, err error) { + result = &v1.CVOConfigList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cvoconfigs"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cVOConfigs. +func (c *cVOConfigs) Watch(opts metav1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cvoconfigs"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a cVOConfig and creates it. Returns the server's representation of the cVOConfig, and an error, if there is any. +func (c *cVOConfigs) Create(cVOConfig *v1.CVOConfig) (result *v1.CVOConfig, err error) { + result = &v1.CVOConfig{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cvoconfigs"). + Body(cVOConfig). + Do(). + Into(result) + return +} + +// Update takes the representation of a cVOConfig and updates it. Returns the server's representation of the cVOConfig, and an error, if there is any. +func (c *cVOConfigs) Update(cVOConfig *v1.CVOConfig) (result *v1.CVOConfig, err error) { + result = &v1.CVOConfig{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cvoconfigs"). + Name(cVOConfig.Name). + Body(cVOConfig). + Do(). + Into(result) + return +} + +// Delete takes name of the cVOConfig and deletes it. Returns an error if one occurs. +func (c *cVOConfigs) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cvoconfigs"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cVOConfigs) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cvoconfigs"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched cVOConfig. 
+func (c *cVOConfigs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.CVOConfig, err error) { + result = &v1.CVOConfig{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cvoconfigs"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/doc.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/doc.go new file mode 100644 index 0000000000..3af5d054f1 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/fake/doc.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/fake/doc.go new file mode 100644 index 0000000000..16f4439906 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/fake/fake_clusterversion.openshift.io_client.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/fake/fake_clusterversion.openshift.io_client.go new file mode 100644 index 0000000000..19be76a048 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/fake/fake_clusterversion.openshift.io_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeClusterversionV1 struct { + *testing.Fake +} + +func (c *FakeClusterversionV1) CVOConfigs(namespace string) v1.CVOConfigInterface { + return &FakeCVOConfigs{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeClusterversionV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/fake/fake_cvoconfig.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/fake/fake_cvoconfig.go new file mode 100644 index 0000000000..fb2c49114d --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/fake/fake_cvoconfig.go @@ -0,0 +1,128 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + clusterversionopenshiftiov1 "github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCVOConfigs implements CVOConfigInterface +type FakeCVOConfigs struct { + Fake *FakeClusterversionV1 + ns string +} + +var cvoconfigsResource = schema.GroupVersionResource{Group: "clusterversion.openshift.io", Version: "v1", Resource: "cvoconfigs"} + +var cvoconfigsKind = schema.GroupVersionKind{Group: "clusterversion.openshift.io", Version: "v1", Kind: "CVOConfig"} + +// Get takes name of the cVOConfig, and returns the corresponding cVOConfig object, and an error if there is any. +func (c *FakeCVOConfigs) Get(name string, options v1.GetOptions) (result *clusterversionopenshiftiov1.CVOConfig, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewGetAction(cvoconfigsResource, c.ns, name), &clusterversionopenshiftiov1.CVOConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*clusterversionopenshiftiov1.CVOConfig), err +} + +// List takes label and field selectors, and returns the list of CVOConfigs that match those selectors. +func (c *FakeCVOConfigs) List(opts v1.ListOptions) (result *clusterversionopenshiftiov1.CVOConfigList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(cvoconfigsResource, cvoconfigsKind, c.ns, opts), &clusterversionopenshiftiov1.CVOConfigList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &clusterversionopenshiftiov1.CVOConfigList{ListMeta: obj.(*clusterversionopenshiftiov1.CVOConfigList).ListMeta} + for _, item := range obj.(*clusterversionopenshiftiov1.CVOConfigList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested cVOConfigs. +func (c *FakeCVOConfigs) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(cvoconfigsResource, c.ns, opts)) + +} + +// Create takes the representation of a cVOConfig and creates it. Returns the server's representation of the cVOConfig, and an error, if there is any. +func (c *FakeCVOConfigs) Create(cVOConfig *clusterversionopenshiftiov1.CVOConfig) (result *clusterversionopenshiftiov1.CVOConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(cvoconfigsResource, c.ns, cVOConfig), &clusterversionopenshiftiov1.CVOConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*clusterversionopenshiftiov1.CVOConfig), err +} + +// Update takes the representation of a cVOConfig and updates it. Returns the server's representation of the cVOConfig, and an error, if there is any. +func (c *FakeCVOConfigs) Update(cVOConfig *clusterversionopenshiftiov1.CVOConfig) (result *clusterversionopenshiftiov1.CVOConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(cvoconfigsResource, c.ns, cVOConfig), &clusterversionopenshiftiov1.CVOConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*clusterversionopenshiftiov1.CVOConfig), err +} + +// Delete takes name of the cVOConfig and deletes it. Returns an error if one occurs. +func (c *FakeCVOConfigs) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(cvoconfigsResource, c.ns, name), &clusterversionopenshiftiov1.CVOConfig{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCVOConfigs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(cvoconfigsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &clusterversionopenshiftiov1.CVOConfigList{}) + return err +} + +// Patch applies the patch and returns the patched cVOConfig. +func (c *FakeCVOConfigs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *clusterversionopenshiftiov1.CVOConfig, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(cvoconfigsResource, c.ns, name, data, subresources...), &clusterversionopenshiftiov1.CVOConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*clusterversionopenshiftiov1.CVOConfig), err +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/generated_expansion.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/generated_expansion.go new file mode 100644 index 0000000000..2bfe462f8c --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/clusterversion.openshift.io/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type CVOConfigExpansion interface{} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/clusteroperator.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/clusteroperator.go new file mode 100644 index 0000000000..5694c3f747 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/clusteroperator.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1" + scheme "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ClusterOperatorsGetter has a method to return a ClusterOperatorInterface. +// A group's client should implement this interface. +type ClusterOperatorsGetter interface { + ClusterOperators(namespace string) ClusterOperatorInterface +} + +// ClusterOperatorInterface has methods to work with ClusterOperator resources. 
+type ClusterOperatorInterface interface { + Create(*v1.ClusterOperator) (*v1.ClusterOperator, error) + Update(*v1.ClusterOperator) (*v1.ClusterOperator, error) + UpdateStatus(*v1.ClusterOperator) (*v1.ClusterOperator, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.ClusterOperator, error) + List(opts metav1.ListOptions) (*v1.ClusterOperatorList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterOperator, err error) + ClusterOperatorExpansion +} + +// clusterOperators implements ClusterOperatorInterface +type clusterOperators struct { + client rest.Interface + ns string +} + +// newClusterOperators returns a ClusterOperators +func newClusterOperators(c *OperatorstatusV1Client, namespace string) *clusterOperators { + return &clusterOperators{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the clusterOperator, and returns the corresponding clusterOperator object, and an error if there is any. +func (c *clusterOperators) Get(name string, options metav1.GetOptions) (result *v1.ClusterOperator, err error) { + result = &v1.ClusterOperator{} + err = c.client.Get(). + Namespace(c.ns). + Resource("clusteroperators"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterOperators that match those selectors. +func (c *clusterOperators) List(opts metav1.ListOptions) (result *v1.ClusterOperatorList, err error) { + result = &v1.ClusterOperatorList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("clusteroperators"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterOperators. +func (c *clusterOperators) Watch(opts metav1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("clusteroperators"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a clusterOperator and creates it. Returns the server's representation of the clusterOperator, and an error, if there is any. +func (c *clusterOperators) Create(clusterOperator *v1.ClusterOperator) (result *v1.ClusterOperator, err error) { + result = &v1.ClusterOperator{} + err = c.client.Post(). + Namespace(c.ns). + Resource("clusteroperators"). + Body(clusterOperator). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterOperator and updates it. Returns the server's representation of the clusterOperator, and an error, if there is any. +func (c *clusterOperators) Update(clusterOperator *v1.ClusterOperator) (result *v1.ClusterOperator, err error) { + result = &v1.ClusterOperator{} + err = c.client.Put(). + Namespace(c.ns). + Resource("clusteroperators"). + Name(clusterOperator.Name). + Body(clusterOperator). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *clusterOperators) UpdateStatus(clusterOperator *v1.ClusterOperator) (result *v1.ClusterOperator, err error) { + result = &v1.ClusterOperator{} + err = c.client.Put(). + Namespace(c.ns). 
+ Resource("clusteroperators"). + Name(clusterOperator.Name). + SubResource("status"). + Body(clusterOperator). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterOperator and deletes it. Returns an error if one occurs. +func (c *clusterOperators) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("clusteroperators"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterOperators) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("clusteroperators"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched clusterOperator. +func (c *clusterOperators) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterOperator, err error) { + result = &v1.ClusterOperator{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("clusteroperators"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/doc.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/doc.go new file mode 100644 index 0000000000..3af5d054f1 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/fake/doc.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/fake/doc.go new file mode 100644 index 0000000000..16f4439906 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. 
DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/fake/fake_clusteroperator.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/fake/fake_clusteroperator.go new file mode 100644 index 0000000000..4f3079510f --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/fake/fake_clusteroperator.go @@ -0,0 +1,140 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + operatorstatusopenshiftiov1 "github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeClusterOperators implements ClusterOperatorInterface +type FakeClusterOperators struct { + Fake *FakeOperatorstatusV1 + ns string +} + +var clusteroperatorsResource = schema.GroupVersionResource{Group: "operatorstatus.openshift.io", Version: "v1", Resource: "clusteroperators"} + +var clusteroperatorsKind = schema.GroupVersionKind{Group: "operatorstatus.openshift.io", Version: "v1", Kind: "ClusterOperator"} + +// Get takes name of the clusterOperator, and returns the corresponding clusterOperator object, and an error if there is any. +func (c *FakeClusterOperators) Get(name string, options v1.GetOptions) (result *operatorstatusopenshiftiov1.ClusterOperator, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(clusteroperatorsResource, c.ns, name), &operatorstatusopenshiftiov1.ClusterOperator{}) + + if obj == nil { + return nil, err + } + return obj.(*operatorstatusopenshiftiov1.ClusterOperator), err +} + +// List takes label and field selectors, and returns the list of ClusterOperators that match those selectors. +func (c *FakeClusterOperators) List(opts v1.ListOptions) (result *operatorstatusopenshiftiov1.ClusterOperatorList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(clusteroperatorsResource, clusteroperatorsKind, c.ns, opts), &operatorstatusopenshiftiov1.ClusterOperatorList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operatorstatusopenshiftiov1.ClusterOperatorList{ListMeta: obj.(*operatorstatusopenshiftiov1.ClusterOperatorList).ListMeta} + for _, item := range obj.(*operatorstatusopenshiftiov1.ClusterOperatorList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterOperators. +func (c *FakeClusterOperators) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(clusteroperatorsResource, c.ns, opts)) + +} + +// Create takes the representation of a clusterOperator and creates it. Returns the server's representation of the clusterOperator, and an error, if there is any. +func (c *FakeClusterOperators) Create(clusterOperator *operatorstatusopenshiftiov1.ClusterOperator) (result *operatorstatusopenshiftiov1.ClusterOperator, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(clusteroperatorsResource, c.ns, clusterOperator), &operatorstatusopenshiftiov1.ClusterOperator{}) + + if obj == nil { + return nil, err + } + return obj.(*operatorstatusopenshiftiov1.ClusterOperator), err +} + +// Update takes the representation of a clusterOperator and updates it. Returns the server's representation of the clusterOperator, and an error, if there is any. +func (c *FakeClusterOperators) Update(clusterOperator *operatorstatusopenshiftiov1.ClusterOperator) (result *operatorstatusopenshiftiov1.ClusterOperator, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(clusteroperatorsResource, c.ns, clusterOperator), &operatorstatusopenshiftiov1.ClusterOperator{}) + + if obj == nil { + return nil, err + } + return obj.(*operatorstatusopenshiftiov1.ClusterOperator), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeClusterOperators) UpdateStatus(clusterOperator *operatorstatusopenshiftiov1.ClusterOperator) (*operatorstatusopenshiftiov1.ClusterOperator, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(clusteroperatorsResource, "status", c.ns, clusterOperator), &operatorstatusopenshiftiov1.ClusterOperator{}) + + if obj == nil { + return nil, err + } + return obj.(*operatorstatusopenshiftiov1.ClusterOperator), err +} + +// Delete takes name of the clusterOperator and deletes it. Returns an error if one occurs. +func (c *FakeClusterOperators) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(clusteroperatorsResource, c.ns, name), &operatorstatusopenshiftiov1.ClusterOperator{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeClusterOperators) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(clusteroperatorsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &operatorstatusopenshiftiov1.ClusterOperatorList{}) + return err +} + +// Patch applies the patch and returns the patched clusterOperator. 
+func (c *FakeClusterOperators) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *operatorstatusopenshiftiov1.ClusterOperator, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(clusteroperatorsResource, c.ns, name, data, subresources...), &operatorstatusopenshiftiov1.ClusterOperator{}) + + if obj == nil { + return nil, err + } + return obj.(*operatorstatusopenshiftiov1.ClusterOperator), err +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/fake/fake_operatorstatus.openshift.io_client.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/fake/fake_operatorstatus.openshift.io_client.go new file mode 100644 index 0000000000..835fcddc6d --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/fake/fake_operatorstatus.openshift.io_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeOperatorstatusV1 struct { + *testing.Fake +} + +func (c *FakeOperatorstatusV1) ClusterOperators(namespace string) v1.ClusterOperatorInterface { + return &FakeClusterOperators{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeOperatorstatusV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/generated_expansion.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/generated_expansion.go new file mode 100644 index 0000000000..468407f3f9 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +type ClusterOperatorExpansion interface{} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/operatorstatus.openshift.io_client.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/operatorstatus.openshift.io_client.go new file mode 100644 index 0000000000..c6bba1f4bf --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/typed/operatorstatus.openshift.io/v1/operatorstatus.openshift.io_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1" + "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned/scheme" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" +) + +type OperatorstatusV1Interface interface { + RESTClient() rest.Interface + ClusterOperatorsGetter +} + +// OperatorstatusV1Client is used to interact with features provided by the operatorstatus.openshift.io group. +type OperatorstatusV1Client struct { + restClient rest.Interface +} + +func (c *OperatorstatusV1Client) ClusterOperators(namespace string) ClusterOperatorInterface { + return newClusterOperators(c, namespace) +} + +// NewForConfig creates a new OperatorstatusV1Client for the given config. +func NewForConfig(c *rest.Config) (*OperatorstatusV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &OperatorstatusV1Client{client}, nil +} + +// NewForConfigOrDie creates a new OperatorstatusV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *OperatorstatusV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new OperatorstatusV1Client for the given RESTClient. +func New(c rest.Interface) *OperatorstatusV1Client { + return &OperatorstatusV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *OperatorstatusV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/clusterversion.openshift.io/interface.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/clusterversion.openshift.io/interface.go new file mode 100644 index 0000000000..53a8b57fa6 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/clusterversion.openshift.io/interface.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package clusterversion + +import ( + v1 "github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/clusterversion.openshift.io/v1" + internalinterfaces "github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/clusterversion.openshift.io/v1/cvoconfig.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/clusterversion.openshift.io/v1/cvoconfig.go new file mode 100644 index 0000000000..03cfe8b1a0 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/clusterversion.openshift.io/v1/cvoconfig.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + time "time" + + clusterversionopenshiftiov1 "github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1" + versioned "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned" + internalinterfaces "github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/cluster-version-operator/pkg/generated/listers/clusterversion.openshift.io/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// CVOConfigInformer provides access to a shared informer and lister for +// CVOConfigs. +type CVOConfigInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.CVOConfigLister +} + +type cVOConfigInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewCVOConfigInformer constructs a new informer for CVOConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCVOConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCVOConfigInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredCVOConfigInformer constructs a new informer for CVOConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredCVOConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ClusterversionV1().CVOConfigs(namespace).List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ClusterversionV1().CVOConfigs(namespace).Watch(options) + }, + }, + &clusterversionopenshiftiov1.CVOConfig{}, + resyncPeriod, + indexers, + ) +} + +func (f *cVOConfigInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCVOConfigInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *cVOConfigInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&clusterversionopenshiftiov1.CVOConfig{}, f.defaultInformer) +} + +func (f *cVOConfigInformer) Lister() v1.CVOConfigLister { + return v1.NewCVOConfigLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/clusterversion.openshift.io/v1/interface.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/clusterversion.openshift.io/v1/interface.go new file mode 100644 index 0000000000..a74d23fedd --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/clusterversion.openshift.io/v1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // CVOConfigs returns a CVOConfigInformer. + CVOConfigs() CVOConfigInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// CVOConfigs returns a CVOConfigInformer. 
+func (v *version) CVOConfigs() CVOConfigInformer { + return &cVOConfigInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/factory.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/factory.go new file mode 100644 index 0000000000..b68a1b306c --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/factory.go @@ -0,0 +1,186 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned" + clusterversionopenshiftio "github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/clusterversion.openshift.io" + internalinterfaces "github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/internalinterfaces" + operatorstatusopenshiftio "github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/operatorstatus.openshift.io" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. 
+func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. 
+type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Clusterversion() clusterversionopenshiftio.Interface + Operatorstatus() operatorstatusopenshiftio.Interface +} + +func (f *sharedInformerFactory) Clusterversion() clusterversionopenshiftio.Interface { + return clusterversionopenshiftio.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Operatorstatus() operatorstatusopenshiftio.Interface { + return operatorstatusopenshiftio.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/generic.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/generic.go new file mode 100644 index 0000000000..9430f3e8aa --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/generic.go @@ -0,0 +1,67 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + "fmt" + + v1 "github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1" + operatorstatusopenshiftiov1 "github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. 
+func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=clusterversion.openshift.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("cvoconfigs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Clusterversion().V1().CVOConfigs().Informer()}, nil + + // Group=operatorstatus.openshift.io, Version=v1 + case operatorstatusopenshiftiov1.SchemeGroupVersion.WithResource("clusteroperators"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Operatorstatus().V1().ClusterOperators().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000..17c6c9220b --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,38 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + versioned "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/operatorstatus.openshift.io/interface.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/operatorstatus.openshift.io/interface.go new file mode 100644 index 0000000000..b7437c8b6a --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/operatorstatus.openshift.io/interface.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package operatorstatus + +import ( + internalinterfaces "github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/operatorstatus.openshift.io/v1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/operatorstatus.openshift.io/v1/clusteroperator.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/operatorstatus.openshift.io/v1/clusteroperator.go new file mode 100644 index 0000000000..1f27e4aedc --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/operatorstatus.openshift.io/v1/clusteroperator.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + operatorstatusopenshiftiov1 "github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1" + versioned "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned" + internalinterfaces "github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/cluster-version-operator/pkg/generated/listers/operatorstatus.openshift.io/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterOperatorInformer provides access to a shared informer and lister for +// ClusterOperators. 
+type ClusterOperatorInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ClusterOperatorLister +} + +type clusterOperatorInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewClusterOperatorInformer constructs a new informer for ClusterOperator type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterOperatorInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterOperatorInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterOperatorInformer constructs a new informer for ClusterOperator type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredClusterOperatorInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.OperatorstatusV1().ClusterOperators(namespace).List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.OperatorstatusV1().ClusterOperators(namespace).Watch(options) + }, + }, + &operatorstatusopenshiftiov1.ClusterOperator{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterOperatorInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterOperatorInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterOperatorInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&operatorstatusopenshiftiov1.ClusterOperator{}, f.defaultInformer) +} + +func (f *clusterOperatorInformer) Lister() v1.ClusterOperatorLister { + return v1.NewClusterOperatorLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/operatorstatus.openshift.io/v1/interface.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/operatorstatus.openshift.io/v1/interface.go new file mode 100644 index 0000000000..ff746d7bbd --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/operatorstatus.openshift.io/v1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ClusterOperators returns a ClusterOperatorInformer. + ClusterOperators() ClusterOperatorInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ClusterOperators returns a ClusterOperatorInformer. +func (v *version) ClusterOperators() ClusterOperatorInformer { + return &clusterOperatorInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/listers/clusterversion.openshift.io/v1/cvoconfig.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/listers/clusterversion.openshift.io/v1/cvoconfig.go new file mode 100644 index 0000000000..2f4c2c49ab --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/listers/clusterversion.openshift.io/v1/cvoconfig.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/cluster-version-operator/pkg/apis/clusterversion.openshift.io/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CVOConfigLister helps list CVOConfigs. +type CVOConfigLister interface { + // List lists all CVOConfigs in the indexer. + List(selector labels.Selector) (ret []*v1.CVOConfig, err error) + // CVOConfigs returns an object that can list and get CVOConfigs. + CVOConfigs(namespace string) CVOConfigNamespaceLister + CVOConfigListerExpansion +} + +// cVOConfigLister implements the CVOConfigLister interface. +type cVOConfigLister struct { + indexer cache.Indexer +} + +// NewCVOConfigLister returns a new CVOConfigLister. +func NewCVOConfigLister(indexer cache.Indexer) CVOConfigLister { + return &cVOConfigLister{indexer: indexer} +} + +// List lists all CVOConfigs in the indexer. +func (s *cVOConfigLister) List(selector labels.Selector) (ret []*v1.CVOConfig, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.CVOConfig)) + }) + return ret, err +} + +// CVOConfigs returns an object that can list and get CVOConfigs. 
+func (s *cVOConfigLister) CVOConfigs(namespace string) CVOConfigNamespaceLister { + return cVOConfigNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// CVOConfigNamespaceLister helps list and get CVOConfigs. +type CVOConfigNamespaceLister interface { + // List lists all CVOConfigs in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.CVOConfig, err error) + // Get retrieves the CVOConfig from the indexer for a given namespace and name. + Get(name string) (*v1.CVOConfig, error) + CVOConfigNamespaceListerExpansion +} + +// cVOConfigNamespaceLister implements the CVOConfigNamespaceLister +// interface. +type cVOConfigNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all CVOConfigs in the indexer for a given namespace. +func (s cVOConfigNamespaceLister) List(selector labels.Selector) (ret []*v1.CVOConfig, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.CVOConfig)) + }) + return ret, err +} + +// Get retrieves the CVOConfig from the indexer for a given namespace and name. +func (s cVOConfigNamespaceLister) Get(name string) (*v1.CVOConfig, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("cvoconfig"), name) + } + return obj.(*v1.CVOConfig), nil +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/listers/clusterversion.openshift.io/v1/expansion_generated.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/listers/clusterversion.openshift.io/v1/expansion_generated.go new file mode 100644 index 0000000000..9d8ddf7b1a --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/listers/clusterversion.openshift.io/v1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// CVOConfigListerExpansion allows custom methods to be added to +// CVOConfigLister. +type CVOConfigListerExpansion interface{} + +// CVOConfigNamespaceListerExpansion allows custom methods to be added to +// CVOConfigNamespaceLister. +type CVOConfigNamespaceListerExpansion interface{} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/listers/operatorstatus.openshift.io/v1/clusteroperator.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/listers/operatorstatus.openshift.io/v1/clusteroperator.go new file mode 100644 index 0000000000..7595953829 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/listers/operatorstatus.openshift.io/v1/clusteroperator.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterOperatorLister helps list ClusterOperators. +type ClusterOperatorLister interface { + // List lists all ClusterOperators in the indexer. + List(selector labels.Selector) (ret []*v1.ClusterOperator, err error) + // ClusterOperators returns an object that can list and get ClusterOperators. + ClusterOperators(namespace string) ClusterOperatorNamespaceLister + ClusterOperatorListerExpansion +} + +// clusterOperatorLister implements the ClusterOperatorLister interface. +type clusterOperatorLister struct { + indexer cache.Indexer +} + +// NewClusterOperatorLister returns a new ClusterOperatorLister. +func NewClusterOperatorLister(indexer cache.Indexer) ClusterOperatorLister { + return &clusterOperatorLister{indexer: indexer} +} + +// List lists all ClusterOperators in the indexer. +func (s *clusterOperatorLister) List(selector labels.Selector) (ret []*v1.ClusterOperator, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ClusterOperator)) + }) + return ret, err +} + +// ClusterOperators returns an object that can list and get ClusterOperators. +func (s *clusterOperatorLister) ClusterOperators(namespace string) ClusterOperatorNamespaceLister { + return clusterOperatorNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ClusterOperatorNamespaceLister helps list and get ClusterOperators. +type ClusterOperatorNamespaceLister interface { + // List lists all ClusterOperators in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ClusterOperator, err error) + // Get retrieves the ClusterOperator from the indexer for a given namespace and name. + Get(name string) (*v1.ClusterOperator, error) + ClusterOperatorNamespaceListerExpansion +} + +// clusterOperatorNamespaceLister implements the ClusterOperatorNamespaceLister +// interface. +type clusterOperatorNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ClusterOperators in the indexer for a given namespace. +func (s clusterOperatorNamespaceLister) List(selector labels.Selector) (ret []*v1.ClusterOperator, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ClusterOperator)) + }) + return ret, err +} + +// Get retrieves the ClusterOperator from the indexer for a given namespace and name. 
+func (s clusterOperatorNamespaceLister) Get(name string) (*v1.ClusterOperator, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("clusteroperator"), name) + } + return obj.(*v1.ClusterOperator), nil +} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/generated/listers/operatorstatus.openshift.io/v1/expansion_generated.go b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/listers/operatorstatus.openshift.io/v1/expansion_generated.go new file mode 100644 index 0000000000..db80edbb8b --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/generated/listers/operatorstatus.openshift.io/v1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// ClusterOperatorListerExpansion allows custom methods to be added to +// ClusterOperatorLister. +type ClusterOperatorListerExpansion interface{} + +// ClusterOperatorNamespaceListerExpansion allows custom methods to be added to +// ClusterOperatorNamespaceLister. +type ClusterOperatorNamespaceListerExpansion interface{} diff --git a/vendor/github.com/openshift/cluster-version-operator/pkg/version/version.go b/vendor/github.com/openshift/cluster-version-operator/pkg/version/version.go new file mode 100644 index 0000000000..9b226a9426 --- /dev/null +++ b/vendor/github.com/openshift/cluster-version-operator/pkg/version/version.go @@ -0,0 +1,20 @@ +package version + +import ( + "fmt" + "strings" + + "github.com/blang/semver" +) + +var ( + // Raw is the string representation of the version. This will be replaced + // with the calculated version at build time. + Raw = "was not built properly" + + // Version is the semver representation of the version. + Version = semver.MustParse(strings.TrimLeft(Raw, "v")) + + // String is the human-friendly representation of the version. + String = fmt.Sprintf("ClusterVersionOperator %s", Raw) +) From c279137a49fa8b3a0fc7fd47a4dfaae4cc69fd5c Mon Sep 17 00:00:00 2001 From: Miciah Masters Date: Tue, 25 Sep 2018 19:20:35 -0400 Subject: [PATCH 2/2] Publish operator status Create (if necessary) and update a ClusterOperator object to indicate the operator's status. * cmd/cluster-ingress-operator/main.go: Add a watch on daemonsets in the application namespace to trigger reconciliation on status updates. * manifests/00-cluster-role.yaml: Allow the operator to create, get, and update clusteroperators. * pkg/manifests/bindata.go: Regenerate. * pkg/util/clusteroperator/status.go: Add new functions: SetStatusCondition and ConditionsEqual. * pkg/util/clusteroperator/status_test.go: Add tests. * pkg/stub/status.go: Add new functions: syncOperatorStatus, getOperatorState, and computeStatusConditions. Use getOperatorState in syncOperatorStatus to get the prerequisite state for computing the operator's status. 
Use computeStatusConditions in syncOperatorStatus to compute the status. Use clusteroperator.ConditionsEqual in syncOperatorStatus to determine whether an update is needed. Use clusteroperator.SetStatusCondition in computeStatusConditions to compute new conditions. * pkg/stub/handler.go: Use syncOperatorStatus in Handle to update ClusterOperator. * pkg/stub/handler_test.go: Add tests. --- cmd/cluster-ingress-operator/main.go | 3 + manifests/00-cluster-role.yaml | 9 + pkg/manifests/bindata.go | 8 +- pkg/stub/handler.go | 2 + pkg/stub/status.go | 196 ++++++++++++++++++++++ pkg/stub/status_test.go | 131 +++++++++++++++ pkg/util/clusteroperator/status.go | 68 ++++++++ pkg/util/clusteroperator/status_test.go | 213 ++++++++++++++++++++++++ 8 files changed, 626 insertions(+), 4 deletions(-) create mode 100644 pkg/stub/status.go create mode 100644 pkg/stub/status_test.go create mode 100644 pkg/util/clusteroperator/status.go create mode 100644 pkg/util/clusteroperator/status_test.go diff --git a/cmd/cluster-ingress-operator/main.go b/cmd/cluster-ingress-operator/main.go index f1ed164c27..ec16172eab 100644 --- a/cmd/cluster-ingress-operator/main.go +++ b/cmd/cluster-ingress-operator/main.go @@ -55,6 +55,9 @@ func main() { resyncPeriod := 10 * time.Minute logrus.Infof("Watching %s, %s, %s, %d", resource, kind, namespace, resyncPeriod) sdk.Watch(resource, kind, namespace, resyncPeriod) + // TODO Use a named constant for the router's namespace or get the + // namespace from config. + sdk.Watch("apps/v1", "DaemonSet", "openshift-ingress", resyncPeriod) sdk.Handle(handler) sdk.Run(context.TODO()) } diff --git a/manifests/00-cluster-role.yaml b/manifests/00-cluster-role.yaml index 118d3f099c..38b3b9ac1f 100644 --- a/manifests/00-cluster-role.yaml +++ b/manifests/00-cluster-role.yaml @@ -44,6 +44,15 @@ rules: - list - watch +- apiGroups: + - operatorstatus.openshift.io + resources: + - clusteroperators + verbs: + - create + - get + - update + # Mirrored from assets/router/cluster-role.yaml - apiGroups: - "" diff --git a/pkg/manifests/bindata.go b/pkg/manifests/bindata.go index 72b795ae7e..092c0919f4 100644 --- a/pkg/manifests/bindata.go +++ b/pkg/manifests/bindata.go @@ -7,7 +7,7 @@ // assets/router/namespace.yaml (243B) // assets/router/service-account.yaml (213B) // assets/router/service-cloud.yaml (558B) -// manifests/00-cluster-role.yaml (1.225kB) +// manifests/00-cluster-role.yaml (1.344kB) // manifests/00-custom-resource-definition.yaml (329B) // manifests/00-namespace.yaml (202B) // manifests/01-cluster-role-binding.yaml (369B) @@ -224,7 +224,7 @@ func assetsRouterServiceCloudYaml() (*asset, error) { return a, nil } -var _manifests00ClusterRoleYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x92\x31\x8f\xdb\x30\x0c\x85\x77\xff\x0a\x21\xb7\x15\x88\x83\x6e\x85\xd7\x0e\x9d\xba\x74\xe8\xce\xc8\xcf\x09\x1b\x5b\x14\x48\x2a\xd7\xf6\xd7\x17\x72\x9c\x16\x75\x92\x43\x8b\xc3\x4d\x26\x69\x8a\xfc\xf4\x9e\x9e\xc2\xc7\xb1\x98\x43\x83\xca\x88\x30\x88\x06\x3f\x22\x48\x86\x92\x8b\x06\x76\xc3\x38\xb4\xcd\x89\x53\xdf\x5d\x7b\xbf\xc8\x88\x86\x32\x7f\x85\x1a\x4b\xea\x82\xee\x29\xb6\x54\xfc\x28\xca\x3f\xc9\x59\x52\x7b\xfa\x60\x2d\xcb\xee\xfc\xbe\x99\xe0\xd4\x93\x53\xd7\x84\x90\x68\x42\x57\xa7\x27\x3b\xf2\xe0\x5b\x4e\x07\x85\xd9\xf6\xba\xaf\xd1\x32\xc2\xba\x66\x1b\x28\xf3\x27\x95\x92\xad\x1e\xdb\x86\xcd\xa6\x09\x41\x61\x52\x34\x62\xa9\x45\x49\x03\x1f\x26\xca\x36\xa7\x75\xb6\x65\x8a\xb8\xa4\x06\x3d\x73\x04\xc5\x28\x25\xf9\x5f\xb5\x9a\x9c\xa1\xfb\xeb\x1c\x05\x39\xe6\xf0\x00\x9f\xbf\x23\xdb\x25\x78\x26\x8f\xc7\x39\xea\x31\xc2\xd1\xdc\xa2\x51\x5e\x00\xf0\xdd\x91\xaa\x20\x76\xcb\xda\x13\x26\x49\x06\x5f\xed\xde\xbc\xdb\xdc\x19\xf9\x50\xcf\x3b\x22\x5c\x2c\xa9\xee\xd9\xba\xb0\xe7\xd4\x73\x3a\x3c\xbe\xee\xea\x9a\xb7\x20\x8b\x3f\xed\x6f\xc7\x5e\x62\x58\x9a\xd7\xf2\xae\x97\x3c\x85\xcf\xac\x2a\x8a\x3e\x0c\x2a\x53\x20\xab\xb2\xec\x54\x8a\x43\x77\xcb\xac\x6d\xe5\x6f\x7f\xd0\x34\xfe\xdb\x5b\x40\xea\xb3\xf0\x1d\x9f\x57\xef\xe2\x31\xd7\xab\xb1\xaa\x5f\x48\xce\xf1\x65\xc3\x5c\x4e\x48\x8a\x33\xe3\x79\x81\x2d\xfb\x6f\x88\x4e\x31\xc2\xec\xcf\x8f\x5b\xc7\x5e\x4d\x38\x77\xae\xbd\xbc\xaf\xe7\xdc\xfa\xc6\x82\xfd\x37\xce\xce\x9c\xbc\xac\xa8\x4a\xee\xab\x38\xbf\x02\x00\x00\xff\xff\x7e\x49\x4c\x32\xc9\x04\x00\x00") +var _manifests00ClusterRoleYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x52\xb1\x6e\xdb\x40\x0c\xdd\xf5\x15\x07\x67\x2b\x20\x19\xdd\x0a\xad\x1d\x3a\x75\xe9\xd0\x9d\x3e\x3d\xd9\xac\xa5\xa3\x40\xf2\x9c\xb6\x5f\x5f\x9c\x2c\xa7\x88\x6c\x27\x29\x82\x4e\x3a\x52\xbc\xc7\x77\xef\xbd\x87\xf0\x79\xc8\xe6\xd0\xa0\x32\x20\xf4\xa2\xc1\x0f\x08\x32\x41\xc9\x45\x03\xbb\x61\xe8\x9b\xea\xc8\xa9\x6b\x2f\xb3\xdf\x64\x40\x45\x13\x7f\x87\x1a\x4b\x6a\x83\xee\x28\x36\x94\xfd\x20\xca\xbf\xc9\x59\x52\x73\xfc\x64\x0d\xcb\xf6\xf4\xb1\x1a\xe1\xd4\x91\x53\x5b\x85\x90\x68\x44\x5b\xd0\x93\x1d\xb8\xf7\x9a\xd3\x5e\x61\x56\x5f\xf6\x55\x9a\x07\x58\x5b\xd5\x81\x26\xfe\xa2\x92\x27\x2b\xd7\xea\xb0\xd9\x54\x21\x28\x4c\xb2\x46\x2c\xbd\x28\xa9\xe7\xfd\x48\x93\xcd\x65\xc1\xb6\x89\x22\xce\xa5\x41\x4f\x1c\x41\x31\x4a\x4e\xfe\xac\x57\x8a\x13\x74\x77\xc1\x51\x90\x63\x3e\xee\xe1\xf3\x77\x60\x3b\x1f\x1e\xc9\xe3\x61\x3e\x75\x18\xe0\xa8\xae\xa9\xd1\xb4\x10\xc0\x4f\x47\x2a\x82\xd8\x35\xd7\x8e\x30\x4a\x32\xf8\x6a\xf7\xe6\xc3\xe6\x06\xe4\x5d\x3d\x6f\x88\x70\xb6\xa4\xb8\x67\xeb\xc6\x8e\x53\xc7\x69\x7f\xff\xb9\xab\x67\x5e\x13\x59\xfc\x69\x9e\x1c\x7b\x89\xc3\x32\xbc\x96\xf7\xd5\x25\x17\xf3\xcd\xc9\xf3\x1b\x77\x3d\xdd\x79\xd5\xca\x3c\x75\xa5\x55\x3d\x84\xaf\xac\x2a\x8a\x2e\xf4\x2a\x63\x20\x2b\x6e\x6c\x55\xb2\x43\xb7\x0b\x6c\x5d\x64\x6b\x7e\xd1\x38\xbc\x2d\x82\x48\xdd\x24\x7c\x23\x5e\xab\x38\xde\x97\xe3\xdd\xb4\x4a\x4c\x90\x9c\xe3\xcb\x39\x71\x39\x22\x29\x4e\x8c\xc7\x85\x6c\xde\xfd\x40\x74\x8a\x11\x66\x7f\x7f\x5c\x8b\xf9\x6e\x86\xf3\xe4\xda\xd6\xdb\x7a\xce\xa3\xff\x59\xb0\x7f\xa6\xb3\x3d\x07\xf3\x39\xab\x25\x56\x7f\x02\x00\x00\xff\xff\xa0\x04\x8c\x0b\x40\x05\x00\x00") func manifests00ClusterRoleYamlBytes() ([]byte, error) { return bindataRead( @@ -239,8 +239,8 @@ func manifests00ClusterRoleYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "manifests/00-cluster-role.yaml", size: 1225, mode: os.FileMode(420), modTime: time.Unix(1, 0)} - a := &asset{bytes: bytes, info: info, digest: 
[32]uint8{0x23, 0x72, 0x46, 0x20, 0xd2, 0x4, 0x79, 0xf9, 0x19, 0x6c, 0xa6, 0x95, 0xb1, 0xb8, 0x78, 0xc, 0xf0, 0x74, 0xdd, 0x94, 0x4d, 0xdc, 0xdb, 0x66, 0x35, 0x48, 0x6e, 0xaf, 0x5, 0x4d, 0xf9, 0xa3}} + info := bindataFileInfo{name: "manifests/00-cluster-role.yaml", size: 1344, mode: os.FileMode(420), modTime: time.Unix(1, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xc2, 0x35, 0xc7, 0xc5, 0x5, 0xe6, 0x8a, 0xa2, 0x85, 0xec, 0xde, 0x2, 0x4b, 0xfd, 0xd7, 0x9, 0xa0, 0x66, 0x8f, 0xac, 0x6c, 0x1c, 0xd0, 0x8c, 0xaa, 0xd3, 0x74, 0xd9, 0xf4, 0x2a, 0xdb}} return a, nil } diff --git a/pkg/stub/handler.go b/pkg/stub/handler.go index 7356afbc17..3fea32cc00 100644 --- a/pkg/stub/handler.go +++ b/pkg/stub/handler.go @@ -39,6 +39,8 @@ type Handler struct { } func (h *Handler) Handle(ctx context.Context, event sdk.Event) error { + defer h.syncOperatorStatus() + // TODO: This should be adding an item to a rate limited work queue, but for // now correctness is more important than performance. switch o := event.Object.(type) { diff --git a/pkg/stub/status.go b/pkg/stub/status.go new file mode 100644 index 0000000000..29cafd9638 --- /dev/null +++ b/pkg/stub/status.go @@ -0,0 +1,196 @@ +package stub + +import ( + "fmt" + "strings" + + "github.com/sirupsen/logrus" + + ingressv1alpha1 "github.com/openshift/cluster-ingress-operator/pkg/apis/ingress/v1alpha1" + "github.com/openshift/cluster-ingress-operator/pkg/util/clusteroperator" + operatorversion "github.com/openshift/cluster-ingress-operator/version" + osv1 "github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1" + + "github.com/operator-framework/operator-sdk/pkg/sdk" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// syncOperatorStatus computes the operator's current status and therefrom +// creates or updates the ClusterOperator resource for the operator. +func (h *Handler) syncOperatorStatus() { + co := &osv1.ClusterOperator{ + TypeMeta: metav1.TypeMeta{ + Kind: "ClusterOperator", + APIVersion: "operatorstatus.openshift.io/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: h.Namespace, + // TODO Use a named constant or get name from config. 
+ Name: "openshift-ingress", + }, + } + err := sdk.Get(co) + isNotFound := errors.IsNotFound(err) + if err != nil && !isNotFound { + logrus.Errorf("syncOperatorStatus: error getting ClusterOperator %s/%s: %v", + co.Namespace, co.Name, err) + + return + } + + ns, ingresses, daemonsets, err := h.getOperatorState() + if err != nil { + logrus.Errorf("syncOperatorStatus: getOperatorState: %v", err) + + return + } + + oldConditions := co.Status.Conditions + co.Status.Conditions = computeStatusConditions(oldConditions, ns, + ingresses, daemonsets) + + if isNotFound { + co.Status.Version = operatorversion.Version + + if err := sdk.Create(co); err != nil { + logrus.Errorf("syncOperatorStatus: failed to create ClusterOperator %s/%s: %v", + co.Namespace, co.Name, err) + } else { + logrus.Infof("syncOperatorStatus: created ClusterOperator %s/%s (UID %v)", + co.Namespace, co.Name, co.UID) + } + + return + } + + if clusteroperator.ConditionsEqual(oldConditions, co.Status.Conditions) { + return + } + + if err := sdk.Update(co); err != nil { + logrus.Errorf("syncOperatorStatus: failed to update status of ClusterOperator %s/%s: %v", + co.Namespace, co.Name, err) + } +} + +// getOperatorState gets and returns the resources necessary to compute the +// operator's current state. +func (h *Handler) getOperatorState() (*corev1.Namespace, []ingressv1alpha1.ClusterIngress, []appsv1.DaemonSet, error) { + ns, err := h.ManifestFactory.RouterNamespace() + if err != nil { + return nil, nil, nil, fmt.Errorf( + "error building router namespace: %v", err) + } + + if err := sdk.Get(ns); err != nil { + if errors.IsNotFound(err) { + return nil, nil, nil, nil + } + + return nil, nil, nil, fmt.Errorf( + "error getting Namespace %s: %v", ns.Name, err) + } + + ingressList := &ingressv1alpha1.ClusterIngressList{ + TypeMeta: metav1.TypeMeta{ + Kind: "ClusterIngress", + APIVersion: "ingress.openshift.io/v1alpha1", + }, + } + err = sdk.List(h.Namespace, ingressList, + sdk.WithListOptions(&metav1.ListOptions{})) + if err != nil { + return nil, nil, nil, fmt.Errorf( + "failed to list ClusterIngresses: %v", err) + } + + daemonsetList := &appsv1.DaemonSetList{ + TypeMeta: metav1.TypeMeta{ + Kind: "DaemonSet", + APIVersion: "apps/v1", + }, + } + err = sdk.List(ns.Name, daemonsetList, + sdk.WithListOptions(&metav1.ListOptions{})) + if err != nil { + return nil, nil, nil, fmt.Errorf( + "failed to list DaemonSets: %v", err) + } + + return ns, ingressList.Items, daemonsetList.Items, nil +} + +// computeStatusConditions computes the operator's current state. 
+func computeStatusConditions(conditions []osv1.ClusterOperatorStatusCondition, ns *corev1.Namespace, ingresses []ingressv1alpha1.ClusterIngress, daemonsets []appsv1.DaemonSet) []osv1.ClusterOperatorStatusCondition { + failingCondition := &osv1.ClusterOperatorStatusCondition{ + Type: osv1.OperatorFailing, + Status: osv1.ConditionUnknown, + } + if ns == nil { + failingCondition.Status = osv1.ConditionTrue + failingCondition.Reason = "NoNamespace" + failingCondition.Message = "router namespace does not exist" + } else { + failingCondition.Status = osv1.ConditionFalse + } + conditions = clusteroperator.SetStatusCondition(conditions, + failingCondition) + + progressingCondition := &osv1.ClusterOperatorStatusCondition{ + Type: osv1.OperatorProgressing, + Status: osv1.ConditionUnknown, + } + numIngresses := len(ingresses) + numDaemonsets := len(daemonsets) + if numIngresses == numDaemonsets { + progressingCondition.Status = osv1.ConditionFalse + } else { + progressingCondition.Status = osv1.ConditionTrue + progressingCondition.Reason = "Reconciling" + progressingCondition.Message = fmt.Sprintf( + "have %d ingresses, want %d", + numDaemonsets, numIngresses) + } + conditions = clusteroperator.SetStatusCondition(conditions, + progressingCondition) + + availableCondition := &osv1.ClusterOperatorStatusCondition{ + Type: osv1.OperatorAvailable, + Status: osv1.ConditionUnknown, + } + dsAvailable := map[string]bool{} + for _, ds := range daemonsets { + dsAvailable[ds.Name] = ds.Status.NumberAvailable > 0 + } + unavailable := []string{} + for _, ingress := range ingresses { + // TODO Use the manifest to derive the name, or use labels or + // owner references. + name := "router-" + ingress.Name + if available, exists := dsAvailable[name]; !exists { + msg := fmt.Sprintf("no router for ingress %q", + ingress.Name) + unavailable = append(unavailable, msg) + } else if !available { + msg := fmt.Sprintf("ingress %q not available", + ingress.Name) + unavailable = append(unavailable, msg) + } + } + if len(unavailable) == 0 { + availableCondition.Status = osv1.ConditionTrue + } else { + availableCondition.Status = osv1.ConditionFalse + availableCondition.Reason = "IngressUnavailable" + availableCondition.Message = strings.Join(unavailable, + "\n") + } + conditions = clusteroperator.SetStatusCondition(conditions, + availableCondition) + + return conditions +} diff --git a/pkg/stub/status_test.go b/pkg/stub/status_test.go new file mode 100644 index 0000000000..36979f5459 --- /dev/null +++ b/pkg/stub/status_test.go @@ -0,0 +1,131 @@ +package stub + +import ( + "fmt" + "testing" + + ingressv1alpha1 "github.com/openshift/cluster-ingress-operator/pkg/apis/ingress/v1alpha1" + osv1 "github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestComputeStatusConditions(t *testing.T) { + type testInputs struct { + haveNamespace bool + numWanted, numAvailable, numUnavailable int + } + type testOutputs struct { + failing, progressing, available bool + } + testCases := []struct { + description string + inputs testInputs + outputs testOutputs + }{ + {"no namespace", testInputs{false, 0, 0, 0}, testOutputs{true, false, true}}, + {"no ingresses, no routers", testInputs{true, 0, 0, 0}, testOutputs{false, false, true}}, + {"scaling up", testInputs{true, 1, 0, 0}, testOutputs{false, true, false}}, + {"scaling down", testInputs{true, 0, 1, 0}, testOutputs{false, true, true}}, + {"0/2 
ingresses available", testInputs{true, 2, 0, 2}, testOutputs{false, false, false}}, + {"1/2 ingresses available", testInputs{true, 2, 1, 1}, testOutputs{false, false, false}}, + {"2/2 ingresses available", testInputs{true, 2, 2, 0}, testOutputs{false, false, true}}, + } + + for _, tc := range testCases { + var ( + namespace *corev1.Namespace + ingresses []ingressv1alpha1.ClusterIngress + daemonsets []appsv1.DaemonSet + + failing, progressing, available osv1.ConditionStatus + ) + if tc.inputs.haveNamespace { + namespace = &corev1.Namespace{} + } + for i := 0; i < tc.inputs.numWanted; i++ { + ingresses = append(ingresses, + ingressv1alpha1.ClusterIngress{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("ingress-%d", i+1), + }, + }) + } + numDaemonsets := tc.inputs.numAvailable + tc.inputs.numUnavailable + for i := 0; i < numDaemonsets; i++ { + numberAvailable := 0 + if i < tc.inputs.numAvailable { + numberAvailable = 1 + } + daemonsets = append(daemonsets, appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("router-ingress-%d", + i+1), + }, + Status: appsv1.DaemonSetStatus{ + NumberAvailable: int32(numberAvailable), + }, + }) + } + if tc.outputs.failing { + failing = osv1.ConditionTrue + } else { + failing = osv1.ConditionFalse + } + if tc.outputs.progressing { + progressing = osv1.ConditionTrue + } else { + progressing = osv1.ConditionFalse + } + if tc.outputs.available { + available = osv1.ConditionTrue + } else { + available = osv1.ConditionFalse + } + expected := []osv1.ClusterOperatorStatusCondition{ + { + Type: osv1.OperatorFailing, + Status: failing, + }, + { + Type: osv1.OperatorProgressing, + Status: progressing, + }, + { + Type: osv1.OperatorAvailable, + Status: available, + }, + } + new := computeStatusConditions( + []osv1.ClusterOperatorStatusCondition{}, + namespace, + ingresses, + daemonsets, + ) + gotExpected := true + if len(new) != len(expected) { + gotExpected = false + } + for _, conditionA := range new { + foundMatchingCondition := false + + for _, conditionB := range expected { + if conditionA.Type == conditionB.Type && + conditionA.Status == conditionB.Status { + foundMatchingCondition = true + break + } + } + + if !foundMatchingCondition { + gotExpected = false + } + } + if !gotExpected { + t.Fatalf("%q: expected %#v, got %#v", tc.description, + expected, new) + } + } +} diff --git a/pkg/util/clusteroperator/status.go b/pkg/util/clusteroperator/status.go new file mode 100644 index 0000000000..d9efc0204b --- /dev/null +++ b/pkg/util/clusteroperator/status.go @@ -0,0 +1,68 @@ +package clusteroperator + +import ( + osv1 "github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// SetStatusCondition returns the result of setting the specified condition in +// the given slice of conditions. +// TODO Replace with cluster-version-operator's SetOperatorStatusCondition +// once update to a version of cluster-version-operator that has it. 
+// https://github.com/openshift/cluster-version-operator/blob/fe673cb712fa5e27001488fc088ac91bb553353d/lib/resourcemerge/os.go#L36-L54 +func SetStatusCondition(oldConditions []osv1.ClusterOperatorStatusCondition, condition *osv1.ClusterOperatorStatusCondition) []osv1.ClusterOperatorStatusCondition { + condition.LastTransitionTime = metav1.Now() + + newConditions := []osv1.ClusterOperatorStatusCondition{} + + found := false + for _, c := range oldConditions { + if condition.Type == c.Type { + if condition.Status == c.Status && + condition.Reason == c.Reason && + condition.Message == c.Message { + return oldConditions + } + + found = true + newConditions = append(newConditions, *condition) + } else { + newConditions = append(newConditions, c) + } + } + if !found { + newConditions = append(newConditions, *condition) + } + + return newConditions +} + +// ConditionsEqual returns true if and only if the provided slices of conditions +// (ignoring LastTransitionTime) are equal. +func ConditionsEqual(oldConditions, newConditions []osv1.ClusterOperatorStatusCondition) bool { + if len(newConditions) != len(oldConditions) { + return false + } + + for _, conditionA := range oldConditions { + foundMatchingCondition := false + + for _, conditionB := range newConditions { + // Compare every field except LastTransitionTime. + if conditionA.Type == conditionB.Type && + conditionA.Status == conditionB.Status && + conditionA.Reason == conditionB.Reason && + conditionA.Message == conditionB.Message { + foundMatchingCondition = true + break + } + } + + if !foundMatchingCondition { + return false + } + } + + return true +} diff --git a/pkg/util/clusteroperator/status_test.go b/pkg/util/clusteroperator/status_test.go new file mode 100644 index 0000000000..6be5e0a3c0 --- /dev/null +++ b/pkg/util/clusteroperator/status_test.go @@ -0,0 +1,213 @@ +package clusteroperator + +import ( + "testing" + + osv1 "github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestSetStatusCondition(t *testing.T) { + testCases := []struct { + description string + oldConditions []osv1.ClusterOperatorStatusCondition + newCondition *osv1.ClusterOperatorStatusCondition + expected []osv1.ClusterOperatorStatusCondition + }{ + { + description: "new condition", + newCondition: &osv1.ClusterOperatorStatusCondition{ + Type: osv1.OperatorAvailable, + Status: osv1.ConditionTrue, + }, + expected: []osv1.ClusterOperatorStatusCondition{ + { + Type: osv1.OperatorAvailable, + Status: osv1.ConditionTrue, + }, + }, + }, + { + description: "existing condition, unchanged", + oldConditions: []osv1.ClusterOperatorStatusCondition{ + { + Type: osv1.OperatorAvailable, + Status: osv1.ConditionTrue, + }, + }, + newCondition: &osv1.ClusterOperatorStatusCondition{ + Type: osv1.OperatorAvailable, + Status: osv1.ConditionTrue, + }, + expected: []osv1.ClusterOperatorStatusCondition{ + { + Type: osv1.OperatorAvailable, + Status: osv1.ConditionTrue, + }, + }, + }, + { + description: "existing conditions, one changed", + oldConditions: []osv1.ClusterOperatorStatusCondition{ + { + Type: osv1.OperatorFailing, + Status: osv1.ConditionFalse, + }, + { + Type: osv1.OperatorProgressing, + Status: osv1.ConditionFalse, + }, + { + Type: osv1.OperatorAvailable, + Status: osv1.ConditionFalse, + }, + }, + newCondition: &osv1.ClusterOperatorStatusCondition{ + Type: osv1.OperatorAvailable, + Status: osv1.ConditionTrue, + }, + expected: []osv1.ClusterOperatorStatusCondition{ + { + Type: 
osv1.OperatorFailing, + Status: osv1.ConditionFalse, + }, + { + Type: osv1.OperatorProgressing, + Status: osv1.ConditionFalse, + }, + { + Type: osv1.OperatorAvailable, + Status: osv1.ConditionTrue, + }, + }, + }, + } + + for _, tc := range testCases { + actual := SetStatusCondition(tc.oldConditions, tc.newCondition) + if !ConditionsEqual(actual, tc.expected) { + t.Fatalf("%q: expected %v, got %v", tc.description, + tc.expected, actual) + } + } +} + +func TestConditionsEqual(t *testing.T) { + testCases := []struct { + description string + expected bool + a, b []osv1.ClusterOperatorStatusCondition + }{ + { + description: "empty statuses should be equal", + expected: true, + }, + { + description: "condition LastTransitionTime should be ignored", + expected: true, + a: []osv1.ClusterOperatorStatusCondition{ + { + Type: osv1.OperatorAvailable, + Status: osv1.ConditionTrue, + LastTransitionTime: metav1.Unix(0, 0), + }, + }, + b: []osv1.ClusterOperatorStatusCondition{ + { + Type: osv1.OperatorAvailable, + Status: osv1.ConditionTrue, + LastTransitionTime: metav1.Unix(1, 0), + }, + }, + }, + { + description: "order of conditions should not matter", + expected: true, + a: []osv1.ClusterOperatorStatusCondition{ + { + Type: osv1.OperatorAvailable, + Status: osv1.ConditionTrue, + }, + { + Type: osv1.OperatorProgressing, + Status: osv1.ConditionTrue, + }, + }, + b: []osv1.ClusterOperatorStatusCondition{ + { + Type: osv1.OperatorProgressing, + Status: osv1.ConditionTrue, + }, + { + Type: osv1.OperatorAvailable, + Status: osv1.ConditionTrue, + }, + }, + }, + { + description: "check missing condition", + expected: false, + a: []osv1.ClusterOperatorStatusCondition{ + { + Type: osv1.OperatorProgressing, + Status: osv1.ConditionTrue, + }, + }, + b: []osv1.ClusterOperatorStatusCondition{ + { + Type: osv1.OperatorAvailable, + Status: osv1.ConditionTrue, + }, + { + Type: osv1.OperatorProgressing, + Status: osv1.ConditionTrue, + }, + }, + }, + { + description: "check condition reason differs", + expected: false, + a: []osv1.ClusterOperatorStatusCondition{ + { + Type: osv1.OperatorAvailable, + Status: osv1.ConditionFalse, + Reason: "foo", + }, + }, + b: []osv1.ClusterOperatorStatusCondition{ + { + Type: osv1.OperatorAvailable, + Status: osv1.ConditionFalse, + Reason: "bar", + }, + }, + }, + { + description: "check condition message differs", + expected: false, + a: []osv1.ClusterOperatorStatusCondition{ + { + Type: osv1.OperatorAvailable, + Status: osv1.ConditionFalse, + Message: "foo", + }, + }, + b: []osv1.ClusterOperatorStatusCondition{ + { + Type: osv1.OperatorAvailable, + Status: osv1.ConditionFalse, + Message: "bar", + }, + }, + }, + } + + for _, tc := range testCases { + actual := ConditionsEqual(tc.a, tc.b) + if actual != tc.expected { + t.Fatalf("%q: expected %v, got %v", tc.description, + tc.expected, actual) + } + } +}
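
The helpers above are small enough that a standalone sketch shows how they compose. The following example is not part of either patch; it assumes only the ClusterOperatorStatusCondition types from operatorstatus.openshift.io/v1 and the SetStatusCondition/ConditionsEqual signatures introduced in pkg/util/clusteroperator/status.go, and the reason and message are illustrative values of the kind computeStatusConditions sets.

package main

import (
	"fmt"

	"github.com/openshift/cluster-ingress-operator/pkg/util/clusteroperator"
	osv1 "github.com/openshift/cluster-version-operator/pkg/apis/operatorstatus.openshift.io/v1"
)

func main() {
	// Start with no conditions and mark the operator available.
	conditions := clusteroperator.SetStatusCondition(nil,
		&osv1.ClusterOperatorStatusCondition{
			Type:   osv1.OperatorAvailable,
			Status: osv1.ConditionTrue,
		})

	// Setting the same condition type again with a different status
	// replaces the existing entry rather than appending a duplicate.
	updated := clusteroperator.SetStatusCondition(conditions,
		&osv1.ClusterOperatorStatusCondition{
			Type:    osv1.OperatorAvailable,
			Status:  osv1.ConditionFalse,
			Reason:  "IngressUnavailable",
			Message: "ingress \"default\" not available",
		})

	// ConditionsEqual ignores LastTransitionTime, so it reports a change
	// only when the type, status, reason, or message actually differs.
	fmt.Println(clusteroperator.ConditionsEqual(conditions, conditions)) // true
	fmt.Println(clusteroperator.ConditionsEqual(conditions, updated))    // false
}

Ignoring LastTransitionTime in the comparison is what keeps syncOperatorStatus from updating the ClusterOperator on every reconcile: sdk.Update is called only when some field other than the timestamp has changed.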