diff --git a/glide.lock b/glide.lock index ceb730030..235c6ba2c 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 0ab8da287bd54777c925abb2db45ef62e873e7cfff5d6d750714a078288ac3b5 -updated: 2019-07-26T11:36:52.816274936+02:00 +hash: bcfe5e7870d42ea2d5b3d7b7c4b384ec0b42ad978d70a1c8f69987de6b2e469b +updated: 2020-01-22T14:53:35.974683953+01:00 imports: - name: bitbucket.org/ww/goautoneg version: 2ae31c8b6b30d2f4c8100c20d527b571e9c433bb @@ -251,13 +251,13 @@ imports: subpackages: - specs-go - name: github.com/openshift/api - version: b7d4eb0fa1e0c1e03f97c97c08d0ef1f025c0838 + version: d4a64ec2cbd86f11ea74dfdcf6520d5833d0c6cd subpackages: - config/v1 - network/v1 - unidling/v1alpha1 - name: github.com/openshift/client-go - version: a85ea6a6b3a5d2dbe41582ee35695dd4683e1f02 + version: 5a5508328169b8a6992ea4ef711add89ddce3c6d subpackages: - network/clientset/versioned - network/clientset/versioned/scheme @@ -268,7 +268,7 @@ imports: - network/informers/externalversions/network/v1 - network/listers/network/v1 - name: github.com/openshift/library-go - version: 523621232378681359817c52fe07670ce93b83b9 + version: 61f035b9264717e7eea25dfe9db1cc2cb69d7530 subpackages: - pkg/config/leaderelection - pkg/network/networkapihelpers @@ -655,7 +655,8 @@ imports: - pkg/printers - pkg/resource - name: k8s.io/client-go - version: 6ee68ca5fd8355d024d02f9db0b3b667e8357a0f + version: deeef3880f0a4c5f5ca8b2f00479fb80fc80c2a1 + repo: https://github.com/openshift/kubernetes-client-go.git subpackages: - discovery - discovery/cached/disk @@ -847,8 +848,6 @@ imports: version: 51747d6e00da1fc578d5a333a93bb2abcbce7a95 - name: k8s.io/klog version: 8139d8cb77af419532b33dfa7dd09fbc5f1d344f - subpackages: - - glog - name: k8s.io/kube-aggregator version: da8327669ac57b6e6a06676eeb7de19c9780f76d - name: k8s.io/kube-controller-manager diff --git a/glide.yaml b/glide.yaml index c3c6c7a37..df65075db 100644 --- a/glide.yaml +++ b/glide.yaml @@ -10,7 +10,8 @@ import: - package: k8s.io/apiserver 
version: kubernetes-1.14.0 - package: k8s.io/client-go - version: kubernetes-1.14.0 + version: sdn-4.2-kubernetes-1.14.0 + repo: https://github.com/openshift/kubernetes-client-go.git - package: k8s.io/cli-runtime version: kubernetes-1.14.0 - package: k8s.io/cloud-provider @@ -68,11 +69,11 @@ import: # openshift second - package: github.com/openshift/api - version: master + version: release-4.2 - package: github.com/openshift/client-go - version: master + version: release-4.2 - package: github.com/openshift/library-go - version: master + version: release-4.2 # forks third # master @@ -125,7 +126,7 @@ import: version: v1.0.8 # etcd pins a very old version that has contention issues - package: github.com/google/btree - version: master + version: 20236160a414454a9c64b6c8829381c6f4bddcaa # why do we have this? - package: google.golang.org/appengine @@ -140,6 +141,7 @@ import: # force glide to pull this in - package: github.com/google/uuid + version: 8c31c18f31ede9fc8eae72290a7e7a8064e9b3e3 # due to https://github.com/Masterminds/glide/issues/881 manually show where # to get gonum.org/v1/gonum from @@ -152,6 +154,7 @@ import: # https://github.com/kubernetes/kubernetes/pull/72138 - package: bitbucket.org/ww/goautoneg repo: https://github.com/munnerz/goautoneg.git + version: 2ae31c8b6b30d2f4c8100c20d527b571e9c433bb # version that works with runc - package: golang.org/x/sys @@ -160,3 +163,9 @@ import: # version from openshift. 
may need later fitting - package: github.com/coreos/go-systemd version: 39ca1b05acc7ad1220e09f133283b8859a8b71ab + +# lock some versions down to what they were in earlier 4.2 releases +- package: github.com/containernetworking/cni + version: a7885cb6f8ab03fba07852ded351e4f5e7a112bf +- package: github.com/spf13/pflag + version: 583c0c0531f06d5278b7d917446061adc344b5cd diff --git a/vendor/github.com/openshift/api/apps/v1/generated.pb.go b/vendor/github.com/openshift/api/apps/v1/generated.pb.go index ae3080093..fd7e79fa0 100644 --- a/vendor/github.com/openshift/api/apps/v1/generated.pb.go +++ b/vendor/github.com/openshift/api/apps/v1/generated.pb.go @@ -34,21 +34,27 @@ */ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + proto "github.com/gogo/protobuf/proto" -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + math "math" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" -import strings "strings" -import reflect "reflect" + k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + strings "strings" + + reflect "reflect" + + io "io" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal diff --git a/vendor/github.com/openshift/api/authorization/v1/doc.go b/vendor/github.com/openshift/api/authorization/v1/doc.go index 06fb12fce..a66741dce 100644 --- a/vendor/github.com/openshift/api/authorization/v1/doc.go +++ b/vendor/github.com/openshift/api/authorization/v1/doc.go @@ -3,6 +3,7 @@ // +k8s:defaulter-gen=TypeMeta // +k8s:openapi-gen=true +// +kubebuilder:validation:Optional // +groupName=authorization.openshift.io // Package v1 is the v1 version of the API. package v1 diff --git a/vendor/github.com/openshift/api/authorization/v1/generated.pb.go b/vendor/github.com/openshift/api/authorization/v1/generated.pb.go index c8e008826..245fffeaf 100644 --- a/vendor/github.com/openshift/api/authorization/v1/generated.pb.go +++ b/vendor/github.com/openshift/api/authorization/v1/generated.pb.go @@ -46,18 +46,25 @@ */ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_api_rbac_v1 "k8s.io/api/rbac/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + proto "github.com/gogo/protobuf/proto" -import strings "strings" -import reflect "reflect" + math "math" -import io "io" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + + k8s_io_api_rbac_v1 "k8s.io/api/rbac/v1" + + k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + strings "strings" + + reflect "reflect" + + io "io" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal diff --git a/vendor/github.com/openshift/api/build/v1/generated.pb.go b/vendor/github.com/openshift/api/build/v1/generated.pb.go index bcec53e13..83221ca91 100644 --- a/vendor/github.com/openshift/api/build/v1/generated.pb.go +++ b/vendor/github.com/openshift/api/build/v1/generated.pb.go @@ -65,21 +65,27 @@ */ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + proto "github.com/gogo/protobuf/proto" -import time "time" + math "math" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" -import strings "strings" -import reflect "reflect" + k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + time "time" + + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + strings "strings" + + reflect "reflect" + + io "io" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal diff --git a/vendor/github.com/openshift/api/config/v1/doc.go b/vendor/github.com/openshift/api/config/v1/doc.go index fa6f8d924..4ff5208f2 100644 --- a/vendor/github.com/openshift/api/config/v1/doc.go +++ b/vendor/github.com/openshift/api/config/v1/doc.go @@ -2,6 +2,7 @@ // +k8s:defaulter-gen=TypeMeta // +k8s:openapi-gen=true +// +kubebuilder:validation:Optional // +groupName=config.openshift.io // Package v1 is the v1 version of the API. 
package v1 diff --git a/vendor/github.com/openshift/api/config/v1/register.go b/vendor/github.com/openshift/api/config/v1/register.go index 66c342569..35eace370 100644 --- a/vendor/github.com/openshift/api/config/v1/register.go +++ b/vendor/github.com/openshift/api/config/v1/register.go @@ -56,6 +56,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &NetworkList{}, &OAuth{}, &OAuthList{}, + &OperatorHub{}, + &OperatorHubList{}, &Project{}, &ProjectList{}, &Proxy{}, diff --git a/vendor/github.com/openshift/api/config/v1/types.go b/vendor/github.com/openshift/api/config/v1/types.go index 7cca09471..ca36f6777 100644 --- a/vendor/github.com/openshift/api/config/v1/types.go +++ b/vendor/github.com/openshift/api/config/v1/types.go @@ -17,6 +17,8 @@ type ConfigMapFileReference struct { // The namespace must be specified at the point of use. type ConfigMapNameReference struct { // name is the metadata.name of the referenced config map + // +kubebuilder:validation:Required + // +required Name string `json:"name"` } @@ -24,6 +26,8 @@ type ConfigMapNameReference struct { // The namespace must be specified at the point of use. type SecretNameReference struct { // name is the metadata.name of the referenced secret + // +kubebuilder:validation:Required + // +required Name string `json:"name"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_build.go b/vendor/github.com/openshift/api/config/v1/types_build.go index c7ed7e958..fa4088ca7 100644 --- a/vendor/github.com/openshift/api/config/v1/types_build.go +++ b/vendor/github.com/openshift/api/config/v1/types_build.go @@ -9,7 +9,10 @@ import ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// Build holds cluster-wide information on how to handle builds. The canonical name is `cluster` +// Build configures the behavior of OpenShift builds for the entire cluster. 
+// This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds. +// +// The canonical name is "cluster" type Build struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -23,6 +26,10 @@ type BuildSpec struct { // AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that // should be trusted for image pushes and pulls during builds. // The namespace for this config map is openshift-config. + // + // DEPRECATED: Additional CAs for image pull and push should be set on + // image.config.openshift.io/cluster instead. + // // +optional AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"` // BuildDefaults controls the default information for Builds diff --git a/vendor/github.com/openshift/api/config/v1/types_dns.go b/vendor/github.com/openshift/api/config/v1/types_dns.go index 8f7d67be5..ef04f7a67 100644 --- a/vendor/github.com/openshift/api/config/v1/types_dns.go +++ b/vendor/github.com/openshift/api/config/v1/types_dns.go @@ -6,8 +6,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// DNS holds cluster-wide information about DNS. The canonical name is `cluster` -// TODO this object is an example of a possible grouping and is subject to change or removal +// DNS holds cluster-wide information about DNS. The canonical name is `cluster` type DNS struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -28,6 +27,8 @@ type DNSSpec struct { // // For example, given the base domain `openshift.example.com`, an API server // DNS record may be created for `cluster-api.openshift.example.com`. + // + // Once set, this field cannot be changed. BaseDomain string `json:"baseDomain"` // publicZone is the location where all the DNS records that are publicly accessible to // the internet exist. 
diff --git a/vendor/github.com/openshift/api/config/v1/types_image.go b/vendor/github.com/openshift/api/config/v1/types_image.go index f0cf220d3..94eb74116 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image.go +++ b/vendor/github.com/openshift/api/config/v1/types_image.go @@ -6,7 +6,12 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// Image holds cluster-wide information about how to handle images. The canonical name is `cluster` +// Image governs policies related to imagestream imports and runtime configuration +// for external registries. It allows cluster admins to configure which registries +// OpenShift is allowed to import images from, extra CA trust bundles for external +// registries, and policies to blacklist/whitelist registry hostnames. +// When exposing OpenShift's image registry to the public, this also lets cluster +// admins specify the external hostname. type Image struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -22,7 +27,7 @@ type Image struct { } type ImageSpec struct { - // AllowedRegistriesForImport limits the container image registries that normal users may import + // allowedRegistriesForImport limits the container image registries that normal users may import // images from. Set this list to the registries that you trust to contain valid Docker // images and that you want applications to be able to import from. Users with // permission to create Images or ImageStreamMappings via the API are not affected by @@ -38,14 +43,14 @@ type ImageSpec struct { // +optional ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"` - // AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that - // should be trusted during imagestream import, pod image pull, and imageregistry - // pullthrough. 
+ // additionalTrustedCA is a reference to a ConfigMap containing additional CAs that + // should be trusted during imagestream import, pod image pull, build image pull, and + // imageregistry pullthrough. // The namespace for this config map is openshift-config. // +optional AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"` - // RegistrySources contains configuration that determines how the container runtime + // registrySources contains configuration that determines how the container runtime // should treat individual registries when accessing images for builds+pods. (e.g. // whether or not to allow insecure access). It does not contain configuration for the // internal cluster registry. @@ -55,10 +60,10 @@ type ImageSpec struct { type ImageStatus struct { - // this value is set by the image registry operator which controls the internal registry hostname - // InternalRegistryHostname sets the hostname for the default internal image + // internalRegistryHostname sets the hostname for the default internal image // registry. The value must be in "hostname[:port]" format. - // For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY + // This value is set by the image registry operator which controls the internal registry + // hostname. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY // environment variable but this setting overrides the environment variable. // +optional InternalRegistryHostname string `json:"internalRegistryHostname,omitempty"` @@ -83,11 +88,11 @@ type ImageList struct { // RegistryLocation contains a location of the registry specified by the registry domain // name. The domain name might include wildcards, like '*' or '??'. type RegistryLocation struct { - // DomainName specifies a domain name for the registry + // domainName specifies a domain name for the registry // In case the registry use non-standard (80 or 443) port, the port should be included // in the domain name as well. 
DomainName string `json:"domainName"` - // Insecure indicates whether the registry is secure (https) or insecure (http) + // insecure indicates whether the registry is secure (https) or insecure (http) // By default (if not specified) the registry is assumed as secure. // +optional Insecure bool `json:"insecure,omitempty"` @@ -95,15 +100,15 @@ type RegistryLocation struct { // RegistrySources holds cluster-wide information about how to handle the registries config. type RegistrySources struct { - // InsecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections. + // insecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections. // +optional InsecureRegistries []string `json:"insecureRegistries,omitempty"` - // BlockedRegistries are blacklisted from image pull/push. All other registries are allowed. + // blockedRegistries are blacklisted from image pull/push. All other registries are allowed. // // Only one of BlockedRegistries or AllowedRegistries may be set. // +optional BlockedRegistries []string `json:"blockedRegistries,omitempty"` - // AllowedRegistries are whitelisted for image pull/push. All other registries are blocked. + // allowedRegistries are whitelisted for image pull/push. All other registries are blocked. // // Only one of BlockedRegistries or AllowedRegistries may be set. // +optional diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go index 484a1af0b..d161eb847 100644 --- a/vendor/github.com/openshift/api/config/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/config/v1/types_ingress.go @@ -6,8 +6,8 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// Ingress holds cluster-wide information about Ingress. 
The canonical name is `cluster` -// TODO this object is an example of a possible grouping and is subject to change or removal +// Ingress holds cluster-wide information about ingress, including the default ingress domain +// used for routes. The canonical name is `cluster`. type Ingress struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -24,8 +24,13 @@ type Ingress struct { type IngressSpec struct { // domain is used to generate a default host name for a route when the - // route's host name is empty. The generated host name will follow this + // route's host name is empty. The generated host name will follow this // pattern: "..". + // + // It is also used as the default wildcard domain suffix for ingress. The + // default ingresscontroller domain will follow this pattern: "*.". + // + // Once set, changing domain is not currently supported. Domain string `json:"domain"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go index a60c5f7dc..42ec977fd 100644 --- a/vendor/github.com/openshift/api/config/v1/types_network.go +++ b/vendor/github.com/openshift/api/config/v1/types_network.go @@ -6,14 +6,17 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// Network holds cluster-wide information about Network. The canonical name is `cluster` -// TODO this object is an example of a possible grouping and is subject to change or removal +// Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. +// Please view network.spec for an explanation on what applies when configuring this resource. type Network struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration. + // As a general rule, this SHOULD NOT be read directly. Instead, you should + // consume the NetworkStatus, as it indicates the currently deployed configuration. + // Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. // +kubebuilder:validation:Required // +required Spec NetworkSpec `json:"spec"` @@ -25,14 +28,15 @@ type Network struct { // NetworkSpec is the desired network configuration. // As a general rule, this SHOULD NOT be read directly. Instead, you should // consume the NetworkStatus, as it indicates the currently deployed configuration. -// Currently, changing ClusterNetwork, ServiceNetwork, or NetworkType after -// installation is not supported. +// Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. type NetworkSpec struct { // IP address pool to use for pod IPs. + // This field is immutable after installation. ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"` // IP address pool for services. // Currently, we only support a single entry here. + // This field is immutable after installation. ServiceNetwork []string `json:"serviceNetwork"` // NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). @@ -40,10 +44,12 @@ type NetworkSpec struct { // or else no networking will be installed. // Currently supported values are: // - OpenShiftSDN + // This field is immutable after installation. NetworkType string `json:"networkType"` // externalIP defines configuration for controllers that - // affect Service.ExternalIP + // affect Service.ExternalIP. If nil, then ExternalIP is + // not allowed to be set. // +optional ExternalIP *ExternalIPConfig `json:"externalIP,omitempty"` } @@ -78,8 +84,7 @@ type ClusterNetworkEntry struct { // of a Service resource. 
type ExternalIPConfig struct { // policy is a set of restrictions applied to the ExternalIP field. - // If nil, any value is allowed for an ExternalIP. If the empty/zero - // policy is supplied, then ExternalIP is not allowed to be set. + // If nil or empty, then ExternalIP is not allowed to be set. // +optional Policy *ExternalIPPolicy `json:"policy,omitempty"` diff --git a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go new file mode 100644 index 000000000..31291dec2 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go @@ -0,0 +1,78 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// OperatorHubSpec defines the desired state of OperatorHub +type OperatorHubSpec struct { + // disableAllDefaultSources allows you to disable all the default hub + // sources. If this is true, a specific entry in sources can be used to + // enable a default source. If this is false, a specific entry in + // sources can be used to disable or enable a default source. + // +optional + DisableAllDefaultSources bool `json:"disableAllDefaultSources,omitempty"` + // sources is the list of default hub sources and their configuration. + // If the list is empty, it implies that the default hub sources are + // enabled on the cluster unless disableAllDefaultSources is true. + // If disableAllDefaultSources is true and sources is not empty, + // the configuration present in sources will take precedence. The list of + // default hub sources and their current state will always be reflected in + // the status block. + // +optional + Sources []HubSource `json:"sources,omitempty"` +} + +// OperatorHubStatus defines the observed state of OperatorHub. The current +// state of the default hub sources will always be reflected here. 
+type OperatorHubStatus struct { + // sources encapsulates the result of applying the configuration for each + // hub source + Sources []HubSourceStatus `json:"sources,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OperatorHub is the Schema for the operatorhubs API. It can be used to change +// the state of the default hub sources for OperatorHub on the cluster from +// enabled to disabled and vice versa. +// +kubebuilder:subresource:status +// +genclient:nonNamespaced +type OperatorHub struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec OperatorHubSpec `json:"spec"` + Status OperatorHubStatus `json:"status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OperatorHubList contains a list of OperatorHub +type OperatorHubList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []OperatorHub `json:"items"` +} + +// HubSource is used to specify the hub source and its configuration +type HubSource struct { + // name is the name of one of the default hub sources + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:Required + Name string `json:"name"` + // disabled is used to disable a default hub source on cluster + // +kubebuilder:Required + Disabled bool `json:"disabled"` +} + +// HubSourceStatus is used to reflect the current state of applying the +// configuration to a default source +type HubSourceStatus struct { + HubSource `json:"",omitempty` + // status indicates success or failure in applying the configuration + Status string `json:"status,omitempty"` + // message provides more information regarding failures + Message string `json:"message,omitempty"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go index 821ae8975..1413a48ca 100644 --- 
a/vendor/github.com/openshift/api/config/v1/types_proxy.go +++ b/vendor/github.com/openshift/api/config/v1/types_proxy.go @@ -42,18 +42,18 @@ type ProxySpec struct { // trustedCA is a reference to a ConfigMap containing a CA certificate bundle used // for client egress HTTPS connections. The certificate bundle must be from the CA - // that signed the proxy's certificate and be signed for everything. trustedCA should - // only be consumed by a proxy validator. The validator is responsible for reading - // ConfigMapNameReference, validating the certificate and copying "ca-bundle.crt" - // from data to a ConfigMap in the namespace of an operator configured for proxy. - // The namespace for this ConfigMap is "openshift-config-managed". Here is an example - // ConfigMap (in yaml): + // that signed the proxy's certificate and be signed for everything. The trustedCA + // field should only be consumed by a proxy validator. The validator is responsible + // for reading the certificate bundle from required key "ca-bundle.crt" and copying + // it to a ConfigMap named "trusted-ca-bundle" in the "openshift-config-managed" + // namespace. The namespace for the ConfigMap referenced by trustedCA is + // "openshift-config". 
Here is an example ConfigMap (in yaml): // // apiVersion: v1 // kind: ConfigMap // metadata: - // name: proxy-ca - // namespace: openshift-config-managed + // name: user-ca-bundle + // namespace: openshift-config // data: // ca-bundle.crt: | // -----BEGIN CERTIFICATE----- diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/vendor/github.com/openshift/api/config/v1/types_scheduling.go index 4a6bfba6f..9b8fa3a52 100644 --- a/vendor/github.com/openshift/api/config/v1/types_scheduling.go +++ b/vendor/github.com/openshift/api/config/v1/types_scheduling.go @@ -6,7 +6,8 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// Scheduler holds cluster-wide information about Scheduler. The canonical name is `cluster` +// Scheduler holds cluster-wide config information to run the Kubernetes Scheduler +// and influence its placement decisions. The canonical name for this config is `cluster`. type Scheduler struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index 4fa507b16..3d44627f9 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -1571,6 +1571,39 @@ func (in *HTTPServingInfo) DeepCopy() *HTTPServingInfo { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HubSource) DeepCopyInto(out *HubSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSource. 
+func (in *HubSource) DeepCopy() *HubSource { + if in == nil { + return nil + } + out := new(HubSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HubSourceStatus) DeepCopyInto(out *HubSourceStatus) { + *out = *in + out.HubSource = in.HubSource + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSourceStatus. +func (in *HubSourceStatus) DeepCopy() *HubSourceStatus { + if in == nil { + return nil + } + out := new(HubSourceStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IdentityProvider) DeepCopyInto(out *IdentityProvider) { *out = *in @@ -2465,6 +2498,109 @@ func (in *OperandVersion) DeepCopy() *OperandVersion { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorHub) DeepCopyInto(out *OperatorHub) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHub. +func (in *OperatorHub) DeepCopy() *OperatorHub { + if in == nil { + return nil + } + out := new(OperatorHub) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OperatorHub) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OperatorHubList) DeepCopyInto(out *OperatorHubList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OperatorHub, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubList. +func (in *OperatorHubList) DeepCopy() *OperatorHubList { + if in == nil { + return nil + } + out := new(OperatorHubList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OperatorHubList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorHubSpec) DeepCopyInto(out *OperatorHubSpec) { + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]HubSource, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubSpec. +func (in *OperatorHubSpec) DeepCopy() *OperatorHubSpec { + if in == nil { + return nil + } + out := new(OperatorHubSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorHubStatus) DeepCopyInto(out *OperatorHubStatus) { + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]HubSourceStatus, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubStatus. 
+func (in *OperatorHubStatus) DeepCopy() *OperatorHubStatus { + if in == nil { + return nil + } + out := new(OperatorHubStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) { *out = *in diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index 868921b77..7e0abdd9b 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -326,7 +326,7 @@ func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string { } var map_Build = map[string]string{ - "": "Build holds cluster-wide information on how to handle builds. The canonical name is `cluster`", + "": "Build configures the behavior of OpenShift builds for the entire cluster. This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.\n\nThe canonical name is \"cluster\"", "spec": "Spec holds user-settable values for the build controller configuration", } @@ -365,7 +365,7 @@ func (BuildOverrides) SwaggerDoc() map[string]string { } var map_BuildSpec = map[string]string{ - "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config.", + "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. 
The namespace for this config map is openshift-config.\n\nDEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead.", "buildDefaults": "BuildDefaults controls the default information for Builds", "buildOverrides": "BuildOverrides controls override settings for builds", } @@ -584,7 +584,7 @@ func (ConsoleStatus) SwaggerDoc() map[string]string { } var map_DNS = map[string]string{ - "": "DNS holds cluster-wide information about DNS. The canonical name is `cluster`", + "": "DNS holds cluster-wide information about DNS. The canonical name is `cluster`", "metadata": "Standard object's metadata.", "spec": "spec holds user settable values for configuration", "status": "status holds observed values from the cluster. They may not be overridden.", @@ -603,7 +603,7 @@ func (DNSList) SwaggerDoc() map[string]string { } var map_DNSSpec = map[string]string{ - "baseDomain": "baseDomain is the base domain of the cluster. All managed DNS records will be sub-domains of this base.\n\nFor example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`.", + "baseDomain": "baseDomain is the base domain of the cluster. 
All managed DNS records will be sub-domains of this base.\n\nFor example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`.\n\nOnce set, this field cannot be changed.", "publicZone": "publicZone is the location where all the DNS records that are publicly accessible to the internet exist.\n\nIf this field is nil, no public records should be created.\n\nOnce set, this field cannot be changed.", "privateZone": "privateZone is the location where all the DNS records that are only available internally to the cluster exist.\n\nIf this field is nil, no private records should be created.\n\nOnce set, this field cannot be changed.", } @@ -660,7 +660,7 @@ func (FeatureGateSelection) SwaggerDoc() map[string]string { } var map_Image = map[string]string{ - "": "Image holds cluster-wide information about how to handle images. The canonical name is `cluster`", + "": "Image governs policies related to imagestream imports and runtime configuration for external registries. It allows cluster admins to configure which registries OpenShift is allowed to import images from, extra CA trust bundles for external registries, and policies to blacklist/whitelist registry hostnames. When exposing OpenShift's image registry to the public, this also lets cluster admins specify the external hostname.", "metadata": "Standard object's metadata.", "spec": "spec holds user settable values for configuration", "status": "status holds observed values from the cluster. They may not be overridden.", @@ -679,10 +679,10 @@ func (ImageList) SwaggerDoc() map[string]string { } var map_ImageSpec = map[string]string{ - "allowedRegistriesForImport": "AllowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. 
Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.", + "allowedRegistriesForImport": "allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.", "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", - "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, and imageregistry pullthrough. The namespace for this config map is openshift-config.", - "registrySources": "RegistrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry.", + "additionalTrustedCA": "additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, build image pull, and imageregistry pullthrough. 
The namespace for this config map is openshift-config.", + "registrySources": "registrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry.", } func (ImageSpec) SwaggerDoc() map[string]string { @@ -690,7 +690,7 @@ func (ImageSpec) SwaggerDoc() map[string]string { } var map_ImageStatus = map[string]string{ - "internalRegistryHostname": "this value is set by the image registry operator which controls the internal registry hostname InternalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable.", + "internalRegistryHostname": "internalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. This value is set by the image registry operator which controls the internal registry hostname. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable.", "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", } @@ -700,8 +700,8 @@ func (ImageStatus) SwaggerDoc() map[string]string { var map_RegistryLocation = map[string]string{ "": "RegistryLocation contains a location of the registry specified by the registry domain name. 
The domain name might include wildcards, like '*' or '??'.", - "domainName": "DomainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.", - "insecure": "Insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.", + "domainName": "domainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.", + "insecure": "insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.", } func (RegistryLocation) SwaggerDoc() map[string]string { @@ -710,9 +710,9 @@ func (RegistryLocation) SwaggerDoc() map[string]string { var map_RegistrySources = map[string]string{ "": "RegistrySources holds cluster-wide information about how to handle the registries config.", - "insecureRegistries": "InsecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections.", - "blockedRegistries": "BlockedRegistries are blacklisted from image pull/push. All other registries are allowed.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.", - "allowedRegistries": "AllowedRegistries are whitelisted for image pull/push. All other registries are blocked.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.", + "insecureRegistries": "insecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections.", + "blockedRegistries": "blockedRegistries are blacklisted from image pull/push. All other registries are allowed.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.", + "allowedRegistries": "allowedRegistries are whitelisted for image pull/push. 
All other registries are blocked.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.", } func (RegistrySources) SwaggerDoc() map[string]string { @@ -828,7 +828,7 @@ func (PlatformStatus) SwaggerDoc() map[string]string { } var map_Ingress = map[string]string{ - "": "Ingress holds cluster-wide information about Ingress. The canonical name is `cluster`", + "": "Ingress holds cluster-wide information about ingress, including the default ingress domain used for routes. The canonical name is `cluster`.", "metadata": "Standard object's metadata.", "spec": "spec holds user settable values for configuration", "status": "status holds observed values from the cluster. They may not be overridden.", @@ -847,7 +847,7 @@ func (IngressList) SwaggerDoc() map[string]string { } var map_IngressSpec = map[string]string{ - "domain": "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"..\".", + "domain": "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"..\".\n\nIt is also used as the default wildcard domain suffix for ingress. The default ingresscontroller domain will follow this pattern: \"*.\".\n\nOnce set, changing domain is not currently supported.", } func (IngressSpec) SwaggerDoc() map[string]string { @@ -866,7 +866,7 @@ func (ClusterNetworkEntry) SwaggerDoc() map[string]string { var map_ExternalIPConfig = map[string]string{ "": "ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field of a Service resource.", - "policy": "policy is a set of restrictions applied to the ExternalIP field. If nil, any value is allowed for an ExternalIP. If the empty/zero policy is supplied, then ExternalIP is not allowed to be set.", + "policy": "policy is a set of restrictions applied to the ExternalIP field. 
If nil or empty, then ExternalIP is not allowed to be set.", "autoAssignCIDRs": "autoAssignCIDRs is a list of CIDRs from which to automatically assign Service.ExternalIP. These are assigned when the service is of type LoadBalancer. In general, this is only useful for bare-metal clusters. In Openshift 3.x, this was misleadingly called \"IngressIPs\". Automatically assigned External IPs are not affected by any ExternalIPPolicy rules. Currently, only one entry may be provided.", } @@ -885,9 +885,9 @@ func (ExternalIPPolicy) SwaggerDoc() map[string]string { } var map_Network = map[string]string{ - "": "Network holds cluster-wide information about Network. The canonical name is `cluster`", + "": "Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. Please view network.spec for an explanation on what applies when configuring this resource.", "metadata": "Standard object's metadata.", - "spec": "spec holds user settable values for configuration.", + "spec": "spec holds user settable values for configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.", "status": "status holds observed values from the cluster. They may not be overridden.", } @@ -904,11 +904,11 @@ func (NetworkList) SwaggerDoc() map[string]string { } var map_NetworkSpec = map[string]string{ - "": "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. 
Currently, changing ClusterNetwork, ServiceNetwork, or NetworkType after installation is not supported.", - "clusterNetwork": "IP address pool to use for pod IPs.", - "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here.", - "networkType": "NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OpenShiftSDN", - "externalIP": "externalIP defines configuration for controllers that affect Service.ExternalIP", + "": "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.", + "clusterNetwork": "IP address pool to use for pod IPs. This field is immutable after installation.", + "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation.", + "networkType": "NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OpenShiftSDN This field is immutable after installation.", + "externalIP": "externalIP defines configuration for controllers that affect Service.ExternalIP. 
If nil, then ExternalIP is not allowed to be set.", } func (NetworkSpec) SwaggerDoc() map[string]string { @@ -1146,6 +1146,61 @@ func (TokenConfig) SwaggerDoc() map[string]string { return map_TokenConfig } +var map_HubSource = map[string]string{ + "": "HubSource is used to specify the hub source and its configuration", + "name": "name is the name of one of the default hub sources", + "disabled": "disabled is used to disable a default hub source on cluster", +} + +func (HubSource) SwaggerDoc() map[string]string { + return map_HubSource +} + +var map_HubSourceStatus = map[string]string{ + "": "HubSourceStatus is used to reflect the current state of applying the configuration to a default source", + "status": "status indicates success or failure in applying the configuration", + "message": "message provides more information regarding failures", +} + +func (HubSourceStatus) SwaggerDoc() map[string]string { + return map_HubSourceStatus +} + +var map_OperatorHub = map[string]string{ + "": "OperatorHub is the Schema for the operatorhubs API. It can be used to change the state of the default hub sources for OperatorHub on the cluster from enabled to disabled and vice versa.", +} + +func (OperatorHub) SwaggerDoc() map[string]string { + return map_OperatorHub +} + +var map_OperatorHubList = map[string]string{ + "": "OperatorHubList contains a list of OperatorHub", +} + +func (OperatorHubList) SwaggerDoc() map[string]string { + return map_OperatorHubList +} + +var map_OperatorHubSpec = map[string]string{ + "": "OperatorHubSpec defines the desired state of OperatorHub", + "disableAllDefaultSources": "disableAllDefaultSources allows you to disable all the default hub sources. If this is true, a specific entry in sources can be used to enable a default source. If this is false, a specific entry in sources can be used to disable or enable a default source.", + "sources": "sources is the list of default hub sources and their configuration. 
If the list is empty, it implies that the default hub sources are enabled on the cluster unless disableAllDefaultSources is true. If disableAllDefaultSources is true and sources is not empty, the configuration present in sources will take precedence. The list of default hub sources and their current state will always be reflected in the status block.", +} + +func (OperatorHubSpec) SwaggerDoc() map[string]string { + return map_OperatorHubSpec +} + +var map_OperatorHubStatus = map[string]string{ + "": "OperatorHubStatus defines the observed state of OperatorHub. The current state of the default hub sources will always be reflected here.", + "sources": "sources encapsulates the result of applying the configuration for each hub source", +} + +func (OperatorHubStatus) SwaggerDoc() map[string]string { + return map_OperatorHubStatus +} + var map_Project = map[string]string{ "": "Project holds cluster-wide information about Project. The canonical name is `cluster`", "metadata": "Standard object's metadata.", @@ -1208,7 +1263,7 @@ var map_ProxySpec = map[string]string{ "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var.", "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. Empty means unset and will not result in an env var.", "readinessEndpoints": "readinessEndpoints is a list of endpoints used to verify readiness of the proxy.", - "trustedCA": "trustedCA is a reference to a ConfigMap containing a CA certificate bundle used for client egress HTTPS connections. The certificate bundle must be from the CA that signed the proxy's certificate and be signed for everything. trustedCA should only be consumed by a proxy validator. The validator is responsible for reading ConfigMapNameReference, validating the certificate and copying \"ca-bundle.crt\" from data to a ConfigMap in the namespace of an operator configured for proxy. 
The namespace for this ConfigMap is \"openshift-config-managed\". Here is an example ConfigMap (in yaml):\n\napiVersion: v1 kind: ConfigMap metadata:\n name: proxy-ca\n namespace: openshift-config-managed\n data:\n ca-bundle.crt: |", + "trustedCA": "trustedCA is a reference to a ConfigMap containing a CA certificate bundle used for client egress HTTPS connections. The certificate bundle must be from the CA that signed the proxy's certificate and be signed for everything. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from required key \"ca-bundle.crt\" and copying it to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml):\n\napiVersion: v1 kind: ConfigMap metadata:\n name: user-ca-bundle\n namespace: openshift-config\n data:\n ca-bundle.crt: |", } func (ProxySpec) SwaggerDoc() map[string]string { @@ -1227,7 +1282,7 @@ func (ProxyStatus) SwaggerDoc() map[string]string { } var map_Scheduler = map[string]string{ - "": "Scheduler holds cluster-wide information about Scheduler. The canonical name is `cluster`", + "": "Scheduler holds cluster-wide config information to run the Kubernetes Scheduler and influence its placement decisions. The canonical name for this config is `cluster`.", "metadata": "Standard object's metadata.", "spec": "spec holds user settable values for configuration", "status": "status holds observed values from the cluster. 
They may not be overridden.", diff --git a/vendor/github.com/openshift/api/console/v1/register.go b/vendor/github.com/openshift/api/console/v1/register.go index 7b6e1e0b6..98363daa3 100644 --- a/vendor/github.com/openshift/api/console/v1/register.go +++ b/vendor/github.com/openshift/api/console/v1/register.go @@ -32,9 +32,13 @@ func Resource(resource string) schema.GroupResource { func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(GroupVersion, &ConsoleLink{}, + &ConsoleLinkList{}, &ConsoleCLIDownload{}, + &ConsoleCLIDownloadList{}, &ConsoleNotification{}, + &ConsoleNotificationList{}, &ConsoleExternalLogLink{}, + &ConsoleExternalLogLinkList{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) return nil diff --git a/vendor/github.com/openshift/api/console/v1/types_console_external_log_links.go b/vendor/github.com/openshift/api/console/v1/types_console_external_log_links.go index b6a1033d2..c9c3317d5 100644 --- a/vendor/github.com/openshift/api/console/v1/types_console_external_log_links.go +++ b/vendor/github.com/openshift/api/console/v1/types_console_external_log_links.go @@ -41,3 +41,12 @@ type ConsoleExternalLogLinkSpec struct { // + optional NamespaceFilter string `json:"namespaceFilter,omitempty"` } + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type ConsoleExternalLogLinkList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ metav1.ListMeta `json:"metadata"` + Items []ConsoleExternalLogLink `json:"items"` +} diff --git a/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go index d61cfcd55..8c1b2694a 100644 --- a/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go @@ -132,6 +132,39 @@ func (in *ConsoleExternalLogLink) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleExternalLogLinkList) DeepCopyInto(out *ConsoleExternalLogLinkList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConsoleExternalLogLink, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleExternalLogLinkList. +func (in *ConsoleExternalLogLinkList) DeepCopy() *ConsoleExternalLogLinkList { + if in == nil { + return nil + } + out := new(ConsoleExternalLogLinkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleExternalLogLinkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ConsoleExternalLogLinkSpec) DeepCopyInto(out *ConsoleExternalLogLinkSpec) { *out = *in diff --git a/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go index 3e65772c7..4dbec77d0 100644 --- a/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go @@ -58,6 +58,14 @@ func (ConsoleExternalLogLink) SwaggerDoc() map[string]string { return map_ConsoleExternalLogLink } +var map_ConsoleExternalLogLinkList = map[string]string{ + "metadata": "Standard object's metadata.", +} + +func (ConsoleExternalLogLinkList) SwaggerDoc() map[string]string { + return map_ConsoleExternalLogLinkList +} + var map_ConsoleExternalLogLinkSpec = map[string]string{ "": "ConsoleExternalLogLinkSpec is the desired log link configuration. The log link will appear on the logs tab of the pod details page.", "text": "text is the display text for the link", diff --git a/vendor/github.com/openshift/api/image/v1/generated.pb.go b/vendor/github.com/openshift/api/image/v1/generated.pb.go index ae113b7d3..f985843fd 100644 --- a/vendor/github.com/openshift/api/image/v1/generated.pb.go +++ b/vendor/github.com/openshift/api/image/v1/generated.pb.go @@ -45,19 +45,25 @@ */ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + proto "github.com/gogo/protobuf/proto" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + math "math" -import strings "strings" -import reflect "reflect" + k8s_io_api_core_v1 "k8s.io/api/core/v1" -import io "io" + k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + github_com_gogo_protobuf_sortkeys 
"github.com/gogo/protobuf/sortkeys" + + strings "strings" + + reflect "reflect" + + io "io" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal diff --git a/vendor/github.com/openshift/api/network/v1/generated.pb.go b/vendor/github.com/openshift/api/network/v1/generated.pb.go index 9264f7664..34a5dabb9 100644 --- a/vendor/github.com/openshift/api/network/v1/generated.pb.go +++ b/vendor/github.com/openshift/api/network/v1/generated.pb.go @@ -23,14 +23,19 @@ */ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" -import io "io" + math "math" + + strings "strings" + + reflect "reflect" + + io "io" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal diff --git a/vendor/github.com/openshift/api/oauth/v1/generated.pb.go b/vendor/github.com/openshift/api/oauth/v1/generated.pb.go index 0a4f507f1..13ce3d911 100644 --- a/vendor/github.com/openshift/api/oauth/v1/generated.pb.go +++ b/vendor/github.com/openshift/api/oauth/v1/generated.pb.go @@ -23,14 +23,19 @@ */ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" -import io "io" + math "math" + + strings "strings" + + reflect "reflect" + + io "io" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go index f5fa81ce8..17efe35a4 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/operator/v1/types_ingress.go @@ -10,7 +10,7 @@ import ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.availableReplicas,selectorpath=.status.labelSelector +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.availableReplicas,selectorpath=.status.selector // IngressController describes a managed ingress controller for the cluster. The // controller can service OpenShift Route and Kubernetes Ingress resources. @@ -70,9 +70,9 @@ type IngressControllerSpec struct { // If unset, the default is based on // infrastructure.config.openshift.io/cluster .status.platform: // - // AWS: LoadBalancerService - // Azure: LoadBalancerService - // GCP: LoadBalancerService + // AWS: LoadBalancerService (with External scope) + // Azure: LoadBalancerService (with External scope) + // GCP: LoadBalancerService (with External scope) // Libvirt: HostNetwork // // Any other platform types (including None) default to HostNetwork. @@ -186,9 +186,10 @@ var ( // LoadBalancerStrategy holds parameters for a load balancer. type LoadBalancerStrategy struct { // scope indicates the scope at which the load balancer is exposed. - // Possible values are "External" and "Internal". The default is - // "External". - // +optional + // Possible values are "External" and "Internal". 
+ // + // +kubebuilder:validation:Required + // +required Scope LoadBalancerScope `json:"scope"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go index cdc3fd0f9..953b30f8d 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_network.go +++ b/vendor/github.com/openshift/api/operator/v1/types_network.go @@ -236,6 +236,20 @@ type KuryrConfig struct { // The port kuryr-controller will listen for readiness and liveness requests. // +optional ControllerProbesPort *uint32 `json:"controllerProbesPort,omitempty"` + + // openStackServiceNetwork contains the CIDR of network from which to allocate IPs for + // OpenStack Octavia's Amphora VMs. Please note that with Amphora driver Octavia uses + // two IPs from that network for each loadbalancer - one given by OpenShift and second + // for VRRP connections. As the first one is managed by OpenShift's and second by Neutron's + // IPAMs, those need to come from different pools. Therefore `openStackServiceNetwork` + // needs to be at least twice the size of `serviceNetwork`, and whole `serviceNetwork` + // must be overlapping with `openStackServiceNetwork`. cluster-network-operator will then + // make sure VRRP IPs are taken from the ranges inside `openStackServiceNetwork` that + // are not overlapping with `serviceNetwork`, effectivly preventing conflicts. If not set + // cluster-network-operator will use `serviceNetwork` expanded by decrementing the prefix + // size by 1. 
+ // +optional + OpenStackServiceNetwork string `json:"openStackServiceNetwork,omitempty"` } // ovnKubernetesConfig is the proposed configuration parameters for networks diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go index feee6b548..4b63a8991 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -270,7 +270,7 @@ var map_IngressControllerSpec = map[string]string{ "": "IngressControllerSpec is the specification of the desired behavior of the IngressController.", "domain": "domain is a DNS name serviced by the ingress controller and is used to configure multiple features:\n\n* For the LoadBalancerService endpoint publishing strategy, domain is\n used to configure DNS records. See endpointPublishingStrategy.\n\n* When using a generated default certificate, the certificate will be valid\n for domain and its subdomains. See defaultCertificate.\n\n* The value is published to individual Route statuses so that end-users\n know where to target external DNS records.\n\ndomain must be unique among all IngressControllers, and cannot be updated.\n\nIf empty, defaults to ingress.config.openshift.io/cluster .spec.domain.", "replicas": "replicas is the desired number of ingress controller replicas. 
If unset, defaults to 2.", - "endpointPublishingStrategy": "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc.\n\nIf unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform:\n\n AWS: LoadBalancerService\n Azure: LoadBalancerService\n GCP: LoadBalancerService\n Libvirt: HostNetwork\n\nAny other platform types (including None) default to HostNetwork.\n\nendpointPublishingStrategy cannot be updated.", + "endpointPublishingStrategy": "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc.\n\nIf unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform:\n\n AWS: LoadBalancerService (with External scope)\n Azure: LoadBalancerService (with External scope)\n GCP: LoadBalancerService (with External scope)\n Libvirt: HostNetwork\n\nAny other platform types (including None) default to HostNetwork.\n\nendpointPublishingStrategy cannot be updated.", "defaultCertificate": "defaultCertificate is a reference to a secret containing the default certificate served by the ingress controller. When Routes don't specify their own certificate, defaultCertificate is used.\n\nThe secret must contain the following keys and data:\n\n tls.crt: certificate file contents\n tls.key: key file contents\n\nIf unset, a wildcard certificate is automatically generated and used. The certificate is valid for the ingress controller domain (and subdomains) and the generated certificate's CA will be automatically integrated with the cluster's trust store.\n\nThe in-use certificate (whether generated or user-specified) will be automatically integrated with OpenShift's built-in OAuth server.", "namespaceSelector": "namespaceSelector is used to filter the set of namespaces serviced by the ingress controller. 
This is useful for implementing shards.\n\nIf unset, the default is no filtering.", "routeSelector": "routeSelector is used to filter the set of Routes serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", @@ -296,7 +296,7 @@ func (IngressControllerStatus) SwaggerDoc() map[string]string { var map_LoadBalancerStrategy = map[string]string{ "": "LoadBalancerStrategy holds parameters for a load balancer.", - "scope": "scope indicates the scope at which the load balancer is exposed. Possible values are \"External\" and \"Internal\". The default is \"External\".", + "scope": "scope indicates the scope at which the load balancer is exposed. Possible values are \"External\" and \"Internal\".", } func (LoadBalancerStrategy) SwaggerDoc() map[string]string { @@ -401,9 +401,10 @@ func (IPAMConfig) SwaggerDoc() map[string]string { } var map_KuryrConfig = map[string]string{ - "": "KuryrConfig configures the Kuryr-Kubernetes SDN", - "daemonProbesPort": "The port kuryr-daemon will listen for readiness and liveness requests.", - "controllerProbesPort": "The port kuryr-controller will listen for readiness and liveness requests.", + "": "KuryrConfig configures the Kuryr-Kubernetes SDN", + "daemonProbesPort": "The port kuryr-daemon will listen for readiness and liveness requests.", + "controllerProbesPort": "The port kuryr-controller will listen for readiness and liveness requests.", + "openStackServiceNetwork": "openStackServiceNetwork contains the CIDR of network from which to allocate IPs for OpenStack Octavia's Amphora VMs. Please note that with Amphora driver Octavia uses two IPs from that network for each loadbalancer - one given by OpenShift and second for VRRP connections. As the first one is managed by OpenShift's and second by Neutron's IPAMs, those need to come from different pools. 
Therefore `openStackServiceNetwork` needs to be at least twice the size of `serviceNetwork`, and whole `serviceNetwork` must be overlapping with `openStackServiceNetwork`. cluster-network-operator will then make sure VRRP IPs are taken from the ranges inside `openStackServiceNetwork` that are not overlapping with `serviceNetwork`, effectivly preventing conflicts. If not set cluster-network-operator will use `serviceNetwork` expanded by decrementing the prefix size by 1.", } func (KuryrConfig) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/project/v1/generated.pb.go b/vendor/github.com/openshift/api/project/v1/generated.pb.go index 018c6acd1..712479812 100644 --- a/vendor/github.com/openshift/api/project/v1/generated.pb.go +++ b/vendor/github.com/openshift/api/project/v1/generated.pb.go @@ -16,16 +16,21 @@ */ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + proto "github.com/gogo/protobuf/proto" -import strings "strings" -import reflect "reflect" + math "math" -import io "io" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + + strings "strings" + + reflect "reflect" + + io "io" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal diff --git a/vendor/github.com/openshift/api/quota/v1/generated.pb.go b/vendor/github.com/openshift/api/quota/v1/generated.pb.go index 94ff47237..3c6dedc78 100644 --- a/vendor/github.com/openshift/api/quota/v1/generated.pb.go +++ b/vendor/github.com/openshift/api/quota/v1/generated.pb.go @@ -19,18 +19,23 @@ */ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + proto "github.com/gogo/protobuf/proto" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + math "math" -import strings "strings" -import reflect "reflect" + k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + strings "strings" + + reflect "reflect" + + io "io" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal diff --git a/vendor/github.com/openshift/api/route/v1/generated.pb.go b/vendor/github.com/openshift/api/route/v1/generated.pb.go index dde33a994..6210b9ff7 100644 --- a/vendor/github.com/openshift/api/route/v1/generated.pb.go +++ b/vendor/github.com/openshift/api/route/v1/generated.pb.go @@ -21,18 +21,23 @@ */ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + proto "github.com/gogo/protobuf/proto" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + math "math" -import strings "strings" -import reflect "reflect" + k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import io "io" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + + strings "strings" + + reflect "reflect" + + io "io" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal diff --git a/vendor/github.com/openshift/api/route/v1/types.go b/vendor/github.com/openshift/api/route/v1/types.go index 768a8dee8..4ea188195 100644 --- a/vendor/github.com/openshift/api/route/v1/types.go +++ b/vendor/github.com/openshift/api/route/v1/types.go @@ -84,7 +84,7 @@ type RouteSpec struct { // `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`. // // +optional - Subdomain string `json:"subdomain" protobuf:"bytes,8,opt,name=subdomain"` + Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,8,opt,name=subdomain"` // path that the router watches for, to route traffic for to the service. Optional Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` diff --git a/vendor/github.com/openshift/api/security/v1/generated.pb.go b/vendor/github.com/openshift/api/security/v1/generated.pb.go index 02fbad99b..4d73de075 100644 --- a/vendor/github.com/openshift/api/security/v1/generated.pb.go +++ b/vendor/github.com/openshift/api/security/v1/generated.pb.go @@ -30,16 +30,21 @@ */ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + proto "github.com/gogo/protobuf/proto" -import strings "strings" -import reflect "reflect" + math "math" -import io "io" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + + strings "strings" + + reflect "reflect" + + io "io" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal diff --git a/vendor/github.com/openshift/api/template/v1/generated.pb.go b/vendor/github.com/openshift/api/template/v1/generated.pb.go index 1de3d1417..a497b86e2 100644 --- a/vendor/github.com/openshift/api/template/v1/generated.pb.go +++ b/vendor/github.com/openshift/api/template/v1/generated.pb.go @@ -25,20 +25,25 @@ */ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + proto "github.com/gogo/protobuf/proto" -import k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" + math "math" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" -import strings "strings" -import reflect "reflect" + k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" -import io "io" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + strings "strings" + + reflect "reflect" + + io "io" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal diff --git a/vendor/github.com/openshift/api/user/v1/generated.pb.go b/vendor/github.com/openshift/api/user/v1/generated.pb.go index 172e4a7cb..796f47553 100644 --- a/vendor/github.com/openshift/api/user/v1/generated.pb.go +++ b/vendor/github.com/openshift/api/user/v1/generated.pb.go @@ -19,16 +19,21 @@ */ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + proto "github.com/gogo/protobuf/proto" -import strings "strings" -import reflect "reflect" + math "math" -import io "io" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + strings "strings" + + reflect "reflect" + + io "io" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal diff --git a/vendor/github.com/openshift/client-go/glide.lock b/vendor/github.com/openshift/client-go/glide.lock index b3739d131..1608395ba 100644 --- a/vendor/github.com/openshift/client-go/glide.lock +++ b/vendor/github.com/openshift/client-go/glide.lock @@ -1,5 +1,5 @@ hash: 595563cffda70c75833adcf07415011d115db7218cbbddc4c14f1684ad39638a -updated: 2019-07-20T21:53:33.024857-04:00 +updated: 2019-08-13T12:05:19.648281606-04:00 imports: - name: github.com/davecgh/go-spew version: 782f4967f2dc4564575ca782fe2d04090b5faca8 @@ -41,7 +41,7 @@ imports: - name: github.com/modern-go/reflect2 version: 94122c33edd36123c84d5368cfb2b69df93a0ec8 - name: github.com/openshift/api - version: 9525304a0adb725ab4a4a54539a1a6bf6cc343d3 + version: b5570061b31fed3b06c24077c534b1a1bf7ecf8b subpackages: - apps/v1 - authorization/v1 @@ -105,7 +105,7 @@ imports: subpackages: - imports - name: google.golang.org/appengine - version: b2f4a3cf3c67576a2ee09e1fe62656a5086ce880 + version: fb139bde60fa77cede04f226b4d5a3cf68dcce27 subpackages: - internal - internal/base diff --git a/vendor/github.com/openshift/library-go/Makefile b/vendor/github.com/openshift/library-go/Makefile index c7f0ce5df..2704d89e4 100644 --- a/vendor/github.com/openshift/library-go/Makefile +++ b/vendor/github.com/openshift/library-go/Makefile @@ -2,7 +2,7 @@ all: build .PHONY: all # All the go packages (e.g. for verfy) -GO_PACKAGES :=./pkg/... ./cmd/... +GO_PACKAGES :=./pkg/... 
# Packages to be compiled GO_BUILD_PACKAGES :=$(GO_PACKAGES) # Do not auto-expand packages for libraries or it would compile them separately diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/Makefile b/vendor/github.com/openshift/library-go/alpha-build-machinery/Makefile index 0d2ccbee3..2ec73b087 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/Makefile +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/Makefile @@ -10,10 +10,20 @@ examples :=$(wildcard ./make/examples/*/Makefile.test) # $3 - output folder # We need to change dir to the final makefile directory or relative paths won't match. # Dynamic values are replaced with "" so we can do diff against checkout versions. +# Avoid comparing local paths by stripping the prefix. +# Delete lines referencing temporary files and directories +# Unify make error output between versions +# Ignore old cp errors on centos7 define update-makefile-log mkdir -p "$(3)" -$(MAKE) -j 1 -C "$(dir $(1))" -f "$(notdir $(1))" --no-print-directory --warn-undefined-variables $(2) 2>&1 | \ +set -o pipefail; $(MAKE) -j 1 -C "$(dir $(1))" -f "$(notdir $(1))" --no-print-directory --warn-undefined-variables $(2) 2>&1 | \ sed 's/\.\(buildDate\|versionFromGit\|commitFromGit\|gitTreeState\)="[^"]*" /.\1="" /g' | \ + sed -E 's~/.*/(github.com/openshift/library-go/alpha-build-machinery/.*)~/\1~g' | \ + sed '/\/tmp\/tmp./d' | \ + sed '/git checkout -b/d' | \ + sed -E 's~^[<> ]*((\+\+\+|\-\-\-) \./(testing/)?manifests/.*.yaml).*~\1~' | \ + sed -E 's/^(make\[2\]: \*\*\* \[).*: (.*\] Error 1)/\1\2/' | \ + grep -v 'are the same file' | \ tee "$(3)"/"$(notdir $(1))"$(subst ..,.,.$(2).log) endef diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/doc.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/doc.go new file mode 100644 index 000000000..a093b4bd1 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/doc.go @@ -0,0 
+1,14 @@ +// required for gomod to pull in packages. + +package alpha_build_machinery + +// this is a dependency magnet to make it easier to pull in the build-machinery. We want a single import to pull all of it in. +import ( + _ "github.com/openshift/library-go/alpha-build-machinery/make" + _ "github.com/openshift/library-go/alpha-build-machinery/make/lib" + _ "github.com/openshift/library-go/alpha-build-machinery/make/targets" + _ "github.com/openshift/library-go/alpha-build-machinery/make/targets/golang" + _ "github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift" + _ "github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/operator" + _ "github.com/openshift/library-go/alpha-build-machinery/scripts" +) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.example.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.example.mk index b8a8112c0..fffc5b3a3 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.example.mk +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.example.mk @@ -20,12 +20,12 @@ CODEGEN_GROUPS_VERSION :=openshiftapiserver:v1alpha1 # $ make -n --print-data-base | grep ^CODEGEN # This will call a macro called "build-image" which will generate image specific targets based on the parameters: -# $0 - macro name -# $1 - target suffix -# $2 - Dockerfile path -# $3 - context directory for image build +# $1 - target name +# $2 - image ref +# $3 - Dockerfile path +# $4 - context # It will generate target "image-$(1)" for builing the image an binding it as a prerequisite to target "images". -$(call build-image,origin-cluster-openshift-apiserver-operator,./Dockerfile,.) +$(call build-image,ocp-cli,registry.svc.ci.openshift.org/ocp/4.2:cli,./images/cli/Dockerfile.rhel,.) 
# This will call a macro called "add-bindata" which will generate bindata specific targets based on the parameters: # $0 - macro name diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.example.mk.help.log b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.example.mk.help.log index 3645ce965..92aa6acdb 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.example.mk.help.log +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.example.mk.help.log @@ -4,7 +4,7 @@ build clean clean-binaries help -image-origin-cluster-openshift-apiserver-operator +image-ocp-cli images test test-unit diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.mk index 6e6c03437..564fc1229 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.mk +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.mk @@ -15,6 +15,9 @@ update: update-bindata # or self_dir could be modified for the next include by the included file. # Also doing this at the end of the file allows us to user self_dir before it could be modified. include $(addprefix $(self_dir), \ - targets/openshift/*.mk \ + targets/openshift/deps.mk \ + targets/openshift/images.mk \ + targets/openshift/bindata.mk \ + targets/openshift/codegen.mk \ golang.mk \ ) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/doc.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/doc.go new file mode 100644 index 000000000..66ba5512e --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/doc.go @@ -0,0 +1,3 @@ +// required for gomod to pull in packages. 
+ +package alpha_build_machinery diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/.gitignore b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/.gitignore index 2e3892561..d06fd1372 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/.gitignore +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/.gitignore @@ -1,2 +1,3 @@ -oc -openshift +/oc +/openshift +/_output/ diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile index 17350782a..3ede7702e 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile @@ -1,3 +1,34 @@ include $(addprefix ../../, \ golang.mk \ + targets/openshift/rpm.mk \ + targets/openshift/crd-schema-gen.mk \ ) + +# Set crd-schema-gen variables +CRD_SCHEMA_GEN_APIS :=$(addprefix ./pkg/apis/,v1 v1beta1) +CRD_SCHEMA_GEN_VERSION :=v0.2.1 + +# rpm wants build-id set +GO_LD_EXTRAFLAGS +=-B 0x$$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n') + +OUTPUT_DIR :=_output +CROSS_BUILD_BINDIR :=$(OUTPUT_DIR)/bin +RPM_EXTRAFLAGS :=--quiet --define 'version 2.42.0' --define 'dist .el7' --define 'release 6' + +cross-build-darwin-amd64: + +@GOOS=darwin GOARCH=amd64 $(MAKE) --no-print-directory build GO_BUILD_BINDIR:=$(CROSS_BUILD_BINDIR)/darwin_amd64 +.PHONY: cross-build-darwin-amd64 + +cross-build-windows-amd64: + +@GOOS=windows GOARCH=amd64 $(MAKE) --no-print-directory build GO_BUILD_BINDIR:=$(CROSS_BUILD_BINDIR)/windows_amd64 +.PHONY: cross-build-windows-amd64 + +cross-build: cross-build-darwin-amd64 cross-build-windows-amd64 +.PHONY: 
cross-build + +clean-cross-build: + $(RM) -r '$(CROSS_BUILD_BINDIR)' + if [ -d '$(OUTPUT_DIR)' ]; then rmdir --ignore-fail-on-non-empty '$(OUTPUT_DIR)'; fi +.PHONY: clean-cross-build + +clean: clean-cross-build diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test index dd788847a..4d4754a63 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test @@ -1,7 +1,10 @@ SHELL :=/bin/bash -euo pipefail -all: - $(MAKE) -C . build +test: | test-build test-cross-build test-rpm test-codegen +.PHONY: test + +test-build: + $(MAKE) build SOURCE_GIT_TAG=v42.43.44 SOURCE_GIT_COMMIT=aaa SOURCE_GIT_TREE_STATE=clean [[ -f ./openshift ]] [[ -f ./oc ]] @@ -9,14 +12,57 @@ all: # majorFromGit, minorFromGit are deprecated upstream and set to empty value # we avoid comparing time to avoid flakes diff <( ./oc | sed '$$d' ) <( \ - echo ""; \ - echo ""; \ - git rev-parse --short "HEAD^{commit}" 2>/dev/null; \ - git describe --long --tags --abbrev=7 --match 'v[0-9]*' || echo 'v0.0.0-unknown'; \ - git diff --quiet && echo 'clean' || echo 'dirty'; \ + echo '' && \ + echo '' && \ + echo 'aaa' && \ + echo 'v42.43.44' && \ + echo 'clean' \ ) - $(MAKE) -C . clean + $(MAKE) clean + [[ ! -f ./openshift ]] + [[ ! -f ./oc ]] + $(MAKE) clean +.PHONY: test-build + +test-cross-build: + [[ ! -d ./_output/ ]] || (ls -l ./_output/ && false) + $(MAKE) cross-build [[ ! -f ./openshift ]] [[ ! -f ./oc ]] -.PHONY: all + [[ -f ./_output/bin/darwin_amd64/openshift ]] + [[ -f ./_output/bin/darwin_amd64/oc ]] + [[ -f ./_output/bin/windows_amd64/openshift.exe ]] + [[ -f ./_output/bin/windows_amd64/oc.exe ]] + + $(MAKE) clean + [[ ! 
-d ./_output/ ]] || (ls -l ./_output/ && false) + $(MAKE) clean +.PHONY: test-cross-build + +test-rpm: + [[ ! -d ./_output/ ]] || (ls -l ./_output/ && false) + + $(MAKE) rpm-build + [[ -f ./_output/rpms/x86_64/openshift-2.42.0-6.el7.x86_64.rpm ]] + [[ -f ./_output/srpms/openshift-2.42.0-6.el7.src.rpm ]] + + $(MAKE) clean + [[ ! -d ./_output/ ]] || (ls -l ./_output/ && false) + $(MAKE) clean +.PHONY: test-rpm + +test-codegen: + cp -r ./testing/manifests/initial/* ./manifests/ + diff -Naup ./testing/manifests/initial/ ./manifests/ + ! $(MAKE) verify-codegen-crds + + $(MAKE) update-codegen-crds + $(MAKE) verify-codegen-crds + ! diff -Naup ./testing/manifests/initial/ ./manifests/ 2>/dev/null + diff -Naup ./testing/manifests/updated/ ./manifests/ 2>/dev/null + + $(MAKE) clean + [[ ! -d ./_output/ ]] || (ls -l ./_output/ && false) + $(MAKE) clean +.PHONY: test-codegen diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test.log b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test.log index 59aae6474..22359adfb 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test.log +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test.log @@ -1,22 +1,292 @@ -make -C . build -fatal: No names found, cannot describe anything. -fatal: No names found, cannot describe anything. 
-go build -ldflags "-s -w -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" " github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc -go build -ldflags "-s -w -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" " github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/openshift +make build SOURCE_GIT_TAG=v42.43.44 SOURCE_GIT_COMMIT=aaa SOURCE_GIT_TREE_STATE=clean +go build -ldflags "-s -w -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n')" github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc +go build -ldflags "-s -w -X 
github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n')" github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/openshift [[ -f ./openshift ]] [[ -f ./oc ]] # test version is set correctly when linking # majorFromGit, minorFromGit are deprecated upstream and set to empty value # we avoid comparing time to avoid flakes diff <( ./oc | sed '$d' ) <( \ - echo ""; \ - echo ""; \ - git rev-parse --short "HEAD^{commit}" 2>/dev/null; \ - git describe --long --tags --abbrev=7 --match 'v[0-9]*' || echo 'v0.0.0-unknown'; \ - git diff --quiet && echo 'clean' || echo 'dirty'; \ + echo '' && \ + echo '' && \ + echo 'aaa' && \ + echo 'v42.43.44' && \ + echo 'clean' \ ) -fatal: No names found, cannot describe anything. -make -C . 
clean +make clean rm -f oc openshift +rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +if [ -d '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod'; fi +rm -f -r '_output/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod' +if [ -d '_output/tools/src/sigs.k8s.io/controller-tools' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/src/sigs.k8s.io/controller-tools'; fi +if [ -d '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod'; fi +rm -f '_output/tools/bin/yq' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f -r '_output/bin' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +[[ ! -f ./openshift ]] +[[ ! 
-f ./oc ]] +make clean +rm -f oc openshift +rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +if [ -d '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod'; fi +rm -f -r '_output/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod' +if [ -d '_output/tools/src/sigs.k8s.io/controller-tools' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/src/sigs.k8s.io/controller-tools'; fi +if [ -d '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod'; fi +rm -f '_output/tools/bin/yq' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f -r '_output/bin' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +[[ ! -d ./_output/ ]] || (ls -l ./_output/ && false) +make cross-build +fatal: No names found, cannot describe anything. +fatal: No names found, cannot describe anything. 
+mkdir -p '_output/bin/darwin_amd64' +go build -ldflags "-s -w -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n')" -o '_output/bin/darwin_amd64/oc' github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc +mkdir -p '_output/bin/darwin_amd64' +go build -ldflags "-s -w -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n')" -o '_output/bin/darwin_amd64/openshift' github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/openshift +fatal: No names found, cannot describe anything. +fatal: No names found, cannot describe anything. 
+mkdir -p '_output/bin/windows_amd64' +go build -ldflags "-s -w -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n')" -o '_output/bin/windows_amd64/oc.exe' github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc +mkdir -p '_output/bin/windows_amd64' +go build -ldflags "-s -w -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n')" -o '_output/bin/windows_amd64/openshift.exe' github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/openshift [[ ! -f ./openshift ]] [[ ! 
-f ./oc ]] +[[ -f ./_output/bin/darwin_amd64/openshift ]] +[[ -f ./_output/bin/darwin_amd64/oc ]] +[[ -f ./_output/bin/windows_amd64/openshift.exe ]] +[[ -f ./_output/bin/windows_amd64/oc.exe ]] +make clean +rm -f oc openshift +rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +if [ -d '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod'; fi +rm -f -r '_output/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod' +if [ -d '_output/tools/src/sigs.k8s.io/controller-tools' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/src/sigs.k8s.io/controller-tools'; fi +if [ -d '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod'; fi +rm -f '_output/tools/bin/yq' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f -r '_output/bin' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +[[ ! 
-d ./_output/ ]] || (ls -l ./_output/ && false) +make clean +rm -f oc openshift +rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +if [ -d '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod'; fi +rm -f -r '_output/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod' +if [ -d '_output/tools/src/sigs.k8s.io/controller-tools' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/src/sigs.k8s.io/controller-tools'; fi +if [ -d '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod'; fi +rm -f '_output/tools/bin/yq' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f -r '_output/bin' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +[[ ! -d ./_output/ ]] || (ls -l ./_output/ && false) +make rpm-build +rpmbuild -ba --define "_topdir /github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries" --define "go_package github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries" --quiet --define 'version 2.42.0' --define 'dist .el7' --define 'release 6' ocp.spec +fatal: No names found, cannot describe anything. +fatal: No names found, cannot describe anything. 
+[[ -f ./_output/rpms/x86_64/openshift-2.42.0-6.el7.x86_64.rpm ]] +[[ -f ./_output/srpms/openshift-2.42.0-6.el7.src.rpm ]] +make clean +rm -f oc openshift +rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +if [ -d '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod'; fi +rm -f -r '_output/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod' +if [ -d '_output/tools/src/sigs.k8s.io/controller-tools' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/src/sigs.k8s.io/controller-tools'; fi +if [ -d '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod'; fi +rm -f '_output/tools/bin/yq' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f -r '_output/bin' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +[[ ! 
-d ./_output/ ]] || (ls -l ./_output/ && false) +make clean +rm -f oc openshift +rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +if [ -d '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod'; fi +rm -f -r '_output/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod' +if [ -d '_output/tools/src/sigs.k8s.io/controller-tools' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/src/sigs.k8s.io/controller-tools'; fi +if [ -d '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod'; fi +rm -f '_output/tools/bin/yq' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f -r '_output/bin' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +cp -r ./testing/manifests/initial/* ./manifests/ +diff -Naup ./testing/manifests/initial/ ./manifests/ +! make verify-codegen-crds +Installing controller-gen into "_output/tools/src/sigs.k8s.io/controller-tools/controller-gen" +mkdir -p '_output/tools/src/sigs.k8s.io/controller-tools' +git clone -b 'v0.2.1' --single-branch --depth=1 https://github.com/kubernetes-sigs/controller-tools.git '_output/tools/src/sigs.k8s.io/controller-tools' +Cloning into '_output/tools/src/sigs.k8s.io/controller-tools'... +Note: checking out 'ba11932048e4538f6e435f5ca0cdea19bf458338'. + +You are in 'detached HEAD' state. You can look around, make experimental +changes and commit them, and you can discard any commits you make in this +state without impacting any branches by performing another checkout. + +If you want to create a new branch to retain commits you create, you may +do so (now or later) by using -b with the checkout command again. 
Example: + + +_output/tools/src/sigs.k8s.io/controller-tools/../.. +cd '_output/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src' && go mod vendor 2>/dev/null && go build -mod=vendor ./cmd/controller-gen +Installing yq into '_output/tools/bin/yq' +mkdir -p '_output/tools/bin/' +curl -s -f -L https://github.com/mikefarah/yq/releases/download/2.4.0/yq_linux_amd64 -o '_output/tools/bin/yq' +chmod +x '_output/tools/bin/yq'; +'_output/tools/src/sigs.k8s.io/controller-tools/controller-gen' \ + schemapatch:manifests="./manifests" \ + paths="./pkg/apis/v1;./pkg/apis/v1beta1" \ +--- ./manifests/operator.openshift.io_myotheroperatorresources.crd.yaml +@@ -11,9 +11,39 @@ spec: + scope: "" + version: v1beta1 + versions: +- - name: v1beta1 +- served: true +- storage: true ++ - name: v1beta1 ++ served: true ++ storage: true ++ "validation": ++ "openAPIV3Schema": ++ description: MyOtherOperatorResource is an example operator configuration type ++ type: object ++ required: ++ - metadata ++ - spec ++ properties: ++ apiVersion: ++ description: 'APIVersion defines the versioned schema of this representation ++ of an object. Servers should convert recognized schemas to the latest ++ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' ++ type: string ++ kind: ++ description: 'Kind is a string value representing the REST resource this ++ object represents. Servers may infer this from the endpoint the client ++ submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' ++ type: string ++ metadata: ++ type: object ++ spec: ++ type: object ++ required: ++ - deprecatedField ++ - name ++ properties: ++ deprecatedField: ++ type: string ++ name: ++ type: string + status: + acceptedNames: + kind: "" +make[2]: *** [verify-codegen-crds] Error 1 +make update-codegen-crds +Using existing controller-gen from "_output/tools/src/sigs.k8s.io/controller-tools/controller-gen" +Using existing yq from "_output/tools/bin/yq" +'_output/tools/src/sigs.k8s.io/controller-tools/controller-gen' \ + schemapatch:manifests="./manifests" \ + paths="./pkg/apis/v1;./pkg/apis/v1beta1" \ + output:dir="./manifests" +cp -n ./manifests/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch './manifests/' || true # FIXME: centos +_output/tools/bin/yq m -i './manifests/operator.openshift.io_myoperatorresources.crd.yaml' './manifests/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch' +make verify-codegen-crds +Using existing controller-gen from "_output/tools/src/sigs.k8s.io/controller-tools/controller-gen" +Using existing yq from "_output/tools/bin/yq" +'_output/tools/src/sigs.k8s.io/controller-tools/controller-gen' \ + schemapatch:manifests="./manifests" \ + paths="./pkg/apis/v1;./pkg/apis/v1beta1" \ +! 
diff -Naup ./testing/manifests/initial/ ./manifests/ 2>/dev/null +diff -Naup ./testing/manifests/initial/operator.openshift.io_myoperatorresources.crd.yaml ./manifests/operator.openshift.io_myoperatorresources.crd.yaml +--- ./testing/manifests/initial/operator.openshift.io_myoperatorresources.crd.yaml ++++ ./manifests/operator.openshift.io_myoperatorresources.crd.yaml +@@ -9,6 +9,11 @@ spec: + kind: MyOperatorResource + plural: myoperatorresources + scope: "" ++ validation: ++ openAPIV3Schema: ++ properties: ++ apiVersion: ++ pattern: ^(test|TEST)$ + status: + acceptedNames: + kind: "" +diff -Naup ./testing/manifests/initial/operator.openshift.io_myotheroperatorresources.crd.yaml ./manifests/operator.openshift.io_myotheroperatorresources.crd.yaml +--- ./testing/manifests/initial/operator.openshift.io_myotheroperatorresources.crd.yaml ++++ ./manifests/operator.openshift.io_myotheroperatorresources.crd.yaml +@@ -11,9 +11,39 @@ spec: + scope: "" + version: v1beta1 + versions: +- - name: v1beta1 +- served: true +- storage: true ++ - name: v1beta1 ++ served: true ++ storage: true ++ "validation": ++ "openAPIV3Schema": ++ description: MyOtherOperatorResource is an example operator configuration type ++ type: object ++ required: ++ - metadata ++ - spec ++ properties: ++ apiVersion: ++ description: 'APIVersion defines the versioned schema of this representation ++ of an object. Servers should convert recognized schemas to the latest ++ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' ++ type: string ++ kind: ++ description: 'Kind is a string value representing the REST resource this ++ object represents. Servers may infer this from the endpoint the client ++ submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' ++ type: string ++ metadata: ++ type: object ++ spec: ++ type: object ++ required: ++ - deprecatedField ++ - name ++ properties: ++ deprecatedField: ++ type: string ++ name: ++ type: string + status: + acceptedNames: + kind: "" +diff -Naup ./testing/manifests/updated/ ./manifests/ 2>/dev/null +make clean +rm -f oc openshift +rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +if [ -d '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod'; fi +rm -f -r '_output/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod' +if [ -d '_output/tools/src/sigs.k8s.io/controller-tools' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/src/sigs.k8s.io/controller-tools'; fi +if [ -d '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod'; fi +rm -f '_output/tools/bin/yq' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f -r '_output/bin' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +[[ ! 
-d ./_output/ ]] || (ls -l ./_output/ && false) +make clean +rm -f oc openshift +rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +if [ -d '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod'; fi +rm -f -r '_output/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod' +if [ -d '_output/tools/src/sigs.k8s.io/controller-tools' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/src/sigs.k8s.io/controller-tools'; fi +if [ -d '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/tools/src/pkg/mod'; fi +rm -f '_output/tools/bin/yq' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f -r '_output/bin' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myoperatorresources.crd.yaml b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myoperatorresources.crd.yaml new file mode 100644 index 000000000..c1a071125 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myoperatorresources.crd.yaml @@ -0,0 +1,22 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: myoperatorresources.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: MyOperatorResource + plural: myoperatorresources + scope: "" + validation: + openAPIV3Schema: + properties: + apiVersion: + pattern: ^(test|TEST)$ +status: + acceptedNames: + kind: "" 
+ plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch new file mode 100644 index 000000000..b9f37c6e2 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch @@ -0,0 +1,6 @@ +spec: + validation: + openAPIV3Schema: + properties: + apiVersion: + pattern: "^(test|TEST)$" diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myotheroperatorresources.crd.yaml b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myotheroperatorresources.crd.yaml new file mode 100644 index 000000000..e68fce424 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myotheroperatorresources.crd.yaml @@ -0,0 +1,52 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: myotheroperatorresources.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: MyOtherOperatorResource + plural: myotheroperatorresources + scope: "" + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true + "validation": + "openAPIV3Schema": + description: MyOtherOperatorResource is an example operator configuration type + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + required: + - deprecatedField + - name + properties: + deprecatedField: + type: string + name: + type: string +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/ocp.spec b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/ocp.spec new file mode 100644 index 000000000..fc4117e0b --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/ocp.spec @@ -0,0 +1,47 @@ +#debuginfo not supported with Go +%global debug_package %{nil} +# modifying the Go binaries breaks the DWARF debugging +%global __os_install_post %{_rpmconfigdir}/brp-compress + +%global golang_version 1.12 +%global product_name OpenShift + +%{!?version: %global version 0.0.1} +%{!?release: %global release 1} + +Name: openshift +Version: %{version} +Release: %{release}%{dist} +Summary: OpenShift client binaries +License: ASL 2.0 +URL: https://%{go_package} + +# If go_arches not defined fall through to implicit golang archs +%if 0%{?go_arches:1} +ExclusiveArch: %{go_arches} +%else +ExclusiveArch: x86_64 aarch64 ppc64le s390x +%endif + +#BuildRequires: bsdtar +BuildRequires: golang >= %{golang_version} + +%description +%{summary} + +%prep + +%build +make build + +%install +install -d 
%{buildroot}%{_bindir} + +install -p -m 755 oc %{buildroot}%{_bindir}/oc +install -p -m 755 openshift %{buildroot}%{_bindir}/openshift + +%files +%{_bindir}/oc +%{_bindir}/openshift + +%changelog diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1/doc.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1/doc.go new file mode 100644 index 000000000..fc32adfc7 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1/doc.go @@ -0,0 +1,5 @@ +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=operator.openshift.io +package v1 diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1/register.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1/register.go new file mode 100644 index 000000000..8b8850b14 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1/register.go @@ -0,0 +1,39 @@ +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "operator.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but 
it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func addKnownTypes(scheme *runtime.Scheme) error { + metav1.AddToGroupVersion(scheme, GroupVersion) + + scheme.AddKnownTypes(GroupVersion, + &MyOperatorResource{}, + ) + + return nil +} diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1/types.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1/types.go new file mode 100644 index 000000000..d5a512436 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1/types.go @@ -0,0 +1,25 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:storageversion + +// MyOperatorResource is an example operator configuration type +type MyOperatorResource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // +kubebuilder:validation:Required + // +required + Spec MyOperatorResourceSpec `json:"spec"` +} + +type MyOperatorResourceSpec struct { + Name string `json:"name"` +} diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1beta1/doc.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1beta1/doc.go new file mode 100644 index 000000000..ee7e1e723 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1beta1/doc.go @@ -0,0 +1,5 @@ +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=operator.openshift.io +package v1beta1 diff --git 
a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1beta1/register.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1beta1/register.go new file mode 100644 index 000000000..3ee83634c --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1beta1/register.go @@ -0,0 +1,39 @@ +package v1beta1 + +import ( + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "operator.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func addKnownTypes(scheme *runtime.Scheme) error { + metav1.AddToGroupVersion(scheme, GroupVersion) + + scheme.AddKnownTypes(GroupVersion, + &MyOtherOperatorResource{}, + ) + + return nil +} diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1beta1/types.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1beta1/types.go new file mode 100644 index 000000000..5c1787309 --- /dev/null +++ 
b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1beta1/types.go @@ -0,0 +1,25 @@ +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// MyOtherOperatorResource is an example operator configuration type +type MyOtherOperatorResource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // +kubebuilder:validation:Required + // +required + Spec MyOtherOperatorResourceSpec `json:"spec"` +} + +type MyOtherOperatorResourceSpec struct { + Name string `json:"name"` + DeprecatedField string `json:"deprecatedField"` +} diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myoperatorresources.crd.yaml b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myoperatorresources.crd.yaml new file mode 100644 index 000000000..f91f1f63e --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myoperatorresources.crd.yaml @@ -0,0 +1,17 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: myoperatorresources.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: MyOperatorResource + plural: myoperatorresources + scope: "" +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch 
b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch new file mode 100644 index 000000000..b9f37c6e2 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch @@ -0,0 +1,6 @@ +spec: + validation: + openAPIV3Schema: + properties: + apiVersion: + pattern: "^(test|TEST)$" diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myotheroperatorresources.crd.yaml b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myotheroperatorresources.crd.yaml new file mode 100644 index 000000000..622a5279a --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myotheroperatorresources.crd.yaml @@ -0,0 +1,22 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: myotheroperatorresources.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: MyOtherOperatorResource + plural: myotheroperatorresources + scope: "" + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myoperatorresources.crd.yaml b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myoperatorresources.crd.yaml new file mode 
100644 index 000000000..c1a071125 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myoperatorresources.crd.yaml @@ -0,0 +1,22 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: myoperatorresources.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: MyOperatorResource + plural: myoperatorresources + scope: "" + validation: + openAPIV3Schema: + properties: + apiVersion: + pattern: ^(test|TEST)$ +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch new file mode 100644 index 000000000..b9f37c6e2 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch @@ -0,0 +1,6 @@ +spec: + validation: + openAPIV3Schema: + properties: + apiVersion: + pattern: "^(test|TEST)$" diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myotheroperatorresources.crd.yaml b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myotheroperatorresources.crd.yaml new file mode 100644 index 000000000..e68fce424 --- /dev/null +++ 
b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myotheroperatorresources.crd.yaml @@ -0,0 +1,52 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: myotheroperatorresources.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: MyOtherOperatorResource + plural: myotheroperatorresources + scope: "" + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true + "validation": + "openAPIV3Schema": + description: MyOtherOperatorResource is an example operator configuration type + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + required: + - deprecatedField + - name + properties: + deprecatedField: + type: string + name: + type: string +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/doc.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/doc.go new file mode 100644 index 000000000..66ba5512e --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/doc.go @@ -0,0 +1,3 @@ +// required for gomod to pull in packages. + +package alpha_build_machinery diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/golang.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/golang.mk index 3a6d2bd3b..2184f1748 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/golang.mk +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/golang.mk @@ -1,9 +1,12 @@ GO ?=go GOPATH ?=$(shell $(GO) env GOPATH) -gopath_list :=$(subst :, ,$(strip $(GOPATH))) -# Use every path in GOPATH to try to remove it as a prefix of current dir to determine the package name. -# If the prefix is not removed on subtitution, filter-out unchanged paths. -GO_PACKAGE ?=$(strip $(filter-out $(abspath .),$(foreach p,$(gopath_list),$(patsubst $(p)/src/%,%,$(abspath .))))) +GO_PACKAGE ?=$(shell $(GO) list -e -f '{{ .ImportPath }}' . || echo 'no_package_detected') + +GOOS ?=$(shell $(GO) env GOOS) +GOHOSTOS ?=$(shell $(GO) env GOHOSTOS) +GOARCH ?=$(shell $(GO) env GOARCH) +GOHOSTARCH ?=$(shell $(GO) env GOHOSTARCH) +GOEXE ?=$(shell $(GO) env GOEXE) GOFMT ?=gofmt GOFMT_FLAGS ?=-s -l @@ -17,6 +20,7 @@ GO_BUILD_PACKAGES ?=./cmd/... 
GO_BUILD_PACKAGES_EXPANDED ?=$(shell $(GO) list $(GO_BUILD_PACKAGES)) go_build_binaries =$(notdir $(GO_BUILD_PACKAGES_EXPANDED)) GO_BUILD_FLAGS ?= +GO_BUILD_BINDIR ?= GO_TEST_FLAGS ?=-race diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/tmp.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/tmp.mk new file mode 100644 index 000000000..a0fb65535 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/tmp.mk @@ -0,0 +1,2 @@ +PERMANENT_TMP :=_output +PERMANENT_TMP_GOPATH :=$(PERMANENT_TMP)/tools diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.example.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.example.mk index 2f0326a9c..7e6ff98d5 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.example.mk +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.example.mk @@ -22,12 +22,12 @@ CODEGEN_GROUPS_VERSION :=openshiftapiserver:v1alpha1 # $ make -n --print-data-base | grep ^CODEGEN # This will call a macro called "build-image" which will generate image specific targets based on the parameters: -# $0 - macro name -# $1 - target suffix -# $2 - Dockerfile path -# $3 - context directory for image build +# $1 - target name +# $2 - image ref +# $3 - Dockerfile path +# $4 - context # It will generate target "image-$(1)" for builing the image an binding it as a prerequisite to target "images". -$(call build-image,origin-cluster-openshift-apiserver-operator,./Dockerfile,.) +$(call build-image,ocp-openshift-apiserver-operator,registry.svc.ci.openshift.org/ocp/4.2:openshift-apiserver-operator,./Dockerfile.rhel,.) 
# This will call a macro called "add-bindata" which will generate bindata specific targets based on the parameters: # $0 - macro name diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.example.mk.help.log b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.example.mk.help.log index 3645ce965..a1489d212 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.example.mk.help.log +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.example.mk.help.log @@ -4,7 +4,7 @@ build clean clean-binaries help -image-origin-cluster-openshift-apiserver-operator +image-ocp-openshift-apiserver-operator images test test-unit diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/doc.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/doc.go new file mode 100644 index 000000000..66ba5512e --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/doc.go @@ -0,0 +1,3 @@ +// required for gomod to pull in packages. 
+ +package alpha_build_machinery diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/build.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/build.mk index 581e2ccc4..9a71cb793 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/build.mk +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/build.mk @@ -1,7 +1,10 @@ self_dir :=$(dir $(lastword $(MAKEFILE_LIST))) define build-package - $(strip $(GO) build $(GO_BUILD_FLAGS) $(GO_LD_FLAGS) $(1)) + $(if $(GO_BUILD_BINDIR),mkdir -p '$(GO_BUILD_BINDIR)',) + $(strip $(GO) build $(GO_BUILD_FLAGS) $(GO_LD_FLAGS) \ + $(if $(GO_BUILD_BINDIR),-o '$(GO_BUILD_BINDIR)/$(notdir $(1))$(GOEXE)',) \ + $(1)) endef @@ -12,6 +15,10 @@ build: clean-binaries: $(RM) $(go_build_binaries) +.PHONY: clean-binaries + +clean: clean-binaries +.PHONY: clean # We need to be careful to expand all the paths before any include is done # or self_dir could be modified for the next include by the included file. diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/doc.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/doc.go new file mode 100644 index 000000000..66ba5512e --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/doc.go @@ -0,0 +1,3 @@ +// required for gomod to pull in packages. 
+ +package alpha_build_machinery diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/controller-gen.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/controller-gen.mk new file mode 100644 index 000000000..fd0ff401e --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/controller-gen.mk @@ -0,0 +1,38 @@ +self_dir :=$(dir $(lastword $(MAKEFILE_LIST))) + +CONTROLLER_GEN_VERSION ?=v0.2.1 +CONTROLLER_GEN_TEMP ?=$(PERMANENT_TMP_GOPATH)/src/sigs.k8s.io/controller-tools +controller_gen_gopath =$(shell realpath -m $(CONTROLLER_GEN_TEMP)/../..) +CONTROLLER_GEN ?=$(CONTROLLER_GEN_TEMP)/controller-gen + +ensure-controller-gen: +ifeq "" "$(wildcard $(CONTROLLER_GEN))" + $(info Installing controller-gen into "$(CONTROLLER_GEN)") + mkdir -p '$(CONTROLLER_GEN_TEMP)' + git clone -b '$(CONTROLLER_GEN_VERSION)' --single-branch --depth=1 https://github.com/kubernetes-sigs/controller-tools.git '$(CONTROLLER_GEN_TEMP)' + @echo '$(CONTROLLER_GEN_TEMP)/../..' 
+ cd '$(CONTROLLER_GEN_TEMP)' && export GO111MODULE=on GOPATH='$(controller_gen_gopath)' && $(GO) mod vendor 2>/dev/null && $(GO) build -mod=vendor ./cmd/controller-gen +else + $(info Using existing controller-gen from "$(CONTROLLER_GEN)") +endif +.PHONY: ensure-controller-gen + +clean-controller-gen: + if [ -d '$(controller_gen_gopath)/pkg/mod' ]; then chmod +w -R '$(controller_gen_gopath)/pkg/mod'; fi + $(RM) -r '$(CONTROLLER_GEN_TEMP)' '$(controller_gen_gopath)/pkg/mod' + @mkdir -p '$(CONTROLLER_GEN_TEMP)' # to make sure we can do the next step and to avoid using '/*' wildcard on the line above which could go crazy on wrong substitution + if [ -d '$(CONTROLLER_GEN_TEMP)' ]; then rmdir --ignore-fail-on-non-empty -p '$(CONTROLLER_GEN_TEMP)'; fi + @mkdir -p '$(controller_gen_gopath)/pkg/mod' # to make sure we can do the next step and to avoid using '/*' wildcard on the line above which could go crazy on wrong substitution + if [ -d '$(controller_gen_gopath)/pkg/mod' ]; then rmdir --ignore-fail-on-non-empty -p '$(controller_gen_gopath)/pkg/mod'; fi +.PHONY: clean-controller-gen + +clean: clean-controller-gen + + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to user self_dir before it could be modified. 
+include $(addprefix $(self_dir), \ + ../../lib/golang.mk \ + ../../lib/tmp.mk \ +) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/crd-schema-gen.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/crd-schema-gen.mk new file mode 100644 index 000000000..45a1e44ec --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/crd-schema-gen.mk @@ -0,0 +1,59 @@ +self_dir :=$(dir $(lastword $(MAKEFILE_LIST))) + +CRD_SCHEMA_GEN_APIS ?=$(error CRD_SCHEMA_GEN_APIS is required) +CRD_SCHEMA_GEN_MANIFESTS ?=./manifests +CRD_SCHEMA_GEN_OUTPUT ?=./manifests + +crd_patches =$(subst $(CRD_SCHEMA_GEN_MANIFESTS),$(CRD_SCHEMA_GEN_OUTPUT),$(wildcard $(CRD_SCHEMA_GEN_MANIFESTS)/*.crd.yaml-merge-patch)) + +# $1 - crd file +# $2 - patch file +define patch-crd + $(YQ) m -i '$(1)' '$(2)' + +endef + +empty := +update-codegen-crds: ensure-controller-gen ensure-yq + '$(CONTROLLER_GEN)' \ + schemapatch:manifests="$(CRD_SCHEMA_GEN_MANIFESTS)" \ + paths="$(subst $(empty) ,;,$(CRD_SCHEMA_GEN_APIS))" \ + output:dir="$(CRD_SCHEMA_GEN_OUTPUT)" + cp -n $(wildcard $(CRD_SCHEMA_GEN_MANIFESTS)/*.crd.yaml-merge-patch) '$(CRD_SCHEMA_GEN_OUTPUT)/' || true # FIXME: centos + $(foreach p,$(crd_patches),$(call patch-crd,$(basename $(p)).yaml,$(p))) +.PHONY: update-codegen-crds + +update-generated: update-codegen-crds +.PHONY: update-generated + +update: update-generated +.PHONY: update + +# $1 - manifest (actual) crd +# $2 - temp crd +define diff-crd + diff -Naup $(1) $(2) + +endef + +verify-codegen-crds: CRD_SCHEMA_GEN_OUTPUT :=$(shell mktemp -d) +verify-codegen-crds: update-codegen-crds + $(foreach p,$(wildcard $(CRD_SCHEMA_GEN_MANIFESTS)/*.crd.yaml),$(call diff-crd,$(p),$(subst $(CRD_SCHEMA_GEN_MANIFESTS),$(CRD_SCHEMA_GEN_OUTPUT),$(p)))) +.PHONY: verify-codegen-crds + +verify-generated: verify-codegen-crds +.PHONY: verify-generated + +verify: verify-generated +.PHONY: verify + + 
+# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to user self_dir before it could be modified. +include $(addprefix $(self_dir), \ + ../../lib/golang.mk \ + ../../lib/tmp.mk \ + ../../targets/openshift/controller-gen.mk \ + ../../targets/openshift/yq.mk \ +) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/doc.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/doc.go new file mode 100644 index 000000000..66ba5512e --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/doc.go @@ -0,0 +1,3 @@ +// required for gomod to pull in packages. + +package alpha_build_machinery diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/images.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/images.mk index b48741a73..00e76ac26 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/images.mk +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/images.mk @@ -1,19 +1,23 @@ -IMAGE_REGISTRY ?= -IMAGE_ORG ?=openshift -IMAGE_TAG ?=latest - - # IMAGE_BUILD_EXTRA_FLAGS lets you add extra flags for imagebuilder # e.g. 
to mount secrets and repo information into base image like: # make images IMAGE_BUILD_EXTRA_FLAGS='-mount ~/projects/origin-repos/4.2/:/etc/yum.repos.d/' +IMAGE_BUILD_DEFAULT_FLAGS ?=--allow-pull IMAGE_BUILD_EXTRA_FLAGS ?= -# $1 - image name -# $2 - Dockerfile path -# $3 - context +# $1 - target name +# $2 - image ref +# $3 - Dockerfile path +# $4 - context define build-image-internal image-$(1): - $(strip imagebuilder --allow-pull $(IMAGE_BUILD_EXTRA_FLAGS) -f $(2) -t $(addsuffix /,$(IMAGE_REGISTRY))$(addsuffix /,$(IMAGE_ORG))$(1)$(addprefix :,$(IMAGE_TAG)) $(3)) + $(strip \ + imagebuilder \ + $(IMAGE_BUILD_DEFAULT_FLAGS) \ + -t $(2) + -f $(3) \ + $(IMAGE_BUILD_EXTRA_FLAGS) \ + $(4) \ + ) .PHONY: image-$(1) images: image-$(1) @@ -21,5 +25,5 @@ images: image-$(1) endef define build-image -$(eval $(call build-image-internal,$(1),$(2),$(3))) +$(eval $(call build-image-internal,$(1),$(2),$(3),$(4))) endef diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/operator/doc.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/operator/doc.go new file mode 100644 index 000000000..66ba5512e --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/operator/doc.go @@ -0,0 +1,3 @@ +// required for gomod to pull in packages. 
+ +package alpha_build_machinery diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/rpm.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/rpm.mk new file mode 100644 index 000000000..b235197c7 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/rpm.mk @@ -0,0 +1,41 @@ +RPM_OUTPUT_DIR ?=_output +RPM_TOPDIR ?=$(abspath ./) +RPM_BUILDDIR ?=$(RPM_TOPDIR) +RPM_BUILDROOT ?=$(RPM_TOPDIR) +RPM_SOURCEDIR ?=$(RPM_TOPDIR) +RPM_SPECDIR ?=$(RPM_TOPDIR) +RPM_RPMDIR ?=$(RPM_TOPDIR)/$(RPM_OUTPUT_DIR)/rpms +RPM_SRCRPMDIR ?=$(RPM_TOPDIR)/$(RPM_OUTPUT_DIR)/srpms + +RPM_SPECFILES ?=$(wildcard *.spec) +RPM_BUILDFLAGS ?=-ba +RPM_EXTRAFLAGS ?= + +rpm-build: + $(strip \ + rpmbuild $(RPM_BUILDFLAGS) \ + --define "_topdir $(RPM_TOPDIR)" \ + --define "_builddir $(RPM_BUILDDIR)" \ + --define "_buildrootdir $(RPM_BUILDROOT)" \ + --define "_rpmdir $(RPM_RPMDIR)" \ + --define "_srcrpmdir $(RPM_SRCRPMDIR)" \ + --define "_specdir $(RPM_SPECDIR)" \ + --define "_sourcedir $(RPM_SOURCEDIR)" \ + --define "go_package $(GO_PACKAGE)" \ + $(RPM_EXTRAFLAGS) \ + $(RPM_SPECFILES) \ + ) + +clean-rpms: + $(RM) -r '$(RPM_RPMDIR)' '$(RPM_SRCRPMDIR)' + if [ -d '$(RPM_OUTPUT_DIR)' ]; then rmdir --ignore-fail-on-non-empty '$(RPM_OUTPUT_DIR)'; fi +.PHONY: clean-rpms + +clean: clean-rpms + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to user self_dir before it could be modified. 
+include $(addprefix $(self_dir), \ + ../../lib/golang.mk \ +) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/yq.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/yq.mk new file mode 100644 index 000000000..7dd556d40 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/yq.mk @@ -0,0 +1,32 @@ +self_dir :=$(dir $(lastword $(MAKEFILE_LIST))) + +YQ ?=$(PERMANENT_TMP_GOPATH)/bin/yq +yq_dir :=$(dir $(YQ)) + + +ensure-yq: +ifeq "" "$(wildcard $(YQ))" + $(info Installing yq into '$(YQ)') + mkdir -p '$(yq_dir)' + curl -s -f -L https://github.com/mikefarah/yq/releases/download/2.4.0/yq_$(GOHOSTOS)_$(GOHOSTARCH) -o '$(YQ)' + chmod +x '$(YQ)'; +else + $(info Using existing yq from "$(YQ)") +endif +.PHONY: ensure-yq + +clean-yq: + $(RM) '$(YQ)' + if [ -d '$(yq_dir)' ]; then rmdir --ignore-fail-on-non-empty -p '$(yq_dir)'; fi +.PHONY: clean-yq + +clean: clean-yq + + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to user self_dir before it could be modified. +include $(addprefix $(self_dir), \ + ../../lib/golang.mk \ + ../../lib/tmp.mk \ +) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/scripts/doc.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/scripts/doc.go new file mode 100644 index 000000000..66ba5512e --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/scripts/doc.go @@ -0,0 +1,3 @@ +// required for gomod to pull in packages. 
+ +package alpha_build_machinery diff --git a/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/generator/generator.go b/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/generator/generator.go deleted file mode 100755 index cb37958a2..000000000 --- a/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/generator/generator.go +++ /dev/null @@ -1,391 +0,0 @@ -package generator - -import ( - "flag" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "reflect" - "strings" - - "github.com/evanphx/json-patch" - "gopkg.in/yaml.v2" - - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer" - utilyaml "k8s.io/apimachinery/pkg/util/yaml" - crdgenerator "sigs.k8s.io/controller-tools/pkg/crd/generator" -) - -var ( - scheme = runtime.NewScheme() - codecs = serializer.NewCodecFactory(scheme) -) - -func init() { - v1beta1.AddToScheme(scheme) -} - -func Run() error { - apisDir := flag.String("apis-dir", "pkg/apis", "the (relative) path to the package with API definitions") - apis := flag.String("apis", "*", "the apis to generate from the apis-dir, in bash glob syntax") - manifestDir := flag.String("manifests-dir", "manifests", "the directory with existing CRD manifests") - outputDir := flag.String("output-dir", "", "optional directory to output the kubebuilder CRDs. 
By default a temporary directory is used.") - verifyOnly := flag.Bool("verify-only", false, "do not write files, only compare and return with return code 1 if dirty") - domain := flag.String("domain", "", "the domain appended to group names.") - repo := flag.String("repo", "", "the repository package name (optional).") - - flag.Parse() - - // load existing manifests from manifests/ dir - existing, err := crdsFromDirectory(*manifestDir) - if err != nil { - return err - } - - // create temp dir - pwd, err := os.Getwd() - if err != nil { - return err - } - tmpDir, err := ioutil.TempDir(pwd, "") - if err != nil { - return fmt.Errorf("error creating temp directory: %v\n", err) - } - defer os.RemoveAll(tmpDir) - relTmpDir := tmpDir[len(pwd)+1:] - - // find repo in GOPATH - sep := string([]rune{os.PathSeparator}) - GOPATH := strings.TrimRight(os.Getenv("GOPATH"), sep) - if len(*repo) == 0 && len(GOPATH) > 0 && strings.HasPrefix(pwd, filepath.Join(GOPATH, "src")+sep) { - *repo = pwd[len(filepath.Join(GOPATH, "src")+sep):] - fmt.Printf("Derived repo %q from GOPATH and working directory.\n", *repo) - } - - // validate params - if len(*repo) == 0 { - return fmt.Errorf("repo cannot be empty. 
Run crd-schema-gen in GOPATH or specify repo explicitly.") - } - if len(*domain) == 0 { - return fmt.Errorf("domain cannot be empty.") - } - - // copy APIs to temp dir - fmt.Printf("Copying vendor/github.com/openshift/api/config to temporary pkg/apis...\n") - if err := os.MkdirAll(filepath.Join(tmpDir, "pkg/apis"), 0755); err != nil { - return err - } - cmd := fmt.Sprintf("cp -av \"%s/\"%s \"%s\"", *apisDir, *apis, filepath.Join(tmpDir, "pkg/apis")) - out, err := exec.Command("/bin/bash", "-c", cmd).CombinedOutput() - if err != nil { - fmt.Print(string(out)) - return err - } - if err := ioutil.WriteFile(filepath.Join(tmpDir, "PROJECT"), []byte(fmt.Sprintf(` -domain: %s -repo: %s/%s -`, *domain, *repo, relTmpDir)), 0644); err != nil { - return err - } - - // generate kubebuilder KindGroupYaml manifests into temp dir - g := crdgenerator.Generator{ - RootPath: tmpDir, - OutputDir: filepath.Join(tmpDir, "manifests"), - SkipMapValidation: true, - } - - if len(*outputDir) != 0 { - g.OutputDir = *outputDir - fmt.Printf("Creating kubebuilder manifests %q ...\n", *outputDir) - } else { - fmt.Printf("Creating kubebuilder manifests ...\n") - } - - if err := g.ValidateAndInitFields(); err != nil { - return err - } - if err := g.Do(); err != nil { - return err - } - - // the generator changes the directory for some reason - os.Chdir(pwd) - - // load kubebuilder manifests from temp dir - fromKubebuilder, err := crdsFromDirectory(g.OutputDir) - if err != nil { - return err - } - - existingFileNames := map[string]string{} - for fn, crd := range existing { - existingFileNames[crd.KindGroup] = fn - } - - // update existing manifests with validations of kubebuilder output - dirty := false - noneFound := true - for fn, withValidation := range fromKubebuilder { - existingFileName, ok := existingFileNames[withValidation.KindGroup] - if !ok { - continue - } - noneFound = false - - crd := existing[existingFileName] - - // TODO: support multiple versions - validation, _, err := 
nested(withValidation.Yaml, "spec", "validation") - if err != nil { - return fmt.Errorf("failed to access spec.validation in %s: %v", fn, err) - } - - // yaml merge patch exists? - patchFileName := existingFileName + "-merge-patch" - if _, err := os.Stat(patchFileName); err == nil { - fmt.Printf("Applying patch %q ...\n", patchFileName) - - yamlPatch, err := ioutil.ReadFile(patchFileName) - if err != nil { - return fmt.Errorf("failed to read yaml-merge-patch %q: %v", patchFileName, err) - } - var patch yaml.MapSlice - if err := yaml.Unmarshal(yamlPatch, &patch); err != nil { - return fmt.Errorf("failed to unmarshal yaml merge patch %q: %v", patchFileName, err) - } - if !onlyHasNoneOr(patch, "spec", "validation") { - return fmt.Errorf("patch in %q can only have spec.validation", patchFileName) - } - validationPatch, _, err := nested(patch, "spec", "validation") - if err != nil { - return fmt.Errorf("failed to get spec.validation from %q: %v", patchFileName, err) - } - if yamlPatch, err = yaml.Marshal(validationPatch); err != nil { - return fmt.Errorf("failed to marshal spec.validation of %q: %v", patchFileName, err) - } - jsonPatch, err := utilyaml.ToJSON(yamlPatch) - if err != nil { - return fmt.Errorf("failed to convert yaml of %q to json: %v", patchFileName, err) - } - yamlValidation, err := yaml.Marshal(validation) - if err != nil { - return fmt.Errorf("failed to marshal generated validation schema of %q: %v", existingFileName, err) - } - jsonValidation, err := utilyaml.ToJSON(yamlValidation) - if err != nil { - return fmt.Errorf("failed to convert yaml validation of %q to json: %v", existingFileName, err) - } - if jsonValidation, err = jsonpatch.MergePatch(jsonValidation, jsonPatch); err != nil { - return fmt.Errorf("failed to patch %q with %q: %v", existingFileName, patchFileName, err) - } - if err := yaml.Unmarshal(jsonValidation, &validation); err != nil { - return fmt.Errorf("failed to unmarshal patched validation schema of %q: %v", existingFileName, err) - 
} - } - - if validation == nil { - continue - } - - updated, err := set(crd.Yaml, validation, "spec", "validation") - if err != nil { - return fmt.Errorf("failed to set spec.validation in %s: %v", existingFileName, err) - } - if reflect.DeepEqual(updated, crd.Yaml) { - fmt.Printf("Validation of %s in %s did not change.\n", crd.KindGroup, existingFileName) - continue - } - - bs, err := yaml.Marshal(updated) - if err != nil { - return err - } - - // write updated file, either to old location, or to temp dir in verify mode - newFn := existingFileName - if *verifyOnly { - newFn = filepath.Join(tmpDir, filepath.Base(existingFileName)) - } else { - fmt.Printf("Updating validation of %s in %s.\n", crd.KindGroup, existingFileName) - } - if err := ioutil.WriteFile(newFn, bs, 0644); err != nil { - return err - } - - // compare old and new file - if *verifyOnly { - out, err := exec.Command("diff", "-u", existingFileName, newFn).CombinedOutput() - if err != nil { - fmt.Println(string(out)) - dirty = true - } - } - } - - if noneFound { - fmt.Printf("None of the found API types has a corresponding CRD manifest. These API types where found:\n\n") - for _, withValidation := range fromKubebuilder { - fmt.Printf(" %s\n", withValidation.KindGroup) - } - fmt.Printf("These CRDs were found:\n\n") - for existingKindGroup := range existingFileNames { - fmt.Printf(" %s\n", existingKindGroup) - } - return fmt.Errorf("no API type for found CRD manifests") - } - - if *verifyOnly && dirty { - return fmt.Errorf("verification failed") - } - - return nil -} - -func nested(x interface{}, pth ...string) (interface{}, bool, error) { - if len(pth) == 0 { - return x, true, nil - } - m, ok := x.(yaml.MapSlice) - if !ok { - return nil, false, fmt.Errorf("%s is not an object, but %T", strings.Join(pth, "."), x) - } - for _, item := range m { - s, ok := item.Key.(string) - if !ok { - continue - } - if s == pth[0] { - ret, found, err := nested(item.Value, pth[1:]...) 
- if err != nil { - return ret, found, fmt.Errorf("%s.%s", pth[0], err) - } - return ret, found, nil - } - } - return nil, false, nil -} - -func set(x interface{}, v interface{}, pth ...string) (interface{}, error) { - if len(pth) == 0 { - return v, nil - } - - if x == nil { - result, err := set(nil, v, pth[1:]...) - if err != nil { - return nil, fmt.Errorf("%s.%s", pth[0], err) - } - return yaml.MapSlice{yaml.MapItem{Key: pth[0], Value: result}}, nil - } - - m, ok := x.(yaml.MapSlice) - if !ok { - return nil, fmt.Errorf("%s is not an object", strings.Join(pth, ".")) - } - - foundAt := -1 - for i, item := range m { - s, ok := item.Key.(string) - if !ok { - continue - } - if s == pth[0] { - foundAt = i - break - } - } - - if foundAt < 0 { - ret := make(yaml.MapSlice, len(m), len(m)+1) - copy(ret, m) - result, err := set(nil, v, pth[1:]...) - if err != nil { - return nil, fmt.Errorf("%s.%s", pth[0], err) - } - return append(ret, yaml.MapItem{Key: pth[0], Value: result}), nil - } - - result, err := set(m[foundAt].Value, v, pth[1:]...) - ret := make(yaml.MapSlice, len(m)) - copy(ret, m) - if err != nil { - return nil, fmt.Errorf("%s.%s", pth[0], err) - } - ret[foundAt].Value = result - return ret, nil -} - -// onlyHasNoneOr checks for existance of the given path, but nothing next to it is allowed -func onlyHasNoneOr(x interface{}, pth ...string) bool { - if len(pth) == 0 { - return true - } - m, ok := x.(yaml.MapSlice) - if !ok { - return false - } - switch len(m) { - case 0: - return true - case 1: - s, ok := m[0].Key.(string) - if !ok || s != pth[0] { - return false - } - return onlyHasNoneOr(m[0].Value, pth[1:]...) 
- default: - return false - } -} - -type KindGroupYaml struct { - KindGroup string - Yaml interface{} -} - -// crdsFromDirectory returns CRDs by file path -func crdsFromDirectory(dir string) (map[string]KindGroupYaml, error) { - ret := map[string]KindGroupYaml{} - infos, err := ioutil.ReadDir(dir) - if err != nil { - return nil, err - } - for _, info := range infos { - if info.IsDir() { - continue - } - if !strings.HasSuffix(info.Name(), ".yaml") { - continue - } - bs, err := ioutil.ReadFile(filepath.Join(dir, info.Name())) - if err != nil { - return nil, err - } - - obj, _, err := codecs.UniversalDeserializer().Decode(bs, nil, nil) - if err != nil { - continue - } - crd, ok := obj.(*v1beta1.CustomResourceDefinition) - if !ok { - continue - } - - var y yaml.MapSlice - if err := yaml.Unmarshal(bs, &y); err != nil { - fmt.Printf("Warning: failed to unmarshal %q, skipping\n", info.Name()) - continue - } - key := crd.Spec.Names.Kind + "." + crd.Spec.Group - ret[filepath.Join(dir, info.Name())] = KindGroupYaml{key, y} - } - if err != nil { - return nil, err - } - return ret, err -} diff --git a/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/main.go b/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/main.go deleted file mode 100755 index 228a800cc..000000000 --- a/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/main.go +++ /dev/null @@ -1,15 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/openshift/library-go/cmd/crd-schema-gen/generator" -) - -func main() { - if err := generator.Run(); err != nil { - fmt.Println(err) - os.Exit(1) - } -} diff --git a/vendor/github.com/openshift/library-go/glide.lock b/vendor/github.com/openshift/library-go/glide.lock index 2c67dcfaf..f448210d4 100644 --- a/vendor/github.com/openshift/library-go/glide.lock +++ b/vendor/github.com/openshift/library-go/glide.lock @@ -1,5 +1,5 @@ -hash: 14182a87b2489ea8cd2db705bf09aad592752d9c9f7cc6cc840a76bcb179a2e8 -updated: 2019-07-14T22:28:29.452706+02:00 
+hash: 4812b0cc8114a9f73471b786f4f760b3c363b7fa56a0f8fd83b56d263bc2e616 +updated: 2019-08-15T14:58:38.31621537-04:00 imports: - name: bitbucket.org/ww/goautoneg version: 75cd24fc2f2c2a2088577d12123ddee5f54e0675 @@ -17,7 +17,7 @@ imports: - name: github.com/certifi/gocertifi version: ee1a9a0726d2ae45f54118cac878c990d4016ded - name: github.com/containerd/continuity - version: aaeac12a7ffcd198ae25440a9dff125c2e2703a7 + version: f2a389ac0a02ce21c09edd7344677a601970f41c subpackages: - pathdriver - name: github.com/coreos/etcd @@ -159,7 +159,7 @@ imports: - name: github.com/docker/go-units version: 519db1ee28dcc9fd2474ae59fca29a810482bfb1 - name: github.com/docker/libnetwork - version: 14f9d751adc2d51b38d14b4e14419b76466d3b94 + version: 7f13a5c99f4bb76a4122035d495984b6a09739bb subpackages: - ipamutils - name: github.com/docker/libtrust @@ -175,7 +175,7 @@ imports: - name: github.com/getsentry/raven-go version: c977f96e109525a5d8fa10a19165341f601f38b0 - name: github.com/ghodss/yaml - version: c7ce16629ff4cd059ed96ed06419dd3856fd3577 + version: 25d852aebe32c875e9c044af3eef9c7dc6bc777f - name: github.com/go-openapi/jsonpointer version: ef5f0afec364d3b9396b7b77b43dbe26bf1f8004 - name: github.com/go-openapi/jsonreference @@ -184,8 +184,6 @@ imports: version: 5bae59e25b21498baea7f9d46e9c147ec106a42e - name: github.com/go-openapi/swag version: 5899d5c5e619fda5fa86e14795a835f473ca284c -- name: github.com/gobuffalo/envy - version: 043cb4b8af871b49563291e32c66bb84378a60ac - name: github.com/gogo/protobuf version: 342cbe0a04158f6dcb03ca0079991a51a4248c02 subpackages: @@ -200,12 +198,10 @@ imports: - name: github.com/golang/protobuf version: b4deda0973fb4c70b50d226b1af49f3da59f5265 subpackages: - - jsonpb - proto - ptypes - ptypes/any - ptypes/duration - - ptypes/struct - ptypes/timestamp - name: github.com/gonum/blas version: f22b278b28ac9805aadd613a754a60c35b24ae69 @@ -251,7 +247,7 @@ imports: - compiler - extensions - name: github.com/gorilla/mux - version: 
d83b6ffe499a29cc05fc977988d0392851779620 + version: e67b3c02c7195c052acff13261f0c9fd1ba53011 - name: github.com/grpc-ecosystem/go-grpc-prometheus version: 2500245aa6110c562d17020fb31a2c133d737799 - name: github.com/hashicorp/golang-lru @@ -262,8 +258,6 @@ imports: version: 9316a62528ac99aaecb4e47eadd6dc8aa6533d58 - name: github.com/inconshreveable/mousetrap version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -- name: github.com/joho/godotenv - version: 5c0e6c6ab1a0a9ef0a8822cba3a05d62f7dad941 - name: github.com/json-iterator/go version: ab8a2e0c74be9d3be70b3184d9acc634935ded82 - name: github.com/jteeuwen/go-bindata @@ -274,14 +268,12 @@ imports: - buffer - jlexer - jwriter -- name: github.com/markbates/inflect - version: d582c680dc4d29c2279628ae00e743005bfcd4fe - name: github.com/matttproud/golang_protobuf_extensions version: c12348ce28de40eed0136aa2b644d0ee0650e56c subpackages: - pbutil - name: github.com/Microsoft/go-winio - version: 881e3d46423d592d11da9873ff6581dc577a1d0f + version: 6c72808b55902eae4c5943626030429ff20f3b63 subpackages: - pkg/guid - name: github.com/modern-go/concurrent @@ -302,12 +294,12 @@ imports: - specs-go - specs-go/v1 - name: github.com/opencontainers/runc - version: 6cccc1760d57d9e1bc856b96eeb7ee02b7b8101d + version: 2e94378464ae22b92e1335c200edb37ebc94a1b7 subpackages: - libcontainer/system - libcontainer/user - name: github.com/openshift/api - version: f15120709e0ac8de84e11616d8f0cac54e8f52e3 + version: a94e914914f4228d0bcba6fc8a22614c5f5e2dad subpackages: - apps - apps/v1 @@ -354,7 +346,7 @@ imports: - webconsole - webconsole/v1 - name: github.com/openshift/client-go - version: c44a8b61b9f46cd9e802384dfeda0bc9942db68a + version: 5a5508328169b8a6992ea4ef711add89ddce3c6d subpackages: - apps/clientset/versioned/scheme - apps/clientset/versioned/typed/apps/v1 @@ -411,20 +403,10 @@ imports: version: 8a290539e2e8629dbc4e6bad948158f790ec31f4 - name: github.com/PuerkitoBio/urlesc version: 5bd2802263f21d8788851d5305584c82a5c75d7e -- name: 
github.com/rogpeppe/go-internal - version: 6f68bf1e81f8552c7dbd47f3bc4371c2db0941a6 - subpackages: - - modfile - - module - - semver - name: github.com/sigma/go-inotify version: c87b6cf5033d2c6486046f045eeebdc3d910fd38 - name: github.com/sirupsen/logrus version: 89742aefa4b206dcf400792f3bd35b542998eb3b -- name: github.com/spf13/afero - version: 588a75ec4f32903aa5e39a2619ba6a4631e28424 - subpackages: - - mem - name: github.com/spf13/cobra version: c439c4fa093711d42e1b01acb1235b52004753c1 - name: github.com/spf13/pflag @@ -434,12 +416,22 @@ imports: subpackages: - bcrypt - blowfish + - cryptobyte + - cryptobyte/asn1 + - ed25519 + - ed25519/internal/edwards25519 + - internal/subtle + - nacl/secretbox + - poly1305 + - salsa20/salsa - ssh/terminal - name: golang.org/x/net version: 65e2d4e15006aab9813ff8769e768bbf4bb667a0 subpackages: - context - context/ctxhttp + - html + - html/atom - http/httpguts - http2 - http2/hpack @@ -478,10 +470,9 @@ imports: version: 2382e3994d48b1d22acc2c86bcad0a2aff028e32 subpackages: - container/intsets - - go/ast/astutil - imports - name: google.golang.org/appengine - version: b2f4a3cf3c67576a2ee09e1fe62656a5086ce880 + version: 54a98f90d1c46b7731eb8fb305d2a321c30ef610 subpackages: - internal - internal/base @@ -532,7 +523,7 @@ imports: - name: gopkg.in/natefinch/lumberjack.v2 version: 20b71e5b60d756d3d2f80def009790325acc2b23 - name: gopkg.in/yaml.v2 - version: 51d6538a90f86fe93ac480b35f37b2be17fef232 + version: 5420a8b6744d3b0345ab293f6fcba19c978f1183 - name: k8s.io/api version: 40a48860b5abbba9aa891b02b32da429b08d96a0 subpackages: @@ -737,6 +728,8 @@ imports: - discovery - discovery/fake - dynamic + - dynamic/dynamicinformer + - dynamic/dynamiclister - dynamic/fake - informers - informers/admissionregistration @@ -942,14 +935,6 @@ imports: subpackages: - cli/flag - logs -- name: k8s.io/gengo - version: e17681d19d3ac4837a019ece36c2a0ec31ffe985 - subpackages: - - args - - generator - - namer - - parser - - types - name: k8s.io/klog version: 
8e90cee79f823779174776412c13478955131846 - name: k8s.io/kube-aggregator @@ -973,18 +958,11 @@ imports: version: c2654d5206da6b7b6ace12841e8f359bb89b443c subpackages: - buffer + - diff + - field - integer + - pointer - trace -- name: sigs.k8s.io/controller-tools - version: 72ae52c08b9dd626cfb64ebef0fbf40ce667939b - repo: https://github.com/openshift/kubernetes-sigs-controller-tools - subpackages: - - pkg/crd/generator - - pkg/crd/util - - pkg/internal/codegen - - pkg/internal/codegen/parse - - pkg/internal/general - - pkg/util - name: sigs.k8s.io/structured-merge-diff version: e85c7b244fd2cc57bb829d73a061f93a441e63ce subpackages: diff --git a/vendor/github.com/openshift/library-go/glide.yaml b/vendor/github.com/openshift/library-go/glide.yaml index 920b7ca6e..6d08aa413 100644 --- a/vendor/github.com/openshift/library-go/glide.yaml +++ b/vendor/github.com/openshift/library-go/glide.yaml @@ -17,14 +17,6 @@ import: - package: github.com/openshift/client-go version: master -# crd-schema-gen - # TODO: we need to this to get nullable patch, but we will replace this with new repo soon. 
-- package: sigs.k8s.io/controller-tools - repo: https://github.com/openshift/kubernetes-sigs-controller-tools - version: origin-4.1-kubernetes-1.13.4 -- package: k8s.io/gengo - version: e17681d19d3ac4837a019ece36c2a0ec31ffe985 - # sig-master - needed for file observer - package: github.com/sigma/go-inotify version: c87b6cf5033d2c6486046f045eeebdc3d910fd38 diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/create_test.go b/vendor/github.com/openshift/library-go/pkg/assets/create/create_test.go index e9ce7e964..dad537533 100644 --- a/vendor/github.com/openshift/library-go/pkg/assets/create/create_test.go +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/create_test.go @@ -149,6 +149,25 @@ func TestCreate(t *testing.T) { testConfigMap.SetName("aggregator-client-ca") testConfigMap.SetNamespace("openshift-kube-apiserver") + testOperatorConfig := &unstructured.Unstructured{} + testOperatorConfig.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "kubeapiserver.operator.openshift.io", + Version: "v1alpha1", + Kind: "KubeAPIServerOperatorConfig", + }) + testOperatorConfig.SetName("instance") + + testOperatorConfigWithStatus := &unstructured.Unstructured{} + testOperatorConfigWithStatus.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "kubeapiserver.operator.openshift.io", + Version: "v1alpha1", + Kind: "KubeAPIServerOperatorConfig", + }) + testOperatorConfigWithStatus.SetName("instance") + testOperatorConfigStatusVal := make(map[string]interface{}) + testOperatorConfigStatusVal["initializedValue"] = "something before" + unstructured.SetNestedField(testOperatorConfigWithStatus.Object, testOperatorConfigStatusVal, "status") + tests := []struct { name string discovery []*restmapper.APIGroupResources @@ -165,7 +184,7 @@ func TestCreate(t *testing.T) { { name: "fail to create kube apiserver operator config", discovery: resourcesWithoutKubeAPIServer, - expectFailedCount: 1, + expectFailedCount: 2, expectError: true, expectReload: true, }, 
@@ -174,6 +193,38 @@ func TestCreate(t *testing.T) { discovery: resources, existingObjects: []runtime.Object{testConfigMap}, }, + { + name: "create all resources", + discovery: resources, + existingObjects: []runtime.Object{testOperatorConfig}, + evalActions: func(t *testing.T, actions []ktesting.Action) { + if got, exp := len(actions), 8; got != exp { + t.Errorf("expected %d actions, found %d", exp, got) + return + } + + ups, ok := actions[6].(ktesting.UpdateAction) + if !ok { + t.Errorf("expecting Update action for actions[5], got %T", actions[5]) + return + } + if got, exp := ups.GetSubresource(), "status"; got != exp { + t.Errorf("ecpecting the subresource to be %q, got %q", exp, got) + return + } + }, + }, + { + name: "create all resources", + discovery: resources, + existingObjects: []runtime.Object{testOperatorConfigWithStatus}, + evalActions: func(t *testing.T, actions []ktesting.Action) { + if got, exp := len(actions), 7; got != exp { + t.Errorf("expected %d actions, found %d", exp, got) + return + } + }, + }, } fakeScheme := runtime.NewScheme() @@ -230,7 +281,7 @@ func TestLoad(t *testing.T) { { name: "read all manifests", assetDir: "testdata", - expectedManifestCount: 5, + expectedManifestCount: 6, }, { name: "handle missing dir", diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/creater.go b/vendor/github.com/openshift/library-go/pkg/assets/create/creater.go index cec47ed26..cd5f6a749 100644 --- a/vendor/github.com/openshift/library-go/pkg/assets/create/creater.go +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/creater.go @@ -163,36 +163,61 @@ func create(ctx context.Context, manifests map[string]*unstructured.Unstructured continue } + var resource dynamic.ResourceInterface if mappings.Scope.Name() == meta.RESTScopeNameRoot { - _, err = client.Resource(mappings.Resource).Create(manifests[path], metav1.CreateOptions{}) + resource = client.Resource(mappings.Resource) } else { - _, err = 
client.Resource(mappings.Resource).Namespace(manifests[path].GetNamespace()).Create(manifests[path], metav1.CreateOptions{}) + resource = client.Resource(mappings.Resource).Namespace(manifests[path].GetNamespace()) } - resourceString := mappings.Resource.Resource + "." + mappings.Resource.Version + "." + mappings.Resource.Group + "/" + manifests[path].GetName() + " -n " + manifests[path].GetNamespace() + incluster, err := resource.Create(manifests[path], metav1.CreateOptions{}) + + if err == nil && options.Verbose { + fmt.Fprintf(options.StdErr, "Created %q %s\n", path, resourceString) + } + // Resource already exists means we already succeeded // This should never happen as we remove already created items from the manifest list, unless the resource existed beforehand. if kerrors.IsAlreadyExists(err) { if options.Verbose { fmt.Fprintf(options.StdErr, "Skipped %q %s as it already exists\n", path, resourceString) } - delete(manifests, path) - continue + incluster, err = resource.Get(manifests[path].GetName(), metav1.GetOptions{}) + if err != nil { + if options.Verbose { + fmt.Fprintf(options.StdErr, "Failed to get already existing %q %s: %v\n", path, resourceString, err) + } + errs[path] = fmt.Errorf("failed to get %s: %v", resourceString, err) + continue + } } if err != nil { if options.Verbose { fmt.Fprintf(options.StdErr, "Failed to create %q %s: %v\n", path, resourceString, err) } - errs[path] = fmt.Errorf("failed to create: %v", err) + errs[path] = fmt.Errorf("failed to create %s: %v", resourceString, err) continue } - if options.Verbose { - fmt.Fprintf(options.StdErr, "Created %q %s\n", path, resourceString) + if _, ok := manifests[path].Object["status"]; ok { + _, found := incluster.Object["status"] + if !found { + incluster.Object["status"] = manifests[path].Object["status"] + incluster, err = resource.UpdateStatus(incluster, metav1.UpdateOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + if options.Verbose { + fmt.Fprintf(options.StdErr, "Failed to 
update status for the %q %s: %v\n", path, resourceString, err) + } + errs[path] = fmt.Errorf("failed to update status for %s: %v", resourceString, err) + continue + } + if err == nil && options.Verbose { + fmt.Fprintf(options.StdErr, "Updated status for %q %s\n", path, resourceString) + } + } } - // Creation succeeded lets remove the manifest from the list to avoid creating it second time delete(manifests, path) } diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config-empty-status.yaml b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config-empty-status.yaml new file mode 100644 index 000000000..81133ceaa --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config-empty-status.yaml @@ -0,0 +1,7 @@ +apiVersion: kubeapiserver.operator.openshift.io/v1alpha1 +kind: KubeAPIServerOperatorConfig +metadata: + name: instance-empty-status +spec: + managementState: Managed +status: diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config.yaml b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config.yaml index fafd307b3..a946007c1 100644 --- a/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config.yaml +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config.yaml @@ -4,3 +4,5 @@ metadata: name: instance spec: managementState: Managed +status: + initializedValue: something diff --git a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/cmd.go b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/cmd.go index ff4f89792..2ce3bdef0 100644 --- a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/cmd.go +++ b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/cmd.go @@ -23,6 +23,7 @@ import ( operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" 
"github.com/openshift/library-go/pkg/config/configdefaults" + "github.com/openshift/library-go/pkg/controller/fileobserver" "github.com/openshift/library-go/pkg/crypto" "github.com/openshift/library-go/pkg/serviceability" @@ -88,7 +89,34 @@ func (c *ControllerCommandConfig) NewCommandWithContext(ctx context.Context) *co klog.Fatal(err) } - if err := c.StartController(shutdownCtx); err != nil { + ctx, terminate := context.WithCancel(shutdownCtx) + defer terminate() + + if len(c.basicFlags.TerminateOnFiles) > 0 { + // setup file observer to terminate when given files change + obs, err := fileobserver.NewObserver(10 * time.Second) + if err != nil { + klog.Fatal(err) + } + files := map[string][]byte{} + for _, fn := range c.basicFlags.TerminateOnFiles { + fileBytes, err := ioutil.ReadFile(fn) + if err != nil { + klog.Warningf("Unable to read initial content of %q: %v", fn, err) + continue // intentionally ignore errors + } + files[fn] = fileBytes + } + obs.AddReactor(func(filename string, action fileobserver.ActionType) error { + klog.Infof("exiting because %q changed", filename) + terminate() + return nil + }, files, c.basicFlags.TerminateOnFiles...) + + go obs.Run(shutdownHandler) + } + + if err := c.StartController(ctx); err != nil { klog.Fatal(err) } }, diff --git a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/flags.go b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/flags.go index 5cdb4190b..fe33b4351 100644 --- a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/flags.go +++ b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/flags.go @@ -22,6 +22,8 @@ type ControllerFlags struct { ConfigFile string // KubeConfigFile points to a kubeconfig file if you don't want to use the in cluster config KubeConfigFile string + // TerminateOnFiles is a list of files. If any of these changes, the process terminates. 
+ TerminateOnFiles []string } // NewControllerFlags returns flags with default values set @@ -43,6 +45,7 @@ func (f *ControllerFlags) AddFlags(cmd *cobra.Command) { cmd.MarkFlagFilename("config", "yaml", "yml") flags.StringVar(&f.KubeConfigFile, "kubeconfig", f.KubeConfigFile, "Location of the master configuration file to run from.") cmd.MarkFlagFilename("kubeconfig", "kubeconfig") + flags.StringArrayVar(&f.TerminateOnFiles, "terminate-on-files", f.TerminateOnFiles, "A list of files. If one of them changes, the process will terminate.") } // ToConfigObj given completed flags, returns a config object for the flag that was specified. diff --git a/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer.go b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer.go index 781afa5cb..c5a120574 100644 --- a/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer.go +++ b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer.go @@ -10,7 +10,8 @@ import ( type Observer interface { Run(stopChan <-chan struct{}) - AddReactor(reaction reactorFn, startingFileContent map[string][]byte, files ...string) Observer + HasSynced() bool + AddReactor(reaction ReactorFn, startingFileContent map[string][]byte, files ...string) Observer } // ActionType define a type of action observed on the file @@ -40,20 +41,26 @@ func (t ActionType) String(filename string) string { return "" } -// reactorFn define a reaction function called when an observed file is modified. -type reactorFn func(file string, action ActionType) error +// ReactorFn define a reaction function called when an observed file is modified. +type ReactorFn func(file string, action ActionType) error // ExitOnChangeReactor provides reactor function that causes the process to exit when the change is detected. 
-var ExitOnChangeReactor reactorFn = func(filename string, action ActionType) error { - klog.Infof("exiting because %q changed", filename) - os.Exit(0) - return nil +// DEPRECATED: Using this function cause process to exit immediately without proper shutdown (context close/etc.) +// Use the TerminateOnChangeReactor() instead. +var ExitOnChangeReactor = TerminateOnChangeReactor(func() { os.Exit(0) }) + +func TerminateOnChangeReactor(terminateFn func()) ReactorFn { + return func(filename string, action ActionType) error { + klog.Infof("Triggering shutdown because %s", action.String(filename)) + terminateFn() + return nil + } } func NewObserver(interval time.Duration) (Observer, error) { return &pollingObserver{ interval: interval, - reactors: map[string][]reactorFn{}, - files: map[string]string{}, + reactors: map[string][]ReactorFn{}, + files: map[string]fileHashAndState{}, }, nil } diff --git a/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer_polling.go b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer_polling.go index 0f3ca8ec8..0b33e79ef 100644 --- a/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer_polling.go +++ b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer_polling.go @@ -16,14 +16,24 @@ import ( type pollingObserver struct { interval time.Duration - reactors map[string][]reactorFn - files map[string]string + reactors map[string][]ReactorFn + files map[string]fileHashAndState reactorsMutex sync.RWMutex + + syncedMutex sync.RWMutex + hasSynced bool +} + +// HasSynced indicates that the observer synced all observed files at least once. +func (o *pollingObserver) HasSynced() bool { + o.syncedMutex.RLock() + defer o.syncedMutex.RUnlock() + return o.hasSynced } // AddReactor will add new reactor to this observer. 
-func (o *pollingObserver) AddReactor(reaction reactorFn, startingFileContent map[string][]byte, files ...string) Observer { +func (o *pollingObserver) AddReactor(reaction ReactorFn, startingFileContent map[string][]byte, files ...string) Observer { o.reactorsMutex.Lock() defer o.reactorsMutex.Unlock() for _, f := range files { @@ -38,14 +48,26 @@ func (o *pollingObserver) AddReactor(reaction reactorFn, startingFileContent map if startingContent, ok := startingFileContent[f]; ok { klog.V(3).Infof("Starting from specified content for file %q", f) - o.files[f], err = calculateHash(bytes.NewBuffer(startingContent)) + // if empty starting content is specified, do not hash the empty string but just return it the same + // way as calculateFileHash() does in that case. + // in case the file exists and is empty, we don't care about the initial content anyway, because we + // are only going to react when the file content change. + // in case the file does not exists but empty string is specified as initial content, without this + // the content will be hashed and reaction will trigger as if the content changed. 
+ if len(startingContent) == 0 { + o.files[f] = fileHashAndState{exists: true} + o.reactors[f] = append(o.reactors[f], reaction) + continue + } + currentHash, emptyFile, err := calculateHash(bytes.NewBuffer(startingContent)) if err != nil { panic(fmt.Sprintf("unexpected error while adding reactor for %#v: %v", files, err)) } + o.files[f] = fileHashAndState{exists: true, hash: currentHash, isEmpty: emptyFile} } else { klog.V(3).Infof("Adding reactor for file %q", f) o.files[f], err = calculateFileHash(f) - if err != nil { + if err != nil && !os.IsNotExist(err) { panic(fmt.Sprintf("unexpected error while adding reactor for %#v: %v", files, err)) } } @@ -64,36 +86,45 @@ func (o *pollingObserver) processReactors(stopCh <-chan struct{}) { o.reactorsMutex.RLock() defer o.reactorsMutex.RUnlock() for filename, reactors := range o.reactors { - currentHash, err := calculateFileHash(filename) - if err != nil { + currentFileState, err := calculateFileHash(filename) + if err != nil && !os.IsNotExist(err) { return false, err } - lastKnownHash := o.files[filename] - // No file change detected - if lastKnownHash == currentHash { - continue - } - - klog.Infof("Observed change: file:%s (current: %q, lastKnown: %q)", filename, currentHash, lastKnownHash) - o.files[filename] = currentHash + lastKnownFileState := o.files[filename] + o.files[filename] = currentFileState for i := range reactors { - action := FileModified + var action ActionType switch { - case len(lastKnownHash) == 0: + case !lastKnownFileState.exists && !currentFileState.exists: + // skip non-existing file + continue + case !lastKnownFileState.exists && currentFileState.exists && (len(currentFileState.hash) > 0 || currentFileState.isEmpty): + // if we see a new file created that has content or its empty, trigger FileCreate action + klog.Infof("Observed file %q has been created (hash=%q)", filename, currentFileState.hash) action = FileCreated - case len(currentHash) == 0: + case lastKnownFileState.exists && 
!currentFileState.exists: + klog.Infof("Observed file %q has been deleted", filename) action = FileDeleted - case len(lastKnownHash) > 0: + case lastKnownFileState.hash == currentFileState.hash: + // skip if the hashes are the same + continue + case lastKnownFileState.hash != currentFileState.hash: + klog.Infof("Observed file %q has been modified (old=%q, new=%q)", filename, lastKnownFileState.hash, currentFileState.hash) action = FileModified } - if err := reactors[i](filename, action); err != nil { klog.Errorf("Reactor for %q failed: %v", filename, err) } } } + if !o.HasSynced() { + o.syncedMutex.Lock() + o.hasSynced = true + o.syncedMutex.Unlock() + klog.V(3).Info("File observer successfully synced") + } return false, nil }) if err != nil { @@ -108,33 +139,53 @@ func (o *pollingObserver) Run(stopChan <-chan struct{}) { o.processReactors(stopChan) } -func calculateFileHash(path string) (string, error) { - stat, statErr := os.Stat(path) - if statErr != nil { - if os.IsNotExist(statErr) { - return "", nil - } - return "", statErr +type fileHashAndState struct { + hash string + exists bool + isEmpty bool +} + +func calculateFileHash(path string) (fileHashAndState, error) { + result := fileHashAndState{} + stat, err := os.Stat(path) + if err != nil { + return result, err } + + // this is fatal if stat.IsDir() { - return "", fmt.Errorf("you can watch only files, %s is a directory", path) + return result, fmt.Errorf("you can watch only files, %s is a directory", path) } f, err := os.Open(path) if err != nil { - if os.IsNotExist(err) { - return "", nil - } - return "", err + return result, err } defer f.Close() - return calculateHash(f) + + // at this point we know for sure the file exists and we can read its content even if that content is empty + result.exists = true + + hash, empty, err := calculateHash(f) + if err != nil { + return result, err + } + + result.hash = hash + result.isEmpty = empty + + return result, nil } -func calculateHash(content io.Reader) (string, 
error) { +func calculateHash(content io.Reader) (string, bool, error) { hasher := sha256.New() - if _, err := io.Copy(hasher, content); err != nil { - return "", err + written, err := io.Copy(hasher, content) + if err != nil { + return "", false, err + } + // written == 0 means the content is empty + if written == 0 { + return "", true, nil } - return hex.EncodeToString(hasher.Sum(nil)), nil + return hex.EncodeToString(hasher.Sum(nil)), false, nil } diff --git a/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer_polling_test.go b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer_polling_test.go index b56176bc8..32af86433 100644 --- a/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer_polling_test.go +++ b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer_polling_test.go @@ -4,7 +4,9 @@ import ( "fmt" "io/ioutil" "os" + "path" "path/filepath" + "strings" "sync" "testing" "time" @@ -12,6 +14,263 @@ import ( "k8s.io/apimachinery/pkg/util/wait" ) +func TestObserverPolling(t *testing.T) { + type observedAction struct { + file string + action ActionType + } + + var ( + nonEmptyContent = []byte("non-empty") + changedContent = []byte("change") + emptyContent = []byte("") + + observedSingleFileCreated = func(actions []observedAction, t *testing.T) { + if len(actions) == 0 { + t.Errorf("no actions observed, but expected to observe created") + return + } + if actions[0].action != FileCreated { + t.Errorf("created action expected, but observed %q", actions[0].action.String(path.Base(actions[0].file))) + } + } + + observedSingleFileModified = func(actions []observedAction, t *testing.T) { + if len(actions) == 0 { + t.Errorf("no actions observed, but expected to observe modified") + return + } + if actions[0].action != FileModified { + t.Errorf("modified action expected, but observed %q", actions[0].action.String(path.Base(actions[0].file))) + } + } + + observedSingleFileDeleted 
= func(actions []observedAction, t *testing.T) { + if len(actions) == 0 { + t.Errorf("no actions observed, but expected to observe deleted") + return + } + if actions[0].action != FileDeleted { + t.Errorf("deleted action expected, but observed %q", actions[0].action.String(path.Base(actions[0].file))) + } + } + + observedNoChanges = func(actions []observedAction, t *testing.T) { + if len(actions) != 0 { + var result []string + for _, a := range actions { + result = append(result, a.action.String(path.Base(a.file))) + } + t.Errorf("expected to not observe any actions, but observed: %s", strings.Join(result, ",")) + } + } + + defaultTimeout = 5 * time.Second + ) + + tests := []struct { + name string + startFileContent []byte // the content the file is created with initially + changeFileContent []byte // change the file content + deleteFile bool // change the file by deleting it + startWithNoFile bool // start test with no file + setInitialContent bool // set the initial content + initialContent map[string][]byte // initial content to pass to observer + timeout time.Duration // maximum test duration (default: 5s) + waitForObserver time.Duration // duration to wait for observer to sync changes (default: 300ms) + + evaluateActions func([]observedAction, *testing.T) // func to evaluate observed actions + }{ + { + name: "start with existing non-empty file with no change and initial content set", + evaluateActions: observedNoChanges, + setInitialContent: true, + startFileContent: nonEmptyContent, + timeout: 1 * time.Second, + }, + { + name: "start with existing non-empty file with no change and no initial content set", + evaluateActions: observedNoChanges, + startFileContent: nonEmptyContent, + timeout: 1 * time.Second, + }, + { + name: "start with existing non-empty file that change", + evaluateActions: observedSingleFileModified, + setInitialContent: true, + startFileContent: nonEmptyContent, + changeFileContent: changedContent, + }, + { + name: "start with existing 
non-empty file and no initial content that change", + evaluateActions: observedSingleFileModified, + startFileContent: nonEmptyContent, + changeFileContent: changedContent, + }, + { + name: "start with existing empty file with no change", + evaluateActions: observedNoChanges, + setInitialContent: true, + startFileContent: emptyContent, + changeFileContent: emptyContent, + }, + { + name: "start with existing empty file and no initial content with no change", + evaluateActions: observedNoChanges, + startFileContent: emptyContent, + changeFileContent: emptyContent, + }, + { + name: "start with existing empty file that change content", + evaluateActions: observedSingleFileModified, + startFileContent: emptyContent, + changeFileContent: changedContent, + }, + { + name: "start with existing empty file and empty initial content that change content", + evaluateActions: observedSingleFileModified, + setInitialContent: true, + startFileContent: emptyContent, + changeFileContent: changedContent, + }, + { + name: "start with non-existing file with no change", + evaluateActions: observedNoChanges, + startWithNoFile: true, + }, + { + name: "start with non-existing file that is created as empty file", + evaluateActions: observedSingleFileCreated, + startWithNoFile: true, + changeFileContent: emptyContent, + }, + { + name: "start with non-existing file that is created as non-empty file", + evaluateActions: observedSingleFileCreated, + startWithNoFile: true, + changeFileContent: nonEmptyContent, + }, + { + name: "start with existing file with content that is deleted", + evaluateActions: observedSingleFileDeleted, + setInitialContent: true, + startFileContent: nonEmptyContent, + deleteFile: true, + }, + { + name: "start with existing file with content and not initial content set that is deleted", + evaluateActions: observedSingleFileDeleted, + startFileContent: nonEmptyContent, + deleteFile: true, + }, + } + + baseDir, err := ioutil.TempDir("", "observer-poll-test") + if err != nil 
{ + t.Fatal(err) + } + defer os.RemoveAll(baseDir) + + for _, test := range tests { + if test.timeout == 0 { + test.timeout = defaultTimeout + } + t.Run(test.name, func(t *testing.T) { + observer, err := NewObserver(200 * time.Millisecond) + if err != nil { + t.Fatal(err) + } + + testDir := filepath.Join(baseDir, t.Name()) + if err := os.MkdirAll(filepath.Join(baseDir, t.Name()), 0777); err != nil { + t.Fatal(err) + } + + testFile := filepath.Join(testDir, "testfile") + + if test.setInitialContent { + test.initialContent = map[string][]byte{ + testFile: test.startFileContent, + } + } + + if !test.startWithNoFile { + if err := ioutil.WriteFile(testFile, test.startFileContent, os.ModePerm); err != nil { + t.Fatal(err) + } + t.Logf("created file %q with content: %q", testFile, string(test.startFileContent)) + } + + observedChan := make(chan observedAction) + observer.AddReactor(func(file string, action ActionType) error { + t.Logf("observed %q", action.String(path.Base(file))) + observedChan <- observedAction{ + file: file, + action: action, + } + return nil + }, test.initialContent, testFile) + + stopChan := make(chan struct{}) + + // start observing actions + observedActions := []observedAction{} + var observedActionsMutex sync.Mutex + stopObservingChan := make(chan struct{}) + go func() { + for { + select { + case action := <-observedChan: + observedActionsMutex.Lock() + observedActions = append(observedActions, action) + observedActionsMutex.Unlock() + case <-stopObservingChan: + return + } + } + }() + + // start file observer + go observer.Run(stopChan) + + // wait until file observer see the files at least once + if err := wait.PollImmediate(10*time.Millisecond, test.timeout, func() (done bool, err error) { + return observer.HasSynced(), nil + }); err != nil { + t.Errorf("failed to wait for observer to sync: %v", err) + } + t.Logf("starting observing changes ...") + + if test.changeFileContent != nil { + t.Logf("writing %q ...", string(test.changeFileContent)) + 
if err := ioutil.WriteFile(testFile, test.changeFileContent, os.ModePerm); err != nil { + t.Fatal(err) + } + } + + if test.deleteFile { + if err := os.RemoveAll(testDir); err != nil { + t.Fatal(err) + } + } + + // give observer time to observe latest events + if test.waitForObserver == 0 { + time.Sleep(400 * time.Millisecond) + } else { + time.Sleep(test.waitForObserver) + } + + close(stopObservingChan) + close(stopChan) + + observedActionsMutex.Lock() + defer observedActionsMutex.Unlock() + test.evaluateActions(observedActions, t) // evaluate observed actions + }) + } +} + type reactionRecorder struct { reactions map[string][]ActionType sync.RWMutex @@ -150,7 +409,7 @@ func TestObserverSimpleContentSpecified(t *testing.T) { o.AddReactor( testReaction, map[string][]byte{ - testFile: {}, + testFile: []byte("bar"), }, testFile) diff --git a/vendor/github.com/openshift/library-go/pkg/oauth/oauthserviceaccountclient/oauthclientregistry_test.go b/vendor/github.com/openshift/library-go/pkg/oauth/oauthserviceaccountclient/oauthclientregistry_test.go index 4566271de..8402b755e 100644 --- a/vendor/github.com/openshift/library-go/pkg/oauth/oauthserviceaccountclient/oauthclientregistry_test.go +++ b/vendor/github.com/openshift/library-go/pkg/oauth/oauthserviceaccountclient/oauthclientregistry_test.go @@ -761,7 +761,7 @@ func TestParseModelsMap(t *testing.T) { OAuthRedirectModelAnnotationURIPrefix + "two": "justapath", OAuthRedirectModelAnnotationURIPrefix + "three": "http://redhat.com", OAuthRedirectModelAnnotationURIPrefix + "four": "http://hello:90/world", - OAuthRedirectModelAnnotationURIPrefix + "five": "scheme0://host0:port0/path0", + OAuthRedirectModelAnnotationURIPrefix + "five": "scheme0://host0:5000/path0", OAuthRedirectModelAnnotationReferencePrefix + "five": buildRedirectObjectReferenceString("kind0", "name0", "group0"), }, expected: map[string]model{ @@ -802,7 +802,7 @@ func TestParseModelsMap(t *testing.T) { }, "five": { scheme: "scheme0", - port: "port0", + 
port: "5000", path: "/path0", group: "group0", kind: "kind0", diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider.go index 90b319578..f0e5c252f 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider.go @@ -77,6 +77,7 @@ func (c *cloudProviderObserver) ObserveCloudProviderNames(genericListers configo return observedConfig, errs } if err != nil { + errs = append(errs, err) return previouslyObservedConfig, errs } @@ -95,7 +96,7 @@ func (c *cloudProviderObserver) ObserveCloudProviderNames(genericListers configo } // we set cloudprovider configmap values only for some cloud providers. - validCloudProviders := sets.NewString("azure", "vsphere") + validCloudProviders := sets.NewString("azure", "gce", "openstack", "vsphere") if !validCloudProviders.Has(cloudProvider) { sourceCloudConfigMap = "" } @@ -151,8 +152,7 @@ func getPlatformName(platformType configv1.PlatformType, recorder events.Recorde cloudProvider = "gce" case configv1.LibvirtPlatformType: case configv1.OpenStackPlatformType: - // TODO(flaper87): Enable this once we've figured out a way to write the cloud provider config in the master nodes - //cloudProvider = "openstack" + cloudProvider = "openstack" case configv1.NonePlatformType: default: // the new doc on the infrastructure fields requires that we treat an unrecognized thing the same bare metal. 
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider_test.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider_test.go index 21be95596..1260ae3a6 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider_test.go @@ -63,7 +63,8 @@ func TestObserveCloudProviderNames(t *testing.T) { cloudProviderCount: 0, }, { platform: configv1.OpenStackPlatformType, - cloudProviderCount: 0, + expected: "openstack", + cloudProviderCount: 1, }, { platform: configv1.GCPPlatformType, expected: "gce", diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/proxy/observe_proxy.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/proxy/observe_proxy.go new file mode 100644 index 000000000..4231de358 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/proxy/observe_proxy.go @@ -0,0 +1,92 @@ +package proxy + +import ( + "reflect" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + configv1 "github.com/openshift/api/config/v1" + configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" + "github.com/openshift/library-go/pkg/operator/configobserver" + "github.com/openshift/library-go/pkg/operator/events" +) + +type ProxyLister interface { + ProxyLister() configlistersv1.ProxyLister +} + +func NewProxyObserveFunc(configPath []string) configobserver.ObserveConfigFunc { + return (&observeProxyFlags{ + configPath: configPath, + }).ObserveProxyConfig +} + +type observeProxyFlags struct { + configPath []string +} + +// ObserveProxyConfig observes the proxy.config.openshift.io/cluster object and writes +// its content to an unstructured object in a string map at the path 
from the constructor +func (f *observeProxyFlags) ObserveProxyConfig(genericListers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) { + proxyLister := genericListers.(ProxyLister) + + errs := []error{} + prevObservedProxyConfig := map[string]interface{}{} + + // grab the current Proxy config to later check whether it was updated + currentProxyMap, _, err := unstructured.NestedStringMap(existingConfig, f.configPath...) + if err != nil { + return prevObservedProxyConfig, append(errs, err) + } + + if len(currentProxyMap) > 0 { + unstructured.SetNestedStringMap(prevObservedProxyConfig, currentProxyMap, f.configPath...) + } + + observedConfig := map[string]interface{}{} + proxyConfig, err := proxyLister.ProxyLister().Get("cluster") + if errors.IsNotFound(err) { + recorder.Warningf("ObserveProxyConfig", "proxy.%s/cluster not found", configv1.GroupName) + return observedConfig, errs + } + if err != nil { + errs = append(errs, err) + return existingConfig, errs + } + + newProxyMap := proxyToMap(proxyConfig) + if newProxyMap != nil { + if err := unstructured.SetNestedStringMap(observedConfig, newProxyMap, f.configPath...); err != nil { + errs = append(errs, err) + } + } + + if !reflect.DeepEqual(currentProxyMap, newProxyMap) { + recorder.Eventf("ObserveProxyConfig", "proxy changed to %q", newProxyMap) + } + + return observedConfig, errs +} + +func proxyToMap(proxy *configv1.Proxy) map[string]string { + proxyMap := map[string]string{} + + if noProxy := proxy.Status.NoProxy; len(noProxy) > 0 { + proxyMap["NO_PROXY"] = noProxy + } + + if httpProxy := proxy.Status.HTTPProxy; len(httpProxy) > 0 { + proxyMap["HTTP_PROXY"] = httpProxy + } + + if httpsProxy := proxy.Status.HTTPSProxy; len(httpsProxy) > 0 { + proxyMap["HTTPS_PROXY"] = httpsProxy + } + + if len(proxyMap) == 0 { + return nil + } + + return proxyMap +} diff --git 
a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/proxy/observe_proxy_test.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/proxy/observe_proxy_test.go new file mode 100644 index 000000000..ef5a7e302 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/proxy/observe_proxy_test.go @@ -0,0 +1,105 @@ +package proxy + +import ( + "reflect" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/cache" + + configv1 "github.com/openshift/api/config/v1" + configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resourcesynccontroller" +) + +type testLister struct { + lister configlistersv1.ProxyLister +} + +func (l testLister) ProxyLister() configlistersv1.ProxyLister { + return l.lister +} + +func (l testLister) ResourceSyncer() resourcesynccontroller.ResourceSyncer { + return nil +} + +func (l testLister) PreRunHasSynced() []cache.InformerSynced { + return nil +} + +func TestObserveProxyConfig(t *testing.T) { + configPath := []string{"openshift", "proxy"} + + tests := []struct { + name string + proxySpec configv1.ProxySpec + proxyStatus configv1.ProxyStatus + previous map[string]string + expected map[string]interface{} + expectedError []error + eventsExpected int + }{ + { + name: "all unset", + proxySpec: configv1.ProxySpec{}, + proxyStatus: configv1.ProxyStatus{}, + expected: map[string]interface{}{}, + expectedError: []error{}, + }, + { + name: "all set", + proxySpec: configv1.ProxySpec{ + HTTPProxy: "http://someplace.it", + HTTPSProxy: "https://someplace.it", + NoProxy: "127.0.0.1", + }, + proxyStatus: configv1.ProxyStatus{ + HTTPProxy: "http://someplace.it", + HTTPSProxy: "https://someplace.it", + NoProxy: "127.0.0.1,incluster.address.it", + }, + expected: map[string]interface{}{ + "openshift": map[string]interface{}{ + "proxy": 
map[string]interface{}{ + "HTTP_PROXY": "http://someplace.it", + "HTTPS_PROXY": "https://someplace.it", + "NO_PROXY": "127.0.0.1,incluster.address.it", + }, + }, + }, + expectedError: []error{}, + eventsExpected: 1, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + indexer.Add(&configv1.Proxy{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, + Spec: tt.proxySpec, + Status: tt.proxyStatus, + }) + listers := testLister{ + lister: configlistersv1.NewProxyLister(indexer), + } + eventRecorder := events.NewInMemoryRecorder("") + + initialExistingConfig := map[string]interface{}{} + + observeFn := NewProxyObserveFunc(configPath) + + got, errorsGot := observeFn(listers, eventRecorder, initialExistingConfig) + if !reflect.DeepEqual(got, tt.expected) { + t.Errorf("observeProxyFlags.ObserveProxyConfig() got = %v, want %v", got, tt.expected) + } + if !reflect.DeepEqual(errorsGot, tt.expectedError) { + t.Errorf("observeProxyFlags.ObserveProxyConfig() errorsGot = %v, want %v", errorsGot, tt.expectedError) + } + if events := eventRecorder.Events(); len(events) != tt.eventsExpected { + t.Errorf("expected %d events, but got %d: %v", tt.eventsExpected, len(events), events) + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client.go b/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client.go new file mode 100644 index 000000000..e93572cdc --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client.go @@ -0,0 +1,225 @@ +package genericoperatorclient + +import ( + "reflect" + "strings" + "time" + + "k8s.io/apimachinery/pkg/runtime" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/v1helpers" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + 
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/dynamic/dynamicinformer" + "k8s.io/client-go/informers" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" +) + +const globalConfigName = "cluster" + +func NewClusterScopedOperatorClient(config *rest.Config, gvr schema.GroupVersionResource) (v1helpers.OperatorClient, dynamicinformer.DynamicSharedInformerFactory, error) { + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + return nil, nil, err + } + client := dynamicClient.Resource(gvr) + + informers := dynamicinformer.NewDynamicSharedInformerFactory(dynamicClient, 12*time.Hour) + informer := informers.ForResource(gvr) + + return &dynamicOperatorClient{ + informer: informer, + client: client, + }, informers, nil +} + +type dynamicOperatorClient struct { + informer informers.GenericInformer + client dynamic.ResourceInterface +} + +func (c dynamicOperatorClient) Informer() cache.SharedIndexInformer { + return c.informer.Informer() +} + +func (c dynamicOperatorClient) GetOperatorState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) { + uncastInstance, err := c.informer.Lister().Get(globalConfigName) + if err != nil { + return nil, nil, "", err + } + instance := uncastInstance.(*unstructured.Unstructured) + + spec, err := getOperatorSpecFromUnstructured(instance.UnstructuredContent()) + if err != nil { + return nil, nil, "", err + } + status, err := getOperatorStatusFromUnstructured(instance.UnstructuredContent()) + if err != nil { + return nil, nil, "", err + } + + return spec, status, instance.GetResourceVersion(), nil +} + +// UpdateOperatorSpec overwrites the operator object spec with the values given +// in operatorv1.OperatorSpec while preserving pre-existing spec fields that have +// no correspondence in operatorv1.OperatorSpec. 
+func (c dynamicOperatorClient) UpdateOperatorSpec(resourceVersion string, spec *operatorv1.OperatorSpec) (*operatorv1.OperatorSpec, string, error) { + uncastOriginal, err := c.informer.Lister().Get(globalConfigName) + if err != nil { + return nil, "", err + } + original := uncastOriginal.(*unstructured.Unstructured) + + copy := original.DeepCopy() + copy.SetResourceVersion(resourceVersion) + if err := setOperatorSpecFromUnstructured(copy.UnstructuredContent(), spec); err != nil { + return nil, "", err + } + + ret, err := c.client.Update(copy, metav1.UpdateOptions{}) + if err != nil { + return nil, "", err + } + retSpec, err := getOperatorSpecFromUnstructured(ret.UnstructuredContent()) + if err != nil { + return nil, "", err + } + + return retSpec, ret.GetResourceVersion(), nil +} + +// UpdateOperatorStatus overwrites the operator object status with the values given +// in operatorv1.OperatorStatus while preserving pre-existing status fields that have +// no correspondence in operatorv1.OperatorStatus. 
+func (c dynamicOperatorClient) UpdateOperatorStatus(resourceVersion string, status *operatorv1.OperatorStatus) (*operatorv1.OperatorStatus, error) { + uncastOriginal, err := c.informer.Lister().Get(globalConfigName) + if err != nil { + return nil, err + } + original := uncastOriginal.(*unstructured.Unstructured) + + copy := original.DeepCopy() + copy.SetResourceVersion(resourceVersion) + if err := setOperatorStatusFromUnstructured(copy.UnstructuredContent(), status); err != nil { + return nil, err + } + + ret, err := c.client.UpdateStatus(copy, metav1.UpdateOptions{}) + if err != nil { + return nil, err + } + retStatus, err := getOperatorStatusFromUnstructured(ret.UnstructuredContent()) + if err != nil { + return nil, err + } + + return retStatus, nil +} + +func getOperatorSpecFromUnstructured(obj map[string]interface{}) (*operatorv1.OperatorSpec, error) { + uncastSpec, exists, err := unstructured.NestedMap(obj, "spec") + if !exists { + return &operatorv1.OperatorSpec{}, nil + } + if err != nil { + return nil, err + } + + ret := &operatorv1.OperatorSpec{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uncastSpec, ret); err != nil { + return nil, err + } + return ret, nil +} + +func setOperatorSpecFromUnstructured(obj map[string]interface{}, spec *operatorv1.OperatorSpec) error { + // we cannot simply set the entire map because doing so would stomp unknown fields, + // like say a static pod operator spec when cast as an operator spec + newSpec, err := runtime.DefaultUnstructuredConverter.ToUnstructured(spec) + if err != nil { + return err + } + + origSpec, preExistingSpec, err := unstructured.NestedMap(obj, "spec") + if err != nil { + return err + } + if preExistingSpec { + flds := topLevelFields(*spec) + for k, v := range origSpec { + if !flds[k] { + if err := unstructured.SetNestedField(newSpec, v, k); err != nil { + return err + } + } + } + } + return unstructured.SetNestedMap(obj, newSpec, "spec") +} + +func 
getOperatorStatusFromUnstructured(obj map[string]interface{}) (*operatorv1.OperatorStatus, error) { + uncastStatus, exists, err := unstructured.NestedMap(obj, "status") + if !exists { + return &operatorv1.OperatorStatus{}, nil + } + if err != nil { + return nil, err + } + + ret := &operatorv1.OperatorStatus{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uncastStatus, ret); err != nil { + return nil, err + } + return ret, nil +} + +func setOperatorStatusFromUnstructured(obj map[string]interface{}, status *operatorv1.OperatorStatus) error { + // we cannot simply set the entire map because doing so would stomp unknown fields, + // like say a static pod operator status when cast as an operator status + newStatus, err := runtime.DefaultUnstructuredConverter.ToUnstructured(status) + if err != nil { + return err + } + + origStatus, preExistingStatus, err := unstructured.NestedMap(obj, "status") + if err != nil { + return err + } + if preExistingStatus { + flds := topLevelFields(*status) + for k, v := range origStatus { + if !flds[k] { + if err := unstructured.SetNestedField(newStatus, v, k); err != nil { + return err + } + } + } + } + return unstructured.SetNestedMap(obj, newStatus, "status") +} + +func topLevelFields(obj interface{}) map[string]bool { + ret := map[string]bool{} + t := reflect.TypeOf(obj) + for i := 0; i < t.NumField(); i++ { + fld := t.Field(i) + fieldName := fld.Name + if jsonTag := fld.Tag.Get("json"); jsonTag == "-" { + continue + } else if jsonTag != "" { + // check for possible comma as in "...,omitempty" + var commaIdx int + if commaIdx = strings.Index(jsonTag, ","); commaIdx < 0 { + commaIdx = len(jsonTag) + } + fieldName = jsonTag[:commaIdx] + } + ret[fieldName] = true + } + return ret +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client_test.go b/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client_test.go new file mode 
100644 index 000000000..df2550573 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client_test.go @@ -0,0 +1,221 @@ +package genericoperatorclient + +import ( + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/diff" + + operatorv1 "github.com/openshift/api/operator/v1" +) + +func TestSetOperatorSpecFromUnstructured(t *testing.T) { + tests := []struct { + name string + + in map[string]interface{} + spec *operatorv1.OperatorSpec + expected map[string]interface{} + }{ + { + name: "keep-unknown", + in: map[string]interface{}{ + "spec": map[string]interface{}{ + "non-standard-field": "value", + }, + }, + spec: &operatorv1.OperatorSpec{ + LogLevel: operatorv1.Trace, + }, + expected: map[string]interface{}{ + "spec": map[string]interface{}{ + "non-standard-field": "value", + "logLevel": "Trace", + "managementState": "", + "operatorLogLevel": "", + "unsupportedConfigOverrides": nil, + "observedConfig": nil, + }, + }, + }, + { + name: "keep-everything-outside-of-spec", + in: map[string]interface{}{ + "kind": "Foo", + "apiVersion": "bar/v1", + "status": map[string]interface{}{"foo": "bar"}, + "spec": map[string]interface{}{}, + }, + spec: &operatorv1.OperatorSpec{}, + expected: map[string]interface{}{ + "kind": "Foo", + "apiVersion": "bar/v1", + "status": map[string]interface{}{"foo": "bar"}, + "spec": map[string]interface{}{ + "logLevel": "", + "managementState": "", + "operatorLogLevel": "", + "unsupportedConfigOverrides": nil, + "observedConfig": nil, + }, + }, + }, + { + name: "replace-rawextensions", + in: map[string]interface{}{ + "spec": map[string]interface{}{ + "unsupportedConfigOverrides": map[string]interface{}{"foo": "bar"}, + }, + }, + spec: &operatorv1.OperatorSpec{ + LogLevel: operatorv1.Trace, + }, + expected: map[string]interface{}{ + "spec": map[string]interface{}{ + "logLevel": "Trace", + "managementState": "", + "operatorLogLevel": "", + "unsupportedConfigOverrides": 
nil, + "observedConfig": nil, + }, + }, + }, + { + name: "remove-observed-fields", + in: map[string]interface{}{ + "spec": map[string]interface{}{ + "observedConfig": map[string]interface{}{"a": "1", "b": "2"}, + }, + }, + spec: &operatorv1.OperatorSpec{ + ObservedConfig: runtime.RawExtension{Raw: []byte(`{"a":1}`)}, + }, + expected: map[string]interface{}{ + "spec": map[string]interface{}{ + "logLevel": "", + "managementState": "", + "operatorLogLevel": "", + "unsupportedConfigOverrides": nil, + "observedConfig": map[string]interface{}{"a": int64(1)}, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := setOperatorSpecFromUnstructured(test.in, test.spec) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(test.in, test.expected) { + t.Errorf(diff.ObjectDiff(test.in, test.expected)) + } + }) + } +} + +func TestSetOperatorStatusFromUnstructured(t *testing.T) { + tests := []struct { + name string + + in map[string]interface{} + status *operatorv1.OperatorStatus + expected map[string]interface{} + }{ + { + name: "keep-unknown", + in: map[string]interface{}{ + "status": map[string]interface{}{ + "non-standard-field": "value", + }, + }, + status: &operatorv1.OperatorStatus{ + Conditions: []operatorv1.OperatorCondition{ + { + Type: "Degraded", + }, + }, + }, + expected: map[string]interface{}{ + "status": map[string]interface{}{ + "non-standard-field": "value", + "conditions": []interface{}{ + map[string]interface{}{ + "lastTransitionTime": nil, + "status": "", + "type": "Degraded", + }, + }, + "readyReplicas": int64(0), + }, + }, + }, + { + name: "keep-everything-outside-of-status", + in: map[string]interface{}{ + "kind": "Foo", + "apiVersion": "bar/v1", + "spec": map[string]interface{}{"foo": "bar"}, + "status": map[string]interface{}{}, + }, + status: &operatorv1.OperatorStatus{}, + expected: map[string]interface{}{ + "kind": "Foo", + "apiVersion": "bar/v1", + "spec": map[string]interface{}{"foo": "bar"}, + 
"status": map[string]interface{}{ + "readyReplicas": int64(0), + }, + }, + }, + { + name: "replace-condition", + in: map[string]interface{}{ + "status": map[string]interface{}{ + "non-standard-field": "value", + "conditions": []interface{}{ + map[string]interface{}{ + "lastTransitionTime": nil, + "status": "", + "type": "overwriteme", + }, + }, + }, + }, + status: &operatorv1.OperatorStatus{ + Conditions: []operatorv1.OperatorCondition{ + { + Type: "Degraded", + }, + }, + }, + expected: map[string]interface{}{ + "status": map[string]interface{}{ + "non-standard-field": "value", + "conditions": []interface{}{ + map[string]interface{}{ + "lastTransitionTime": nil, + "status": "", + "type": "Degraded", + }, + }, + "readyReplicas": int64(0), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := setOperatorStatusFromUnstructured(test.in, test.status) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(test.in, test.expected) { + t.Errorf(diff.ObjectGoPrintDiff(test.in, test.expected)) + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_staticpod_operator_client.go b/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_staticpod_operator_client.go new file mode 100644 index 000000000..35ae57d14 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_staticpod_operator_client.go @@ -0,0 +1,201 @@ +package genericoperatorclient + +import ( + "time" + + "github.com/imdario/mergo" + + "k8s.io/apimachinery/pkg/runtime" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/v1helpers" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/dynamic/dynamicinformer" + "k8s.io/client-go/rest" +) + +func 
NewStaticPodOperatorClient(config *rest.Config, gvr schema.GroupVersionResource) (v1helpers.StaticPodOperatorClient, dynamicinformer.DynamicSharedInformerFactory, error) { + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + return nil, nil, err + } + client := dynamicClient.Resource(gvr) + + informers := dynamicinformer.NewDynamicSharedInformerFactory(dynamicClient, 12*time.Hour) + informer := informers.ForResource(gvr) + + return &dynamicStaticPodOperatorClient{ + dynamicOperatorClient: dynamicOperatorClient{ + informer: informer, + client: client, + }, + }, informers, nil +} + +type dynamicStaticPodOperatorClient struct { + dynamicOperatorClient +} + +func (c dynamicStaticPodOperatorClient) GetStaticPodOperatorState() (*operatorv1.StaticPodOperatorSpec, *operatorv1.StaticPodOperatorStatus, string, error) { + uncastInstance, err := c.informer.Lister().Get("cluster") + if err != nil { + return nil, nil, "", err + } + instance := uncastInstance.(*unstructured.Unstructured) + + spec, err := getStaticPodOperatorSpecFromUnstructured(instance.UnstructuredContent()) + if err != nil { + return nil, nil, "", err + } + status, err := getStaticPodOperatorStatusFromUnstructured(instance.UnstructuredContent()) + if err != nil { + return nil, nil, "", err + } + + return spec, status, instance.GetResourceVersion(), nil +} + +func (c dynamicStaticPodOperatorClient) GetStaticPodOperatorStateWithQuorum() (*operatorv1.StaticPodOperatorSpec, *operatorv1.StaticPodOperatorStatus, string, error) { + instance, err := c.client.Get("cluster", metav1.GetOptions{}) + if err != nil { + return nil, nil, "", err + } + + spec, err := getStaticPodOperatorSpecFromUnstructured(instance.UnstructuredContent()) + if err != nil { + return nil, nil, "", err + } + status, err := getStaticPodOperatorStatusFromUnstructured(instance.UnstructuredContent()) + if err != nil { + return nil, nil, "", err + } + + return spec, status, instance.GetResourceVersion(), nil +} + +func (c 
dynamicStaticPodOperatorClient) UpdateStaticPodOperatorSpec(resourceVersion string, spec *operatorv1.StaticPodOperatorSpec) (*operatorv1.StaticPodOperatorSpec, string, error) { + uncastOriginal, err := c.informer.Lister().Get("cluster") + if err != nil { + return nil, "", err + } + original := uncastOriginal.(*unstructured.Unstructured) + + copy := original.DeepCopy() + copy.SetResourceVersion(resourceVersion) + if err := setStaticPodOperatorSpecFromUnstructured(copy.UnstructuredContent(), spec); err != nil { + return nil, "", err + } + + ret, err := c.client.Update(copy, metav1.UpdateOptions{}) + if err != nil { + return nil, "", err + } + retSpec, err := getStaticPodOperatorSpecFromUnstructured(ret.UnstructuredContent()) + if err != nil { + return nil, "", err + } + + return retSpec, ret.GetResourceVersion(), nil +} + +func (c dynamicStaticPodOperatorClient) UpdateStaticPodOperatorStatus(resourceVersion string, status *operatorv1.StaticPodOperatorStatus) (*operatorv1.StaticPodOperatorStatus, error) { + uncastOriginal, err := c.informer.Lister().Get("cluster") + if err != nil { + return nil, err + } + original := uncastOriginal.(*unstructured.Unstructured) + + copy := original.DeepCopy() + copy.SetResourceVersion(resourceVersion) + if err := setStaticPodOperatorStatusFromUnstructured(copy.UnstructuredContent(), status); err != nil { + return nil, err + } + + ret, err := c.client.UpdateStatus(copy, metav1.UpdateOptions{}) + if err != nil { + return nil, err + } + retStatus, err := getStaticPodOperatorStatusFromUnstructured(ret.UnstructuredContent()) + if err != nil { + return nil, err + } + + return retStatus, nil +} + +func getStaticPodOperatorSpecFromUnstructured(obj map[string]interface{}) (*operatorv1.StaticPodOperatorSpec, error) { + uncastSpec, exists, err := unstructured.NestedMap(obj, "spec") + if !exists { + return &operatorv1.StaticPodOperatorSpec{}, nil + } + if err != nil { + return nil, err + } + + ret := &operatorv1.StaticPodOperatorSpec{} + if err := 
runtime.DefaultUnstructuredConverter.FromUnstructured(uncastSpec, ret); err != nil { + return nil, err + } + return ret, nil +} + +func setStaticPodOperatorSpecFromUnstructured(obj map[string]interface{}, spec *operatorv1.StaticPodOperatorSpec) error { + // we cannot simply set the entire map because doing so would stomp unknown fields, like say a static pod operator spec when cast as an operator spec + newUnstructuredSpec, err := runtime.DefaultUnstructuredConverter.ToUnstructured(spec) + if err != nil { + return err + } + + originalUnstructuredSpec, exists, err := unstructured.NestedMap(obj, "spec") + if !exists { + return unstructured.SetNestedMap(obj, newUnstructuredSpec, "spec") + } + if err != nil { + return err + } + if err := mergo.Merge(&originalUnstructuredSpec, newUnstructuredSpec, mergo.WithOverride); err != nil { + return err + } + + return unstructured.SetNestedMap(obj, originalUnstructuredSpec, "spec") +} + +func getStaticPodOperatorStatusFromUnstructured(obj map[string]interface{}) (*operatorv1.StaticPodOperatorStatus, error) { + uncastStatus, exists, err := unstructured.NestedMap(obj, "status") + if !exists { + return &operatorv1.StaticPodOperatorStatus{}, nil + } + if err != nil { + return nil, err + } + + ret := &operatorv1.StaticPodOperatorStatus{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uncastStatus, ret); err != nil { + return nil, err + } + return ret, nil +} + +func setStaticPodOperatorStatusFromUnstructured(obj map[string]interface{}, spec *operatorv1.StaticPodOperatorStatus) error { + // we cannot simply set the entire map because doing so would stomp unknown fields, like say a static pod operator spec when cast as an operator spec + newUnstructuredStatus, err := runtime.DefaultUnstructuredConverter.ToUnstructured(spec) + if err != nil { + return err + } + + originalUnstructuredStatus, exists, err := unstructured.NestedMap(obj, "status") + if !exists { + return unstructured.SetNestedMap(obj, newUnstructuredStatus, 
"status") + } + if err != nil { + return err + } + if err := mergo.Merge(&originalUnstructuredStatus, newUnstructuredStatus, mergo.WithOverride); err != nil { + return err + } + + return unstructured.SetNestedMap(obj, originalUnstructuredStatus, "status") +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go index 870b7ceb6..da1a1cfa3 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go @@ -1,6 +1,7 @@ package resourceapply import ( + "bytes" "fmt" "sort" "strings" @@ -168,17 +169,28 @@ func ApplyConfigMap(client coreclientv1.ConfigMapsGetter, recorder events.Record modifiedKeys = append(modifiedKeys, "data."+existingCopyKey) } } + for existingCopyKey, existingCopyBinValue := range existingCopy.BinaryData { + if requiredBinValue, ok := required.BinaryData[existingCopyKey]; !ok || !bytes.Equal(existingCopyBinValue, requiredBinValue) { + modifiedKeys = append(modifiedKeys, "binaryData."+existingCopyKey) + } + } for requiredKey := range required.Data { if _, ok := existingCopy.Data[requiredKey]; !ok { modifiedKeys = append(modifiedKeys, "data."+requiredKey) } } + for requiredBinKey := range required.BinaryData { + if _, ok := existingCopy.BinaryData[requiredBinKey]; !ok { + modifiedKeys = append(modifiedKeys, "binaryData."+requiredBinKey) + } + } dataSame := len(modifiedKeys) == 0 if dataSame && !*modified { return existingCopy, false, nil } existingCopy.Data = required.Data + existingCopy.BinaryData = required.BinaryData actual, err := client.ConfigMaps(required.Namespace).Update(existingCopy) @@ -196,6 +208,10 @@ func ApplyConfigMap(client coreclientv1.ConfigMapsGetter, recorder events.Record // ApplySecret merges objectmeta, requires data func ApplySecret(client coreclientv1.SecretsGetter, recorder 
events.Recorder, required *corev1.Secret) (*corev1.Secret, bool, error) { + if len(required.StringData) > 0 { + return nil, false, fmt.Errorf("Secret.stringData is not supported") + } + existing, err := client.Secrets(required.Namespace).Get(required.Name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { actual, err := client.Secrets(required.Namespace).Create(required) @@ -210,6 +226,7 @@ func ApplySecret(client coreclientv1.SecretsGetter, recorder events.Recorder, re existingCopy := existing.DeepCopy() resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + dataSame := equality.Semantic.DeepEqual(existingCopy.Data, required.Data) if dataSame && !*modified { return existingCopy, false, nil @@ -217,7 +234,7 @@ func ApplySecret(client coreclientv1.SecretsGetter, recorder events.Recorder, re existingCopy.Data = required.Data if klog.V(4) { - klog.Infof("Secret %q changes: %v", required.Namespace+"/"+required.Name, JSONPatch(existing, required)) + klog.Infof("Secret %s/%s changes: %v", required.Namespace, required.Name, JSONPatchSecret(existing, required)) } actual, err := client.Secrets(required.Namespace).Update(existingCopy) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core_test.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core_test.go index 7b520ca2f..c1723626e 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core_test.go @@ -139,6 +139,52 @@ func TestApplyConfigMap(t *testing.T) { } }, }, + { + name: "update on mismatch binary data", + existing: []runtime.Object{ + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "one-ns", Name: "foo", Labels: map[string]string{"extra": "leave-alone"}}, + Data: map[string]string{ + "configmap": "value", + }, + }, + }, + input: &corev1.ConfigMap{ + ObjectMeta: 
metav1.ObjectMeta{Namespace: "one-ns", Name: "foo"}, + Data: map[string]string{ + "configmap": "value", + }, + BinaryData: map[string][]byte{ + "binconfigmap": []byte("value"), + }, + }, + + expectedModified: true, + verifyActions: func(actions []clienttesting.Action, t *testing.T) { + if len(actions) != 2 { + t.Fatal(spew.Sdump(actions)) + } + if !actions[0].Matches("get", "configmaps") || actions[0].(clienttesting.GetAction).GetName() != "foo" { + t.Error(spew.Sdump(actions)) + } + if !actions[1].Matches("update", "configmaps") { + t.Error(spew.Sdump(actions)) + } + expected := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "one-ns", Name: "foo", Labels: map[string]string{"extra": "leave-alone"}}, + Data: map[string]string{ + "configmap": "value", + }, + BinaryData: map[string][]byte{ + "binconfigmap": []byte("value"), + }, + } + actual := actions[1].(clienttesting.UpdateAction).GetObject().(*corev1.ConfigMap) + if !equality.Semantic.DeepEqual(expected, actual) { + t.Error(JSONPatch(expected, actual)) + } + }, + }, } for _, test := range tests { diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go index c5077f48e..c053be7ab 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go @@ -4,6 +4,9 @@ import ( "fmt" patch "github.com/evanphx/json-patch" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" ) @@ -31,3 +34,25 @@ func JSONPatch(original, modified runtime.Object) string { } return string(patchBytes) } + +// JSONPatchSecret generates a JSON patch between original and modified secrets, hiding its data, +// and return the JSON as a 
string. In case of error, the returned string will contain the error messages. +func JSONPatchSecret(original, modified *corev1.Secret) string { + safeModified := modified.DeepCopy() + safeOriginal := original.DeepCopy() + + for s := range safeOriginal.Data { + safeOriginal.Data[s] = []byte("OLD") + } + for s := range safeModified.Data { + if _, preoriginal := original.Data[s]; !preoriginal { + safeModified.Data[s] = []byte("NEW") + } else if !equality.Semantic.DeepEqual(original.Data[s], safeModified.Data[s]) { + safeModified.Data[s] = []byte("MODIFIED") + } else { + safeModified.Data[s] = []byte("OLD") + } + } + + return JSONPatch(safeOriginal, safeModified) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go index dc3a9db3d..b28c8770a 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go @@ -7,6 +7,7 @@ import ( "reflect" "k8s.io/klog" + "sigs.k8s.io/yaml" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -17,7 +18,14 @@ import ( // MergeConfigMap takes a configmap, the target key, special overlay funcs a list of config configs to overlay on top of each other // It returns the resultant configmap and a bool indicating if any changes were made to the configmap func MergeConfigMap(configMap *corev1.ConfigMap, configKey string, specialCases map[string]MergeFunc, configYAMLs ...[]byte) (*corev1.ConfigMap, bool, error) { - configBytes, err := MergeProcessConfig(specialCases, configYAMLs...) + return MergePrunedConfigMap(nil, configMap, configKey, specialCases, configYAMLs...) 
+} + +// MergePrunedConfigMap takes a configmap, the target key, special overlay funcs a list of config configs to overlay on top of each other +// It returns the resultant configmap and a bool indicating if any changes were made to the configmap. +// It roundtrips the config through the given schema. +func MergePrunedConfigMap(schema runtime.Object, configMap *corev1.ConfigMap, configKey string, specialCases map[string]MergeFunc, configYAMLs ...[]byte) (*corev1.ConfigMap, bool, error) { + configBytes, err := MergePrunedProcessConfig(schema, specialCases, configYAMLs...) if err != nil { return nil, false, err } @@ -85,6 +93,44 @@ func MergeProcessConfig(specialCases map[string]MergeFunc, configYAMLs ...[]byte return currentConfigYAML, nil } +// MergePrunedProcessConfig merges a series of config yaml files together with each later one overlaying all previous. +// The result is roundtripped through the given schema if it is non-nil. +func MergePrunedProcessConfig(schema runtime.Object, specialCases map[string]MergeFunc, configYAMLs ...[]byte) ([]byte, error) { + bs, err := MergeProcessConfig(specialCases, configYAMLs...) 
+ if err != nil { + return nil, err + } + + if schema == nil { + return bs, nil + } + + // roundtrip through the schema + typed := schema.DeepCopyObject() + if err := yaml.Unmarshal(bs, typed); err != nil { + return nil, err + } + typedBytes, err := json.Marshal(typed) + if err != nil { + return nil, err + } + var untypedJSON map[string]interface{} + if err := json.Unmarshal(typedBytes, &untypedJSON); err != nil { + return nil, err + } + + // and intersect output with input because we cannot rely on omitempty in the schema + inputBytes, err := yaml.YAMLToJSON(bs) + if err != nil { + return nil, err + } + var inputJSON map[string]interface{} + if err := json.Unmarshal(inputBytes, &inputJSON); err != nil { + return nil, err + } + return json.Marshal(intersectJSON(inputJSON, untypedJSON)) +} + type MergeFunc func(dst, src interface{}, currentPath string) (interface{}, error) // mergeConfig overwrites entries in curr by additional. It modifies curr. @@ -132,3 +178,53 @@ func mergeConfig(curr, additional map[string]interface{}, currentPath string, sp return nil } + +// jsonIntersection returns the intersection of both JSON object, +// preferring the values of the first argument. 
+func intersectJSON(x1, x2 map[string]interface{}) map[string]interface{} { + if x1 == nil || x2 == nil { + return nil + } + ret := map[string]interface{}{} + for k, v1 := range x1 { + v2, ok := x2[k] + if !ok { + continue + } + ret[k] = intersectValue(v1, v2) + } + return ret +} + +func intersectArray(x1, x2 []interface{}) []interface{} { + if x1 == nil || x2 == nil { + return nil + } + ret := make([]interface{}, 0, len(x1)) + for i := range x1 { + if i >= len(x2) { + break + } + ret = append(ret, intersectValue(x1[i], x2[i])) + } + return ret +} + +func intersectValue(x1, x2 interface{}) interface{} { + switch x1 := x1.(type) { + case map[string]interface{}: + x2, ok := x2.(map[string]interface{}) + if !ok { + return x1 + } + return intersectJSON(x1, x2) + case []interface{}: + x2, ok := x2.([]interface{}) + if !ok { + return x1 + } + return intersectArray(x1, x2) + default: + return x1 + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger_test.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger_test.go index b44ad2048..efaff9d6b 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger_test.go @@ -6,6 +6,8 @@ import ( "testing" "k8s.io/apimachinery/pkg/util/diff" + + controlplanev1 "github.com/openshift/api/kubecontrolplane/v1" ) func TestMergeConfig(t *testing.T) { @@ -209,3 +211,63 @@ bravo: two }) } } + +func TestMergePrunedConfig(t *testing.T) { + tests := []struct { + name string + curr string + additional string + specialCases map[string]MergeFunc + + expected string + expectedErr string + }{ + { + name: "prune unknown values", + curr: ` +apiVersion: foo +kind: the-kind +alpha: first +`, + additional: ` +consolePublicURL: http://foo/bar +`, + expected: 
`{"apiVersion":"foo","consolePublicURL":"http://foo/bar","kind":"the-kind"}`, + }, + { + name: "prune unknown values with array", + curr: ` +apiVersion: foo +kind: the-kind +corsAllowedOrigins: +- (?i)//openshift(:|\z) +`, + additional: ` +consolePublicURL: http://foo/bar +`, + expected: `{"apiVersion":"foo","consolePublicURL":"http://foo/bar","corsAllowedOrigins":["(?i)//openshift(:|\\z)"],"kind":"the-kind"}`, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + actual, err := MergePrunedProcessConfig(&controlplanev1.KubeAPIServerConfig{}, test.specialCases, []byte(test.curr), []byte(test.additional)) + switch { + case err == nil && len(test.expectedErr) == 0: + case err == nil && len(test.expectedErr) != 0: + t.Fatalf("missing %q", test.expectedErr) + case err != nil && len(test.expectedErr) == 0: + t.Fatal(err) + case err != nil && len(test.expectedErr) != 0 && !strings.Contains(err.Error(), test.expectedErr): + t.Fatalf("expected %q, got %q", test.expectedErr, err) + } + if err != nil { + return + } + + if test.expected != string(actual) { + t.Error(diff.StringDiff(test.expected, string(actual))) + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_cmd.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_cmd.go index f62218ac3..9513078ed 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_cmd.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_cmd.go @@ -62,11 +62,13 @@ func (o *CertSyncControllerOptions) Run() error { return err } - initialContent, _ := ioutil.ReadFile(o.KubeConfigFile) - observer.AddReactor(fileobserver.ExitOnChangeReactor, map[string][]byte{o.KubeConfigFile: initialContent}, o.KubeConfigFile) - stopCh := make(chan struct{}) + initialContent, _ := ioutil.ReadFile(o.KubeConfigFile) + 
observer.AddReactor(fileobserver.TerminateOnChangeReactor(func() { + close(stopCh) + }), map[string][]byte{o.KubeConfigFile: initialContent}, o.KubeConfigFile) + kubeInformers := informers.NewSharedInformerFactoryWithOptions(o.kubeClient, 10*time.Minute, informers.WithNamespace(o.Namespace)) eventRecorder := events.NewKubeRecorder(o.kubeClient.CoreV1().Events(o.Namespace), "cert-syncer", diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_controller.go index e581a4418..1670b2c56 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_controller.go @@ -16,7 +16,7 @@ import ( "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" corev1interface "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/client-go/listers/core/v1" + v1 "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "k8s.io/klog" @@ -79,6 +79,7 @@ func getSecretDir(targetDir, secretName string) string { func (c *CertSyncController) sync() error { errors := []error{} + klog.Infof("Syncing configmaps: %v", c.configMaps) for _, cm := range c.configMaps { configMap, err := c.configMapLister.ConfigMaps(c.namespace).Get(cm.Name) switch { @@ -101,11 +102,14 @@ func (c *CertSyncController) sync() error { // remove missing content if err := os.RemoveAll(getConfigMapDir(c.destinationDir, cm.Name)); err != nil { + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed removing file for configmap: %s/%s: %v", configMap.Namespace, configMap.Name, err) errors = append(errors, err) } + c.eventRecorder.Eventf("CertificateRemoved", "Removed file for configmap: %s/%s", configMap.Namespace, configMap.Name) continue case err != nil: + c.eventRecorder.Warningf("CertificateUpdateFailed", 
"Failed getting configmap: %s/%s: %v", configMap.Namespace, configMap.Name, err) errors = append(errors, err) continue } @@ -138,6 +142,7 @@ func (c *CertSyncController) sync() error { configMap, err = c.configmapGetter.Get(configMap.Name, metav1.GetOptions{}) if err != nil { // Even if the error is not exists we will act on it when caches catch up + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed getting configmap: %s/%s: %v", configMap.Namespace, configMap.Name, err) errors = append(errors, err) continue } @@ -150,6 +155,7 @@ func (c *CertSyncController) sync() error { klog.Infof("Creating directory %q ...", contentDir) if err := os.MkdirAll(contentDir, 0755); err != nil && !os.IsExist(err) { + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed creating directory for configmap: %s/%s: %v", configMap.Namespace, configMap.Name, err) errors = append(errors, err) continue } @@ -162,12 +168,15 @@ func (c *CertSyncController) sync() error { klog.Infof("Writing configmap manifest %q ...", fullFilename) if err := ioutil.WriteFile(fullFilename, []byte(content), 0644); err != nil { + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed writing file for configmap: %s/%s: %v", configMap.Namespace, configMap.Name, err) errors = append(errors, err) continue } } + c.eventRecorder.Eventf("CertificateUpdated", "Wrote updated configmap: %s/%s", configMap.Namespace, configMap.Name) } + klog.Infof("Syncing secrets: %v", c.secrets) for _, s := range c.secrets { secret, err := c.secretLister.Secrets(c.namespace).Get(s.Name) switch { @@ -188,13 +197,23 @@ func (c *CertSyncController) sync() error { continue } + // check if the secret file exists, skip firing events if it does not + secretFile := getSecretDir(c.destinationDir, s.Name) + if _, err := os.Stat(secretFile); os.IsNotExist(err) { + continue + } + // remove missing content - if err := os.RemoveAll(getSecretDir(c.destinationDir, s.Name)); err != nil { + if err := os.RemoveAll(secretFile); err != nil 
{ + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed removing file for missing secret: %s/%s: %v", secret.Namespace, secret.Name, err) errors = append(errors, err) + continue } + c.eventRecorder.Warningf("CertificateRemoved", "Removed file for missing secret: %s/%s", secret.Namespace, secret.Name) continue case err != nil: + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed getting secret: %s/%s: %v", secret.Namespace, secret.Name, err) errors = append(errors, err) continue } @@ -227,6 +246,7 @@ func (c *CertSyncController) sync() error { secret, err = c.secretGetter.Get(secret.Name, metav1.GetOptions{}) if err != nil { // Even if the error is not exists we will act on it when caches catch up + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed getting secret: %s/%s: %v", secret.Namespace, secret.Name, err) errors = append(errors, err) continue } @@ -239,6 +259,7 @@ func (c *CertSyncController) sync() error { klog.Infof("Creating directory %q ...", contentDir) if err := os.MkdirAll(contentDir, 0755); err != nil && !os.IsExist(err) { + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed creating directory for secret: %s/%s: %v", secret.Namespace, secret.Name, err) errors = append(errors, err) continue } @@ -252,10 +273,12 @@ func (c *CertSyncController) sync() error { klog.Infof("Writing secret manifest %q ...", fullFilename) if err := ioutil.WriteFile(fullFilename, content, 0644); err != nil { + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed writing file for secret: %s/%s: %v", secret.Namespace, secret.Name, err) errors = append(errors, err) continue } } + c.eventRecorder.Eventf("CertificateUpdated", "Wrote updated secret: %s/%s", secret.Namespace, secret.Name) } return utilerrors.NewAggregate(errors) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller.go 
b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller.go index c8821a00e..04cfeab28 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller.go @@ -540,7 +540,7 @@ func (c *InstallerController) newNodeStateForInstallInProgress(currNodeState *op // stop early, don't wait for ready static pod because a new revision is waiting ret.LastFailedRevision = currNodeState.TargetRevision ret.TargetRevision = 0 - ret.LastFailedRevisionErrors = []string{"static pod of revision has been installed, but is not ready while new revision % is pending"} + ret.LastFailedRevisionErrors = []string{fmt.Sprintf("static pod of revision has been installed, but is not ready while new revision %d is pending", currNodeState.TargetRevision)} return ret, false, "new revision pending", nil } @@ -600,7 +600,7 @@ func (c *InstallerController) newNodeStateForInstallInProgress(currNodeState *op ret.LastFailedRevision = currNodeState.TargetRevision ret.TargetRevision = 0 if len(errors) == 0 { - errors = append(errors, "no detailed termination message, see `oc get -n %q pods/%q -oyaml`", installerPod.Namespace, installerPod.Name) + errors = append(errors, fmt.Sprintf("no detailed termination message, see `oc get -n %q pods/%q -oyaml`", installerPod.Namespace, installerPod.Name)) } ret.LastFailedRevisionErrors = errors return ret, false, "installer pod failed", nil diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installerstate/installer_state_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installerstate/installer_state_controller.go new file mode 100644 index 000000000..2bcc5f710 --- /dev/null +++ 
b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installerstate/installer_state_controller.go @@ -0,0 +1,254 @@ +package installerstate + +import ( + "fmt" + "strings" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +const installerStateControllerWorkQueueKey = "key" + +// maxToleratedPodPendingDuration is the maximum time we tolerate installer pod in pending state +var maxToleratedPodPendingDuration = 5 * time.Minute + +type InstallerStateController struct { + podsGetter corev1client.PodsGetter + eventsGetter corev1client.EventsGetter + queue workqueue.RateLimitingInterface + cachesToSync []cache.InformerSynced + targetNamespace string + operatorClient v1helpers.StaticPodOperatorClient + eventRecorder events.Recorder + + timeNowFn func() time.Time +} + +func NewInstallerStateController(kubeInformersForTargetNamespace informers.SharedInformerFactory, + podsGetter corev1client.PodsGetter, + eventsGetter corev1client.EventsGetter, + operatorClient v1helpers.StaticPodOperatorClient, + targetNamespace string, + recorder events.Recorder, +) *InstallerStateController { + c := &InstallerStateController{ + podsGetter: podsGetter, + eventsGetter: eventsGetter, + targetNamespace: targetNamespace, + operatorClient: operatorClient, + eventRecorder: recorder.WithComponentSuffix("installer-state-controller"), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "InstallerStateController"), + timeNowFn: time.Now, + } 
+ + c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Core().V1().Pods().Informer().HasSynced) + kubeInformersForTargetNamespace.Core().V1().Pods().Informer().AddEventHandler(c.eventHandler()) + + return c +} + +func (c *InstallerStateController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(installerStateControllerWorkQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(installerStateControllerWorkQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(installerStateControllerWorkQueueKey) }, + } +} + +// degradedConditionNames lists all supported condition types. +var degradedConditionNames = []string{ + "InstallerPodPendingDegraded", + "InstallerPodContainerWaitingDegraded", + "InstallerPodNetworkingDegraded", +} + +func (c *InstallerStateController) sync() error { + pods, err := c.podsGetter.Pods(c.targetNamespace).List(metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(labels.Set{"app": "installer"}).String(), + }) + if err != nil { + return err + } + + // collect all startingObjects that are in pending state for longer than maxToleratedPodPendingDuration + pendingPods := []*v1.Pod{} + for _, pod := range pods.Items { + if pod.Status.Phase != v1.PodPending || pod.Status.StartTime == nil { + continue + } + if c.timeNowFn().Sub(pod.Status.StartTime.Time) >= maxToleratedPodPendingDuration { + pendingPods = append(pendingPods, pod.DeepCopy()) + } + } + + // in theory, there should never be two installer startingObjects pending as we don't roll new installer pod + // until the previous/existing pod has finished its job. + foundConditions := []operatorv1.OperatorCondition{} + foundConditions = append(foundConditions, c.handlePendingInstallerPods(pendingPods)...) 
+ + // handle networking conditions that are based on events + networkConditions, err := c.handlePendingInstallerPodsNetworkEvents(pendingPods) + if err != nil { + return err + } + foundConditions = append(foundConditions, networkConditions...) + + updateConditionFuncs := []v1helpers.UpdateStaticPodStatusFunc{} + + // check the supported degraded foundConditions and check if any pending pod matching them. + for _, degradedConditionName := range degradedConditionNames { + // clean up existing foundConditions + updatedCondition := operatorv1.OperatorCondition{ + Type: degradedConditionName, + Status: operatorv1.ConditionFalse, + } + if condition := v1helpers.FindOperatorCondition(foundConditions, degradedConditionName); condition != nil { + updatedCondition = *condition + } + updateConditionFuncs = append(updateConditionFuncs, v1helpers.UpdateStaticPodConditionFn(updatedCondition)) + } + + if _, _, err := v1helpers.UpdateStaticPodStatus(c.operatorClient, updateConditionFuncs...); err != nil { + return err + } + + return nil +} + +func (c *InstallerStateController) handlePendingInstallerPodsNetworkEvents(pods []*v1.Pod) ([]operatorv1.OperatorCondition, error) { + conditions := []operatorv1.OperatorCondition{} + if len(pods) == 0 { + return conditions, nil + } + namespaceEvents, err := c.eventsGetter.Events(c.targetNamespace).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + for _, event := range namespaceEvents.Items { + if event.InvolvedObject.Kind != "Pod" { + continue + } + if !strings.Contains(event.Message, "failed to create pod network") { + continue + } + for _, pod := range pods { + if pod.Name != event.InvolvedObject.Name { + continue + } + // If we already find the pod that is pending because of the networking problem, skip other pods. + // This will reduce the events we fire. 
+ if v1helpers.FindOperatorCondition(conditions, "InstallerPodNetworkingDegraded") != nil { + break + } + condition := operatorv1.OperatorCondition{ + Type: "InstallerPodNetworkingDegraded", + Status: operatorv1.ConditionTrue, + Reason: event.Reason, + Message: fmt.Sprintf("Pod %q on node %q observed degraded networking: %s", pod.Name, pod.Spec.NodeName, event.Message), + } + conditions = append(conditions, condition) + c.eventRecorder.Warningf(condition.Reason, condition.Message) + } + } + return conditions, nil +} + +func (c *InstallerStateController) handlePendingInstallerPods(pods []*v1.Pod) []operatorv1.OperatorCondition { + conditions := []operatorv1.OperatorCondition{} + for _, pod := range pods { + // at this point we already know the pod is pending for longer than expected + pendingTime := c.timeNowFn().Sub(pod.Status.StartTime.Time) + + // the pod is in the pending state for longer than maxToleratedPodPendingDuration, report the reason and message + // as degraded condition for the operator. + if len(pod.Status.Reason) > 0 { + condition := operatorv1.OperatorCondition{ + Type: "InstallerPodPendingDegraded", + Reason: pod.Status.Reason, + Status: operatorv1.ConditionTrue, + Message: fmt.Sprintf("Pod %q on node %q is Pending for %s because %s", pod.Name, pod.Spec.NodeName, pendingTime, pod.Status.Message), + } + conditions = append(conditions, condition) + c.eventRecorder.Warningf(condition.Reason, condition.Message) + } + + // one or more containers are in waiting state for longer than maxToleratedPodPendingDuration, report the reason and message + // as degraded condition for the operator. 
+ for _, containerStatus := range pod.Status.ContainerStatuses { + if containerStatus.State.Waiting == nil { + continue + } + if state := containerStatus.State.Waiting; len(state.Reason) > 0 { + condition := operatorv1.OperatorCondition{ + Type: "InstallerPodContainerWaitingDegraded", + Reason: state.Reason, + Status: operatorv1.ConditionTrue, + Message: fmt.Sprintf("Pod %q on node %q container %q is waiting for %s because %s", pod.Name, pod.Spec.NodeName, containerStatus.Name, pendingTime, state.Message), + } + conditions = append(conditions, condition) + c.eventRecorder.Warningf(condition.Reason, condition.Message) + } + } + } + + return conditions +} + +// Run starts the kube-apiserver and blocks until stopCh is closed. +func (c *InstallerStateController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting InstallerStateController") + defer klog.Infof("Shutting down InstallerStateController") + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + return + } + + // doesn't matter what workers say, only start one. 
+ go wait.Until(c.runWorker, time.Second, stopCh) + + // add time based trigger + go wait.Until(func() { c.queue.Add(installerStateControllerWorkQueueKey) }, time.Minute, stopCh) + + <-stopCh +} + +func (c *InstallerStateController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *InstallerStateController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installerstate/installer_state_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installerstate/installer_state_controller_test.go new file mode 100644 index 000000000..c6e39856f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installerstate/installer_state_controller_test.go @@ -0,0 +1,177 @@ +package installerstate + +import ( + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/storage/names" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" + + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/operator/events/eventstesting" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +func newInstallerPod(name string, mutateStatusFn func(*corev1.PodStatus)) *corev1.Pod { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: "test", + Labels: map[string]string{"app": "installer"}, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{}, + } + mutateStatusFn(&pod.Status) + return pod +} + +func newInstallerPodNetworkEvent(mutateFn func(*corev1.Event)) 
*corev1.Event { + event := &corev1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.SimpleNameGenerator.GenerateName("test"), + Namespace: "test", + }, + InvolvedObject: corev1.ObjectReference{ + Kind: "Pod", + Name: "installer-1", + }, + Reason: "FailedCreatePodSandBox", + Message: `'(combined from similar events): Failed create pod sandbox: rpc error: + code = Unknown desc = failed to create pod network sandbox k8s_installer-5-control-plane-1_openshift-kube-apiserver_900db7f3-d2ce-11e9-8fc8-005056be0641_0(121698f4862fd67157ca586cab18aefb048fe5d7b3bd87516098ac0e91a90a13): + Multus: Err adding pod to network "openshift-sdn": Multus: error in invoke Delegate + add - "openshift-sdn": failed to send CNI request: Post http://dummy/: dial unix + /var/run/openshift-sdn/cniserver/socket: connect: connection refused'`, + } + if mutateFn != nil { + mutateFn(event) + } + return event +} + +func TestInstallerStateController(t *testing.T) { + tests := []struct { + name string + startingObjects []runtime.Object + evalConditions func(t *testing.T, conditions []operatorv1.OperatorCondition) + }{ + { + name: "should report pending pod", + startingObjects: []runtime.Object{ + newInstallerPod("installer-1", func(status *corev1.PodStatus) { + status.Phase = corev1.PodPending + status.Reason = "PendingReason" + status.Message = "PendingMessage" + status.StartTime = &metav1.Time{Time: time.Now().Add(-(maxToleratedPodPendingDuration + 5*time.Minute))} + }), + }, + evalConditions: func(t *testing.T, conditions []operatorv1.OperatorCondition) { + podPendingCondition := v1helpers.FindOperatorCondition(conditions, "InstallerPodPendingDegraded") + if podPendingCondition.Status != operatorv1.ConditionTrue { + t.Errorf("expected InstallerPodPendingDegraded condition to be True") + } + podContainerWaitingCondition := v1helpers.FindOperatorCondition(conditions, "InstallerPodContainerWaitingDegraded") + if podContainerWaitingCondition.Status != operatorv1.ConditionFalse { + 
t.Errorf("expected InstallerPodContainerWaitingDegraded condition to be False") + } + }, + }, + { + name: "should report pod with failing networking", + startingObjects: []runtime.Object{ + newInstallerPod("installer-1", func(status *corev1.PodStatus) { + status.Phase = corev1.PodPending + status.Reason = "PendingReason" + status.Message = "PendingMessage" + status.StartTime = &metav1.Time{Time: time.Now().Add(-(maxToleratedPodPendingDuration + 5*time.Minute))} + }), + newInstallerPodNetworkEvent(nil), + newInstallerPodNetworkEvent(nil), + newInstallerPodNetworkEvent(nil), + }, + evalConditions: func(t *testing.T, conditions []operatorv1.OperatorCondition) { + podPendingCondition := v1helpers.FindOperatorCondition(conditions, "InstallerPodNetworkingDegraded") + if podPendingCondition.Status != operatorv1.ConditionTrue { + t.Errorf("expected InstallerPodNetworkingDegraded condition to be True") + } + }, + }, + { + name: "should report pending pod with waiting container", + startingObjects: []runtime.Object{ + newInstallerPod("installer-1", func(status *corev1.PodStatus) { + status.Phase = corev1.PodPending + status.Reason = "PendingReason" + status.Message = "PendingMessage" + status.StartTime = &metav1.Time{Time: time.Now().Add(-(maxToleratedPodPendingDuration + 5*time.Minute))} + status.ContainerStatuses = append(status.ContainerStatuses, corev1.ContainerStatus{Name: "test", State: corev1.ContainerState{Waiting: &corev1.ContainerStateWaiting{ + Reason: "PodInitializing", + Message: "initializing error", + }}}) + }), + }, + evalConditions: func(t *testing.T, conditions []operatorv1.OperatorCondition) { + podPendingCondition := v1helpers.FindOperatorCondition(conditions, "InstallerPodPendingDegraded") + if podPendingCondition.Status != operatorv1.ConditionTrue { + t.Errorf("expected InstallerPodPendingDegraded condition to be True") + } + podContainerWaitingCondition := v1helpers.FindOperatorCondition(conditions, "InstallerPodContainerWaitingDegraded") + if 
podContainerWaitingCondition.Status != operatorv1.ConditionTrue { + t.Errorf("expected InstallerPodContainerWaitingDegraded condition to be True") + } + }, + }, + { + name: "should report false when no pending startingObjects", + startingObjects: []runtime.Object{ + newInstallerPod("installer-1", func(status *corev1.PodStatus) { + status.Phase = corev1.PodRunning + status.StartTime = &metav1.Time{Time: time.Now().Add(-(maxToleratedPodPendingDuration + 5*time.Minute))} + }), + }, + evalConditions: func(t *testing.T, conditions []operatorv1.OperatorCondition) { + podPendingCondition := v1helpers.FindOperatorCondition(conditions, "InstallerPodPendingDegraded") + if podPendingCondition.Status != operatorv1.ConditionFalse { + t.Errorf("expected InstallerPodPendingDegraded condition to be False") + } + podContainerWaitingCondition := v1helpers.FindOperatorCondition(conditions, "InstallerPodContainerWaitingDegraded") + if podContainerWaitingCondition.Status != operatorv1.ConditionFalse { + t.Errorf("expected InstallerPodContainerWaitingDegraded condition to be False") + } + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + kubeClient := fake.NewSimpleClientset(tt.startingObjects...) 
+ kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeClient, 1*time.Minute, informers.WithNamespace("test")) + stopCh := make(chan struct{}) + go kubeInformers.Start(stopCh) + defer close(stopCh) + + fakeStaticPodOperatorClient := v1helpers.NewFakeStaticPodOperatorClient(&operatorv1.StaticPodOperatorSpec{}, &operatorv1.StaticPodOperatorStatus{}, nil, nil) + eventRecorder := eventstesting.NewTestingEventRecorder(t) + controller := NewInstallerStateController(kubeInformers, kubeClient.CoreV1(), kubeClient.CoreV1(), fakeStaticPodOperatorClient, "test", eventRecorder) + if err := controller.sync(); err != nil { + t.Error(err) + return + } + + _, status, _, err := fakeStaticPodOperatorClient.GetOperatorState() + if err != nil { + t.Error(err) + return + } + if tt.evalConditions != nil { + tt.evalConditions(t, status.Conditions) + } + }) + } + +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller.go index 6071d035e..f694da0e6 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller.go @@ -193,7 +193,7 @@ func (c RevisionController) isLatestRevisionCurrent(revision int32) (bool, strin } if !equality.Semantic.DeepEqual(existingData, requiredData) { if klog.V(4) { - klog.Infof("secret %q changes for revision %d: %s", s.Name, revision, resourceapply.JSONPatch(existing, required)) + klog.Infof("Secret %q changes for revision %d: %s", s.Name, revision, resourceapply.JSONPatchSecret(existing, required)) } secretChanges = append(secretChanges, fmt.Sprintf("secret/%s has changed", s.Name)) } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go 
b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go index f17b19871..00db28341 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/openshift/library-go/pkg/operator/loglevel" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/installerstate" "github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller" @@ -138,6 +139,7 @@ func (b *staticPodOperatorControllerBuilder) ToControllers() (*staticPodOperator configMapClient := v1helpers.CachedConfigMapGetter(b.kubeClient.CoreV1(), b.kubeInformers) secretClient := v1helpers.CachedSecretGetter(b.kubeClient.CoreV1(), b.kubeInformers) podClient := b.kubeClient.CoreV1() + eventsClient := b.kubeClient.CoreV1() operandInformers := b.kubeInformers.InformersFor(b.operandNamespace) clusterInformers := b.kubeInformers.InformersFor("") @@ -172,6 +174,14 @@ func (b *staticPodOperatorControllerBuilder) ToControllers() (*staticPodOperator b.certConfigMaps, b.certSecrets, ) + controllers.installerStateController = installerstate.NewInstallerStateController( + operandInformers, + podClient, + eventsClient, + b.staticPodOperatorClient, + b.operandNamespace, + eventRecorder, + ) } if len(b.operandName) > 0 { @@ -239,6 +249,9 @@ func (b *staticPodOperatorControllerBuilder) ToControllers() (*staticPodOperator if controllers.installerController == nil { errs = append(errs, fmt.Errorf("missing installerController; cannot proceed")) } + if controllers.installerStateController == nil { + errs = append(errs, fmt.Errorf("missing installerStateController; cannot proceed")) + } if controllers.staticPodStateController == nil { eventRecorder.Warning("StaticPodStateControllerMissing", "not enough information provided, not all functionality is present") } @@ -255,6 +268,7 @@ func (b *staticPodOperatorControllerBuilder) 
ToControllers() (*staticPodOperator type staticPodOperatorControllers struct { revisionController *revision.RevisionController installerController *installer.InstallerController + installerStateController *installerstate.InstallerStateController staticPodStateController *staticpodstate.StaticPodStateController pruneController *prune.PruneController nodeController *node.NodeController @@ -272,6 +286,7 @@ func (o *staticPodOperatorControllers) WithInstallerPodMutationFn(installerPodMu func (o *staticPodOperatorControllers) Run(stopCh <-chan struct{}) { go o.revisionController.Run(1, stopCh) go o.installerController.Run(1, stopCh) + go o.installerStateController.Run(1, stopCh) go o.staticPodStateController.Run(1, stopCh) go o.pruneController.Run(1, stopCh) go o.nodeController.Run(1, stopCh) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/prune/cmd.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/prune/cmd.go index 2db8cda74..18ba33178 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/prune/cmd.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/prune/cmd.go @@ -64,7 +64,7 @@ func (o *PruneOptions) Validate() error { return fmt.Errorf("--resource-dir is required") } if o.MaxEligibleRevision == 0 { - return fmt.Errorf("--max-eligible-id is required") + return fmt.Errorf("--max-eligible-revision is required") } if len(o.StaticPodName) == 0 { return fmt.Errorf("--static-pod-name is required") diff --git a/vendor/github.com/openshift/library-go/pkg/operator/watchdog/cmd.go b/vendor/github.com/openshift/library-go/pkg/operator/watchdog/cmd.go index c0d64b7b2..4c2597e46 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/watchdog/cmd.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/watchdog/cmd.go @@ -6,6 +6,8 @@ import ( "io/ioutil" "os" "path/filepath" + "strconv" + "strings" "syscall" "time" @@ -26,8 +28,11 @@ import ( ) type FileWatcherOptions struct { - // 
ProcessName is the name of the process we will send SIGTERM + // ProcessName is the name of the process to look for in /proc if non-empty, + // identifying the process to send SIGTERM to. ProcessName string + // PidFile contains the pid of the process to send SIGTERM to. Can be empty. + PidFile string // Files lists all files we want to monitor for changes Files []string @@ -43,6 +48,9 @@ type FileWatcherOptions struct { // Time to give the process to terminate gracefully TerminationGracePeriod time.Duration + // ReadyFile is touched when the watched files have been initially read + ReadyFile string + // for unit-test to mock getting the process PID (unit-test) findPidByNameFn func(name string) (int, bool, error) @@ -119,12 +127,14 @@ func NewFileWatcherWatchdog() *cobra.Command { } func (o *FileWatcherOptions) AddFlags(fs *pflag.FlagSet) { - fs.StringVar(&o.ProcessName, "process-name", "", "name of the process to send TERM signal to on file change (eg. 'hyperkube').") + fs.StringVar(&o.ProcessName, "process-name", "", "base name of the binary to send the TERM signal to on file change (eg. 
'hyperkube').") + fs.StringVar(&o.PidFile, "pid-file", "", "file with the pid to send the TERM signal to on file change.") fs.StringSliceVar(&o.Files, "files", o.Files, "comma separated list of file names to monitor for changes") fs.StringVar(&o.KubeConfig, "kubeconfig", o.KubeConfig, "kubeconfig file or empty") fs.StringVar(&o.Namespace, "namespace", o.Namespace, "namespace to report the watchdog events") fs.DurationVar(&o.Interval, "interval", 5*time.Second, "interval specifying how aggressive the file checks should be") fs.DurationVar(&o.TerminationGracePeriod, "termination-grace-period", 30*time.Second, "interval specifying how long to wait until sending KILL signal to the process") + fs.StringVar(&o.ReadyFile, "ready-file", o.ReadyFile, "this file is touched when the watched files have been read initially (to avoid race between watchee and watcher)") } func (o *FileWatcherOptions) Complete() error { @@ -160,8 +170,8 @@ func (o *FileWatcherOptions) Complete() error { } func (o *FileWatcherOptions) Validate() error { - if len(o.ProcessName) == 0 { - return fmt.Errorf("process name must be specified") + if len(o.ProcessName) == 0 && len(o.PidFile) == 0 { + return fmt.Errorf("process name or pid file must be specified") } if len(o.Files) == 0 { return fmt.Errorf("at least one file to observe must be specified") @@ -179,10 +189,32 @@ func (o *FileWatcherOptions) runPidObserver(ctx context.Context, pidObservedCh c retries := 0 pollErr := wait.PollImmediateUntil(1*time.Second, func() (done bool, err error) { retries++ - // attempt to find the PID by process name via /proc - observedPID, found, err := o.findPidByNameFn(o.ProcessName) - if !found || err != nil { - klog.Warningf("Unable to determine PID for %q (retry: %d, err: %v)", o.ProcessName, retries, err) + observedPID := -1 + if len(o.ProcessName) > 0 { + // attempt to find the PID by process name via /proc + pid, found, err := o.findPidByNameFn(o.ProcessName) + if !found || err != nil { + klog.Warningf("Unable 
to determine PID for %q (retry: %d, err: %v)", o.ProcessName, retries, err) + } else { + observedPID = pid + } + } + if len(o.PidFile) > 0 { + // attempt to find the PID by pid file + bs, err := ioutil.ReadFile(o.PidFile) + if err != nil { + klog.Warningf("Unable to read pid file %s: %v", o.PidFile, err) + } else { + lines := strings.SplitN(string(bs), "\n", 2) + i, err := strconv.Atoi(lines[0]) + if err != nil { + klog.Warningf("Unable to parse pid file %s: %v", o.PidFile, err) + } else { + observedPID = i + } + } + } + if observedPID < 0 { return false, nil } @@ -293,7 +325,13 @@ func (o *FileWatcherOptions) runWatchdog(ctx context.Context) error { go o.runPidObserver(watchdogCtx, pidObservedCh) // Wait while we get the initial PID for the process - klog.Infof("Waiting for process %q PID ...", o.ProcessName) + if len(o.ProcessName) > 0 && len(o.PidFile) > 0 { + klog.Infof("Waiting for process with name %q or PID file %q...", o.ProcessName, o.PidFile) + } else if len(o.ProcessName) > 0 { + klog.Infof("Waiting for process with process name %q ...", o.ProcessName) + } else if len(o.PidFile) > 0 { + klog.Infof("Waiting for process PID file %q ...", o.PidFile) + } currentPID := <-pidObservedCh // Mutate path for specified files as '/proc/PID/root/' @@ -312,6 +350,14 @@ func (o *FileWatcherOptions) runWatchdog(ctx context.Context) error { o.recorder.Eventf("FileChangeWatchdogStarted", "Started watching files for process %s[%d]", o.ProcessName, currentPID) + if len(o.ReadyFile) > 0 { + f, err := os.Create(o.ReadyFile) + if err != nil { + return fmt.Errorf("cannot touch ready file %q: %v", o.ReadyFile, err) + } + f.Close() + } + observer, err := fileobserver.NewObserver(o.Interval) if err != nil { o.recorder.Warningf("ObserverFailed", "Failed to start to file observer: %v", err) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/watchdog/cmd_test.go b/vendor/github.com/openshift/library-go/pkg/operator/watchdog/cmd_test.go index 8821e6457..a3f302570 100644 
--- a/vendor/github.com/openshift/library-go/pkg/operator/watchdog/cmd_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/watchdog/cmd_test.go @@ -23,6 +23,7 @@ func TestPidObserver(t *testing.T) { } watcher := &FileWatcherOptions{ + ProcessName: "foo", findPidByNameFn: getProcessPIDByName, } diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go index b5d392520..dffb24dff 100644 --- a/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -341,7 +341,10 @@ func newInformer( // This will hold incoming changes. Note how we pass clientState in as a // KeyLister, that way resync operations will result in the correct set // of update/delete deltas. - fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, clientState) + fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ + KnownObjects: clientState, + ClientUnderstandsReplacedDeltaType: true, + }) cfg := &Config{ Queue: fifo, @@ -354,7 +357,7 @@ func newInformer( // from oldest to newest for _, d := range obj.(Deltas) { switch d.Type { - case Sync, Added, Updated: + case Sync, Replaced, Added, Updated: if old, exists, err := clientState.Get(d.Object); err == nil && exists { if err := clientState.Update(d.Object); err != nil { return err diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index f24eec254..1cd53b382 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -56,15 +56,64 @@ import ( // and internal tests. // // Also see the comment on DeltaFIFO. +// +// Warning: This constructs a DeltaFIFO that does not differentiate between +// events caused by a call to Replace (e.g., from a relist, which may +// contain object updates), and synthetic events caused by a periodic resync +// (which just emit the existing object). See https://issue.k8s.io/86015 for details. 
+// +// Use `NewDeltaFIFOWithOptions(DeltaFIFOOptions{..., ClientUnderstandsReplacedDeltaType: true})` +// instead to receive a `Replaced` event depending on the type. +// +// Deprecated: Equivalent to NewDeltaFIFOWithOptions(DeltaFIFOOptions{KeyFunction: keyFunc, KnownObjects: knownObjects}) func NewDeltaFIFO(keyFunc KeyFunc, knownObjects KeyListerGetter) *DeltaFIFO { + return NewDeltaFIFOWithOptions(DeltaFIFOOptions{ + KeyFunction: keyFunc, + KnownObjects: knownObjects, + }) +} + +// DeltaFIFOOptions is the configuration parameters for DeltaFIFO. All are +// optional. +type DeltaFIFOOptions struct { + + // KeyFunction is used to figure out what key an object should have. (It's + // exposed in the returned DeltaFIFO's KeyOf() method, with bonus features.) + // Optional, the default is MetaNamespaceKeyFunc which is essentially + // / + KeyFunction KeyFunc + + // KnownObjects is expected to return a list of keys that the consumer of + // this queue "knows about". It is used to decide which items are missing + // when Replace() is called; 'Deleted' deltas are produced for these items. + // It may be nil if you don't need to detect all deletions. + KnownObjects KeyListerGetter + + // ClientUnderstandsReplacedDeltaType is whether or not the queue consumer understands + // the Replaced DeltaType. Before Replaced was added, Replace() was treated + // the same as Sync(). For backwards-compatibility purposes, this is false + // by default. + ClientUnderstandsReplacedDeltaType bool +} + +// NewDeltaFIFOWithOptions returns a Store which can be used process changes to +// items. See also the comment on DeltaFIFO. 
+func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO { + if opts.KeyFunction == nil { + opts.KeyFunction = MetaNamespaceKeyFunc + } + f := &DeltaFIFO{ items: map[string]Deltas{}, queue: []string{}, - keyFunc: keyFunc, - knownObjects: knownObjects, + keyFunc: opts.KeyFunction, + knownObjects: opts.KnownObjects, + + clientUnderstandsReplacedDeltaType: opts.ClientUnderstandsReplacedDeltaType, } f.cond.L = &f.lock return f + } // DeltaFIFO is like FIFO, but allows you to process deletes. @@ -124,6 +173,10 @@ type DeltaFIFO struct { // Currently, not used to gate any of CRED operations. closed bool closedLock sync.Mutex + + // clientUnderstandsReplaced is whether to emit the Replaced or Sync + // DeltaType when Replace() is called (to preserve backwards compat). + clientUnderstandsReplacedDeltaType bool } var ( @@ -453,13 +506,19 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { defer f.lock.Unlock() keys := make(sets.String, len(list)) + // keep backwards compat for old clients + action := Sync + if f.clientUnderstandsReplacedDeltaType { + action = Replaced + } + for _, item := range list { key, err := f.KeyOf(item) if err != nil { return KeyError{item, err} } keys.Insert(key) - if err := f.queueActionLocked(Sync, item); err != nil { + if err := f.queueActionLocked(action, item); err != nil { return fmt.Errorf("couldn't enqueue object: %v", err) } } @@ -597,10 +656,14 @@ const ( Added DeltaType = "Added" Updated DeltaType = "Updated" Deleted DeltaType = "Deleted" - // The other types are obvious. You'll get Sync deltas when: - // * A watch expires/errors out and a new list/watch cycle is started. - // * You've turned on periodic syncs. - // (Anything that trigger's DeltaFIFO's Replace() method.) + // Replaced is emitted when we encountered watch errors and had to do a + // relist. We don't know if the replaced object has changed. + // + // NOTE: Previous versions of DeltaFIFO would use Sync for Replace events + // as well. 
Hence, Replaced is only emitted when the option + // ClientUnderstandsReplacedDeltaType is true. + Replaced DeltaType = "Replaced" + // Sync is for synthetic events during a periodic resync. Sync DeltaType = "Sync" ) diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo_test.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo_test.go index afe0a5a48..a530eb890 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo_test.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo_test.go @@ -257,6 +257,24 @@ func TestDeltaFIFO_ResyncNonExisting(t *testing.T) { } } +func TestDeltaFIFO_Resync(t *testing.T) { + f := NewDeltaFIFO( + testFifoObjectKeyFunc, + keyLookupFunc(func() []testFifoObject { + return []testFifoObject{mkFifoObj("foo", 5)} + }), + ) + f.Resync() + + deltas := f.items["foo"] + if len(deltas) != 1 { + t.Fatalf("unexpected deltas length: %v", deltas) + } + if deltas[0].Type != Sync { + t.Errorf("unexpected delta: %v", deltas[0]) + } +} + func TestDeltaFIFO_DeleteExistingNonPropagated(t *testing.T) { f := NewDeltaFIFO( testFifoObjectKeyFunc, @@ -302,6 +320,60 @@ func TestDeltaFIFO_ReplaceMakesDeletions(t *testing.T) { } } +// TestDeltaFIFO_ReplaceMakesDeletionsReplaced is the same as the above test, but +// ensures that a Replaced DeltaType is emitted. +func TestDeltaFIFO_ReplaceMakesDeletionsReplaced(t *testing.T) { + f := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ + KeyFunction: testFifoObjectKeyFunc, + KnownObjects: keyLookupFunc(func() []testFifoObject { + return []testFifoObject{mkFifoObj("foo", 5), mkFifoObj("bar", 6), mkFifoObj("baz", 7)} + }), + ClientUnderstandsReplacedDeltaType: true, + }) + + f.Delete(mkFifoObj("baz", 10)) + f.Replace([]interface{}{mkFifoObj("foo", 6)}, "0") + + expectedList := []Deltas{ + {{Deleted, mkFifoObj("baz", 10)}}, + {{Replaced, mkFifoObj("foo", 6)}}, + // Since "bar" didn't have a delete event and wasn't in the Replace list + // it should get a tombstone key with the right Obj. 
+ {{Deleted, DeletedFinalStateUnknown{Key: "bar", Obj: mkFifoObj("bar", 6)}}}, + } + + for _, expected := range expectedList { + cur := Pop(f).(Deltas) + if e, a := expected, cur; !reflect.DeepEqual(e, a) { + t.Errorf("Expected %#v, got %#v", e, a) + } + } +} + +// TestDeltaFIFO_ReplaceDeltaType checks that passing ClientUnderstandsReplacedDeltaType +// means that Replaced is correctly emitted. +func TestDeltaFIFO_ReplaceDeltaType(t *testing.T) { + f := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ + KeyFunction: testFifoObjectKeyFunc, + KnownObjects: keyLookupFunc(func() []testFifoObject { + return []testFifoObject{mkFifoObj("foo", 5)} + }), + ClientUnderstandsReplacedDeltaType: true, + }) + f.Replace([]interface{}{mkFifoObj("foo", 5)}, "0") + + expectedList := []Deltas{ + {{Replaced, mkFifoObj("foo", 5)}}, + } + + for _, expected := range expectedList { + cur := Pop(f).(Deltas) + if e, a := expected, cur; !reflect.DeepEqual(e, a) { + t.Errorf("Expected %#v, got %#v", e, a) + } + } +} + func TestDeltaFIFO_UpdateResyncRace(t *testing.T) { f := NewDeltaFIFO( testFifoObjectKeyFunc, diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index b2f3dba07..e8cad5dd7 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -189,7 +189,10 @@ type deleteNotification struct { func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() - fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, s.indexer) + fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ + KnownObjects: s.indexer, + ClientUnderstandsReplacedDeltaType: true, + }) cfg := &Config{ Queue: fifo, @@ -348,19 +351,19 @@ func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error { // from oldest to newest for _, d := range obj.(Deltas) { switch d.Type { - case Sync, Added, Updated: - isSync := d.Type == Sync + case Sync, Replaced, Added, Updated: 
s.cacheMutationDetector.AddObject(d.Object) if old, exists, err := s.indexer.Get(d.Object); err == nil && exists { if err := s.indexer.Update(d.Object); err != nil { return err } + isSync := d.Type == Sync s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object}, isSync) } else { if err := s.indexer.Add(d.Object); err != nil { return err } - s.processor.distribute(addNotification{newObj: d.Object}, isSync) + s.processor.distribute(addNotification{newObj: d.Object}, false) } case Deleted: if err := s.indexer.Delete(d.Object); err != nil { diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer_test.go b/vendor/k8s.io/client-go/tools/cache/shared_informer_test.go index 22e4a90dd..26a7a1624 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer_test.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer_test.go @@ -92,7 +92,7 @@ func (l *testListener) satisfiedExpectations() bool { l.lock.RLock() defer l.lock.RUnlock() - return len(l.receivedItemNames) == l.expectedItemNames.Len() && sets.NewString(l.receivedItemNames...).Equal(l.expectedItemNames) + return sets.NewString(l.receivedItemNames...).Equal(l.expectedItemNames) } func TestListenerResyncPeriods(t *testing.T) { @@ -263,3 +263,70 @@ func TestSharedInformerInitializationRace(t *testing.T) { go informer.Run(stop) close(stop) } + +// TestSharedInformerWatchDisruption simulates a watch that was closed +// with updates to the store during that time. We ensure that handlers with +// resync and no resync see the expected state. +func TestSharedInformerWatchDisruption(t *testing.T) { + // source simulates an apiserver object endpoint. 
+ source := fcache.NewFakeControllerSource() + + source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: "pod1"}}) + source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod2", UID: "pod2"}}) + + // create the shared informer and resync every 1s + informer := NewSharedInformer(source, &v1.Pod{}, 1*time.Second).(*sharedIndexInformer) + + clock := clock.NewFakeClock(time.Now()) + informer.clock = clock + informer.processor.clock = clock + + // listener, never resync + listenerNoResync := newTestListener("listenerNoResync", 0, "pod1", "pod2") + informer.AddEventHandlerWithResyncPeriod(listenerNoResync, listenerNoResync.resyncPeriod) + + listenerResync := newTestListener("listenerResync", 1*time.Second, "pod1", "pod2") + informer.AddEventHandlerWithResyncPeriod(listenerResync, listenerResync.resyncPeriod) + listeners := []*testListener{listenerNoResync, listenerResync} + + stop := make(chan struct{}) + defer close(stop) + + go informer.Run(stop) + + for _, listener := range listeners { + if !listener.ok() { + t.Errorf("%s: expected %v, got %v", listener.name, listener.expectedItemNames, listener.receivedItemNames) + } + } + + // Add pod3, bump pod2 but don't broadcast it, so that the change will be seen only on relist + source.AddDropWatch(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod3", UID: "pod3"}}) + source.ModifyDropWatch(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod2", UID: "pod2"}}) + + // Ensure that nobody saw any changes + for _, listener := range listeners { + if !listener.ok() { + t.Errorf("%s: expected %v, got %v", listener.name, listener.expectedItemNames, listener.receivedItemNames) + } + } + + for _, listener := range listeners { + listener.receivedItemNames = []string{} + } + + listenerNoResync.expectedItemNames = sets.NewString("pod1", "pod2", "pod3") + listenerResync.expectedItemNames = sets.NewString("pod1", "pod2", "pod3") + + // This calls shouldSync, which deletes noResync from the list of syncingListeners + clock.Step(1 * 
time.Second) + + // Simulate a connection loss (or even just a too-old-watch) + source.ResetWatch() + + for _, listener := range listeners { + if !listener.ok() { + t.Errorf("%s: expected %v, got %v", listener.name, listener.expectedItemNames, listener.receivedItemNames) + } + } +} diff --git a/vendor/k8s.io/client-go/tools/cache/testing/fake_controller_source.go b/vendor/k8s.io/client-go/tools/cache/testing/fake_controller_source.go index 24362801b..16e66fc67 100644 --- a/vendor/k8s.io/client-go/tools/cache/testing/fake_controller_source.go +++ b/vendor/k8s.io/client-go/tools/cache/testing/fake_controller_source.go @@ -18,11 +18,13 @@ package framework import ( "errors" + "fmt" "math/rand" "strconv" "sync" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -59,6 +61,7 @@ type FakeControllerSource struct { Items map[nnu]runtime.Object changes []watch.Event // one change per resourceVersion Broadcaster *watch.Broadcaster + lastRV int } type FakePVControllerSource struct { @@ -75,6 +78,16 @@ type nnu struct { uid types.UID } +// ResetWatch simulates connection problems; creates a new Broadcaster and flushes +// the change queue so that clients have to re-list and watch. +func (f *FakeControllerSource) ResetWatch() { + f.lock.Lock() + defer f.lock.Unlock() + f.Broadcaster.Shutdown() + f.Broadcaster = watch.NewBroadcaster(100, watch.WaitIfChannelFull) + f.changes = []watch.Event{} +} + // Add adds an object to the set and sends an add event to watchers. // obj's ResourceVersion is set. 
func (f *FakeControllerSource) Add(obj runtime.Object) { @@ -129,8 +142,8 @@ func (f *FakeControllerSource) Change(e watch.Event, watchProbability float64) { panic(err) // this is test code only } - resourceVersion := len(f.changes) + 1 - accessor.SetResourceVersion(strconv.Itoa(resourceVersion)) + f.lastRV += 1 + accessor.SetResourceVersion(strconv.Itoa(f.lastRV)) f.changes = append(f.changes, e) key := f.key(accessor) switch e.Type { @@ -173,8 +186,7 @@ func (f *FakeControllerSource) List(options metav1.ListOptions) (runtime.Object, if err != nil { return nil, err } - resourceVersion := len(f.changes) - listAccessor.SetResourceVersion(strconv.Itoa(resourceVersion)) + listAccessor.SetResourceVersion(strconv.Itoa(f.lastRV)) return listObj, nil } @@ -194,8 +206,7 @@ func (f *FakePVControllerSource) List(options metav1.ListOptions) (runtime.Objec if err != nil { return nil, err } - resourceVersion := len(f.changes) - listAccessor.SetResourceVersion(strconv.Itoa(resourceVersion)) + listAccessor.SetResourceVersion(strconv.Itoa(f.lastRV)) return listObj, nil } @@ -215,8 +226,7 @@ func (f *FakePVCControllerSource) List(options metav1.ListOptions) (runtime.Obje if err != nil { return nil, err } - resourceVersion := len(f.changes) - listAccessor.SetResourceVersion(strconv.Itoa(resourceVersion)) + listAccessor.SetResourceVersion(strconv.Itoa(f.lastRV)) return listObj, nil } @@ -229,9 +239,27 @@ func (f *FakeControllerSource) Watch(options metav1.ListOptions) (watch.Interfac if err != nil { return nil, err } - if rc < len(f.changes) { + if rc < f.lastRV { + // if the change queue was flushed... 
+ if len(f.changes) == 0 { + return nil, apierrors.NewResourceExpired(fmt.Sprintf("too old resource version: %d (%d)", rc, f.lastRV)) + } + + // get the RV of the oldest object in the change queue + oldestRV, err := meta.NewAccessor().ResourceVersion(f.changes[0].Object) + if err != nil { + panic(err) + } + oldestRC, err := strconv.Atoi(oldestRV) + if err != nil { + panic(err) + } + if rc < oldestRC { + return nil, apierrors.NewResourceExpired(fmt.Sprintf("too old resource version: %d (%d)", rc, oldestRC)) + } + changes := []watch.Event{} - for _, c := range f.changes[rc:] { + for _, c := range f.changes[rc-oldestRC+1:] { // Must make a copy to allow clients to modify the // object. Otherwise, if they make a change and write // it back, they will inadvertently change the our @@ -240,7 +268,7 @@ func (f *FakeControllerSource) Watch(options metav1.ListOptions) (watch.Interfac changes = append(changes, watch.Event{Type: c.Type, Object: c.Object.DeepCopyObject()}) } return f.Broadcaster.WatchWithPrefix(changes), nil - } else if rc > len(f.changes) { + } else if rc > f.lastRV { return nil, errors.New("resource version in the future not supported by this fake") } return f.Broadcaster.Watch(), nil diff --git a/vendor/k8s.io/client-go/tools/cache/testing/fake_controller_source_test.go b/vendor/k8s.io/client-go/tools/cache/testing/fake_controller_source_test.go index e5097c7a4..817d45cd0 100644 --- a/vendor/k8s.io/client-go/tools/cache/testing/fake_controller_source_test.go +++ b/vendor/k8s.io/client-go/tools/cache/testing/fake_controller_source_test.go @@ -20,7 +20,7 @@ import ( "sync" "testing" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/watch" ) @@ -93,3 +93,44 @@ func TestRCNumber(t *testing.T) { source.Shutdown() wg.Wait() } + +// TestResetWatch validates that the FakeController correctly mocks a watch +// falling behind and ResourceVersions aging out. 
+func TestResetWatch(t *testing.T) { + pod := func(name string) *v1.Pod { + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + } + + wg := &sync.WaitGroup{} + wg.Add(1) + + source := NewFakeControllerSource() + source.Add(pod("foo")) // RV = 1 + source.Modify(pod("foo")) // RV = 2 + source.Modify(pod("foo")) // RV = 3 + + // Kill watch, delete change history + source.ResetWatch() + + // This should fail, RV=1 was lost with ResetWatch + _, err := source.Watch(metav1.ListOptions{ResourceVersion: "1"}) + if err == nil { + t.Fatalf("Unexpected non-error") + } + + // This should succeed, RV=3 is current + w, err := source.Watch(metav1.ListOptions{ResourceVersion: "3"}) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + // Modify again, ensure the watch is still working + source.Modify(pod("foo")) + go consume(t, w, []string{"4"}, wg) + source.Shutdown() + wg.Wait() +}