diff --git a/apis/go.sum b/apis/go.sum index 9ee32435bb3..765c469ce64 100644 --- a/apis/go.sum +++ b/apis/go.sum @@ -18,7 +18,6 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= @@ -45,7 +44,6 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -143,7 +141,6 @@ golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -181,7 +178,6 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -190,7 +186,6 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/apis/scheme/scheme.go 
b/apis/scheme/scheme.go index ada21321360..0afe1edf572 100644 --- a/apis/scheme/scheme.go +++ b/apis/scheme/scheme.go @@ -49,7 +49,7 @@ limitations under the License. // } // // func main() { -// mgr := controllers.NewManager(controllers.GetConfigOrDie(), manager.Options{ +// mgr := controllers.NewManager(context.Background(), controllers.GetConfigOrDie(), manager.Options{ // Scheme: scheme, // }) // // ... @@ -69,7 +69,7 @@ type Builder struct { runtime.SchemeBuilder } -// Register adds one or objects to the SchemeBuilder so they can be added to a Scheme. Register mutates bld. +// Register adds one or more objects to the SchemeBuilder so they can be added to a Scheme. Register mutates bld. func (bld *Builder) Register(object ...runtime.Object) *Builder { bld.SchemeBuilder.Register(func(scheme *runtime.Scheme) error { scheme.AddKnownTypes(bld.GroupVersion, object...) diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 659d0641967..9de5049f3eb 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -27,7 +27,7 @@ import ( "k8s.io/klog" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/runtime/signals" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" openshiftapiv1 "github.com/openshift/api/config/v1" _ "github.com/openshift/generic-admission-server/pkg/cmd" diff --git a/cmd/operator/main.go b/cmd/operator/main.go index 717158e2f35..ab0b19359d7 100644 --- a/cmd/operator/main.go +++ b/cmd/operator/main.go @@ -24,7 +24,7 @@ import ( apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/runtime/signals" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" oappsv1 "github.com/openshift/api/apps/v1" orbacv1 "github.com/openshift/api/authorization/v1" diff --git a/go.mod b/go.mod index f265f2df1aa..4909d059b7e 100644 
--- a/go.mod +++ b/go.mod @@ -66,7 +66,7 @@ require ( sigs.k8s.io/cluster-api-provider-aws v0.0.0 sigs.k8s.io/cluster-api-provider-azure v0.0.0 sigs.k8s.io/cluster-api-provider-openstack v0.0.0 - sigs.k8s.io/controller-runtime v0.6.2 + sigs.k8s.io/controller-runtime v0.7.0 sigs.k8s.io/controller-tools v0.4.1 sigs.k8s.io/yaml v1.2.0 ) @@ -113,4 +113,4 @@ replace k8s.io/client-go => k8s.io/client-go v0.20.0 replace github.com/hashicorp/go-slug => github.com/hashicorp/go-slug v0.5.0 // point sigs.k8s.io/controller-runtime to forked version -replace sigs.k8s.io/controller-runtime => github.com/openshift-hive/controller-runtime v0.6.2-openshift +replace sigs.k8s.io/controller-runtime => github.com/openshift-hive/controller-runtime v0.7.0-openshift diff --git a/go.sum b/go.sum index a321145d1a0..3d7b5eb97cf 100644 --- a/go.sum +++ b/go.sum @@ -537,7 +537,6 @@ github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg github.com/go-logr/logr v0.2.1/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs= github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-logr/zapr v0.2.0 h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4= github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= @@ -1065,6 +1064,7 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= 
github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/improbable-eng/thanos v0.3.2/go.mod h1:GZewVGILKuJVPNRn7L4Zw+7X96qzFOwj63b22xYGXBE= @@ -1394,8 +1394,8 @@ github.com/opencontainers/runc v0.0.0-20191031171055-b133feaeeb2e/go.mod h1:qT5X github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/openshift-hive/controller-runtime v0.6.2-openshift h1:BMGNPq2msQOuDp0RC/45OLpt52ozqX5FkzscPQ4nNw0= -github.com/openshift-hive/controller-runtime v0.6.2-openshift/go.mod h1:vhcq/rlnENJ09SIRp3EveTaZ0yqH526hjf9iJdbUJ/E= +github.com/openshift-hive/controller-runtime v0.7.0-openshift h1:zutnlsqkgc++5Ozj+s80Ay1xkarXrvou8j17WKKCaNU= +github.com/openshift-hive/controller-runtime v0.7.0-openshift/go.mod h1:pJ3YBrJiAqMAZKi6UVGuE98ZrroV1p+pIhoHsMm9wdU= github.com/openshift-metal3/terraform-provider-ironic v0.2.4/go.mod h1:ux2W6gsCIYsY/fX5N0V0ZgwFEBNN7P8g6RlH6ACi97k= github.com/openshift/api v0.0.0-20191219222812-2987a591a72c/go.mod h1:dh9o4Fs58gpFXGSYfnVxGR9PnV53I8TW84pQaJDdGiY= github.com/openshift/api v0.0.0-20200205133042-34f0ec8dab87/go.mod h1:fT6U/JfG8uZzemTRwZA2kBDJP5nWz7v05UHnty/D+pk= @@ -1586,7 +1586,6 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.6/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= 
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= @@ -1914,6 +1913,8 @@ go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= +go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= @@ -1926,6 +1927,8 @@ go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= go4.org v0.0.0-20191010144846-132d2879e1e9/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= @@ -2321,8 +2324,8 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0= -gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +gomodules.xyz/jsonpatch/v2 v2.1.0 h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k= +gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= gomodules.xyz/jsonpatch/v3 v3.0.1/go.mod h1:CBhndykehEwTOlEfnsfJwvkFQbSN8YZFr9M+cIHAJto= gomodules.xyz/orderedmap v0.1.0/go.mod h1:g9/TPUCm1t2gwD3j3zfV8uylyYhVdCNSi+xCEIu7yTU= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= @@ -2447,7 +2450,6 @@ k8s.io/api v0.18.0-beta.2/go.mod h1:2oeNnWEqcSmaM/ibSh3t7xcIqbkGXhzZdn4ezV9T4m0= k8s.io/api v0.18.0-rc.1/go.mod h1:ZOh6SbHjOYyaMLlWmB2+UOQKEWDpCnVEVpEyt7S2J9s= k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8= k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= -k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI= k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= k8s.io/api v0.19.1/go.mod h1:+u/k4/K/7vp4vsfdT7dyl8Oxk1F26Md4g5F26Tu85PU= k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= @@ -2463,7 +2465,6 @@ k8s.io/apiextensions-apiserver v0.17.3/go.mod h1:CJbCyMfkKftAd/X/V6OTHYhVn7zXnDd k8s.io/apiextensions-apiserver v0.17.4/go.mod h1:rCbbbaFS/s3Qau3/1HbPlHblrWpFivoaLYccCffvQGI= k8s.io/apiextensions-apiserver v0.18.0-beta.2/go.mod h1:Hnrg5jx8/PbxRbUoqDGxtQkULjwx8FDW4WYJaKNK+fk= k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY= -k8s.io/apiextensions-apiserver v0.18.6/go.mod 
h1:lv89S7fUysXjLZO7ke783xOwVTm6lKizADfvUM/SS/M= k8s.io/apiextensions-apiserver v0.19.0/go.mod h1:znfQxNpjqz/ZehvbfMg5N6fvBJW5Lqu5HVLTJQdP4Fs= k8s.io/apiextensions-apiserver v0.19.2/go.mod h1:EYNjpqIAvNZe+svXVx9j4uBaVhTB4C94HkY3w058qcg= k8s.io/apiextensions-apiserver v0.19.4/go.mod h1:B9rpH/nu4JBCtuUp3zTTk8DEjZUupZTBEec7/2zNRYw= @@ -2488,7 +2489,6 @@ k8s.io/apimachinery v0.18.0-beta.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZF k8s.io/apimachinery v0.18.0-rc.1/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= -k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.19.1/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= @@ -2504,7 +2504,6 @@ k8s.io/apiserver v0.17.3/go.mod h1:iJtsPpu1ZpEnHaNawpSV0nYTGBhhX2dUlnn7/QS7QiY= k8s.io/apiserver v0.17.4/go.mod h1:5ZDQ6Xr5MNBxyi3iUZXS84QOhZl+W7Oq2us/29c0j9I= k8s.io/apiserver v0.18.0-beta.2/go.mod h1:bnblMkMoCFnIfVnVftd0SXJPzyvrk3RtaqSbblphF/A= k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw= -k8s.io/apiserver v0.18.6/go.mod h1:Zt2XvTHuaZjBz6EFYzpp+X4hTmgWGy8AthNVnTdm3Wg= k8s.io/apiserver v0.19.0/go.mod h1:XvzqavYj73931x7FLtyagh8WibHpePJ1QwWrSJs2CLk= k8s.io/apiserver v0.19.1/go.mod h1:iRxYIjA0X2XEyoW8KslN4gDhasfH4bWcjj6ckVeZX28= k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA= @@ -2540,7 +2539,6 @@ k8s.io/code-generator v0.18.0-beta.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvj k8s.io/code-generator v0.18.0-rc.1/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= k8s.io/code-generator 
v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= -k8s.io/code-generator v0.18.6/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/code-generator v0.19.1/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= @@ -2557,7 +2555,6 @@ k8s.io/component-base v0.17.4/go.mod h1:5BRqHMbbQPm2kKu35v3G+CpVq4K0RJKC7TRioF0I k8s.io/component-base v0.18.0-beta.2/go.mod h1:HVk5FpRnyzQ/MjBr9//e/yEBjTVa2qjGXCTuUzcD7ks= k8s.io/component-base v0.18.0-rc.1/go.mod h1:NNlRaxZEdLqTs2+6yXiU2SHl8gKsbcy19Ii+Sfq53RM= k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM= -k8s.io/component-base v0.18.6/go.mod h1:knSVsibPR5K6EW2XOjEHik6sdU5nCvKMrzMt2D4In14= k8s.io/component-base v0.19.0/go.mod h1:dKsY8BxkA+9dZIAh2aWJLL/UdASFDNtGYTCItL4LM7Y= k8s.io/component-base v0.19.1/go.mod h1:b0vDKYa8EdJJ8dHUA6fGPj4z8taqGks5mfZvp3p/jVo= k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo= @@ -2604,7 +2601,6 @@ k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4y k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20200831175022-64514a1d5d59/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c= @@ -2636,9 
+2632,9 @@ k8s.io/utils v0.0.0-20200229041039-0a110f9eb7ab/go.mod h1:sZAwmy6armz5eXlNoLmJcl k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200327001022-6496210b90e8/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200411171748-3d5a2fe318e4/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20200821003339-5e75c0163111/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= kubevirt.io/client-go v0.29.0/go.mod h1:JY7hQq+SUT0aLvleXrW/+28fDfZ6BPe4E6f8FyC8jkY= diff --git a/pkg/controller/awsprivatelink/awsprivatelink_controller.go b/pkg/controller/awsprivatelink/awsprivatelink_controller.go index 53a93f34d6e..09914bfa29f 100644 --- a/pkg/controller/awsprivatelink/awsprivatelink_controller.go +++ b/pkg/controller/awsprivatelink/awsprivatelink_controller.go @@ -143,7 +143,7 @@ type ReconcileAWSPrivateLink struct { type awsClientFn func(client.Client, awsclient.Options) (awsclient.Client, error) // Reconcile reconciles PrivateLink for ClusterDeployment. 
-func (r *ReconcileAWSPrivateLink) Reconcile(request reconcile.Request) (result reconcile.Result, returnErr error) { +func (r *ReconcileAWSPrivateLink) Reconcile(ctx context.Context, request reconcile.Request) (result reconcile.Result, returnErr error) { logger := controllerutils.BuildControllerLogger(ControllerName, "clusterDeployment", request.NamespacedName) logger.Debug("reconciling cluster deployment") recobsrv := hivemetrics.NewReconcileObserver(ControllerName, logger) diff --git a/pkg/controller/awsprivatelink/awsprivatelink_controller_test.go b/pkg/controller/awsprivatelink/awsprivatelink_controller_test.go index aa80bee14a3..2ac3545981a 100644 --- a/pkg/controller/awsprivatelink/awsprivatelink_controller_test.go +++ b/pkg/controller/awsprivatelink/awsprivatelink_controller_test.go @@ -1539,7 +1539,7 @@ users: NamespacedName: key, } - _, err := reconciler.Reconcile(reconcileRequest) + _, err := reconciler.Reconcile(context.TODO(), reconcileRequest) if test.err == "" { assert.NoError(t, err, "unexpected error from Reconcile") } else { diff --git a/pkg/controller/clusterclaim/clusterclaim_controller.go b/pkg/controller/clusterclaim/clusterclaim_controller.go index 1fb2d0128df..bf27a5266ce 100644 --- a/pkg/controller/clusterclaim/clusterclaim_controller.go +++ b/pkg/controller/clusterclaim/clusterclaim_controller.go @@ -76,30 +76,21 @@ func AddToManager(mgr manager.Manager, r *ReconcileClusterClaim, concurrentRecon // Watch for changes to ClusterDeployment if err := c.Watch( &source.Kind{Type: &hivev1.ClusterDeployment{}}, - &handler.EnqueueRequestsFromMapFunc{ - ToRequests: handler.ToRequestsFunc(requestsForClusterDeployment), - }, - ); err != nil { + handler.EnqueueRequestsFromMapFunc(requestsForClusterDeployment)); err != nil { return err } // Watch for changes to the hive-claim-owner Role if err := c.Watch( &source.Kind{Type: &rbacv1.Role{}}, - &handler.EnqueueRequestsFromMapFunc{ - ToRequests: requestsForRBACResources(r.Client, hiveClaimOwnerRoleName, 
r.logger), - }, - ); err != nil { + handler.EnqueueRequestsFromMapFunc(requestsForRBACResources(r.Client, hiveClaimOwnerRoleName, r.logger))); err != nil { return err } // Watch for changes to the hive-claim-owner RoleBinding if err := c.Watch( &source.Kind{Type: &rbacv1.Role{}}, - &handler.EnqueueRequestsFromMapFunc{ - ToRequests: requestsForRBACResources(r.Client, hiveClaimOwnerRoleBindingName, r.logger), - }, - ); err != nil { + handler.EnqueueRequestsFromMapFunc(requestsForRBACResources(r.Client, hiveClaimOwnerRoleBindingName, r.logger))); err != nil { return err } @@ -119,8 +110,8 @@ func claimForClusterDeployment(cd *hivev1.ClusterDeployment) *types.NamespacedNa } } -func requestsForClusterDeployment(o handler.MapObject) []reconcile.Request { - cd, ok := o.Object.(*hivev1.ClusterDeployment) +func requestsForClusterDeployment(o client.Object) []reconcile.Request { + cd, ok := o.(*hivev1.ClusterDeployment) if !ok { return nil } @@ -131,12 +122,12 @@ func requestsForClusterDeployment(o handler.MapObject) []reconcile.Request { return []reconcile.Request{{NamespacedName: *claim}} } -func requestsForRBACResources(c client.Client, resourceName string, logger log.FieldLogger) handler.ToRequestsFunc { - return func(o handler.MapObject) []reconcile.Request { - if o.Meta.GetName() != resourceName { +func requestsForRBACResources(c client.Client, resourceName string, logger log.FieldLogger) handler.MapFunc { + return func(o client.Object) []reconcile.Request { + if o.GetName() != resourceName { return nil } - clusterName := o.Meta.GetNamespace() + clusterName := o.GetNamespace() cd := &hivev1.ClusterDeployment{} if err := c.Get(context.Background(), client.ObjectKey{Namespace: clusterName, Name: clusterName}, cd); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "failed to get ClusterDeployment for RBAC resource") @@ -159,7 +150,7 @@ type ReconcileClusterClaim struct { } // Reconcile reconciles a ClusterClaim. 
-func (r *ReconcileClusterClaim) Reconcile(request reconcile.Request) (result reconcile.Result, returnErr error) { +func (r *ReconcileClusterClaim) Reconcile(ctx context.Context, request reconcile.Request) (result reconcile.Result, returnErr error) { logger := controllerutils.BuildControllerLogger(ControllerName, "clusterClaim", request.NamespacedName) logger.Infof("reconciling cluster claim") recobsrv := hivemetrics.NewReconcileObserver(ControllerName, logger) diff --git a/pkg/controller/clusterclaim/clusterclaim_controller_test.go b/pkg/controller/clusterclaim/clusterclaim_controller_test.go index 00519809639..9a08dd3f783 100644 --- a/pkg/controller/clusterclaim/clusterclaim_controller_test.go +++ b/pkg/controller/clusterclaim/clusterclaim_controller_test.go @@ -719,7 +719,7 @@ func TestReconcileClusterClaim(t *testing.T) { }, } - result, err := rcp.Reconcile(reconcileRequest) + result, err := rcp.Reconcile(context.TODO(), reconcileRequest) require.NoError(t, err, "unexpected error from Reconcile") if test.expectedRequeueAfter == nil { diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller.go b/pkg/controller/clusterdeployment/clusterdeployment_controller.go index d0985644b99..70b9afbe292 100644 --- a/pkg/controller/clusterdeployment/clusterdeployment_controller.go +++ b/pkg/controller/clusterdeployment/clusterdeployment_controller.go @@ -221,9 +221,7 @@ func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconci } // Watch for pods created by an install job - err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestsFromMapFunc{ - ToRequests: handler.ToRequestsFunc(selectorPodWatchHandler), - }) + err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, handler.EnqueueRequestsFromMapFunc(selectorPodWatchHandler)) if err != nil { log.WithField("controller", ControllerName).WithError(err).Error("Error watching cluster deployment pods") return err @@ -284,7 +282,7 @@ type ReconcileClusterDeployment struct { // 
Reconcile reads that state of the cluster for a ClusterDeployment object and makes changes based on the state read // and what is in the ClusterDeployment.Spec -func (r *ReconcileClusterDeployment) Reconcile(request reconcile.Request) (result reconcile.Result, returnErr error) { +func (r *ReconcileClusterDeployment) Reconcile(ctx context.Context, request reconcile.Request) (result reconcile.Result, returnErr error) { cdLog := controllerutils.BuildControllerLogger(ControllerName, "clusterDeployment", request.NamespacedName) cdLog.Info("reconciling cluster deployment") recobsrv := hivemetrics.NewReconcileObserver(ControllerName, cdLog) @@ -1853,13 +1851,13 @@ func (r *ReconcileClusterDeployment) createManagedDNSZone(cd *hivev1.ClusterDepl return nil } -func selectorPodWatchHandler(a handler.MapObject) []reconcile.Request { +func selectorPodWatchHandler(a client.Object) []reconcile.Request { retval := []reconcile.Request{} - pod := a.Object.(*corev1.Pod) + pod := a.(*corev1.Pod) if pod == nil { // Wasn't a Pod, bail out. This should not happen. - log.Errorf("Error converting MapObject.Object to Pod. Value: %+v", a.Object) + log.Errorf("Error converting MapObject.Object to Pod. 
Value: %+v", a) return retval } if pod.Labels == nil { diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go b/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go index c5efd8fa26d..4c355198842 100644 --- a/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go +++ b/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go @@ -1577,7 +1577,7 @@ func TestClusterDeploymentReconcile(t *testing.T) { mockRemoteClientBuilder.EXPECT().Build().Return(testRemoteClusterAPIClient(), nil) } - result, err := rcd.Reconcile(reconcileRequest) + result, err := rcd.Reconcile(context.TODO(), reconcileRequest) if test.validate != nil { test.validate(fakeClient, t) @@ -1635,7 +1635,7 @@ func TestClusterDeploymentReconcileResults(t *testing.T) { remoteClusterAPIClientBuilder: func(*hivev1.ClusterDeployment) remoteclient.Builder { return mockRemoteClientBuilder }, } - reconcileResult, err := rcd.Reconcile(reconcile.Request{ + reconcileResult, err := rcd.Reconcile(context.TODO(), reconcile.Request{ NamespacedName: types.NamespacedName{ Name: testName, Namespace: testNamespace, @@ -2199,7 +2199,7 @@ func TestUpdatePullSecretInfo(t *testing.T) { }, } - _, err := rcd.Reconcile(reconcile.Request{ + _, err := rcd.Reconcile(context.TODO(), reconcile.Request{ NamespacedName: types.NamespacedName{ Name: testName, Namespace: testNamespace, diff --git a/pkg/controller/clusterdeprovision/clusterdeprovision_controller.go b/pkg/controller/clusterdeprovision/clusterdeprovision_controller.go index af8b5f51ef4..f3482cf8632 100644 --- a/pkg/controller/clusterdeprovision/clusterdeprovision_controller.go +++ b/pkg/controller/clusterdeprovision/clusterdeprovision_controller.go @@ -147,7 +147,7 @@ type ReconcileClusterDeprovision struct { // Reconcile reads that state of the cluster for a ClusterDeprovision object and makes changes based on the state read // and what is in the ClusterDeprovision.Spec -func (r 
*ReconcileClusterDeprovision) Reconcile(request reconcile.Request) (reconcile.Result, error) { +func (r *ReconcileClusterDeprovision) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { rLog := controllerutils.BuildControllerLogger(ControllerName, "clusterDeprovision", request.NamespacedName) // For logging, we need to see when the reconciliation loop starts and ends. rLog.Info("reconciling cluster deprovision request") diff --git a/pkg/controller/clusterdeprovision/clusterdeprovision_controller_test.go b/pkg/controller/clusterdeprovision/clusterdeprovision_controller_test.go index 14378baaf98..70f29ead3d2 100644 --- a/pkg/controller/clusterdeprovision/clusterdeprovision_controller_test.go +++ b/pkg/controller/clusterdeprovision/clusterdeprovision_controller_test.go @@ -260,7 +260,7 @@ func TestClusterDeprovisionReconcile(t *testing.T) { return mocks.mockAWSClient, nil }}} - _, err := r.Reconcile(reconcile.Request{ + _, err := r.Reconcile(context.TODO(), reconcile.Request{ NamespacedName: types.NamespacedName{ Name: testName, Namespace: testNamespace, diff --git a/pkg/controller/clusterpool/clusterdeploymentexpectations.go b/pkg/controller/clusterpool/clusterdeploymentexpectations.go index da669a6d757..e7cad6e012a 100644 --- a/pkg/controller/clusterpool/clusterdeploymentexpectations.go +++ b/pkg/controller/clusterpool/clusterdeploymentexpectations.go @@ -3,6 +3,7 @@ package clusterpool import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -13,21 +14,19 @@ import ( ) func (r *ReconcileClusterPool) watchClusterDeployments(c controller.Controller) error { - handler := &clusterDeploymentEventHandler{ - EventHandler: &handler.EnqueueRequestsFromMapFunc{ - ToRequests: handler.ToRequestsFunc( - func(a handler.MapObject) 
[]reconcile.Request { - cpKey := clusterPoolKey(a.Object.(*hivev1.ClusterDeployment)) - if cpKey == nil { - return nil - } - return []reconcile.Request{{NamespacedName: *cpKey}} - }, - ), - }, + h := &clusterDeploymentEventHandler{ + EventHandler: handler.EnqueueRequestsFromMapFunc( + func(a client.Object) []reconcile.Request { + cpKey := clusterPoolKey(a.(*hivev1.ClusterDeployment)) + if cpKey == nil { + return nil + } + return []reconcile.Request{{NamespacedName: *cpKey}} + }, + ), reconciler: r, } - return c.Watch(&source.Kind{Type: &hivev1.ClusterDeployment{}}, handler) + return c.Watch(&source.Kind{Type: &hivev1.ClusterDeployment{}}, h) } var _ handler.EventHandler = &clusterDeploymentEventHandler{} diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go index aeb9c5600e9..6f1fcf8b755 100644 --- a/pkg/controller/clusterpool/clusterpool_controller.go +++ b/pkg/controller/clusterpool/clusterpool_controller.go @@ -96,22 +96,20 @@ func AddToManager(mgr manager.Manager, r *ReconcileClusterPool, concurrentReconc } // Watch for changes to ClusterClaims - enqueuePoolForClaim := &handler.EnqueueRequestsFromMapFunc{ - ToRequests: handler.ToRequestsFunc( - func(o handler.MapObject) []reconcile.Request { - claim, ok := o.Object.(*hivev1.ClusterClaim) - if !ok { - return nil - } - return []reconcile.Request{{ - NamespacedName: types.NamespacedName{ - Namespace: claim.Namespace, - Name: claim.Spec.ClusterPoolName, - }, - }} - }, - ), - } + enqueuePoolForClaim := handler.EnqueueRequestsFromMapFunc( + func(o client.Object) []reconcile.Request { + claim, ok := o.(*hivev1.ClusterClaim) + if !ok { + return nil + } + return []reconcile.Request{{ + NamespacedName: types.NamespacedName{ + Namespace: claim.Namespace, + Name: claim.Spec.ClusterPoolName, + }, + }} + }, + ) if err := c.Watch(&source.Kind{Type: &hivev1.ClusterClaim{}}, enqueuePoolForClaim); err != nil { return err } @@ -119,9 +117,8 @@ func 
AddToManager(mgr manager.Manager, r *ReconcileClusterPool, concurrentReconc // Watch for changes to the hive cluster pool admin RoleBindings if err := c.Watch( &source.Kind{Type: &rbacv1.RoleBinding{}}, - &handler.EnqueueRequestsFromMapFunc{ - ToRequests: requestsForRBACResources(r.Client, r.logger), - }, + handler.EnqueueRequestsFromMapFunc( + requestsForRBACResources(r.Client, r.logger)), ); err != nil { return err } @@ -129,9 +126,8 @@ func AddToManager(mgr manager.Manager, r *ReconcileClusterPool, concurrentReconc // Watch for changes to the hive-cluster-pool-admin-binding RoleBinding if err := c.Watch( &source.Kind{Type: &rbacv1.RoleBinding{}}, - &handler.EnqueueRequestsFromMapFunc{ - ToRequests: requestsForCDRBACResources(r.Client, clusterPoolAdminRoleBindingName, r.logger), - }, + handler.EnqueueRequestsFromMapFunc( + requestsForCDRBACResources(r.Client, clusterPoolAdminRoleBindingName, r.logger)), ); err != nil { return err } @@ -139,12 +135,12 @@ func AddToManager(mgr manager.Manager, r *ReconcileClusterPool, concurrentReconc return nil } -func requestsForCDRBACResources(c client.Client, resourceName string, logger log.FieldLogger) handler.ToRequestsFunc { - return func(o handler.MapObject) []reconcile.Request { - if o.Meta.GetName() != resourceName { +func requestsForCDRBACResources(c client.Client, resourceName string, logger log.FieldLogger) handler.MapFunc { + return func(o client.Object) []reconcile.Request { + if o.GetName() != resourceName { return nil } - clusterName := o.Meta.GetNamespace() + clusterName := o.GetNamespace() cd := &hivev1.ClusterDeployment{} if err := c.Get(context.Background(), client.ObjectKey{Namespace: clusterName, Name: clusterName}, cd); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "failed to get ClusterDeployment for RBAC resource") @@ -158,9 +154,9 @@ func requestsForCDRBACResources(c client.Client, resourceName string, logger log } } -func requestsForRBACResources(c client.Client, logger 
log.FieldLogger) handler.ToRequestsFunc { - return func(o handler.MapObject) []reconcile.Request { - binding, ok := o.Object.(*rbacv1.RoleBinding) +func requestsForRBACResources(c client.Client, logger log.FieldLogger) handler.MapFunc { + return func(o client.Object) []reconcile.Request { + binding, ok := o.(*rbacv1.RoleBinding) if !ok { return nil } @@ -169,7 +165,7 @@ func requestsForRBACResources(c client.Client, logger log.FieldLogger) handler.T } cpList := &hivev1.ClusterPoolList{} - if err := c.List(context.Background(), cpList, client.InNamespace(o.Meta.GetNamespace())); err != nil { + if err := c.List(context.Background(), cpList, client.InNamespace(o.GetNamespace())); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "failed to list cluster pools for RBAC resource") return nil } @@ -198,7 +194,7 @@ type ReconcileClusterPool struct { // Reconcile reads the state of the ClusterPool, checks if we currently have enough ClusterDeployments waiting, and // attempts to reach the desired state if not. -func (r *ReconcileClusterPool) Reconcile(request reconcile.Request) (reconcile.Result, error) { +func (r *ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { logger := controllerutils.BuildControllerLogger(ControllerName, "clusterPool", request.NamespacedName) logger.Infof("reconciling cluster pool") recobsrv := hivemetrics.NewReconcileObserver(ControllerName, logger) @@ -581,7 +577,7 @@ func (r *ReconcileClusterPool) createCluster( } // Create the resources. 
for _, obj := range objs { - if err := r.Client.Create(context.Background(), obj); err != nil { + if err := r.Client.Create(context.Background(), obj.(client.Object)); err != nil { r.expectations.CreationObserved(poolKey) return err } diff --git a/pkg/controller/clusterpool/clusterpool_controller_test.go b/pkg/controller/clusterpool/clusterpool_controller_test.go index ff4d99579c5..5540c9136fc 100644 --- a/pkg/controller/clusterpool/clusterpool_controller_test.go +++ b/pkg/controller/clusterpool/clusterpool_controller_test.go @@ -904,7 +904,7 @@ func TestReconcileClusterPool(t *testing.T) { }, } - _, err := rcp.Reconcile(reconcileRequest) + _, err := rcp.Reconcile(context.TODO(), reconcileRequest) if test.expectError { assert.Error(t, err, "expected error from reconcile") } else { diff --git a/pkg/controller/clusterpoolnamespace/clusterpoolnamespace_controller.go b/pkg/controller/clusterpoolnamespace/clusterpoolnamespace_controller.go index c8bbff66ab3..36b052f8342 100644 --- a/pkg/controller/clusterpoolnamespace/clusterpoolnamespace_controller.go +++ b/pkg/controller/clusterpoolnamespace/clusterpoolnamespace_controller.go @@ -74,18 +74,15 @@ func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconci } // Watch for changes to ClusterDeployment - cdMapFn := func(a handler.MapObject) []reconcile.Request { - cd := a.Object.(*hivev1.ClusterDeployment) + cdMapFn := func(a client.Object) []reconcile.Request { + cd := a.(*hivev1.ClusterDeployment) return []reconcile.Request{{ NamespacedName: types.NamespacedName{Name: cd.Namespace}, }} } if err := c.Watch( &source.Kind{Type: &hivev1.ClusterDeployment{}}, - &handler.EnqueueRequestsFromMapFunc{ - ToRequests: handler.ToRequestsFunc(cdMapFn), - }, - ); err != nil { + handler.EnqueueRequestsFromMapFunc(cdMapFn)); err != nil { return err } @@ -102,7 +99,7 @@ type ReconcileClusterPoolNamespace struct { } // Reconcile deletes a Namespace if it no longer contains any ClusterDeployments. 
-func (r *ReconcileClusterPoolNamespace) Reconcile(request reconcile.Request) (reconcile.Result, error) { +func (r *ReconcileClusterPoolNamespace) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { logger := controllerutils.BuildControllerLogger(ControllerName, "namespace", request.NamespacedName) logger.Info("reconciling namespace") recobsrv := hivemetrics.NewReconcileObserver(ControllerName, logger) diff --git a/pkg/controller/clusterpoolnamespace/clusterpoolnamespace_controller_test.go b/pkg/controller/clusterpoolnamespace/clusterpoolnamespace_controller_test.go index cdc412716ff..00a5ff3da9e 100644 --- a/pkg/controller/clusterpoolnamespace/clusterpoolnamespace_controller_test.go +++ b/pkg/controller/clusterpoolnamespace/clusterpoolnamespace_controller_test.go @@ -212,7 +212,7 @@ func TestReconcileClusterPoolNamespace_Reconcile_Movement(t *testing.T) { logger: logger, } namespaceKey := client.ObjectKey{Name: namespaceName} - result, err := reconciler.Reconcile(reconcile.Request{NamespacedName: namespaceKey}) + result, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: namespaceKey}) endTime := time.Now() require.NoError(t, err, "unexpected error during reconcile") diff --git a/pkg/controller/clusterprovision/clusterprovision_controller.go b/pkg/controller/clusterprovision/clusterprovision_controller.go index 43984983536..c74ff7f4642 100644 --- a/pkg/controller/clusterprovision/clusterprovision_controller.go +++ b/pkg/controller/clusterprovision/clusterprovision_controller.go @@ -121,9 +121,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, } // Watch for changes to ClusterDeployment - if err := c.Watch(&source.Kind{Type: &hivev1.ClusterDeployment{}}, &handler.EnqueueRequestsFromMapFunc{ - ToRequests: handler.ToRequestsFunc(clusterDeploymentWatchHandler), - }); err != nil { + if err := c.Watch(&source.Kind{Type: &hivev1.ClusterDeployment{}}, 
handler.EnqueueRequestsFromMapFunc(clusterDeploymentWatchHandler)); err != nil { return errors.Wrap(err, "could not watch clusterdeployments") } @@ -143,7 +141,7 @@ type ReconcileClusterProvision struct { // Reconcile reads that state of the cluster for a ClusterProvision object and makes changes based on the state read // and what is in the ClusterProvision.Spec -func (r *ReconcileClusterProvision) Reconcile(request reconcile.Request) (reconcile.Result, error) { +func (r *ReconcileClusterProvision) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { pLog := controllerutils.BuildControllerLogger(ControllerName, "clusterProvision", request.NamespacedName) pLog.Info("reconciling cluster provision") recobsrv := hivemetrics.NewReconcileObserver(ControllerName, pLog) @@ -504,11 +502,11 @@ func (r *ReconcileClusterProvision) existingJobs(provision *hivev1.ClusterProvis return jobs, nil } -func clusterDeploymentWatchHandler(a handler.MapObject) []reconcile.Request { - cd := a.Object.(*hivev1.ClusterDeployment) +func clusterDeploymentWatchHandler(a client.Object) []reconcile.Request { + cd := a.(*hivev1.ClusterDeployment) if cd == nil { // Wasn't a ClusterDeployment, bail out. This should not happen. - log.Errorf("Error converting MapObject.Object to ClusterDeployment. Value: %+v", a.Object) + log.Errorf("Error converting MapObject.Object to ClusterDeployment. 
Value: %+v", a) return nil } diff --git a/pkg/controller/clusterprovision/clusterprovision_controller_test.go b/pkg/controller/clusterprovision/clusterprovision_controller_test.go index c63316792d9..699b603a16e 100644 --- a/pkg/controller/clusterprovision/clusterprovision_controller_test.go +++ b/pkg/controller/clusterprovision/clusterprovision_controller_test.go @@ -269,7 +269,7 @@ func TestClusterProvisionReconcile(t *testing.T) { controllerExpectations.ExpectCreations(reconcileRequest.String(), 1) } - result, err := rcp.Reconcile(reconcileRequest) + result, err := rcp.Reconcile(context.TODO(), reconcileRequest) if test.validateRequeueAfter != nil { test.validateRequeueAfter(result.RequeueAfter, fakeClient, t) diff --git a/pkg/controller/clusterrelocate/clientwrapper_test.go b/pkg/controller/clusterrelocate/clientwrapper_test.go index 8a3f6af7c8f..eb1fd64d91e 100644 --- a/pkg/controller/clusterrelocate/clientwrapper_test.go +++ b/pkg/controller/clusterrelocate/clientwrapper_test.go @@ -5,7 +5,6 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -15,7 +14,7 @@ type deleteBlockingClientWrapper struct { var _ client.Client = (*deleteBlockingClientWrapper)(nil) -func (c *deleteBlockingClientWrapper) Delete(ctx context.Context, obj runtime.Object, opts ...client.DeleteOption) error { +func (c *deleteBlockingClientWrapper) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { a, err := meta.Accessor(obj) if err != nil { return err diff --git a/pkg/controller/clusterrelocate/clusterrelocate_controller.go b/pkg/controller/clusterrelocate/clusterrelocate_controller.go index 5cca8a54660..37e797e6eff 100644 --- a/pkg/controller/clusterrelocate/clusterrelocate_controller.go +++ b/pkg/controller/clusterrelocate/clusterrelocate_controller.go @@ -97,9 +97,8 @@ func Add(mgr manager.Manager) error { } // Watch for changes to 
ClusterRelocate - if err := c.Watch(&source.Kind{Type: &hivev1.ClusterRelocate{}}, &handler.EnqueueRequestsFromMapFunc{ - ToRequests: handler.ToRequestsFunc(r.clusterRelocateHandlerFunc), - }); err != nil { + if err := c.Watch(&source.Kind{Type: &hivev1.ClusterRelocate{}}, + handler.EnqueueRequestsFromMapFunc(r.clusterRelocateHandlerFunc)); err != nil { logger.WithError(err).Error("Error watching ClusterRelocate") return err } @@ -107,8 +106,8 @@ func Add(mgr manager.Manager) error { return nil } -func (r *ReconcileClusterRelocate) clusterRelocateHandlerFunc(a handler.MapObject) (requests []reconcile.Request) { - clusterRelocate := a.Object.(*hivev1.ClusterRelocate) +func (r *ReconcileClusterRelocate) clusterRelocateHandlerFunc(a client.Object) (requests []reconcile.Request) { + clusterRelocate := a.(*hivev1.ClusterRelocate) labelSelector, err := metav1.LabelSelectorAsSelector(&clusterRelocate.Spec.ClusterDeploymentSelector) if err != nil { @@ -150,7 +149,7 @@ type ReconcileClusterRelocate struct { } // Reconcile relocates ClusterDeployments matching with a ClusterRelocate to another Hive instance. -func (r *ReconcileClusterRelocate) Reconcile(request reconcile.Request) (reconcile.Result, error) { +func (r *ReconcileClusterRelocate) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { logger := controllerutils.BuildControllerLogger(ControllerName, "clusterDeployment", request.NamespacedName) logger.Info("reconciling cluster deployment") recobsrv := hivemetrics.NewReconcileObserver(ControllerName, logger) @@ -470,12 +469,7 @@ func (r *ReconcileClusterRelocate) reconcileNoSingleMatch(cd *hivev1.ClusterDepl // Hive can or should do to resolve this. The ClusterDeployment cannot be relocated to the destination cluster // unless the existing ClusterDeployment on the destination cluster is deleted. 
func (r *ReconcileClusterRelocate) checkForExistingClusterDeployment(cd *hivev1.ClusterDeployment, destClient client.Client, logger log.FieldLogger) (proceed bool, completed bool, returnErr error) { - cdKey, err := client.ObjectKeyFromObject(cd) - if err != nil { - logger.WithError(err).Error("could not get object key for clusterdeployment") - returnErr = errors.Wrap(err, "could not get object key for clusterdeployment") - return - } + cdKey := client.ObjectKeyFromObject(cd) destCD := &hivev1.ClusterDeployment{} switch err := destClient.Get(context.Background(), cdKey, destCD); { // no ClusterDeployment in destination cluster @@ -524,7 +518,7 @@ func (r *ReconcileClusterRelocate) copy(cd *hivev1.ClusterDeployment, destClient // copy dependent resources for _, t := range typesToCopy() { - if err := r.copyResources(cd, destClient, t, logger); err != nil { + if err := r.copyResources(cd, destClient, t.(client.ObjectList), logger); err != nil { return errors.Wrapf(err, "failed to copy %T", t) } } @@ -554,7 +548,7 @@ func (r *ReconcileClusterRelocate) copy(cd *hivev1.ClusterDeployment, destClient // copyResources copies all of the resources of the given object type in the namespace of the ClusterDeployment to the // destination cluster -func (r *ReconcileClusterRelocate) copyResources(cd *hivev1.ClusterDeployment, destClient client.Client, objectList runtime.Object, logger log.FieldLogger) error { +func (r *ReconcileClusterRelocate) copyResources(cd *hivev1.ClusterDeployment, destClient client.Client, objectList client.ObjectList, logger log.FieldLogger) error { logger = logger.WithField("type", reflect.TypeOf(objectList)) if err := r.List(context.Background(), objectList, client.InNamespace(cd.Namespace)); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "could not list resources") @@ -594,7 +588,7 @@ func (r *ReconcileClusterRelocate) copyResource(obj runtime.Object, destClient c return errors.Wrap(err, "could not clear fields from source object") 
} // Need to use a copy here so that `obj` is left unaltered if the resource already exists on the remote cluster. - objToCreate := obj.DeepCopyObject() + objToCreate := obj.DeepCopyObject().(client.Object) switch err := destClient.Create(context.Background(), objToCreate); { case err == nil: logger.Info("resource created in destination cluster") @@ -603,7 +597,7 @@ func (r *ReconcileClusterRelocate) copyResource(obj runtime.Object, destClient c return errors.Wrap(err, "resource already exists in destination cluster") } logger.Info("resource already exists in destination cluster; replacing if there are changes") - if err := r.replaceResourceIfChanged(destClient, obj, logger); err != nil { + if err := r.replaceResourceIfChanged(destClient, obj.(client.Object), logger); err != nil { return errors.Wrap(err, "failed to sync existing resource") } default: @@ -613,14 +607,10 @@ func (r *ReconcileClusterRelocate) copyResource(obj runtime.Object, destClient c return nil } -func (r *ReconcileClusterRelocate) replaceResourceIfChanged(destClient client.Client, srcObj runtime.Object, logger log.FieldLogger) error { +func (r *ReconcileClusterRelocate) replaceResourceIfChanged(destClient client.Client, srcObj client.Object, logger log.FieldLogger) error { // Get the object from the destination cluster - objKey, err := client.ObjectKeyFromObject(srcObj) - if err != nil { - logger.WithError(err).Error("could not get object key") - return errors.Wrap(err, "could not get object key") - } - destObj := reflect.New(reflect.TypeOf(srcObj).Elem()).Interface().(runtime.Object) + objKey := client.ObjectKeyFromObject(srcObj) + destObj := reflect.New(reflect.TypeOf(srcObj).Elem()).Interface().(client.Object) if err := destClient.Get(context.Background(), objKey, destObj); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "could not get resource from destination cluster") return errors.Wrap(err, "could not get resource from destination cluster") diff --git 
a/pkg/controller/clusterrelocate/clusterrelocate_controller_test.go b/pkg/controller/clusterrelocate/clusterrelocate_controller_test.go index e783255fe7c..b6f7ce06c9c 100644 --- a/pkg/controller/clusterrelocate/clusterrelocate_controller_test.go +++ b/pkg/controller/clusterrelocate/clusterrelocate_controller_test.go @@ -91,7 +91,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { cd *hivev1.ClusterDeployment srcResources []runtime.Object destResources []runtime.Object - expectedResources []runtime.Object + expectedResources []client.Object }{ { name: "no relocation", @@ -103,7 +103,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { srcResources: []runtime.Object{ crBuilder.Build(), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ namespaceBuilder.Build(), cdBuilder.Build( testcd.Generic(withRelocateAnnotation(crName, hivev1.RelocateIncoming)), @@ -119,7 +119,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { destResources: []runtime.Object{ cdBuilder.Build(), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ cdBuilder.Build(), }, }, @@ -136,7 +136,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { }, ), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ cdBuilder.Build( func(cd *hivev1.ClusterDeployment) { cd.Spec.ClusterMetadata = &hivev1.ClusterMetadata{} @@ -157,7 +157,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { destResources: []runtime.Object{ cdBuilder.Build(), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ cdBuilder.Build(), }, }, @@ -172,7 +172,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { testcd.Generic(testgeneric.WithResourceVersion("some-rv")), ), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ cdBuilder.Build( 
testcd.Generic(testgeneric.WithResourceVersion("some-rv")), ), @@ -187,7 +187,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { destResources: []runtime.Object{ namespaceBuilder.Build(), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ namespaceBuilder.Build(), cdBuilder.Build( testcd.Generic(withRelocateAnnotation(crName, hivev1.RelocateIncoming)), @@ -201,7 +201,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { crBuilder.Build(), secretBuilder.Build(testsecret.WithDataKeyValue("test-key", []byte("test-data"))), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ namespaceBuilder.Build(), cdBuilder.Build( testcd.Generic(withRelocateAnnotation(crName, hivev1.RelocateIncoming)), @@ -223,7 +223,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { testsecret.WithDataKeyValue("test-key-2", []byte("test-data-2")), ), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ namespaceBuilder.Build(), cdBuilder.Build( testcd.Generic(withRelocateAnnotation(crName, hivev1.RelocateIncoming)), @@ -253,7 +253,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { testsecret.WithDataKeyValue("test-key-2", []byte("test-data-2")), ), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ namespaceBuilder.Build(), cdBuilder.Build( testcd.Generic(withRelocateAnnotation(crName, hivev1.RelocateIncoming)), @@ -274,7 +274,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { destResources: []runtime.Object{ secretBuilder.Build(testsecret.WithDataKeyValue("test-key", []byte("test-data"))), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ namespaceBuilder.Build(), cdBuilder.Build( testcd.Generic(withRelocateAnnotation(crName, hivev1.RelocateIncoming)), @@ -292,7 +292,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { 
destResources: []runtime.Object{ secretBuilder.Build(testsecret.WithDataKeyValue("test-key", []byte("other-data"))), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ namespaceBuilder.Build(), cdBuilder.Build( testcd.Generic(withRelocateAnnotation(crName, hivev1.RelocateIncoming)), @@ -314,7 +314,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { destResources: []runtime.Object{ mpBuilder.Build(), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ namespaceBuilder.Build(), cdBuilder.Build( testcd.Generic(withRelocateAnnotation(crName, hivev1.RelocateIncoming)), @@ -337,7 +337,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { testsecret.WithDataKeyValue("test-key", []byte("test-data")), ), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ namespaceBuilder.Build(), cdBuilder.Build( testcd.Generic(withRelocateAnnotation(crName, hivev1.RelocateIncoming)), @@ -358,7 +358,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { testsecret.WithDataKeyValue("test-key", []byte("test-data")), ), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ namespaceBuilder.Build(), cdBuilder.Build( testcd.Generic(withRelocateAnnotation(crName, hivev1.RelocateIncoming)), @@ -386,7 +386,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { }, ), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ namespaceBuilder.Build(), cdBuilder.Build( testcd.Generic(withRelocateAnnotation(crName, hivev1.RelocateIncoming)), @@ -400,7 +400,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { crBuilder.Build(), cmBuilder.Build(testcm.WithDataKeyValue("test-key", "test-data")), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ namespaceBuilder.Build(), cdBuilder.Build( testcd.Generic(withRelocateAnnotation(crName, 
hivev1.RelocateIncoming)), @@ -415,7 +415,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { crBuilder.Build(), mpBuilder.Build(), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ namespaceBuilder.Build(), cdBuilder.Build( testcd.Generic(withRelocateAnnotation(crName, hivev1.RelocateIncoming)), @@ -430,7 +430,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { crBuilder.Build(), ssBuilder.Build(), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ namespaceBuilder.Build(), cdBuilder.Build( testcd.Generic(withRelocateAnnotation(crName, hivev1.RelocateIncoming)), @@ -445,7 +445,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { crBuilder.Build(), sipBuilder.Build(), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ namespaceBuilder.Build(), cdBuilder.Build( testcd.Generic(withRelocateAnnotation(crName, hivev1.RelocateIncoming)), @@ -460,7 +460,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { crBuilder.Build(), dnsZoneBuilder.Build(), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ namespaceBuilder.Build(), cdBuilder.Build( testcd.Generic(withRelocateAnnotation(crName, hivev1.RelocateIncoming)), @@ -477,7 +477,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { crBuilder.Build(), dnsZoneBuilder.Build(testdnszone.Generic(testgeneric.WithName("other-dnszone"))), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ namespaceBuilder.Build(), cdBuilder.Build( testcd.Generic(withRelocateAnnotation(crName, hivev1.RelocateIncoming)), @@ -491,7 +491,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { crBuilder.Build(), jobBuilder.Build(), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ namespaceBuilder.Build(), cdBuilder.Build( 
testcd.Generic(withRelocateAnnotation(crName, hivev1.RelocateIncoming)), @@ -633,7 +633,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { testsip.ForIdentities("other-user"), ), }, - expectedResources: []runtime.Object{ + expectedResources: []client.Object{ namespaceBuilder.Build(), cdBuilder.Build( testcd.Generic(withRelocateAnnotation(crName, hivev1.RelocateIncoming)), @@ -725,7 +725,7 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { return mockRemoteClientBuilder }, } - _, err := reconciler.Reconcile(reconcile.Request{ + _, err := reconciler.Reconcile(context.TODO(), reconcile.Request{ NamespacedName: types.NamespacedName{ Name: cdName, Namespace: namespace, @@ -734,11 +734,8 @@ func TestReconcileClusterRelocate_Reconcile_Movement(t *testing.T) { require.NoError(t, err, "unexpected error during reconcile") for _, obj := range tc.expectedResources { - objKey, err := client.ObjectKeyFromObject(obj) - if !assert.NoError(t, err, "unexpected error getting object key") { - continue - } - destObj := reflect.New(reflect.TypeOf(obj).Elem()).Interface().(runtime.Object) + objKey := client.ObjectKeyFromObject(obj) + destObj := reflect.New(reflect.TypeOf(obj).Elem()).Interface().(client.Object) err = destClient.Get(context.Background(), objKey, destObj) if !assert.NoError(t, err, "unexpected error getting destination object") { continue @@ -1004,7 +1001,7 @@ func TestReconcileClusterRelocate_Reconcile_RelocateStatus(t *testing.T) { return mockRemoteClientBuilder }, } - _, err := reconciler.Reconcile(reconcile.Request{ + _, err := reconciler.Reconcile(context.TODO(), reconcile.Request{ NamespacedName: types.NamespacedName{ Name: cdName, Namespace: namespace, diff --git a/pkg/controller/clusterstate/clusterstate_controller.go b/pkg/controller/clusterstate/clusterstate_controller.go index 02400b96fc4..be7dd36b507 100644 --- a/pkg/controller/clusterstate/clusterstate_controller.go +++ 
b/pkg/controller/clusterstate/clusterstate_controller.go @@ -100,7 +100,7 @@ type ReconcileClusterState struct { } // Reconcile ensures that a given ClusterState resource exists and reflects the state of cluster operators from its target cluster -func (r *ReconcileClusterState) Reconcile(request reconcile.Request) (reconcile.Result, error) { +func (r *ReconcileClusterState) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { logger := controllerutils.BuildControllerLogger(ControllerName, "clusterDeployment", request.NamespacedName) logger.Info("reconciling cluster deployment") recobsrv := hivemetrics.NewReconcileObserver(ControllerName, logger) diff --git a/pkg/controller/clusterstate/clusterstate_controller_test.go b/pkg/controller/clusterstate/clusterstate_controller_test.go index 7c9c42384c5..7b8bdf43376 100644 --- a/pkg/controller/clusterstate/clusterstate_controller_test.go +++ b/pkg/controller/clusterstate/clusterstate_controller_test.go @@ -178,7 +178,7 @@ func TestClusterStateReconcile(t *testing.T) { }, } - result, err := rcd.Reconcile(reconcile.Request{ + result, err := rcd.Reconcile(context.TODO(), reconcile.Request{ NamespacedName: types.NamespacedName{ Name: testName, Namespace: testNamespace, diff --git a/pkg/controller/clustersync/clientwrapper_test.go b/pkg/controller/clustersync/clientwrapper_test.go index dcf70334a0b..999ab01f79b 100644 --- a/pkg/controller/clustersync/clientwrapper_test.go +++ b/pkg/controller/clustersync/clientwrapper_test.go @@ -3,7 +3,6 @@ package clustersync import ( "context" - "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" hiveintv1alpha1 "github.com/openshift/hive/apis/hiveinternal/v1alpha1" @@ -15,7 +14,7 @@ type clientWrapper struct { var _ client.Client = (*clientWrapper)(nil) -func (c *clientWrapper) Create(ctx context.Context, obj runtime.Object, opts ...client.CreateOption) error { +func (c *clientWrapper) Create(ctx context.Context, obj client.Object, 
opts ...client.CreateOption) error { switch t := obj.(type) { case *hiveintv1alpha1.ClusterSync: t.APIVersion = hiveintv1alpha1.SchemeGroupVersion.String() diff --git a/pkg/controller/clustersync/clustersync_controller.go b/pkg/controller/clustersync/clustersync_controller.go index 7a1bc0883fb..c741eeed51f 100644 --- a/pkg/controller/clustersync/clustersync_controller.go +++ b/pkg/controller/clustersync/clustersync_controller.go @@ -199,28 +199,22 @@ func AddToManager(mgr manager.Manager, r *ReconcileClusterSync, concurrentReconc // Watch for changes to SyncSets if err := c.Watch( &source.Kind{Type: &hivev1.SyncSet{}}, - &handler.EnqueueRequestsFromMapFunc{ - ToRequests: handler.ToRequestsFunc(requestsForSyncSet), - }, - ); err != nil { + handler.EnqueueRequestsFromMapFunc(requestsForSyncSet)); err != nil { return err } // Watch for changes to SelectorSyncSets if err := c.Watch( &source.Kind{Type: &hivev1.SelectorSyncSet{}}, - &handler.EnqueueRequestsFromMapFunc{ - ToRequests: requestsForSelectorSyncSet(r.Client, r.logger), - }, - ); err != nil { + handler.EnqueueRequestsFromMapFunc(requestsForSelectorSyncSet(r.Client, r.logger))); err != nil { return err } return nil } -func requestsForSyncSet(o handler.MapObject) []reconcile.Request { - ss, ok := o.Object.(*hivev1.SyncSet) +func requestsForSyncSet(o client.Object) []reconcile.Request { + ss, ok := o.(*hivev1.SyncSet) if !ok { return nil } @@ -232,9 +226,9 @@ func requestsForSyncSet(o handler.MapObject) []reconcile.Request { return requests } -func requestsForSelectorSyncSet(c client.Client, logger log.FieldLogger) handler.ToRequestsFunc { - return func(o handler.MapObject) []reconcile.Request { - sss, ok := o.Object.(*hivev1.SelectorSyncSet) +func requestsForSelectorSyncSet(c client.Client, logger log.FieldLogger) handler.MapFunc { + return func(o client.Object) []reconcile.Request { + sss, ok := o.(*hivev1.SelectorSyncSet) if !ok { return nil } @@ -335,7 +329,7 @@ func (r *ReconcileClusterSync) 
isSyncAssignedToMe(sts *appsv1.StatefulSet, cd *h // Reconcile reads the state of the ClusterDeployment and applies any SyncSets or SelectorSyncSets that need to be // applied or re-applied. -func (r *ReconcileClusterSync) Reconcile(request reconcile.Request) (reconcile.Result, error) { +func (r *ReconcileClusterSync) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { logger := controllerutils.BuildControllerLogger(ControllerName, "clusterDeployment", request.NamespacedName) logger.Infof("reconciling ClusterDeployment") recobsrv := hivemetrics.NewReconcileObserver(ControllerName, logger) diff --git a/pkg/controller/clustersync/clustersync_controller_test.go b/pkg/controller/clustersync/clustersync_controller_test.go index c06c9c8d13c..bc367cd4985 100644 --- a/pkg/controller/clustersync/clustersync_controller_test.go +++ b/pkg/controller/clustersync/clustersync_controller_test.go @@ -130,7 +130,7 @@ func (rt *reconcileTest) run(t *testing.T) { startTime := time.Now() timeSinceOrigLeaseRenewTime := time.Since(origLeaseRenewTime.Time) - result, err := rt.r.Reconcile(reconcileRequest) + result, err := rt.r.Reconcile(context.TODO(), reconcileRequest) require.NoError(t, err, "unexpected error from Reconcile") endTime := time.Now() startTime = startTime.Truncate(time.Second) @@ -256,7 +256,7 @@ func TestReconcileClusterSync_NewClusterDeployment(t *testing.T) { Name: testCDName, }, } - result, err := rt.r.Reconcile(reconcileRequest) + result, err := rt.r.Reconcile(context.TODO(), reconcileRequest) require.NoError(t, err, "unexpected error from Reconcile") assert.Equal(t, result, reconcile.Result{Requeue: true}, "unexpected result from reconcile") err = rt.c.Get(context.Background(), client.ObjectKey{Namespace: testNamespace, Name: testLeaseName}, &hiveintv1alpha1.ClusterSyncLease{}) diff --git a/pkg/controller/clusterversion/clusterversion_controller.go b/pkg/controller/clusterversion/clusterversion_controller.go index 
10df8fb5f96..f391bd9e2ad 100644 --- a/pkg/controller/clusterversion/clusterversion_controller.go +++ b/pkg/controller/clusterversion/clusterversion_controller.go @@ -92,7 +92,7 @@ type ReconcileClusterVersion struct { // Reconcile reads that state of the cluster for a ClusterDeployment object and syncs the remote ClusterVersion status // if the remote cluster is available. -func (r *ReconcileClusterVersion) Reconcile(request reconcile.Request) (reconcile.Result, error) { +func (r *ReconcileClusterVersion) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { cdLog := controllerutils.BuildControllerLogger(ControllerName, "clusterDeployment", request.NamespacedName) cdLog.Info("reconciling cluster deployment") recobsrv := hivemetrics.NewReconcileObserver(ControllerName, cdLog) diff --git a/pkg/controller/clusterversion/clusterversion_controller_test.go b/pkg/controller/clusterversion/clusterversion_controller_test.go index b918dc28890..2610cf31753 100644 --- a/pkg/controller/clusterversion/clusterversion_controller_test.go +++ b/pkg/controller/clusterversion/clusterversion_controller_test.go @@ -100,7 +100,7 @@ func TestClusterVersionReconcile(t *testing.T) { Namespace: testNamespace, } - _, err := rcd.Reconcile(reconcile.Request{NamespacedName: namespacedName}) + _, err := rcd.Reconcile(context.TODO(), reconcile.Request{NamespacedName: namespacedName}) if test.validate != nil { cd := &hivev1.ClusterDeployment{} diff --git a/pkg/controller/controlplanecerts/controlplanecerts_controller.go b/pkg/controller/controlplanecerts/controlplanecerts_controller.go index ceea21d85d7..963fcfbd646 100644 --- a/pkg/controller/controlplanecerts/controlplanecerts_controller.go +++ b/pkg/controller/controlplanecerts/controlplanecerts_controller.go @@ -121,7 +121,7 @@ type ReconcileControlPlaneCerts struct { // Reconcile reads that state of the cluster for a ClusterDeployment object and makes changes based on the state read // and what is in the 
ClusterDeployment.Spec -func (r *ReconcileControlPlaneCerts) Reconcile(request reconcile.Request) (reconcile.Result, error) { +func (r *ReconcileControlPlaneCerts) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { cdLog := controllerutils.BuildControllerLogger(ControllerName, "clusterDeployment", request.NamespacedName) cdLog.Info("reconciling cluster deployment") recobsrv := hivemetrics.NewReconcileObserver(ControllerName, cdLog) diff --git a/pkg/controller/controlplanecerts/controlplanecerts_controller_test.go b/pkg/controller/controlplanecerts/controlplanecerts_controller_test.go index 90e028bbf45..e834adef401 100644 --- a/pkg/controller/controlplanecerts/controlplanecerts_controller_test.go +++ b/pkg/controller/controlplanecerts/controlplanecerts_controller_test.go @@ -165,7 +165,7 @@ func TestReconcileControlPlaneCerts(t *testing.T) { applier: applier, } - _, err := r.Reconcile(reconcile.Request{ + _, err := r.Reconcile(context.TODO(), reconcile.Request{ NamespacedName: types.NamespacedName{ Name: fakeName, Namespace: fakeNamespace, diff --git a/pkg/controller/dnsendpoint/dnsendpoint_controller.go b/pkg/controller/dnsendpoint/dnsendpoint_controller.go index 2a51f70b984..131c7ff1914 100644 --- a/pkg/controller/dnsendpoint/dnsendpoint_controller.go +++ b/pkg/controller/dnsendpoint/dnsendpoint_controller.go @@ -9,7 +9,6 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" @@ -21,6 +20,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" hivev1 "github.com/openshift/hive/apis/hive/v1" + "github.com/openshift/hive/pkg/constants" "github.com/openshift/hive/pkg/controller/dnsendpoint/nameserver" hivemetrics "github.com/openshift/hive/pkg/controller/metrics" @@ -126,13 +126,8 @@ func newReconciler(mgr manager.Manager, kubeClient 
client.Client) (*ReconcileDNS continue } - registerNameServerChange := func(objectKey client.ObjectKey) { - nameServerChangeNotifier <- event.GenericEvent{ - Meta: &metav1.ObjectMeta{ - Namespace: objectKey.Namespace, - Name: objectKey.Name, - }, - } + registerNameServerChange := func(obj client.Object) { + nameServerChangeNotifier <- event.GenericEvent{Object: obj} } nameServerScraper := newNameServerScraper(logger, nameServerQuery, md.Domains, registerNameServerChange) if err := mgr.Add(nameServerScraper); err != nil { @@ -164,7 +159,7 @@ type ReconcileDNSEndpoint struct { // Reconcile reads that state of the cluster for a DNSEndpoint object and makes changes based on the state read // and what is in the DNSEndpoint.Spec -func (r *ReconcileDNSEndpoint) Reconcile(request reconcile.Request) (reconcile.Result, error) { +func (r *ReconcileDNSEndpoint) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { dnsLog := controllerutils.BuildControllerLogger(ControllerName, "dnsZone", request.NamespacedName) dnsLog.Info("reconciling dns endpoint") recobsrv := hivemetrics.NewReconcileObserver(ControllerName, dnsLog) @@ -253,7 +248,7 @@ func (r *ReconcileDNSEndpoint) Reconcile(request reconcile.Request) (reconcile.R return reconcile.Result{}, err } - nsTool.scraper.AddEndpoint(request.NamespacedName, fullDomain, desiredNameServers) + nsTool.scraper.AddEndpoint(instance, fullDomain, desiredNameServers) // NS needs to be deleted, either because the DNSZone has been deleted or because // there are no targets for the NS. 
diff --git a/pkg/controller/dnsendpoint/dnsendpoint_controller_test.go b/pkg/controller/dnsendpoint/dnsendpoint_controller_test.go index 1c9844729d2..236fc68ccb8 100644 --- a/pkg/controller/dnsendpoint/dnsendpoint_controller_test.go +++ b/pkg/controller/dnsendpoint/dnsendpoint_controller_test.go @@ -13,6 +13,7 @@ import ( "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "sigs.k8s.io/controller-runtime/pkg/healthz" corev1 "k8s.io/api/core/v1" @@ -81,8 +82,8 @@ func TestDNSEndpointReconcile(t *testing.T) { expectedNameServers: rootDomainsMap{ rootDomain: nameServersMap{ dnsName: endpointState{ - objectKey: objectKey, - nsValues: sets.NewString("test-value-1", "test-value-2", "test-value-3"), + dnsZone: testDNSZone(), + nsValues: sets.NewString("test-value-1", "test-value-2", "test-value-3"), }, }, }, @@ -100,16 +101,16 @@ func TestDNSEndpointReconcile(t *testing.T) { nameServers: rootDomainsMap{ rootDomain: nameServersMap{ dnsName: endpointState{ - objectKey: objectKey, - nsValues: sets.NewString("test-value-1", "test-value-2", "test-value-3"), + dnsZone: testDNSZone(), + nsValues: sets.NewString("test-value-1", "test-value-2", "test-value-3"), }, }, }, expectedNameServers: rootDomainsMap{ rootDomain: nameServersMap{ dnsName: endpointState{ - objectKey: objectKey, - nsValues: sets.NewString("test-value-1", "test-value-2", "test-value-3"), + dnsZone: testDNSZone(), + nsValues: sets.NewString("test-value-1", "test-value-2", "test-value-3"), }, }, }, @@ -127,8 +128,8 @@ func TestDNSEndpointReconcile(t *testing.T) { nameServers: rootDomainsMap{ rootDomain: nameServersMap{ dnsName: endpointState{ - objectKey: objectKey, - nsValues: sets.NewString("old-value"), + dnsZone: testDNSZone(), + nsValues: sets.NewString("old-value"), }, }, }, @@ -138,8 +139,8 @@ func TestDNSEndpointReconcile(t *testing.T) { expectedNameServers: rootDomainsMap{ rootDomain: nameServersMap{ dnsName: endpointState{ - 
objectKey: objectKey, - nsValues: sets.NewString("test-value-1", "test-value-2", "test-value-3"), + dnsZone: testDNSZone(), + nsValues: sets.NewString("test-value-1", "test-value-2", "test-value-3"), }, }, }, @@ -157,8 +158,8 @@ func TestDNSEndpointReconcile(t *testing.T) { nameServers: rootDomainsMap{ rootDomain: nameServersMap{ dnsName: endpointState{ - objectKey: objectKey, - nsValues: sets.NewString("test-value-1", "test-value-2", "test-value-3"), + dnsZone: testDNSZone(), + nsValues: sets.NewString("test-value-1", "test-value-2", "test-value-3"), }, }, }, @@ -283,8 +284,8 @@ func TestDNSEndpointReconcile(t *testing.T) { nameServers: rootDomainsMap{ rootDomain: nameServersMap{ dnsName: endpointState{ - objectKey: objectKey, - nsValues: sets.NewString("test-value-1", "test-value-2", "test-value-3"), + dnsZone: testDNSZone(), + nsValues: sets.NewString("test-value-1", "test-value-2", "test-value-3"), }, }, }, @@ -340,14 +341,14 @@ func TestDNSEndpointReconcile(t *testing.T) { }, }, } - result, err := cut.Reconcile(reconcile.Request{NamespacedName: objectKey}) + result, err := cut.Reconcile(context.TODO(), reconcile.Request{NamespacedName: objectKey}) if tc.expectErr { assert.Error(t, err, "expected error from reconcile") } else { assert.NoError(t, err, "expected no error from reconcile") } assert.Equal(t, reconcile.Result{}, result, "unexpected reconcile result") - assert.Equal(t, tc.expectedNameServers, scraper.nameServers, "unexpected name servers in scraper") + assertRootDomainsMapEqual(t, tc.expectedNameServers, scraper.nameServers) dnsZone := &hivev1.DNSZone{} if err := fakeClient.Get(context.Background(), objectKey, dnsZone); assert.NoError(t, err, "unexpected error getting DNSZone") { validateConditions(t, dnsZone, tc.expectedConditions) @@ -356,6 +357,20 @@ func TestDNSEndpointReconcile(t *testing.T) { } } +func assertRootDomainsMapEqual(t *testing.T, expected rootDomainsMap, actual rootDomainsMap) { + require.Equal(t, len(expected), len(actual), 
"unexpected number of root domain map keys") + for rootDomainKey, expectedDomainMap := range expected { + require.Contains(t, actual, rootDomainKey) + actualDomainMap := actual[rootDomainKey] + require.Equal(t, len(expectedDomainMap), len(actualDomainMap), "unexpected number of domain map keys") + for domainKey, expectedEndpointState := range expectedDomainMap { + require.Contains(t, actualDomainMap, domainKey) + actualEndpointState := actualDomainMap[domainKey] + assert.Equal(t, expectedEndpointState.nsValues.List(), actualEndpointState.nsValues.List()) + } + } +} + func validateConditions(t *testing.T, dnsZone *hivev1.DNSZone, conditions []conditionExpectations) { for _, expectedCondition := range conditions { cond := controllerutils.FindDNSZoneCondition(dnsZone.Status.Conditions, expectedCondition.conditionType) @@ -400,7 +415,7 @@ func (*fakeManager) AddHealthzCheck(name string, check healthz.Checker) error { func (*fakeManager) AddReadyzCheck(name string, check healthz.Checker) error { panic("not implemented") } -func (*fakeManager) Start(<-chan struct{}) error { +func (*fakeManager) Start(ctx context.Context) error { panic("not implemented") } func (*fakeManager) GetConfig() *rest.Config { @@ -582,6 +597,27 @@ func testManagedDomain() hivev1.ManageDNSConfig { } } +func testDNSZoneWithNSName(namespace, name string) *hivev1.DNSZone { + return &hivev1.DNSZone{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + Finalizers: []string{hivev1.FinalizerDNSEndpoint}, + }, + Spec: hivev1.DNSZoneSpec{ + Zone: dnsName, + LinkToParentDomain: true, + }, + Status: hivev1.DNSZoneStatus{ + NameServers: []string{ + "test-value-1", + "test-value-2", + "test-value-3", + }, + }, + } +} + func testDNSZone() *hivev1.DNSZone { return &hivev1.DNSZone{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/controller/dnsendpoint/nameserverscraper.go b/pkg/controller/dnsendpoint/nameserverscraper.go index b187189a604..76ebb22e856 100644 --- 
a/pkg/controller/dnsendpoint/nameserverscraper.go +++ b/pkg/controller/dnsendpoint/nameserverscraper.go @@ -1,6 +1,7 @@ package dnsendpoint import ( + "context" "strings" "sync" "time" @@ -12,6 +13,7 @@ import ( "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" + hivev1 "github.com/openshift/hive/apis/hive/v1" "github.com/openshift/hive/pkg/controller/dnsendpoint/nameserver" ) @@ -20,8 +22,8 @@ const ( ) type endpointState struct { - objectKey client.ObjectKey - nsValues sets.String + dnsZone *hivev1.DNSZone + nsValues sets.String } type nameServersMap map[string]endpointState @@ -35,10 +37,10 @@ type nameServerScraper struct { queue workqueue.RateLimitingInterface nameServers rootDomainsMap nameServerQuery nameserver.Query - notifyChange func(client.ObjectKey) + notifyChange func(client.Object) } -func newNameServerScraper(logger log.FieldLogger, nameServerQuery nameserver.Query, domains []string, notifyChange func(client.ObjectKey)) *nameServerScraper { +func newNameServerScraper(logger log.FieldLogger, nameServerQuery nameserver.Query, domains []string, notifyChange func(client.Object)) *nameServerScraper { if len(domains) == 0 { return nil } @@ -67,7 +69,7 @@ func (s *nameServerScraper) GetEndpoint(domain string) (rootDomain string, nameS } // AddEndpoint adds an endpoint with the specified domain. 
-func (s *nameServerScraper) AddEndpoint(objectKey client.ObjectKey, domain string, nameServers sets.String) { +func (s *nameServerScraper) AddEndpoint(object *hivev1.DNSZone, domain string, nameServers sets.String) { s.mux.Lock() defer s.mux.Unlock() _, nsMap := s.rootDomainNameServers(domain) @@ -75,8 +77,8 @@ func (s *nameServerScraper) AddEndpoint(objectKey client.ObjectKey, domain strin return } nsMap[domain] = endpointState{ - objectKey: objectKey, - nsValues: nameServers, + dnsZone: object, + nsValues: nameServers, } } @@ -96,7 +98,7 @@ func (s *nameServerScraper) HasBeenScraped(domain string) bool { } // Start starts the name server scraper. -func (s *nameServerScraper) Start(stop <-chan struct{}) error { +func (s *nameServerScraper) Start(ctx context.Context) error { defer s.queue.ShutDown() go func() { for { @@ -123,7 +125,7 @@ func (s *nameServerScraper) Start(stop <-chan struct{}) error { }() } }() - <-stop + <-ctx.Done() return nil } @@ -132,7 +134,7 @@ func (s *nameServerScraper) scrape(rootDomain string) error { if err != nil { return errors.Wrap(err, "error querying name servers") } - changedEndpoints := []client.ObjectKey{} + changedEndpoints := []client.Object{} func() { s.mux.Lock() defer s.mux.Unlock() @@ -148,7 +150,7 @@ func (s *nameServerScraper) scrape(rootDomain string) error { for domain, oldNameServer := range oldNameServers { currentNameServer, ok := currentNameServers[domain] if !ok || !currentNameServer.Equal(oldNameServer.nsValues) { - changedEndpoints = append(changedEndpoints, oldNameServer.objectKey) + changedEndpoints = append(changedEndpoints, oldNameServer.dnsZone) oldNameServer.nsValues = currentNameServer oldNameServers[domain] = oldNameServer } diff --git a/pkg/controller/dnsendpoint/nameserverscraper_test.go b/pkg/controller/dnsendpoint/nameserverscraper_test.go index 312414ebdfb..1528ec981d0 100644 --- a/pkg/controller/dnsendpoint/nameserverscraper_test.go +++ b/pkg/controller/dnsendpoint/nameserverscraper_test.go @@ -1,6 
+1,7 @@ package dnsendpoint import ( + "context" "testing" "time" @@ -12,6 +13,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" + hivev1 "github.com/openshift/hive/apis/hive/v1" "github.com/openshift/hive/pkg/controller/dnsendpoint/nameserver/mock" ) @@ -111,7 +113,6 @@ func TestGetEndpoint(t *testing.T) { func TestAddEndpoint(t *testing.T) { rootDomain := "domain.com" domain := "test.domain.com" - objectKey := client.ObjectKey{Namespace: "test-namespace", Name: "test-name"} values := sets.NewString("test-value-1", "test-value-2", "test-value-3") cases := []struct { name string @@ -138,8 +139,8 @@ func TestAddEndpoint(t *testing.T) { expectedNameServers: rootDomainsMap{ rootDomain: nameServersMap{ domain: endpointState{ - objectKey: objectKey, - nsValues: values, + dnsZone: testDNSZone(), + nsValues: values, }, }, }, @@ -155,8 +156,8 @@ func TestAddEndpoint(t *testing.T) { rootDomain: nameServersMap{ "other.domain.com": endpointState{}, domain: endpointState{ - objectKey: objectKey, - nsValues: values, + dnsZone: testDNSZone(), + nsValues: values, }, }, }, @@ -166,16 +167,21 @@ func TestAddEndpoint(t *testing.T) { nameServers: rootDomainsMap{ rootDomain: nameServersMap{ domain: endpointState{ - objectKey: client.ObjectKey{Namespace: "other-namespace", Name: "other-name"}, - nsValues: sets.NewString("other-value"), + dnsZone: func() *hivev1.DNSZone { + dz := testDNSZone() + dz.Name = "other-name" + dz.Namespace = "other-namespace" + return dz + }(), + nsValues: sets.NewString("other-value"), }, }, }, expectedNameServers: rootDomainsMap{ rootDomain: nameServersMap{ domain: endpointState{ - objectKey: objectKey, - nsValues: values, + dnsZone: testDNSZone(), + nsValues: values, }, }, }, @@ -194,8 +200,8 @@ func TestAddEndpoint(t *testing.T) { rootDomain: nameServersMap{ "other.domain.com": endpointState{}, domain: endpointState{ - objectKey: objectKey, - nsValues: values, + dnsZone: testDNSZone(), + nsValues: values, }, }, 
"other-domain": nameServersMap{ @@ -207,7 +213,7 @@ func TestAddEndpoint(t *testing.T) { for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { cut := &nameServerScraper{nameServers: tc.nameServers} - cut.AddEndpoint(objectKey, domain, values) + cut.AddEndpoint(testDNSZone(), domain, values) assert.Equal(t, tc.expectedNameServers, cut.nameServers, "unexpected changes to name servers") }) } @@ -260,8 +266,8 @@ func TestRemoveEndpoint(t *testing.T) { nameServers: rootDomainsMap{ rootDomain: nameServersMap{ domain: endpointState{ - objectKey: client.ObjectKey{Namespace: "test-namespace", Name: "test-name"}, - nsValues: sets.NewString("test-value"), + dnsZone: testDNSZone(), + nsValues: sets.NewString("test-value"), }, }, }, @@ -275,8 +281,8 @@ func TestRemoveEndpoint(t *testing.T) { rootDomain: nameServersMap{ "other.domain.com": endpointState{}, domain: endpointState{ - objectKey: client.ObjectKey{Namespace: "test-namespace", Name: "test-name"}, - nsValues: sets.NewString("test-value"), + dnsZone: testDNSZone(), + nsValues: sets.NewString("test-value"), }, }, "other-domain": nameServersMap{ @@ -414,8 +420,8 @@ func TestScrape(t *testing.T) { nameServers: rootDomainsMap{ "domain.com": nameServersMap{ "test.domain.com": endpointState{ - objectKey: client.ObjectKey{Namespace: "test-namespace", Name: "test-name"}, - nsValues: sets.NewString("old-value"), + dnsZone: testDNSZone(), + nsValues: sets.NewString("old-value"), }, }, }, @@ -431,8 +437,8 @@ func TestScrape(t *testing.T) { expectedNameServers: rootDomainsMap{ "domain.com": nameServersMap{ "test.domain.com": endpointState{ - objectKey: client.ObjectKey{Namespace: "test-namespace", Name: "test-name"}, - nsValues: sets.NewString("test-value"), + dnsZone: testDNSZone(), + nsValues: sets.NewString("test-value"), }, }, }, @@ -444,8 +450,8 @@ func TestScrape(t *testing.T) { nameServers: rootDomainsMap{ "domain.com": nameServersMap{ "test.domain.com": endpointState{ - objectKey: client.ObjectKey{Namespace: 
"test-namespace", Name: "test-name"}, - nsValues: sets.NewString("test-value"), + dnsZone: testDNSZone(), + nsValues: sets.NewString("test-value"), }, }, }, @@ -461,8 +467,8 @@ func TestScrape(t *testing.T) { expectedNameServers: rootDomainsMap{ "domain.com": nameServersMap{ "test.domain.com": endpointState{ - objectKey: client.ObjectKey{Namespace: "test-namespace", Name: "test-name"}, - nsValues: sets.NewString("test-value"), + dnsZone: testDNSZone(), + nsValues: sets.NewString("test-value"), }, }, }, @@ -473,20 +479,20 @@ func TestScrape(t *testing.T) { nameServers: rootDomainsMap{ "domain.com": nameServersMap{ "changed-1.domain.com": endpointState{ - objectKey: client.ObjectKey{Namespace: "test-namespace", Name: "test-changed-1"}, - nsValues: sets.NewString("old-value-1"), + dnsZone: testDNSZoneWithNSName(testNamespace, "test-changed-1"), + nsValues: sets.NewString("old-value-1"), }, "changed-2.domain.com": endpointState{ - objectKey: client.ObjectKey{Namespace: "test-namespace", Name: "test-changed-2"}, - nsValues: sets.NewString("old-value-2"), + dnsZone: testDNSZoneWithNSName(testNamespace, "test-changed-2"), + nsValues: sets.NewString("old-value-2"), }, "changed-3.domain.com": endpointState{ - objectKey: client.ObjectKey{Namespace: "test-namespace", Name: "test-changed-3"}, - nsValues: sets.NewString("old-value-3a", "old-value-3b"), + dnsZone: testDNSZoneWithNSName(testNamespace, "test-changed-3"), + nsValues: sets.NewString("old-value-3a", "old-value-3b"), }, "unchanged.domain.com": endpointState{ - objectKey: client.ObjectKey{Namespace: "test-namespace", Name: "test-unchanged"}, - nsValues: sets.NewString("test-value-4"), + dnsZone: testDNSZoneWithNSName(testNamespace, "test-unchanged"), + nsValues: sets.NewString("test-value-4"), }, }, }, @@ -506,20 +512,20 @@ func TestScrape(t *testing.T) { expectedNameServers: rootDomainsMap{ "domain.com": nameServersMap{ "changed-1.domain.com": endpointState{ - objectKey: client.ObjectKey{Namespace: "test-namespace", 
Name: "test-changed-1"}, - nsValues: sets.NewString("test-value-1"), + dnsZone: testDNSZoneWithNSName(testNamespace, "test-changed-1"), + nsValues: sets.NewString("test-value-1"), }, "changed-2.domain.com": endpointState{ - objectKey: client.ObjectKey{Namespace: "test-namespace", Name: "test-changed-2"}, - nsValues: sets.NewString("test-value-2a", "test-value-2b"), + dnsZone: testDNSZoneWithNSName(testNamespace, "test-changed-2"), + nsValues: sets.NewString("test-value-2a", "test-value-2b"), }, "changed-3.domain.com": endpointState{ - objectKey: client.ObjectKey{Namespace: "test-namespace", Name: "test-changed-3"}, - nsValues: sets.NewString("test-value-3"), + dnsZone: testDNSZoneWithNSName(testNamespace, "test-changed-3"), + nsValues: sets.NewString("test-value-3"), }, "unchanged.domain.com": endpointState{ - objectKey: client.ObjectKey{Namespace: "test-namespace", Name: "test-unchanged"}, - nsValues: sets.NewString("test-value-4"), + dnsZone: testDNSZoneWithNSName(testNamespace, "test-unchanged"), + nsValues: sets.NewString("test-value-4"), }, }, }, @@ -536,9 +542,9 @@ func TestScrape(t *testing.T) { defer mockCtrl.Finish() mockQuery := mock.NewMockQuery(mockCtrl) tc.configureQuery(mockQuery) - changeNotifications := make(chan client.ObjectKey, 100) - notifyChange := func(objectKey client.ObjectKey) { - changeNotifications <- objectKey + changeNotifications := make(chan client.Object, 100) + notifyChange := func(object client.Object) { + changeNotifications <- object } cut := newNameServerScraper(log.StandardLogger(), mockQuery, tc.rootDomains, notifyChange) if tc.scrapePeriod > 0 { @@ -547,16 +553,16 @@ func TestScrape(t *testing.T) { if tc.nameServers != nil { cut.nameServers = tc.nameServers } - stop := make(chan struct{}) + ctx, stop := context.WithCancel(context.Background()) go func() { sleepTime := tc.testDuration if sleepTime <= 0 { sleepTime = 3 * time.Second } time.Sleep(sleepTime) - stop <- struct{}{} + stop() }() - err := cut.Start(stop) + err := 
cut.Start(ctx) assert.NoError(t, err, "unexpected error starting scraper") expectedNameServers := tc.expectedNameServers if len(expectedNameServers) == 0 { @@ -566,7 +572,7 @@ func TestScrape(t *testing.T) { } } assert.Equal(t, expectedNameServers, cut.nameServers, "unexpected changes to name servers") - actualChanges := []client.ObjectKey{} + actualChanges := []client.Object{} for { empty := false select { @@ -582,7 +588,17 @@ func TestScrape(t *testing.T) { if len(tc.expectedChanges) == 0 { assert.Empty(t, actualChanges, "expected no change notifications") } else { - assert.ElementsMatch(t, tc.expectedChanges, actualChanges, "unexpected change notifications") + assert.Equal(t, len(tc.expectedChanges), len(actualChanges), "unexpected change count") + for _, expectedChangedDNSZone := range tc.expectedChanges { + found := false + for _, actualChangedDNSZone := range actualChanges { + if actualChangedDNSZone.GetNamespace() == expectedChangedDNSZone.Namespace && + actualChangedDNSZone.GetName() == expectedChangedDNSZone.Name { + found = true + } + } + assert.True(t, found, "expected change to DNSZone %s did not occur", expectedChangedDNSZone.Name) + } } }) } diff --git a/pkg/controller/dnszone/dnszone_controller.go b/pkg/controller/dnszone/dnszone_controller.go index 4be8e3feb2a..218d56434b0 100644 --- a/pkg/controller/dnszone/dnszone_controller.go +++ b/pkg/controller/dnszone/dnszone_controller.go @@ -127,7 +127,7 @@ type ReconcileDNSZone struct { // Reconcile reads that state of the cluster for a DNSZone object and makes changes based on the state read // and what is in the DNSZone.Spec -func (r *ReconcileDNSZone) Reconcile(request reconcile.Request) (reconcile.Result, error) { +func (r *ReconcileDNSZone) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { dnsLog := controllerutils.BuildControllerLogger(ControllerName, "dnsZone", request.NamespacedName) dnsLog.Info("reconciling dns zone") recobsrv := 
hivemetrics.NewReconcileObserver(ControllerName, dnsLog) diff --git a/pkg/controller/hibernation/hibernation_controller.go b/pkg/controller/hibernation/hibernation_controller.go index 75ea99ecd16..af30f17bf3e 100644 --- a/pkg/controller/hibernation/hibernation_controller.go +++ b/pkg/controller/hibernation/hibernation_controller.go @@ -129,7 +129,7 @@ func AddToManager(mgr manager.Manager, r *hibernationReconciler, concurrentRecon } // Reconcile syncs a single ClusterDeployment -func (r *hibernationReconciler) Reconcile(request reconcile.Request) (result reconcile.Result, returnErr error) { +func (r *hibernationReconciler) Reconcile(ctx context.Context, request reconcile.Request) (result reconcile.Result, returnErr error) { cdLog := controllerutils.BuildControllerLogger(ControllerName, "clusterDeployment", request.NamespacedName) cdLog.Info("reconciling cluster deployment") recobsrv := hivemetrics.NewReconcileObserver(ControllerName, cdLog) diff --git a/pkg/controller/hibernation/hibernation_controller_test.go b/pkg/controller/hibernation/hibernation_controller_test.go index 41d6bee2801..e905623517d 100644 --- a/pkg/controller/hibernation/hibernation_controller_test.go +++ b/pkg/controller/hibernation/hibernation_controller_test.go @@ -371,7 +371,7 @@ func TestReconcile(t *testing.T) { }, csrUtil: mockCSRHelper, } - _, err := reconciler.Reconcile(reconcile.Request{ + _, err := reconciler.Reconcile(context.TODO(), reconcile.Request{ NamespacedName: types.NamespacedName{Namespace: namespace, Name: cdName}, }) @@ -553,7 +553,7 @@ func TestHibernateAfter(t *testing.T) { }, csrUtil: mockCSRHelper, } - result, err := reconciler.Reconcile(reconcile.Request{ + result, err := reconciler.Reconcile(context.TODO(), reconcile.Request{ NamespacedName: types.NamespacedName{Namespace: namespace, Name: cdName}, }) diff --git a/pkg/controller/machinemanagement/machinemanagement_controller.go b/pkg/controller/machinemanagement/machinemanagement_controller.go index 
906f8e753bd..2f1d2889ecd 100644 --- a/pkg/controller/machinemanagement/machinemanagement_controller.go +++ b/pkg/controller/machinemanagement/machinemanagement_controller.go @@ -100,7 +100,7 @@ type ReconcileMachineManagement struct { // Reconcile reads settings within ClusterDeployment.Spec.MachineManagement and creates/copies resources necessary for // managing machines centrally when requested. -func (r *ReconcileMachineManagement) Reconcile(request reconcile.Request) (result reconcile.Result, returnErr error) { +func (r *ReconcileMachineManagement) Reconcile(ctx context.Context, request reconcile.Request) (result reconcile.Result, returnErr error) { cdLog := controllerutils.BuildControllerLogger(ControllerName, "clusterDeployment", request.NamespacedName) cdLog.Info("reconciling cluster deployment") recobsrv := hivemetrics.NewReconcileObserver(ControllerName, cdLog) diff --git a/pkg/controller/machinemanagement/machinemanagement_controller_test.go b/pkg/controller/machinemanagement/machinemanagement_controller_test.go index 62029d9e13f..dd01d1c56b8 100644 --- a/pkg/controller/machinemanagement/machinemanagement_controller_test.go +++ b/pkg/controller/machinemanagement/machinemanagement_controller_test.go @@ -182,7 +182,7 @@ func TestMachineManagementReconcile(t *testing.T) { }, } - result, err := rcd.Reconcile(reconcileRequest) + result, err := rcd.Reconcile(context.TODO(), reconcileRequest) if test.validate != nil { test.validate(fakeClient, t) diff --git a/pkg/controller/metrics/metrics.go b/pkg/controller/metrics/metrics.go index 723c8afba19..59aab1c9180 100644 --- a/pkg/controller/metrics/metrics.go +++ b/pkg/controller/metrics/metrics.go @@ -7,7 +7,6 @@ import ( "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -174,11 +173,11 @@ type Calculator struct { } // Start begins the metrics calculation loop. 
-func (mc *Calculator) Start(stopCh <-chan struct{}) error { +func (mc *Calculator) Start(ctx context.Context) error { log.Info("started metrics calculator goroutine") // Run forever, sleep at the end: - wait.Until(func() { + wait.UntilWithContext(ctx, func(ctx context.Context) { mcLog := log.WithField("controller", "metrics") recobsrv := NewReconcileObserver(ControllerName, mcLog) defer recobsrv.ObserveControllerReconcileTime() @@ -186,7 +185,7 @@ func (mc *Calculator) Start(stopCh <-chan struct{}) error { mcLog.Info("calculating metrics across all ClusterDeployments") // Load all ClusterDeployments so we can accumulate facts about them. clusterDeployments := &hivev1.ClusterDeploymentList{} - err := mc.Client.List(context.Background(), clusterDeployments) + err := mc.Client.List(ctx, clusterDeployments) if err != nil { log.WithError(err).Error("error listing cluster deployments") } else { @@ -257,7 +256,7 @@ func (mc *Calculator) Start(stopCh <-chan struct{}) error { // install job metrics installJobs := &batchv1.JobList{} installJobLabelSelector := map[string]string{constants.InstallJobLabel: "true"} - err = mc.Client.List(context.Background(), installJobs, client.MatchingLabels(installJobLabelSelector)) + err = mc.Client.List(ctx, installJobs, client.MatchingLabels(installJobLabelSelector)) if err != nil { log.WithError(err).Error("error listing install jobs") } else { @@ -277,7 +276,7 @@ func (mc *Calculator) Start(stopCh <-chan struct{}) error { // uninstall job metrics uninstallJobs := &batchv1.JobList{} uninstallJobLabelSelector := map[string]string{constants.UninstallJobLabel: "true"} - err = mc.Client.List(context.Background(), uninstallJobs, client.MatchingLabels(uninstallJobLabelSelector)) + err = mc.Client.List(ctx, uninstallJobs, client.MatchingLabels(uninstallJobLabelSelector)) if err != nil { log.WithError(err).Error("error listing uninstall jobs") } else { @@ -297,7 +296,7 @@ func (mc *Calculator) Start(stopCh <-chan struct{}) error { // imageset 
job metrics imagesetJobs := &batchv1.JobList{} imagesetJobLabelSelector := map[string]string{imageset.ImagesetJobLabel: "true"} - err = mc.Client.List(context.Background(), imagesetJobs, client.MatchingLabels(imagesetJobLabelSelector)) + err = mc.Client.List(ctx, imagesetJobs, client.MatchingLabels(imagesetJobLabelSelector)) if err != nil { log.WithError(err).Error("error listing imageset jobs") } else { @@ -314,7 +313,7 @@ func (mc *Calculator) Start(stopCh <-chan struct{}) error { } mc.calculateSelectorSyncSetMetrics(mcLog) - }, mc.Interval, stopCh) + }, mc.Interval) return nil } diff --git a/pkg/controller/remoteingress/remoteingress_controller.go b/pkg/controller/remoteingress/remoteingress_controller.go index 26f9182a1f4..f6f01f9f951 100644 --- a/pkg/controller/remoteingress/remoteingress_controller.go +++ b/pkg/controller/remoteingress/remoteingress_controller.go @@ -130,7 +130,7 @@ type ReconcileRemoteClusterIngress struct { // Reconcile reads that state of the cluster for a ClusterDeployment object and sets up // any needed ClusterIngress objects up for syncing to the remote cluster. 
// -func (r *ReconcileRemoteClusterIngress) Reconcile(request reconcile.Request) (reconcile.Result, error) { +func (r *ReconcileRemoteClusterIngress) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { cdLog := controllerutils.BuildControllerLogger(ControllerName, "clusterDeployment", request.NamespacedName) cdLog.Info("reconciling cluster deployment") recobsrv := hivemetrics.NewReconcileObserver(ControllerName, cdLog) diff --git a/pkg/controller/remoteingress/remoteingress_controller_test.go b/pkg/controller/remoteingress/remoteingress_controller_test.go index 35af2173b0c..4e7c355a588 100644 --- a/pkg/controller/remoteingress/remoteingress_controller_test.go +++ b/pkg/controller/remoteingress/remoteingress_controller_test.go @@ -321,7 +321,7 @@ func TestRemoteClusterIngressReconcile(t *testing.T) { logger: log.WithField("controller", ControllerName), kubeCLI: helper, } - _, err := rcd.Reconcile(reconcile.Request{ + _, err := rcd.Reconcile(context.TODO(), reconcile.Request{ NamespacedName: types.NamespacedName{ Name: testClusterName, Namespace: testNamespace, @@ -419,7 +419,7 @@ func TestRemoteClusterIngressReconcileConditions(t *testing.T) { logger: log.WithField("controller", ControllerName), kubeCLI: helper, } - _, err := rcd.Reconcile(reconcile.Request{ + _, err := rcd.Reconcile(context.TODO(), reconcile.Request{ NamespacedName: types.NamespacedName{ Name: testClusterName, Namespace: testNamespace, diff --git a/pkg/controller/remotemachineset/remotemachineset_controller.go b/pkg/controller/remotemachineset/remotemachineset_controller.go index 4119d9363ea..6145a9aecb1 100644 --- a/pkg/controller/remotemachineset/remotemachineset_controller.go +++ b/pkg/controller/remotemachineset/remotemachineset_controller.go @@ -108,9 +108,9 @@ func Add(mgr manager.Manager) error { } // Watch for changes to ClusterDeployment - err = c.Watch(&source.Kind{Type: &hivev1.ClusterDeployment{}}, &handler.EnqueueRequestsFromMapFunc{ - ToRequests: 
handler.ToRequestsFunc(r.clusterDeploymentWatchHandler), - }) + err = c.Watch(&source.Kind{Type: &hivev1.ClusterDeployment{}}, handler.EnqueueRequestsFromMapFunc( + r.clusterDeploymentWatchHandler, + )) if err != nil { return err } @@ -118,13 +118,13 @@ func Add(mgr manager.Manager) error { return nil } -func (r *ReconcileRemoteMachineSet) clusterDeploymentWatchHandler(a handler.MapObject) []reconcile.Request { +func (r *ReconcileRemoteMachineSet) clusterDeploymentWatchHandler(a client.Object) []reconcile.Request { retval := []reconcile.Request{} - cd := a.Object.(*hivev1.ClusterDeployment) + cd := a.(*hivev1.ClusterDeployment) if cd == nil { // Wasn't a clusterdeployment, bail out. This should not happen. - r.logger.Errorf("Error converting MapObject.Object to ClusterDeployment. Value: %+v", a.Object) + r.logger.Errorf("Error converting MapObject.Object to ClusterDeployment. Value: %+v", a) return retval } @@ -132,7 +132,7 @@ func (r *ReconcileRemoteMachineSet) clusterDeploymentWatchHandler(a handler.MapO err := r.List(context.TODO(), pools) if err != nil { // Could not list machine pools - r.logger.Errorf("Error listing machine pools. Value: %+v", a.Object) + r.logger.Errorf("Error listing machine pools. 
Value: %+v", a) return retval } @@ -176,7 +176,7 @@ type ReconcileRemoteMachineSet struct { // Reconcile reads that state of the cluster for a MachinePool object and makes changes to the // remote cluster MachineSets based on the state read -func (r *ReconcileRemoteMachineSet) Reconcile(request reconcile.Request) (reconcile.Result, error) { +func (r *ReconcileRemoteMachineSet) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { logger := controllerutils.BuildControllerLogger(ControllerName, "machinePool", request.NamespacedName) logger.Info("reconciling machine pool") recobsrv := hivemetrics.NewReconcileObserver(ControllerName, logger) diff --git a/pkg/controller/remotemachineset/remotemachineset_controller_test.go b/pkg/controller/remotemachineset/remotemachineset_controller_test.go index dde981da864..27e2ee138b9 100644 --- a/pkg/controller/remotemachineset/remotemachineset_controller_test.go +++ b/pkg/controller/remotemachineset/remotemachineset_controller_test.go @@ -660,7 +660,7 @@ func TestRemoteMachineSetReconcile(t *testing.T) { }, expectations: controllerExpectations, } - _, err := rcd.Reconcile(reconcile.Request{ + _, err := rcd.Reconcile(context.TODO(), reconcile.Request{ NamespacedName: types.NamespacedName{ Name: fmt.Sprintf("%s-worker", testName), Namespace: testNamespace, diff --git a/pkg/controller/syncidentityprovider/syncidentityprovider_controller.go b/pkg/controller/syncidentityprovider/syncidentityprovider_controller.go index ab1ae33be76..564b82a0625 100644 --- a/pkg/controller/syncidentityprovider/syncidentityprovider_controller.go +++ b/pkg/controller/syncidentityprovider/syncidentityprovider_controller.go @@ -80,17 +80,15 @@ func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconci reconciler := r.(*ReconcileSyncIdentityProviders) // Watch for changes to SyncIdentityProvider - err = c.Watch(&source.Kind{Type: &hivev1.SyncIdentityProvider{}}, &handler.EnqueueRequestsFromMapFunc{ - 
ToRequests: handler.ToRequestsFunc(reconciler.syncIdentityProviderWatchHandler), - }) + err = c.Watch(&source.Kind{Type: &hivev1.SyncIdentityProvider{}}, + handler.EnqueueRequestsFromMapFunc(reconciler.syncIdentityProviderWatchHandler)) if err != nil { return err } // Watch for changes to SelectorSyncIdentityProvider - err = c.Watch(&source.Kind{Type: &hivev1.SelectorSyncIdentityProvider{}}, &handler.EnqueueRequestsFromMapFunc{ - ToRequests: handler.ToRequestsFunc(reconciler.selectorSyncIdentityProviderWatchHandler), - }) + err = c.Watch(&source.Kind{Type: &hivev1.SelectorSyncIdentityProvider{}}, + handler.EnqueueRequestsFromMapFunc(reconciler.selectorSyncIdentityProviderWatchHandler)) if err != nil { return err } @@ -100,13 +98,13 @@ func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconci return err } -func (r *ReconcileSyncIdentityProviders) syncIdentityProviderWatchHandler(a handler.MapObject) []reconcile.Request { +func (r *ReconcileSyncIdentityProviders) syncIdentityProviderWatchHandler(a client.Object) []reconcile.Request { retval := []reconcile.Request{} - syncIDP := a.Object.(*hivev1.SyncIdentityProvider) + syncIDP := a.(*hivev1.SyncIdentityProvider) if syncIDP == nil { // Wasn't a SyncIdentityProvider, bail out. This should not happen. - r.logger.Errorf("Error converting MapObject.Object to SyncIdentityProvider. Value: %+v", a.Object) + r.logger.Errorf("Error converting MapObject.Object to SyncIdentityProvider. 
Value: %+v", a) return retval } @@ -120,13 +118,13 @@ func (r *ReconcileSyncIdentityProviders) syncIdentityProviderWatchHandler(a hand return retval } -func (r *ReconcileSyncIdentityProviders) selectorSyncIdentityProviderWatchHandler(a handler.MapObject) []reconcile.Request { +func (r *ReconcileSyncIdentityProviders) selectorSyncIdentityProviderWatchHandler(a client.Object) []reconcile.Request { retval := []reconcile.Request{} - ssidp := a.Object.(*hivev1.SelectorSyncIdentityProvider) + ssidp := a.(*hivev1.SelectorSyncIdentityProvider) if ssidp == nil { // Wasn't a SelectorSyncIdentityProvider, bail out. This should not happen. - r.logger.Errorf("Error converting MapObject.Object to SelectorSyncIdentityProvider. Value: %+v", a.Object) + r.logger.Errorf("Error converting MapObject.Object to SelectorSyncIdentityProvider. Value: %+v", a) return retval } @@ -174,7 +172,7 @@ type identityProviderPatchSpec struct { // Reconcile reads that state of the cluster for a ClusterDeployment object and makes changes to the // remote cluster MachineSets based on the state read and the worker machines defined in // ClusterDeployment.Spec.Config.Machines -func (r *ReconcileSyncIdentityProviders) Reconcile(request reconcile.Request) (reconcile.Result, error) { +func (r *ReconcileSyncIdentityProviders) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { contextLogger := controllerutils.BuildControllerLogger(ControllerName, "clusterDeployment", request.NamespacedName) contextLogger.Info("reconciling syncidentityproviders and clusterdeployments") recobsrv := hivemetrics.NewReconcileObserver(ControllerName, contextLogger) diff --git a/pkg/controller/syncidentityprovider/syncidentityprovider_controller_test.go b/pkg/controller/syncidentityprovider/syncidentityprovider_controller_test.go index 55aeee5c916..ee364524059 100644 --- a/pkg/controller/syncidentityprovider/syncidentityprovider_controller_test.go +++ 
b/pkg/controller/syncidentityprovider/syncidentityprovider_controller_test.go @@ -15,11 +15,11 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client/fake" - handler "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/openshift/hive/apis" hivev1 "github.com/openshift/hive/apis/hive/v1" + "github.com/openshift/hive/pkg/constants" ) @@ -197,8 +197,7 @@ func TestSyncIdentityProviderWatchHandler(t *testing.T) { } // Act - actualRequestList := reconciler.syncIdentityProviderWatchHandler(handler.MapObject{ - Object: test.syncIdentityProvider}) + actualRequestList := reconciler.syncIdentityProviderWatchHandler(test.syncIdentityProvider) // Assert assert.True(t, reflect.DeepEqual(test.expectedRequestList, actualRequestList)) @@ -256,8 +255,7 @@ func TestSelectorSyncIdentityProviderWatchHandler(t *testing.T) { } // Act - actualRequestList := r.selectorSyncIdentityProviderWatchHandler(handler.MapObject{ - Object: test.selectorSyncIdentityProvider}) + actualRequestList := r.selectorSyncIdentityProviderWatchHandler(test.selectorSyncIdentityProvider) // Assert assert.True(t, reflect.DeepEqual(test.expectedRequestList, actualRequestList)) @@ -462,7 +460,7 @@ func TestReconcile(t *testing.T) { } // Act - result, err := r.Reconcile(reconcile.Request{ + result, err := r.Reconcile(context.TODO(), reconcile.Request{ NamespacedName: types.NamespacedName{ Name: test.watchedObjectName, Namespace: test.watchedObjectNamespace, diff --git a/pkg/controller/unreachable/unreachable_controller.go b/pkg/controller/unreachable/unreachable_controller.go index 43241c69516..aebdcc374dd 100644 --- a/pkg/controller/unreachable/unreachable_controller.go +++ b/pkg/controller/unreachable/unreachable_controller.go @@ -112,7 +112,7 @@ type ReconcileRemoteMachineSet struct { } // Reconcile checks if we can establish an API client connection to the remote cluster and maintains the 
unreachable condition as a result. -func (r *ReconcileRemoteMachineSet) Reconcile(request reconcile.Request) (reconcile.Result, error) { +func (r *ReconcileRemoteMachineSet) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { cdLog := controllerutils.BuildControllerLogger(ControllerName, "clusterDeployment", request.NamespacedName) cdLog.Info("reconciling cluster deployment") recobsrv := hivemetrics.NewReconcileObserver(ControllerName, cdLog) diff --git a/pkg/controller/unreachable/unreachable_controller_test.go b/pkg/controller/unreachable/unreachable_controller_test.go index d6853a4d5ed..70e059085b6 100644 --- a/pkg/controller/unreachable/unreachable_controller_test.go +++ b/pkg/controller/unreachable/unreachable_controller_test.go @@ -232,7 +232,7 @@ func TestReconcile(t *testing.T) { Namespace: testNamespace, } - result, err := rcd.Reconcile(reconcile.Request{NamespacedName: namespacedName}) + result, err := rcd.Reconcile(context.TODO(), reconcile.Request{NamespacedName: namespacedName}) assert.NoError(t, err, "unexpected error during reconcile") cd := &hivev1.ClusterDeployment{} diff --git a/pkg/controller/utils/clientwrapper.go b/pkg/controller/utils/clientwrapper.go index e67c3a885cb..d6c7da772e4 100644 --- a/pkg/controller/utils/clientwrapper.go +++ b/pkg/controller/utils/clientwrapper.go @@ -70,14 +70,15 @@ func NewClientWithMetricsOrDie(mgr manager.Manager, ctrlrName hivev1.ControllerN log.WithError(err).Fatal("unable to initialize metrics wrapped client") } - return &client.DelegatingClient{ - Reader: &client.DelegatingReader{ - CacheReader: mgr.GetCache(), - ClientReader: c, - }, - Writer: c, - StatusClient: c, + dc, err := client.NewDelegatingClient(client.NewDelegatingClientInput{ + CacheReader: mgr.GetCache(), + Client: c, + }) + if err != nil { + log.WithError(err).Fatal("unable to initialize metrics wrapped client") } + + return dc } // AddControllerMetricsTransportWrapper adds a transport wrapper to the given rest 
config which diff --git a/pkg/controller/utils/dnszone.go b/pkg/controller/utils/dnszone.go index 6de3dbfb64f..fa71624e40a 100644 --- a/pkg/controller/utils/dnszone.go +++ b/pkg/controller/utils/dnszone.go @@ -15,15 +15,16 @@ import ( ) func EnqueueDNSZonesOwnedByClusterDeployment(c client.Client, logger log.FieldLogger) handler.EventHandler { - return &handler.EnqueueRequestsFromMapFunc{ToRequests: handler.ToRequestsFunc(func(mapObj handler.MapObject) []reconcile.Request { + + return handler.EnqueueRequestsFromMapFunc(func(mapObj client.Object) []reconcile.Request { dnsZones := &hivev1.DNSZoneList{} if err := c.List( context.TODO(), dnsZones, - client.InNamespace(mapObj.Meta.GetNamespace()), + client.InNamespace(mapObj.GetNamespace()), client.MatchingLabels{ constants.DNSZoneTypeLabel: constants.DNSZoneTypeChild, - constants.ClusterDeploymentNameLabel: mapObj.Meta.GetName(), + constants.ClusterDeploymentNameLabel: mapObj.GetName(), }, ); err != nil { logger.WithError(err).Log(LogLevel(err), "could not list DNS zones owned by ClusterDeployment") @@ -31,15 +32,10 @@ func EnqueueDNSZonesOwnedByClusterDeployment(c client.Client, logger log.FieldLo } requests := make([]reconcile.Request, len(dnsZones.Items)) for i, dnsZone := range dnsZones.Items { - request, err := client.ObjectKeyFromObject(&dnsZone) - if err != nil { - logger.WithError(err).Error("could not get object key for DNS zone") - continue - } - requests[i] = reconcile.Request{NamespacedName: request} + requests[i] = reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&dnsZone)} } return requests - })} + }) } // ReconcileDNSZoneForRelocation performs reconciliation on a DNSZone that is in the midst of a relocation to a new diff --git a/pkg/controller/utils/loggingeventhandler.go b/pkg/controller/utils/loggingeventhandler.go index 9d287c0177b..3f106d74bd6 100644 --- a/pkg/controller/utils/loggingeventhandler.go +++ b/pkg/controller/utils/loggingeventhandler.go @@ -33,28 +33,28 @@ type 
loggingEventHandler struct { // Create implements handler.EventHandler func (h *loggingEventHandler) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { - logger := h.loggerForEvent("create", e.Meta) + logger := h.loggerForEvent("create", e.Object) logger.Debug("Handling event") h.eventHandler.Create(e, wrapQueueWithLogging(q, logger)) } // Delete implements handler.EventHandler func (h *loggingEventHandler) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { - logger := h.loggerForEvent("delete", e.Meta) + logger := h.loggerForEvent("delete", e.Object) logger.Debug("Handling event") h.eventHandler.Delete(e, wrapQueueWithLogging(q, logger)) } // Update implements handler.EventHandler func (h *loggingEventHandler) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { - logger := h.loggerForEvent("update", e.MetaNew) + logger := h.loggerForEvent("update", e.ObjectNew) logger.Debug("Handling event") h.eventHandler.Update(e, wrapQueueWithLogging(q, logger)) } // Generic implements handler.EventHandler func (h *loggingEventHandler) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) { - logger := h.loggerForEvent("generic", e.Meta) + logger := h.loggerForEvent("generic", e.Object) logger.Debug("Handling event") h.eventHandler.Generic(e, wrapQueueWithLogging(q, logger)) } diff --git a/pkg/controller/utils/ownership.go b/pkg/controller/utils/ownership.go index d88d058fcab..9469bc34442 100644 --- a/pkg/controller/utils/ownership.go +++ b/pkg/controller/utils/ownership.go @@ -22,7 +22,7 @@ import ( // OwnershipUniqueKey contains the uniquly identifiable pattern for ensuring ownership labels are correct applied for a type. 
type OwnershipUniqueKey struct { LabelSelector map[string]string - TypeToList runtime.Object + TypeToList client.ObjectList Controlled bool } @@ -31,7 +31,7 @@ func ReconcileOwnerReferences(owner hivev1.MetaRuntimeObject, ownershipKeys []*O errlist := []error{} for _, ownershipKey := range ownershipKeys { - objects, err := ListRuntimeObjects(kubeclient, []runtime.Object{ownershipKey.TypeToList}, client.MatchingLabels(ownershipKey.LabelSelector), client.InNamespace(owner.GetNamespace())) + objects, err := ListRuntimeObjects(kubeclient, []client.ObjectList{ownershipKey.TypeToList}, client.MatchingLabels(ownershipKey.LabelSelector), client.InNamespace(owner.GetNamespace())) if err != nil { errlist = append(errlist, errors.Wrap(err, "failed listing objects owned by clusterdeployment according to label")) continue diff --git a/pkg/controller/utils/ownership_test.go b/pkg/controller/utils/ownership_test.go index 2b7729e6444..a5ff2931643 100644 --- a/pkg/controller/utils/ownership_test.go +++ b/pkg/controller/utils/ownership_test.go @@ -348,7 +348,7 @@ func TestReconcile(t *testing.T) { // Act err := ReconcileOwnerReferences(test.owner, test.ownershipUniqueKeys, fakeKubeClient, testscheme, logger) - actualObjects, listErr := ListRuntimeObjects(fakeKubeClient, []runtime.Object{test.listRuntimeObjectsOwnershipUniqueKey.TypeToList}, client.MatchingLabels(test.listRuntimeObjectsOwnershipUniqueKey.LabelSelector)) + actualObjects, listErr := ListRuntimeObjects(fakeKubeClient, []client.ObjectList{test.listRuntimeObjectsOwnershipUniqueKey.TypeToList}, client.MatchingLabels(test.listRuntimeObjectsOwnershipUniqueKey.LabelSelector)) // Assert assert.NoError(t, err, "Unexpected error from ReconcileOwnerReferences") diff --git a/pkg/controller/utils/utils.go b/pkg/controller/utils/utils.go index 09e3a9435c9..3c10ec56cd9 100644 --- a/pkg/controller/utils/utils.go +++ b/pkg/controller/utils/utils.go @@ -245,11 +245,11 @@ func LogLevel(err error) log.Level { } // ListRuntimeObjects 
returns a slice of runtime objects returned from the kubernetes client based on the passed in list of types to return and list options. -func ListRuntimeObjects(c client.Client, typesToList []runtime.Object, opts ...client.ListOption) ([]runtime.Object, error) { +func ListRuntimeObjects(c client.Client, typesToList []client.ObjectList, opts ...client.ListOption) ([]runtime.Object, error) { nsObjects := []runtime.Object{} for _, t := range typesToList { - listObj := t.DeepCopyObject() + listObj := t.DeepCopyObject().(client.ObjectList) if err := c.List(context.TODO(), listObj, opts...); err != nil { return nil, err } diff --git a/pkg/controller/velerobackup/velerobackup_controller.go b/pkg/controller/velerobackup/velerobackup_controller.go index dc0ad8ea052..d10143ebbd0 100644 --- a/pkg/controller/velerobackup/velerobackup_controller.go +++ b/pkg/controller/velerobackup/velerobackup_controller.go @@ -57,7 +57,7 @@ var ( &hivev1.DNSZone{}, } - hiveNamespaceScopedListTypes = []runtime.Object{ + hiveNamespaceScopedListTypes = []client.ObjectList{ &hivev1.ClusterDeploymentList{}, &hivev1.SyncSetList{}, &hivev1.DNSZoneList{}, @@ -142,12 +142,14 @@ func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconci func (r *ReconcileBackup) registerHiveObjectWatches(c controller.Controller) error { for _, t := range hiveNamespaceScopedTypesToWatch { - if err := c.Watch(&source.Kind{Type: t.DeepCopyObject()}, &handler.EnqueueRequestsFromMapFunc{ - // Queue up the NS for this Hive Object - ToRequests: handler.ToRequestsFunc(func(a handler.MapObject) []reconcile.Request { - return []reconcile.Request{{NamespacedName: types.NamespacedName{Namespace: a.Meta.GetNamespace()}}} - }), - }); err != nil { + err := c.Watch(&source.Kind{Type: t.DeepCopyObject().(client.Object)}, handler.EnqueueRequestsFromMapFunc( + func(mapObj client.Object) []reconcile.Request { + // Queue up the NS for this Hive Object + return []reconcile.Request{{NamespacedName: 
types.NamespacedName{Namespace: mapObj.GetNamespace()}}} + }, + ), + ) + if err != nil { return err } } @@ -168,7 +170,7 @@ type ReconcileBackup struct { } // Reconcile ensures that all Hive object changes have corresponding Velero backup objects. -func (r *ReconcileBackup) Reconcile(request reconcile.Request) (reconcile.Result, error) { +func (r *ReconcileBackup) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { nsLogger := controllerutils.BuildControllerLogger(ControllerName, "namespace", request.NamespacedName) nsLogger.Info("reconciling backups and Hive object changes") recobsrv := hivemetrics.NewReconcileObserver(ControllerName, nsLogger) diff --git a/pkg/controller/velerobackup/velerobackup_controller_test.go b/pkg/controller/velerobackup/velerobackup_controller_test.go index 42c7b83fb32..a4755a75e81 100644 --- a/pkg/controller/velerobackup/velerobackup_controller_test.go +++ b/pkg/controller/velerobackup/velerobackup_controller_test.go @@ -271,7 +271,7 @@ func TestReconcile(t *testing.T) { tolerance := float64(10 * time.Second) // Act - actualResult, actualError := r.Reconcile(test.request) + actualResult, actualError := r.Reconcile(context.TODO(), test.request) actualObjects, err := controllerutils.ListRuntimeObjects(r, types, client.InNamespace(namespace)) lastBackupName, lastBackupTimestamp := ignoreUncomparedFields(test.expectedObjects, actualObjects) diff --git a/pkg/installmanager/installmanager.go b/pkg/installmanager/installmanager.go index ec397a509c2..b75a3a4dfd6 100644 --- a/pkg/installmanager/installmanager.go +++ b/pkg/installmanager/installmanager.go @@ -1233,7 +1233,7 @@ func uploadAdminPassword(provision *hivev1.ClusterProvision, m *InstallManager) return s, nil } -func createWithRetries(obj runtime.Object, m *InstallManager) error { +func createWithRetries(obj client.Object, m *InstallManager) error { logger := m.log.WithField("kind", obj.GetObjectKind().GroupVersionKind().Kind) metaObj, err := 
meta.Accessor(obj) @@ -1309,7 +1309,7 @@ func waitForProvisioningStage(provision *hivev1.ClusterProvision, m *InstallMana if err != nil { return errors.Wrap(err, "could not get the GVK for clusterprovisions") } - restClient, err := apiutil.RESTClientForGVK(gvk, config, scheme.Codecs) + restClient, err := apiutil.RESTClientForGVK(gvk, false, config, scheme.Codecs) if err != nil { return errors.Wrap(err, "could not create REST client") } diff --git a/pkg/operator/hive/hive_controller.go b/pkg/operator/hive/hive_controller.go index fbcdb985dc2..52167e21fa0 100644 --- a/pkg/operator/hive/hive_controller.go +++ b/pkg/operator/hive/hive_controller.go @@ -137,7 +137,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error { mgr.Add(&informerRunnable{informer: configMapInformer}) // Watch for changes to cm/kube-apiserver-aggregator-client-ca in the OpenShift managed namespace - err = c.Watch(&source.Informer{Informer: configMapInformer}, &handler.EnqueueRequestsFromMapFunc{ToRequests: handler.ToRequestsFunc(aggregatorCAConfigMapHandler)}) + err = c.Watch(&source.Informer{Informer: configMapInformer}, handler.EnqueueRequestsFromMapFunc(handler.MapFunc(aggregatorCAConfigMapHandler))) if err != nil { return err } @@ -235,7 +235,7 @@ type ReconcileHiveConfig struct { // Reconcile reads that state of the cluster for a Hive object and makes changes based on the state read // and what is in the Hive.Spec -func (r *ReconcileHiveConfig) Reconcile(request reconcile.Request) (reconcile.Result, error) { +func (r *ReconcileHiveConfig) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { hLog := log.WithField("controller", "hive") hLog.Info("Reconciling Hive components") @@ -427,12 +427,12 @@ func (r *ReconcileHiveConfig) establishSecretWatch(hLog *log.Entry, hiveNSName s }, }, predicate.Funcs{ CreateFunc: func(e event.CreateEvent) bool { - hLog.WithField("predicateResponse", e.Meta.GetName() == hiveAdmissionServingCertSecretName).Debug("secret 
CreateEvent") - return e.Meta.GetName() == hiveAdmissionServingCertSecretName + hLog.WithField("predicateResponse", e.Object.GetName() == hiveAdmissionServingCertSecretName).Debug("secret CreateEvent") + return e.Object.GetName() == hiveAdmissionServingCertSecretName }, UpdateFunc: func(e event.UpdateEvent) bool { - hLog.WithField("predicateResponse", e.MetaNew.GetName() == hiveAdmissionServingCertSecretName).Debug("secret UpdateEvent") - return e.MetaNew.GetName() == hiveAdmissionServingCertSecretName + hLog.WithField("predicateResponse", e.ObjectNew.GetName() == hiveAdmissionServingCertSecretName).Debug("secret UpdateEvent") + return e.ObjectNew.GetName() == hiveAdmissionServingCertSecretName }, }) if err != nil { @@ -464,14 +464,15 @@ type informerRunnable struct { informer cache.SharedIndexInformer } -func (r *informerRunnable) Start(stopch <-chan struct{}) error { +func (r *informerRunnable) Start(ctx context.Context) error { + stopch := ctx.Done() r.informer.Run(stopch) cache.WaitForCacheSync(stopch, r.informer.HasSynced) return nil } -func aggregatorCAConfigMapHandler(o handler.MapObject) []reconcile.Request { - if o.Meta.GetName() == aggregatorCAConfigMapName { +func aggregatorCAConfigMapHandler(o client.Object) []reconcile.Request { + if o.GetName() == aggregatorCAConfigMapName { return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: hiveConfigName}}} } return nil diff --git a/pkg/test/manager/mock/manager_generated.go b/pkg/test/manager/mock/manager_generated.go index bfb991c3f78..8fd28bd3991 100644 --- a/pkg/test/manager/mock/manager_generated.go +++ b/pkg/test/manager/mock/manager_generated.go @@ -5,6 +5,7 @@ package mock import ( + context "context" logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" meta "k8s.io/apimachinery/pkg/api/meta" @@ -268,7 +269,7 @@ func (mr *MockManagerMockRecorder) SetFields(arg0 interface{}) *gomock.Call { } // Start mocks base method -func (m *MockManager) Start(arg0 <-chan struct{}) 
error { +func (m *MockManager) Start(arg0 context.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Start", arg0) ret0, _ := ret[0].(error) diff --git a/test/e2e/common/machine.go b/test/e2e/common/machine.go index b6998eab1ec..2b103765590 100644 --- a/test/e2e/common/machine.go +++ b/test/e2e/common/machine.go @@ -19,7 +19,6 @@ import ( func WaitForMachines(cfg *rest.Config, testFunc func([]*machinev1.Machine) bool, timeOut time.Duration) error { logger := log.WithField("client", "machine") logger.Infof("Waiting for Machine") - stop := make(chan struct{}) done := make(chan struct{}) scheme := runtime.NewScheme() err := machinev1.SchemeBuilder.AddToScheme(scheme) @@ -55,8 +54,9 @@ func WaitForMachines(cfg *rest.Config, testFunc func([]*machinev1.Machine) bool, DeleteFunc: func(obj interface{}) { onUpdate() }, }) - go internalCache.Start(stop) - defer func() { stop <- struct{}{} }() + ctx, stop := context.WithCancel(context.Background()) + go internalCache.Start(ctx) + defer stop() select { case <-time.After(timeOut): diff --git a/test/e2e/common/machineset.go b/test/e2e/common/machineset.go index 211faee17f3..3d9bf401866 100644 --- a/test/e2e/common/machineset.go +++ b/test/e2e/common/machineset.go @@ -19,7 +19,6 @@ import ( func WaitForMachineSets(cfg *rest.Config, testFunc func([]*machinev1.MachineSet) bool, timeOut time.Duration) error { logger := log.WithField("client", "machineset") logger.Infof("Waiting for MachineSet") - stop := make(chan struct{}) done := make(chan struct{}) scheme := runtime.NewScheme() err := machinev1.SchemeBuilder.AddToScheme(scheme) @@ -54,8 +53,9 @@ func WaitForMachineSets(cfg *rest.Config, testFunc func([]*machinev1.MachineSet) DeleteFunc: func(obj interface{}) { onUpdate() }, }) - go internalCache.Start(stop) - defer func() { stop <- struct{}{} }() + ctx, stop := context.WithCancel(context.Background()) + go internalCache.Start(ctx) + defer stop() select { case <-time.After(timeOut): diff --git a/test/e2e/common/node.go 
b/test/e2e/common/node.go index ab2fbdea464..ec7fd37f470 100644 --- a/test/e2e/common/node.go +++ b/test/e2e/common/node.go @@ -17,7 +17,6 @@ import ( func WaitForNodes(cfg *rest.Config, testFunc func([]*corev1.Node) bool, timeOut time.Duration) error { logger := log.WithField("client", "node") logger.Infof("Waiting for Nodes") - stop := make(chan struct{}) done := make(chan struct{}) internalCache, err := cache.New(cfg, cache.Options{}) if err != nil { @@ -45,8 +44,9 @@ func WaitForNodes(cfg *rest.Config, testFunc func([]*corev1.Node) bool, timeOut DeleteFunc: func(obj interface{}) { onUpdate() }, }) - go internalCache.Start(stop) - defer func() { stop <- struct{}{} }() + ctx, stop := context.WithCancel(context.Background()) + go internalCache.Start(ctx) + defer stop() select { case <-time.After(timeOut): diff --git a/test/e2e/postinstall/syncsets/syncsets_suite_test.go b/test/e2e/postinstall/syncsets/syncsets_suite_test.go index e2d402021a2..e7391647536 100644 --- a/test/e2e/postinstall/syncsets/syncsets_suite_test.go +++ b/test/e2e/postinstall/syncsets/syncsets_suite_test.go @@ -449,7 +449,7 @@ func waitForSyncSetApplied(namespace, cdName, syncsetname, syncsettype string) e if err != nil { return err } - restClient, err := apiutil.RESTClientForGVK(gvk, cfg, serializer.NewCodecFactory(scheme.Scheme)) + restClient, err := apiutil.RESTClientForGVK(gvk, false, cfg, serializer.NewCodecFactory(scheme.Scheme)) if err != nil { return err } @@ -491,7 +491,7 @@ func waitForSyncSetDeleted(namespace, syncsetname string) error { if err != nil { return err } - restClient, err := apiutil.RESTClientForGVK(gvk, cfg, serializer.NewCodecFactory(scheme.Scheme)) + restClient, err := apiutil.RESTClientForGVK(gvk, false, cfg, serializer.NewCodecFactory(scheme.Scheme)) if err != nil { return err } @@ -517,7 +517,7 @@ func waitForSyncSetDisassociated(namespace, cdName, syncsetname, syncsettype str if err != nil { return err } - restClient, err := apiutil.RESTClientForGVK(gvk, cfg, 
serializer.NewCodecFactory(scheme.Scheme)) + restClient, err := apiutil.RESTClientForGVK(gvk, false, cfg, serializer.NewCodecFactory(scheme.Scheme)) if err != nil { return err } diff --git a/test/integration/resource/resource_suite_test.go b/test/integration/resource/resource_suite_test.go index 263bb19d88b..569a5325d59 100644 --- a/test/integration/resource/resource_suite_test.go +++ b/test/integration/resource/resource_suite_test.go @@ -7,18 +7,14 @@ import ( "path/filepath" "testing" - "github.com/onsi/gomega" "github.com/sirupsen/logrus" + "github.com/openshift/hive/apis" corev1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/openshift/hive/apis" ) var cfg *rest.Config @@ -67,24 +63,3 @@ func TestMain(m *testing.M) { t.Stop() os.Exit(code) } - -// SetupTestReconcile returns a reconcile.Reconcile implementation that delegates to inner and -// writes the request to requests after Reconcile is finished. 
-func SetupTestReconcile(inner reconcile.Reconciler) (reconcile.Reconciler, chan reconcile.Request) { - requests := make(chan reconcile.Request) - fn := reconcile.Func(func(req reconcile.Request) (reconcile.Result, error) { - result, err := inner.Reconcile(req) - requests <- req - return result, err - }) - return fn, requests -} - -// StartTestManager adds recFn -func StartTestManager(mgr manager.Manager, g *gomega.GomegaWithT) chan struct{} { - stop := make(chan struct{}) - go func() { - g.Expect(mgr.Start(stop)).NotTo(gomega.HaveOccurred()) - }() - return stop -} diff --git a/vendor/github.com/openshift/hive/apis/go.sum b/vendor/github.com/openshift/hive/apis/go.sum index 9ee32435bb3..765c469ce64 100644 --- a/vendor/github.com/openshift/hive/apis/go.sum +++ b/vendor/github.com/openshift/hive/apis/go.sum @@ -18,7 +18,6 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= @@ -45,7 +44,6 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= 
-github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -143,7 +141,6 @@ golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -181,7 +178,6 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -190,7 +186,6 @@ gopkg.in/check.v1 
v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/openshift/hive/apis/scheme/scheme.go b/vendor/github.com/openshift/hive/apis/scheme/scheme.go index ada21321360..0afe1edf572 100644 --- a/vendor/github.com/openshift/hive/apis/scheme/scheme.go +++ b/vendor/github.com/openshift/hive/apis/scheme/scheme.go @@ -49,7 +49,7 @@ limitations under the License. // } // // func main() { -// mgr := controllers.NewManager(controllers.GetConfigOrDie(), manager.Options{ +// mgr := controllers.NewManager(context.Background(), controllers.GetConfigOrDie(), manager.Options{ // Scheme: scheme, // }) // // ... @@ -69,7 +69,7 @@ type Builder struct { runtime.SchemeBuilder } -// Register adds one or objects to the SchemeBuilder so they can be added to a Scheme. Register mutates bld. +// Register adds one or more objects to the SchemeBuilder so they can be added to a Scheme. Register mutates bld. func (bld *Builder) Register(object ...runtime.Object) *Builder { bld.SchemeBuilder.Register(func(scheme *runtime.Scheme) error { scheme.AddKnownTypes(bld.GroupVersion, object...) 
diff --git a/vendor/go.uber.org/zap/.travis.yml b/vendor/go.uber.org/zap/.travis.yml index 647b4ee431f..cfdc69f413e 100644 --- a/vendor/go.uber.org/zap/.travis.yml +++ b/vendor/go.uber.org/zap/.travis.yml @@ -9,8 +9,8 @@ env: matrix: include: - - go: 1.12.x - go: 1.13.x + - go: 1.14.x env: LINT=1 script: diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index 6e28b6ea49a..aeff90e4ea5 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -1,5 +1,21 @@ # Changelog +## 1.15.0 (23 Apr 2020) + +Bugfixes: +* [#804][]: Fix handling of `Time` values out of `UnixNano` range. +* [#812][]: Fix `IncreaseLevel` being reset after a call to `With`. + +Enhancements: +* [#806][]: Add `WithCaller` option to supersede the `AddCaller` option. This + allows disabling annotation of log entries with caller information if + previously enabled with `AddCaller`. +* [#813][]: Deprecate `NewSampler` constructor in favor of + `NewSamplerWithOptions` which supports a `SamplerHook` option. This option + adds support for monitoring sampling decisions through a hook. + +Thanks to @danielbprice for their contributions to this release. + ## 1.14.1 (14 Mar 2020) Bugfixes: @@ -379,3 +395,7 @@ upgrade to the upcoming stable release. [#791]: https://github.com/uber-go/zap/pull/791 [#795]: https://github.com/uber-go/zap/pull/795 [#799]: https://github.com/uber-go/zap/pull/799 +[#804]: https://github.com/uber-go/zap/pull/804 +[#812]: https://github.com/uber-go/zap/pull/812 +[#806]: https://github.com/uber-go/zap/pull/806 +[#813]: https://github.com/uber-go/zap/pull/813 diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go index eae1d237f8e..192fd1a9474 100644 --- a/vendor/go.uber.org/zap/config.go +++ b/vendor/go.uber.org/zap/config.go @@ -32,10 +32,14 @@ import ( // global CPU and I/O load that logging puts on your process while attempting // to preserve a representative subset of your logs. 
// -// Values configured here are per-second. See zapcore.NewSampler for details. +// If specified, the Sampler will invoke the Hook after each decision. +// +// Values configured here are per-second. See zapcore.NewSamplerWithOptions for +// details. type SamplingConfig struct { - Initial int `json:"initial" yaml:"initial"` - Thereafter int `json:"thereafter" yaml:"thereafter"` + Initial int `json:"initial" yaml:"initial"` + Thereafter int `json:"thereafter" yaml:"thereafter"` + Hook func(zapcore.Entry, zapcore.SamplingDecision) `json:"-" yaml:"-"` } // Config offers a declarative way to construct a logger. It doesn't do @@ -208,9 +212,19 @@ func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option { opts = append(opts, AddStacktrace(stackLevel)) } - if cfg.Sampling != nil { + if scfg := cfg.Sampling; scfg != nil { opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core { - return zapcore.NewSampler(core, time.Second, int(cfg.Sampling.Initial), int(cfg.Sampling.Thereafter)) + var samplerOpts []zapcore.SamplerOption + if scfg.Hook != nil { + samplerOpts = append(samplerOpts, zapcore.SamplerHook(scfg.Hook)) + } + return zapcore.NewSamplerWithOptions( + core, + time.Second, + cfg.Sampling.Initial, + cfg.Sampling.Thereafter, + samplerOpts..., + ) })) } diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go index 83c1ea245a2..dd558fc231b 100644 --- a/vendor/go.uber.org/zap/field.go +++ b/vendor/go.uber.org/zap/field.go @@ -32,6 +32,11 @@ import ( // improves the navigability of this package's API documentation. type Field = zapcore.Field +var ( + _minTimeInt64 = time.Unix(0, math.MinInt64) + _maxTimeInt64 = time.Unix(0, math.MaxInt64) +) + // Skip constructs a no-op field, which is often useful when handling invalid // inputs in other Field constructors. func Skip() Field { @@ -339,6 +344,9 @@ func Stringer(key string, val fmt.Stringer) Field { // Time constructs a Field with the given key and value. 
The encoder // controls how the time is serialized. func Time(key string, val time.Time) Field { + if val.Before(_minTimeInt64) || val.After(_maxTimeInt64) { + return Field{Key: key, Type: zapcore.TimeFullType, Interface: val} + } return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()} } diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go index dd3b6b2b2f3..59f1b54a04f 100644 --- a/vendor/go.uber.org/zap/options.go +++ b/vendor/go.uber.org/zap/options.go @@ -87,10 +87,17 @@ func Development() Option { } // AddCaller configures the Logger to annotate each message with the filename -// and line number of zap's caller. +// and line number of zap's caller. See also WithCaller. func AddCaller() Option { + return WithCaller(true) +} + +// WithCaller configures the Logger to annotate each message with the filename +// and line number of zap's caller, or not, depending on the value of enabled. +// This is a generalized form of AddCaller. +func WithCaller(enabled bool) Option { return optionFunc(func(log *Logger) { - log.addCaller = true + log.addCaller = enabled }) } diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go index ae772e4a170..6e05f831ff5 100644 --- a/vendor/go.uber.org/zap/zapcore/field.go +++ b/vendor/go.uber.org/zap/zapcore/field.go @@ -65,8 +65,11 @@ const ( Int8Type // StringType indicates that the field carries a string. StringType - // TimeType indicates that the field carries a time.Time. + // TimeType indicates that the field carries a time.Time that is + // representable by a UnixNano() stored as an int64. TimeType + // TimeFullType indicates that the field carries a time.Time stored as-is. + TimeFullType // Uint64Type indicates that the field carries a uint64. Uint64Type // Uint32Type indicates that the field carries a uint32. @@ -145,6 +148,8 @@ func (f Field) AddTo(enc ObjectEncoder) { // Fall back to UTC if location is nil. 
enc.AddTime(f.Key, time.Unix(0, f.Integer)) } + case TimeFullType: + enc.AddTime(f.Key, f.Interface.(time.Time)) case Uint64Type: enc.AddUint64(f.Key, uint64(f.Integer)) case Uint32Type: diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go index a42135c159d..5a1749261ab 100644 --- a/vendor/go.uber.org/zap/zapcore/increase_level.go +++ b/vendor/go.uber.org/zap/zapcore/increase_level.go @@ -23,8 +23,7 @@ package zapcore import "fmt" type levelFilterCore struct { - Core - + core Core level LevelEnabler } @@ -46,10 +45,22 @@ func (c *levelFilterCore) Enabled(lvl Level) bool { return c.level.Enabled(lvl) } +func (c *levelFilterCore) With(fields []Field) Core { + return &levelFilterCore{c.core.With(fields), c.level} +} + func (c *levelFilterCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { if !c.Enabled(ent.Level) { return ce } - return c.Core.Check(ent, ce) + return c.core.Check(ent, ce) +} + +func (c *levelFilterCore) Write(ent Entry, fields []Field) error { + return c.core.Write(ent, fields) +} + +func (c *levelFilterCore) Sync() error { + return c.core.Sync() } diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go index e3164186367..25f10ca1d75 100644 --- a/vendor/go.uber.org/zap/zapcore/sampler.go +++ b/vendor/go.uber.org/zap/zapcore/sampler.go @@ -81,33 +81,104 @@ func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 { return 1 } -type sampler struct { - Core +// SamplingDecision is a decision represented as a bit field made by sampler. +// More decisions may be added in the future. +type SamplingDecision uint32 - counts *counters - tick time.Duration - first, thereafter uint64 +const ( + // LogDropped indicates that the Sampler dropped a log entry. + LogDropped SamplingDecision = 1 << iota + // LogSampled indicates that the Sampler sampled a log entry. 
+ LogSampled +) + +// optionFunc wraps a func so it satisfies the SamplerOption interface. +type optionFunc func(*sampler) + +func (f optionFunc) apply(s *sampler) { + f(s) +} + +// SamplerOption configures a Sampler. +type SamplerOption interface { + apply(*sampler) } -// NewSampler creates a Core that samples incoming entries, which caps the CPU -// and I/O load of logging while attempting to preserve a representative subset -// of your logs. +// nopSamplingHook is the default hook used by sampler. +func nopSamplingHook(Entry, SamplingDecision) {} + +// SamplerHook registers a function which will be called when Sampler makes a +// decision. +// +// This hook may be used to get visibility into the performance of the sampler. +// For example, use it to track metrics of dropped versus sampled logs. +// +// var dropped atomic.Int64 +// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) { +// if dec&zapcore.LogDropped > 0 { +// dropped.Inc() +// } +// }) +func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption { + return optionFunc(func(s *sampler) { + s.hook = hook + }) +} + +// NewSamplerWithOptions creates a Core that samples incoming entries, which +// caps the CPU and I/O load of logging while attempting to preserve a +// representative subset of your logs. // // Zap samples by logging the first N entries with a given level and message // each tick. If more Entries with the same level and message are seen during // the same interval, every Mth message is logged and the rest are dropped. // +// Sampler can be configured to report sampling decisions with the SamplerHook +// option. +// // Keep in mind that zap's sampling implementation is optimized for speed over // absolute precision; under load, each tick may be slightly over- or // under-sampled. 
-func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { - return &sampler{ +func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core { + s := &sampler{ Core: core, tick: tick, counts: newCounters(), first: uint64(first), thereafter: uint64(thereafter), + hook: nopSamplingHook, } + for _, opt := range opts { + opt.apply(s) + } + + return s +} + +type sampler struct { + Core + + counts *counters + tick time.Duration + first, thereafter uint64 + hook func(Entry, SamplingDecision) +} + +// NewSampler creates a Core that samples incoming entries, which +// caps the CPU and I/O load of logging while attempting to preserve a +// representative subset of your logs. +// +// Zap samples by logging the first N entries with a given level and message +// each tick. If more Entries with the same level and message are seen during +// the same interval, every Mth message is logged and the rest are dropped. +// +// Keep in mind that zap's sampling implementation is optimized for speed over +// absolute precision; under load, each tick may be slightly over- or +// under-sampled. +// +// Deprecated: use NewSamplerWithOptions. 
+func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { + return NewSamplerWithOptions(core, tick, first, thereafter) } func (s *sampler) With(fields []Field) Core { @@ -117,6 +188,7 @@ func (s *sampler) With(fields []Field) Core { counts: s.counts, first: s.first, thereafter: s.thereafter, + hook: s.hook, } } @@ -128,7 +200,9 @@ func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { counter := s.counts.get(ent.Level, ent.Message) n := counter.IncCheckReset(ent.Time, s.tick) if n > s.first && (n-s.first)%s.thereafter != 0 { + s.hook(ent, LogDropped) return ce } + s.hook(ent, LogSampled) return s.Core.Check(ent, ce) } diff --git a/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go index e7cb7d6da50..b8ae4456d27 100644 --- a/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go +++ b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go @@ -47,8 +47,8 @@ func (a ByPath) Len() int { return len(a) } func (a ByPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a ByPath) Less(i, j int) bool { return a[i].Path < a[j].Path } -func NewPatch(operation, path string, value interface{}) Operation { - return Operation{Operation: operation, Path: path, Value: value} +func NewOperation(op, path string, value interface{}) Operation { + return Operation{Operation: op, Path: path, Value: value} } // CreatePatch creates a patch as specified in http://jsonpatch.com/ @@ -162,7 +162,7 @@ func diff(a, b map[string]interface{}, path string, patch []Operation) ([]Operat av, ok := a[key] // value was added if !ok { - patch = append(patch, NewPatch("add", p, bv)) + patch = append(patch, NewOperation("add", p, bv)) continue } // Types are the same, compare values @@ -178,7 +178,7 @@ func diff(a, b map[string]interface{}, path string, patch []Operation) ([]Operat if !found { p := makePath(path, key) - patch = append(patch, NewPatch("remove", p, nil)) + patch = append(patch, NewOperation("remove", p, nil)) } } return patch, 
nil @@ -192,10 +192,10 @@ func handleValues(av, bv interface{}, p string, patch []Operation) ([]Operation, // do nothing return patch, nil } else if at == nil && bt != nil { - return append(patch, NewPatch("add", p, bv)), nil + return append(patch, NewOperation("add", p, bv)), nil } else if at != bt { // If types have changed, replace completely (preserves null in destination) - return append(patch, NewPatch("replace", p, bv)), nil + return append(patch, NewOperation("replace", p, bv)), nil } } @@ -209,7 +209,7 @@ func handleValues(av, bv interface{}, p string, patch []Operation) ([]Operation, } case string, float64, bool: if !matchesValue(av, bv) { - patch = append(patch, NewPatch("replace", p, bv)) + patch = append(patch, NewOperation("replace", p, bv)) } case []interface{}: bt := bv.([]interface{}) @@ -218,10 +218,10 @@ func handleValues(av, bv interface{}, p string, patch []Operation) ([]Operation, } else { n := min(len(at), len(bt)) for i := len(at) - 1; i >= n; i-- { - patch = append(patch, NewPatch("remove", makePath(p, i), nil)) + patch = append(patch, NewOperation("remove", makePath(p, i), nil)) } for i := n; i < len(bt); i++ { - patch = append(patch, NewPatch("add", makePath(p, i), bt[i])) + patch = append(patch, NewOperation("add", makePath(p, i), bt[i])) } for i := 0; i < n; i++ { var err error @@ -313,16 +313,16 @@ func min(x int, y int) int { func backtrace(s, t []interface{}, p string, i int, j int, matrix [][]int) []Operation { if i > 0 && matrix[i-1][j]+1 == matrix[i][j] { - op := NewPatch("remove", makePath(p, i-1), nil) + op := NewOperation("remove", makePath(p, i-1), nil) return append([]Operation{op}, backtrace(s, t, p, i-1, j, matrix)...) } if j > 0 && matrix[i][j-1]+1 == matrix[i][j] { - op := NewPatch("add", makePath(p, i), t[j-1]) + op := NewOperation("add", makePath(p, i), t[j-1]) return append([]Operation{op}, backtrace(s, t, p, i, j-1, matrix)...) 
} if i > 0 && j > 0 && matrix[i-1][j-1]+1 == matrix[i][j] { if isBasicType(s[0]) { - op := NewPatch("replace", makePath(p, i-1), t[j-1]) + op := NewOperation("replace", makePath(p, i-1), t[j-1]) return append([]Operation{op}, backtrace(s, t, p, i-1, j-1, matrix)...) } diff --git a/vendor/k8s.io/client-go/metadata/interface.go b/vendor/k8s.io/client-go/metadata/interface.go new file mode 100644 index 00000000000..127c39501a1 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/interface.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadata + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" +) + +// Interface allows a caller to get the metadata (in the form of PartialObjectMetadata objects) +// from any Kubernetes compatible resource API. +type Interface interface { + Resource(resource schema.GroupVersionResource) Getter +} + +// ResourceInterface contains the set of methods that may be invoked on objects by their metadata. +// Update is not supported by the server, but Patch can be used for the actions Update would handle. 
+type ResourceInterface interface { + Delete(ctx context.Context, name string, options metav1.DeleteOptions, subresources ...string) error + DeleteCollection(ctx context.Context, options metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(ctx context.Context, name string, options metav1.GetOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) + List(ctx context.Context, opts metav1.ListOptions) (*metav1.PartialObjectMetadataList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, options metav1.PatchOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) +} + +// Getter handles both namespaced and non-namespaced resource types consistently. +type Getter interface { + Namespace(string) ResourceInterface + ResourceInterface +} diff --git a/vendor/k8s.io/client-go/metadata/metadata.go b/vendor/k8s.io/client-go/metadata/metadata.go new file mode 100644 index 00000000000..72b55799148 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadata.go @@ -0,0 +1,307 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metadata + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "k8s.io/klog/v2" + + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/rest" +) + +var deleteScheme = runtime.NewScheme() +var parameterScheme = runtime.NewScheme() +var deleteOptionsCodec = serializer.NewCodecFactory(deleteScheme) +var dynamicParameterCodec = runtime.NewParameterCodec(parameterScheme) + +var versionV1 = schema.GroupVersion{Version: "v1"} + +func init() { + metav1.AddToGroupVersion(parameterScheme, versionV1) + metav1.AddToGroupVersion(deleteScheme, versionV1) +} + +// Client allows callers to retrieve the object metadata for any +// Kubernetes-compatible API endpoint. The client uses the +// meta.k8s.io/v1 PartialObjectMetadata resource to more efficiently +// retrieve just the necessary metadata, but on older servers +// (Kubernetes 1.14 and before) will retrieve the object and then +// convert the metadata. +type Client struct { + client *rest.RESTClient +} + +var _ Interface = &Client{} + +// ConfigFor returns a copy of the provided config with the +// appropriate metadata client defaults set. +func ConfigFor(inConfig *rest.Config) *rest.Config { + config := rest.CopyConfig(inConfig) + config.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" + config.ContentType = "application/vnd.kubernetes.protobuf" + config.NegotiatedSerializer = metainternalversionscheme.Codecs.WithoutConversion() + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + return config +} + +// NewForConfigOrDie creates a new metadata client for the given config and +// panics if there is an error in the config. 
+func NewForConfigOrDie(c *rest.Config) Interface { + ret, err := NewForConfig(c) + if err != nil { + panic(err) + } + return ret +} + +// NewForConfig creates a new metadata client that can retrieve object +// metadata details about any Kubernetes object (core, aggregated, or custom +// resource based) in the form of PartialObjectMetadata objects, or returns +// an error. +func NewForConfig(inConfig *rest.Config) (Interface, error) { + config := ConfigFor(inConfig) + // for serializing the options + config.GroupVersion = &schema.GroupVersion{} + config.APIPath = "/this-value-should-never-be-sent" + + restClient, err := rest.RESTClientFor(config) + if err != nil { + return nil, err + } + + return &Client{client: restClient}, nil +} + +type client struct { + client *Client + namespace string + resource schema.GroupVersionResource +} + +// Resource returns an interface that can access cluster or namespace +// scoped instances of resource. +func (c *Client) Resource(resource schema.GroupVersionResource) Getter { + return &client{client: c, resource: resource} +} + +// Namespace returns an interface that can access namespace-scoped instances of the +// provided resource. +func (c *client) Namespace(ns string) ResourceInterface { + ret := *c + ret.namespace = ns + return &ret +} + +// Delete removes the provided resource from the server. +func (c *client) Delete(ctx context.Context, name string, opts metav1.DeleteOptions, subresources ...string) error { + if len(name) == 0 { + return fmt.Errorf("name is required") + } + deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts) + if err != nil { + return err + } + + result := c.client.client. + Delete(). + AbsPath(append(c.makeURLSegments(name), subresources...)...). + Body(deleteOptionsByte). + Do(ctx) + return result.Error() +} + +// DeleteCollection triggers deletion of all resources in the specified scope (namespace or cluster). 
+func (c *client) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOptions metav1.ListOptions) error { + deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts) + if err != nil { + return err + } + + result := c.client.client. + Delete(). + AbsPath(c.makeURLSegments("")...). + Body(deleteOptionsByte). + SpecificallyVersionedParams(&listOptions, dynamicParameterCodec, versionV1). + Do(ctx) + return result.Error() +} + +// Get returns the resource with name from the specified scope (namespace or cluster). +func (c *client) Get(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) { + if len(name) == 0 { + return nil, fmt.Errorf("name is required") + } + result := c.client.client.Get().AbsPath(append(c.makeURLSegments(name), subresources...)...). + SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). 
+ Do(ctx) + if err := result.Error(); err != nil { + return nil, err + } + obj, err := result.Get() + if runtime.IsNotRegisteredError(err) { + klog.V(5).Infof("Unable to retrieve PartialObjectMetadata: %#v", err) + rawBytes, err := result.Raw() + if err != nil { + return nil, err + } + var partial metav1.PartialObjectMetadata + if err := json.Unmarshal(rawBytes, &partial); err != nil { + return nil, fmt.Errorf("unable to decode returned object as PartialObjectMetadata: %v", err) + } + if !isLikelyObjectMetadata(&partial) { + return nil, fmt.Errorf("object does not appear to match the ObjectMeta schema: %#v", partial) + } + partial.TypeMeta = metav1.TypeMeta{} + return &partial, nil + } + if err != nil { + return nil, err + } + partial, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return nil, fmt.Errorf("unexpected object, expected PartialObjectMetadata but got %T", obj) + } + return partial, nil +} + +// List returns all resources within the specified scope (namespace or cluster). +func (c *client) List(ctx context.Context, opts metav1.ListOptions) (*metav1.PartialObjectMetadataList, error) { + result := c.client.client.Get().AbsPath(c.makeURLSegments("")...). + SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). 
+ Do(ctx) + if err := result.Error(); err != nil { + return nil, err + } + obj, err := result.Get() + if runtime.IsNotRegisteredError(err) { + klog.V(5).Infof("Unable to retrieve PartialObjectMetadataList: %#v", err) + rawBytes, err := result.Raw() + if err != nil { + return nil, err + } + var partial metav1.PartialObjectMetadataList + if err := json.Unmarshal(rawBytes, &partial); err != nil { + return nil, fmt.Errorf("unable to decode returned object as PartialObjectMetadataList: %v", err) + } + partial.TypeMeta = metav1.TypeMeta{} + return &partial, nil + } + if err != nil { + return nil, err + } + partial, ok := obj.(*metav1.PartialObjectMetadataList) + if !ok { + return nil, fmt.Errorf("unexpected object, expected PartialObjectMetadata but got %T", obj) + } + return partial, nil +} + +// Watch finds all changes to the resources in the specified scope (namespace or cluster). +func (c *client) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.client.Get(). + AbsPath(c.makeURLSegments("")...). + SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Timeout(timeout). + Watch(ctx) +} + +// Patch modifies the named resource in the specified scope (namespace or cluster). +func (c *client) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) { + if len(name) == 0 { + return nil, fmt.Errorf("name is required") + } + result := c.client.client. + Patch(pt). + AbsPath(append(c.makeURLSegments(name), subresources...)...). + Body(data). 
+ SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Do(ctx) + if err := result.Error(); err != nil { + return nil, err + } + obj, err := result.Get() + if runtime.IsNotRegisteredError(err) { + rawBytes, err := result.Raw() + if err != nil { + return nil, err + } + var partial metav1.PartialObjectMetadata + if err := json.Unmarshal(rawBytes, &partial); err != nil { + return nil, fmt.Errorf("unable to decode returned object as PartialObjectMetadata: %v", err) + } + if !isLikelyObjectMetadata(&partial) { + return nil, fmt.Errorf("object does not appear to match the ObjectMeta schema") + } + partial.TypeMeta = metav1.TypeMeta{} + return &partial, nil + } + if err != nil { + return nil, err + } + partial, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return nil, fmt.Errorf("unexpected object, expected PartialObjectMetadata but got %T", obj) + } + return partial, nil +} + +func (c *client) makeURLSegments(name string) []string { + url := []string{} + if len(c.resource.Group) == 0 { + url = append(url, "api") + } else { + url = append(url, "apis", c.resource.Group) + } + url = append(url, c.resource.Version) + + if len(c.namespace) > 0 { + url = append(url, "namespaces", c.namespace) + } + url = append(url, c.resource.Resource) + + if len(name) > 0 { + url = append(url, name) + } + + return url +} + +func isLikelyObjectMetadata(meta *metav1.PartialObjectMetadata) bool { + return len(meta.UID) > 0 || !meta.CreationTimestamp.IsZero() || len(meta.Name) > 0 || len(meta.GenerateName) > 0 +} diff --git a/vendor/k8s.io/component-base/config/OWNERS b/vendor/k8s.io/component-base/config/OWNERS new file mode 100644 index 00000000000..11d499d75d9 --- /dev/null +++ b/vendor/k8s.io/component-base/config/OWNERS @@ -0,0 +1,14 @@ +# See the OWNERS docs at 
https://go.k8s.io/owners + +# Disable inheritance as this is an api owners file +options: + no_parent_owners: true +approvers: +- api-approvers +reviewers: +- api-reviewers +- luxas +- mtaufen +- sttts +labels: +- kind/api-change diff --git a/vendor/k8s.io/component-base/config/doc.go b/vendor/k8s.io/component-base/config/doc.go new file mode 100644 index 00000000000..dd0a5a53a7b --- /dev/null +++ b/vendor/k8s.io/component-base/config/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +package config // import "k8s.io/component-base/config" diff --git a/vendor/k8s.io/component-base/config/types.go b/vendor/k8s.io/component-base/config/types.go new file mode 100644 index 00000000000..f5fef2508b2 --- /dev/null +++ b/vendor/k8s.io/component-base/config/types.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package config + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ClientConnectionConfiguration contains details for constructing a client. +type ClientConnectionConfiguration struct { + // kubeconfig is the path to a KubeConfig file. + Kubeconfig string + // acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the + // default value of 'application/json'. This field will control all connections to the server used by a particular + // client. + AcceptContentTypes string + // contentType is the content type used when sending data to the server from this client. + ContentType string + // qps controls the number of queries per second allowed for this connection. + QPS float32 + // burst allows extra queries to accumulate when a client is exceeding its rate. + Burst int32 +} + +// LeaderElectionConfiguration defines the configuration of leader election +// clients for components that can run with leader election enabled. +type LeaderElectionConfiguration struct { + // leaderElect enables a leader election client to gain leadership + // before executing the main loop. Enable this when running replicated + // components for high availability. + LeaderElect bool + // leaseDuration is the duration that non-leader candidates will wait + // after observing a leadership renewal until attempting to acquire + // leadership of a led but unrenewed leader slot. This is effectively the + // maximum duration that a leader can be stopped before it is replaced + // by another candidate. This is only applicable if leader election is + // enabled. + LeaseDuration metav1.Duration + // renewDeadline is the interval between attempts by the acting master to + // renew a leadership slot before it stops leading. This must be less + // than or equal to the lease duration. This is only applicable if leader + // election is enabled. 
+ RenewDeadline metav1.Duration + // retryPeriod is the duration the clients should wait between attempting + // acquisition and renewal of a leadership. This is only applicable if + // leader election is enabled. + RetryPeriod metav1.Duration + // resourceLock indicates the resource object type that will be used to lock + // during leader election cycles. + ResourceLock string + // resourceName indicates the name of resource object that will be used to lock + // during leader election cycles. + ResourceName string + // resourceName indicates the namespace of resource object that will be used to lock + // during leader election cycles. + ResourceNamespace string +} + +// DebuggingConfiguration holds configuration for Debugging related features. +type DebuggingConfiguration struct { + // enableProfiling enables profiling via web interface host:port/debug/pprof/ + EnableProfiling bool + // enableContentionProfiling enables lock contention profiling, if + // enableProfiling is true. + EnableContentionProfiling bool +} + +// LoggingConfiguration contains logging options +// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information. +type LoggingConfiguration struct { + // Format Flag specifies the structure of log messages. + // default value of format is `text` + Format string + // [Experimental] When enabled prevents logging of fields tagged as sensitive (passwords, keys, tokens). + // Runtime log sanitization may introduce significant computation overhead and therefore should not be enabled in production.`) + Sanitization bool +} diff --git a/vendor/k8s.io/component-base/config/v1alpha1/conversion.go b/vendor/k8s.io/component-base/config/v1alpha1/conversion.go new file mode 100644 index 00000000000..abf35c6d910 --- /dev/null +++ b/vendor/k8s.io/component-base/config/v1alpha1/conversion.go @@ -0,0 +1,61 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/component-base/config" +) + +// Important! The public back-and-forth conversion functions for the types in this generic +// package with ComponentConfig types need to be manually exposed like this in order for +// other packages that reference this package to be able to call these conversion functions +// in an autogenerated manner. +// TODO: Fix the bug in conversion-gen so it automatically discovers these Convert_* functions +// in autogenerated code as well. 
+ +func Convert_v1alpha1_ClientConnectionConfiguration_To_config_ClientConnectionConfiguration(in *ClientConnectionConfiguration, out *config.ClientConnectionConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_ClientConnectionConfiguration_To_config_ClientConnectionConfiguration(in, out, s) +} + +func Convert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(in *config.ClientConnectionConfiguration, out *ClientConnectionConfiguration, s conversion.Scope) error { + return autoConvert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(in, out, s) +} + +func Convert_v1alpha1_DebuggingConfiguration_To_config_DebuggingConfiguration(in *DebuggingConfiguration, out *config.DebuggingConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_DebuggingConfiguration_To_config_DebuggingConfiguration(in, out, s) +} + +func Convert_config_DebuggingConfiguration_To_v1alpha1_DebuggingConfiguration(in *config.DebuggingConfiguration, out *DebuggingConfiguration, s conversion.Scope) error { + return autoConvert_config_DebuggingConfiguration_To_v1alpha1_DebuggingConfiguration(in, out, s) +} + +func Convert_v1alpha1_LeaderElectionConfiguration_To_config_LeaderElectionConfiguration(in *LeaderElectionConfiguration, out *config.LeaderElectionConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_LeaderElectionConfiguration_To_config_LeaderElectionConfiguration(in, out, s) +} + +func Convert_config_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in *config.LeaderElectionConfiguration, out *LeaderElectionConfiguration, s conversion.Scope) error { + return autoConvert_config_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in, out, s) +} + +func Convert_v1alpha1_LoggingConfiguration_To_config_LoggingConfiguration(in *LoggingConfiguration, out *config.LoggingConfiguration, s conversion.Scope) error { + return 
autoConvert_v1alpha1_LoggingConfiguration_To_config_LoggingConfiguration(in, out, s) +} + +func Convert_config_LoggingConfiguration_To_v1alpha1_LoggingConfiguration(in *config.LoggingConfiguration, out *LoggingConfiguration, s conversion.Scope) error { + return autoConvert_config_LoggingConfiguration_To_v1alpha1_LoggingConfiguration(in, out, s) +} diff --git a/vendor/k8s.io/component-base/config/v1alpha1/defaults.go b/vendor/k8s.io/component-base/config/v1alpha1/defaults.go new file mode 100644 index 00000000000..098c5739d38 --- /dev/null +++ b/vendor/k8s.io/component-base/config/v1alpha1/defaults.go @@ -0,0 +1,113 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilpointer "k8s.io/utils/pointer" +) + +// RecommendedDefaultLeaderElectionConfiguration defaults a pointer to a +// LeaderElectionConfiguration struct. This will set the recommended default +// values, but they may be subject to change between API versions. This function +// is intentionally not registered in the scheme as a "normal" `SetDefaults_Foo` +// function to allow consumers of this type to set whatever defaults for their +// embedded configs. Forcing consumers to use these defaults would be problematic +// as defaulting in the scheme is done as part of the conversion, and there would +// be no easy way to opt-out. 
Instead, if you want to use this defaulting method +// run it in your wrapper struct of this type in its `SetDefaults_` method. +func RecommendedDefaultLeaderElectionConfiguration(obj *LeaderElectionConfiguration) { + zero := metav1.Duration{} + if obj.LeaseDuration == zero { + obj.LeaseDuration = metav1.Duration{Duration: 15 * time.Second} + } + if obj.RenewDeadline == zero { + obj.RenewDeadline = metav1.Duration{Duration: 10 * time.Second} + } + if obj.RetryPeriod == zero { + obj.RetryPeriod = metav1.Duration{Duration: 2 * time.Second} + } + if obj.ResourceLock == "" { + // TODO(#80289): Figure out how to migrate to LeaseLock at this point. + // This will most probably require going through EndpointsLease first. + obj.ResourceLock = EndpointsResourceLock + } + if obj.LeaderElect == nil { + obj.LeaderElect = utilpointer.BoolPtr(true) + } +} + +// RecommendedDefaultClientConnectionConfiguration defaults a pointer to a +// ClientConnectionConfiguration struct. This will set the recommended default +// values, but they may be subject to change between API versions. This function +// is intentionally not registered in the scheme as a "normal" `SetDefaults_Foo` +// function to allow consumers of this type to set whatever defaults for their +// embedded configs. Forcing consumers to use these defaults would be problematic +// as defaulting in the scheme is done as part of the conversion, and there would +// be no easy way to opt-out. Instead, if you want to use this defaulting method +// run it in your wrapper struct of this type in its `SetDefaults_` method. +func RecommendedDefaultClientConnectionConfiguration(obj *ClientConnectionConfiguration) { + if len(obj.ContentType) == 0 { + obj.ContentType = "application/vnd.kubernetes.protobuf" + } + if obj.QPS == 0.0 { + obj.QPS = 50.0 + } + if obj.Burst == 0 { + obj.Burst = 100 + } +} + +// RecommendedDebuggingConfiguration defaults profiling and debugging configuration. 
+// This will set the recommended default +// values, but they may be subject to change between API versions. This function +// is intentionally not registered in the scheme as a "normal" `SetDefaults_Foo` +// function to allow consumers of this type to set whatever defaults for their +// embedded configs. Forcing consumers to use these defaults would be problematic +// as defaulting in the scheme is done as part of the conversion, and there would +// be no easy way to opt-out. Instead, if you want to use this defaulting method +// run it in your wrapper struct of this type in its `SetDefaults_` method. +func RecommendedDebuggingConfiguration(obj *DebuggingConfiguration) { + if obj.EnableProfiling == nil { + obj.EnableProfiling = utilpointer.BoolPtr(true) // profile debugging is cheap to have exposed and standard on kube binaries + } +} + +// NewRecommendedDebuggingConfiguration returns the current recommended DebuggingConfiguration. +// This may change between releases as recommendations shift. +func NewRecommendedDebuggingConfiguration() *DebuggingConfiguration { + ret := &DebuggingConfiguration{} + RecommendedDebuggingConfiguration(ret) + return ret +} + +// RecommendedLoggingConfiguration defaults logging configuration. +// This will set the recommended default +// values, but they may be subject to change between API versions. This function +// is intentionally not registered in the scheme as a "normal" `SetDefaults_Foo` +// function to allow consumers of this type to set whatever defaults for their +// embedded configs. Forcing consumers to use these defaults would be problematic +// as defaulting in the scheme is done as part of the conversion, and there would +// be no easy way to opt-out. Instead, if you want to use this defaulting method +// run it in your wrapper struct of this type in its `SetDefaults_` method. 
+func RecommendedLoggingConfiguration(obj *LoggingConfiguration) { + if obj.Format == "" { + obj.Format = "text" + } +} diff --git a/vendor/k8s.io/component-base/config/v1alpha1/doc.go b/vendor/k8s.io/component-base/config/v1alpha1/doc.go new file mode 100644 index 00000000000..3cd4f4292e5 --- /dev/null +++ b/vendor/k8s.io/component-base/config/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/component-base/config + +package v1alpha1 // import "k8s.io/component-base/config/v1alpha1" diff --git a/vendor/k8s.io/component-base/config/v1alpha1/register.go b/vendor/k8s.io/component-base/config/v1alpha1/register.go new file mode 100644 index 00000000000..ddc186c9aa4 --- /dev/null +++ b/vendor/k8s.io/component-base/config/v1alpha1/register.go @@ -0,0 +1,31 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder runtime.SchemeBuilder + // localSchemeBuilder extends the SchemeBuilder instance with the external types. In this package, + // defaulting and conversion init funcs are registered as well. + localSchemeBuilder = &SchemeBuilder + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = localSchemeBuilder.AddToScheme +) diff --git a/vendor/k8s.io/component-base/config/v1alpha1/types.go b/vendor/k8s.io/component-base/config/v1alpha1/types.go new file mode 100644 index 00000000000..cd56c1fcfc6 --- /dev/null +++ b/vendor/k8s.io/component-base/config/v1alpha1/types.go @@ -0,0 +1,93 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const EndpointsResourceLock = "endpoints" + +// LeaderElectionConfiguration defines the configuration of leader election +// clients for components that can run with leader election enabled. +type LeaderElectionConfiguration struct { + // leaderElect enables a leader election client to gain leadership + // before executing the main loop. Enable this when running replicated + // components for high availability. 
+ LeaderElect *bool `json:"leaderElect"` + // leaseDuration is the duration that non-leader candidates will wait + // after observing a leadership renewal until attempting to acquire + // leadership of a led but unrenewed leader slot. This is effectively the + // maximum duration that a leader can be stopped before it is replaced + // by another candidate. This is only applicable if leader election is + // enabled. + LeaseDuration metav1.Duration `json:"leaseDuration"` + // renewDeadline is the interval between attempts by the acting master to + // renew a leadership slot before it stops leading. This must be less + // than or equal to the lease duration. This is only applicable if leader + // election is enabled. + RenewDeadline metav1.Duration `json:"renewDeadline"` + // retryPeriod is the duration the clients should wait between attempting + // acquisition and renewal of a leadership. This is only applicable if + // leader election is enabled. + RetryPeriod metav1.Duration `json:"retryPeriod"` + // resourceLock indicates the resource object type that will be used to lock + // during leader election cycles. + ResourceLock string `json:"resourceLock"` + // resourceName indicates the name of resource object that will be used to lock + // during leader election cycles. + ResourceName string `json:"resourceName"` + // resourceName indicates the namespace of resource object that will be used to lock + // during leader election cycles. + ResourceNamespace string `json:"resourceNamespace"` +} + +// DebuggingConfiguration holds configuration for Debugging related features. +type DebuggingConfiguration struct { + // enableProfiling enables profiling via web interface host:port/debug/pprof/ + EnableProfiling *bool `json:"enableProfiling,omitempty"` + // enableContentionProfiling enables lock contention profiling, if + // enableProfiling is true. 
+ EnableContentionProfiling *bool `json:"enableContentionProfiling,omitempty"` +} + +// ClientConnectionConfiguration contains details for constructing a client. +type ClientConnectionConfiguration struct { + // kubeconfig is the path to a KubeConfig file. + Kubeconfig string `json:"kubeconfig"` + // acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the + // default value of 'application/json'. This field will control all connections to the server used by a particular + // client. + AcceptContentTypes string `json:"acceptContentTypes"` + // contentType is the content type used when sending data to the server from this client. + ContentType string `json:"contentType"` + // qps controls the number of queries per second allowed for this connection. + QPS float32 `json:"qps"` + // burst allows extra queries to accumulate when a client is exceeding its rate. + Burst int32 `json:"burst"` +} + +// LoggingConfiguration contains logging options +// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information. +type LoggingConfiguration struct { + // Format Flag specifies the structure of log messages. + // default value of format is `text` + Format string `json:"format,omitempty"` + // [Experimental] When enabled prevents logging of fields tagged as sensitive (passwords, keys, tokens). + // Runtime log sanitization may introduce significant computation overhead and therefore should not be enabled in production.`) + Sanitization bool `json:"sanitization,omitempty"` +} diff --git a/vendor/k8s.io/component-base/config/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/component-base/config/v1alpha1/zz_generated.conversion.go new file mode 100644 index 00000000000..5490ba3ba66 --- /dev/null +++ b/vendor/k8s.io/component-base/config/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,154 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + config "k8s.io/component-base/config" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddConversionFunc((*config.ClientConnectionConfiguration)(nil), (*ClientConnectionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(a.(*config.ClientConnectionConfiguration), b.(*ClientConnectionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*config.DebuggingConfiguration)(nil), (*DebuggingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_DebuggingConfiguration_To_v1alpha1_DebuggingConfiguration(a.(*config.DebuggingConfiguration), b.(*DebuggingConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*config.LeaderElectionConfiguration)(nil), (*LeaderElectionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_config_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(a.(*config.LeaderElectionConfiguration), b.(*LeaderElectionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*config.LoggingConfiguration)(nil), (*LoggingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_LoggingConfiguration_To_v1alpha1_LoggingConfiguration(a.(*config.LoggingConfiguration), b.(*LoggingConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*ClientConnectionConfiguration)(nil), (*config.ClientConnectionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ClientConnectionConfiguration_To_config_ClientConnectionConfiguration(a.(*ClientConnectionConfiguration), b.(*config.ClientConnectionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*DebuggingConfiguration)(nil), (*config.DebuggingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_DebuggingConfiguration_To_config_DebuggingConfiguration(a.(*DebuggingConfiguration), b.(*config.DebuggingConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*LeaderElectionConfiguration)(nil), (*config.LeaderElectionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_LeaderElectionConfiguration_To_config_LeaderElectionConfiguration(a.(*LeaderElectionConfiguration), b.(*config.LeaderElectionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*LoggingConfiguration)(nil), (*config.LoggingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_LoggingConfiguration_To_config_LoggingConfiguration(a.(*LoggingConfiguration), b.(*config.LoggingConfiguration), scope) + }); err != nil { + return err + } + return 
nil +} + +func autoConvert_v1alpha1_ClientConnectionConfiguration_To_config_ClientConnectionConfiguration(in *ClientConnectionConfiguration, out *config.ClientConnectionConfiguration, s conversion.Scope) error { + out.Kubeconfig = in.Kubeconfig + out.AcceptContentTypes = in.AcceptContentTypes + out.ContentType = in.ContentType + out.QPS = in.QPS + out.Burst = in.Burst + return nil +} + +func autoConvert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(in *config.ClientConnectionConfiguration, out *ClientConnectionConfiguration, s conversion.Scope) error { + out.Kubeconfig = in.Kubeconfig + out.AcceptContentTypes = in.AcceptContentTypes + out.ContentType = in.ContentType + out.QPS = in.QPS + out.Burst = in.Burst + return nil +} + +func autoConvert_v1alpha1_DebuggingConfiguration_To_config_DebuggingConfiguration(in *DebuggingConfiguration, out *config.DebuggingConfiguration, s conversion.Scope) error { + if err := v1.Convert_Pointer_bool_To_bool(&in.EnableProfiling, &out.EnableProfiling, s); err != nil { + return err + } + if err := v1.Convert_Pointer_bool_To_bool(&in.EnableContentionProfiling, &out.EnableContentionProfiling, s); err != nil { + return err + } + return nil +} + +func autoConvert_config_DebuggingConfiguration_To_v1alpha1_DebuggingConfiguration(in *config.DebuggingConfiguration, out *DebuggingConfiguration, s conversion.Scope) error { + if err := v1.Convert_bool_To_Pointer_bool(&in.EnableProfiling, &out.EnableProfiling, s); err != nil { + return err + } + if err := v1.Convert_bool_To_Pointer_bool(&in.EnableContentionProfiling, &out.EnableContentionProfiling, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha1_LeaderElectionConfiguration_To_config_LeaderElectionConfiguration(in *LeaderElectionConfiguration, out *config.LeaderElectionConfiguration, s conversion.Scope) error { + if err := v1.Convert_Pointer_bool_To_bool(&in.LeaderElect, &out.LeaderElect, s); err != nil { + return err + } + 
out.LeaseDuration = in.LeaseDuration + out.RenewDeadline = in.RenewDeadline + out.RetryPeriod = in.RetryPeriod + out.ResourceLock = in.ResourceLock + out.ResourceName = in.ResourceName + out.ResourceNamespace = in.ResourceNamespace + return nil +} + +func autoConvert_config_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in *config.LeaderElectionConfiguration, out *LeaderElectionConfiguration, s conversion.Scope) error { + if err := v1.Convert_bool_To_Pointer_bool(&in.LeaderElect, &out.LeaderElect, s); err != nil { + return err + } + out.LeaseDuration = in.LeaseDuration + out.RenewDeadline = in.RenewDeadline + out.RetryPeriod = in.RetryPeriod + out.ResourceLock = in.ResourceLock + out.ResourceName = in.ResourceName + out.ResourceNamespace = in.ResourceNamespace + return nil +} + +func autoConvert_v1alpha1_LoggingConfiguration_To_config_LoggingConfiguration(in *LoggingConfiguration, out *config.LoggingConfiguration, s conversion.Scope) error { + out.Format = in.Format + out.Sanitization = in.Sanitization + return nil +} + +func autoConvert_config_LoggingConfiguration_To_v1alpha1_LoggingConfiguration(in *config.LoggingConfiguration, out *LoggingConfiguration, s conversion.Scope) error { + out.Format = in.Format + out.Sanitization = in.Sanitization + return nil +} diff --git a/vendor/k8s.io/component-base/config/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/component-base/config/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..f5f2a0e91ee --- /dev/null +++ b/vendor/k8s.io/component-base/config/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,103 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientConnectionConfiguration) DeepCopyInto(out *ClientConnectionConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionConfiguration. +func (in *ClientConnectionConfiguration) DeepCopy() *ClientConnectionConfiguration { + if in == nil { + return nil + } + out := new(ClientConnectionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DebuggingConfiguration) DeepCopyInto(out *DebuggingConfiguration) { + *out = *in + if in.EnableProfiling != nil { + in, out := &in.EnableProfiling, &out.EnableProfiling + *out = new(bool) + **out = **in + } + if in.EnableContentionProfiling != nil { + in, out := &in.EnableContentionProfiling, &out.EnableContentionProfiling + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DebuggingConfiguration. +func (in *DebuggingConfiguration) DeepCopy() *DebuggingConfiguration { + if in == nil { + return nil + } + out := new(DebuggingConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LeaderElectionConfiguration) DeepCopyInto(out *LeaderElectionConfiguration) { + *out = *in + if in.LeaderElect != nil { + in, out := &in.LeaderElect, &out.LeaderElect + *out = new(bool) + **out = **in + } + out.LeaseDuration = in.LeaseDuration + out.RenewDeadline = in.RenewDeadline + out.RetryPeriod = in.RetryPeriod + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElectionConfiguration. +func (in *LeaderElectionConfiguration) DeepCopy() *LeaderElectionConfiguration { + if in == nil { + return nil + } + out := new(LeaderElectionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfiguration) DeepCopyInto(out *LoggingConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfiguration. +func (in *LoggingConfiguration) DeepCopy() *LoggingConfiguration { + if in == nil { + return nil + } + out := new(LoggingConfiguration) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/component-base/config/zz_generated.deepcopy.go b/vendor/k8s.io/component-base/config/zz_generated.deepcopy.go new file mode 100644 index 00000000000..77260a06f00 --- /dev/null +++ b/vendor/k8s.io/component-base/config/zz_generated.deepcopy.go @@ -0,0 +1,88 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package config + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientConnectionConfiguration) DeepCopyInto(out *ClientConnectionConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionConfiguration. +func (in *ClientConnectionConfiguration) DeepCopy() *ClientConnectionConfiguration { + if in == nil { + return nil + } + out := new(ClientConnectionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DebuggingConfiguration) DeepCopyInto(out *DebuggingConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DebuggingConfiguration. +func (in *DebuggingConfiguration) DeepCopy() *DebuggingConfiguration { + if in == nil { + return nil + } + out := new(DebuggingConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaderElectionConfiguration) DeepCopyInto(out *LeaderElectionConfiguration) { + *out = *in + out.LeaseDuration = in.LeaseDuration + out.RenewDeadline = in.RenewDeadline + out.RetryPeriod = in.RetryPeriod + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElectionConfiguration. 
+func (in *LeaderElectionConfiguration) DeepCopy() *LeaderElectionConfiguration { + if in == nil { + return nil + } + out := new(LeaderElectionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfiguration) DeepCopyInto(out *LoggingConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfiguration. +func (in *LoggingConfiguration) DeepCopy() *LoggingConfiguration { + if in == nil { + return nil + } + out := new(LoggingConfiguration) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 94e4221d73d..d06e07d60d3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -805,7 +805,7 @@ go.opencensus.io/trace/tracestate go.uber.org/atomic # go.uber.org/multierr v1.5.0 go.uber.org/multierr -# go.uber.org/zap v1.14.1 +# go.uber.org/zap v1.15.0 go.uber.org/zap go.uber.org/zap/buffer go.uber.org/zap/internal/bufferpool @@ -966,7 +966,7 @@ golang.org/x/tools/internal/typesinternal # golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 golang.org/x/xerrors golang.org/x/xerrors/internal -# gomodules.xyz/jsonpatch/v2 v2.0.1 +# gomodules.xyz/jsonpatch/v2 v2.1.0 gomodules.xyz/jsonpatch/v2 # google.golang.org/api v0.33.0 => google.golang.org/api v0.25.0 ## explicit @@ -1567,6 +1567,7 @@ k8s.io/client-go/listers/scheduling/v1beta1 k8s.io/client-go/listers/storage/v1 k8s.io/client-go/listers/storage/v1alpha1 k8s.io/client-go/listers/storage/v1beta1 +k8s.io/client-go/metadata k8s.io/client-go/pkg/apis/clientauthentication k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 @@ -1654,6 +1655,8 @@ k8s.io/code-generator/pkg/util k8s.io/code-generator/third_party/forked/golang/reflect # k8s.io/component-base v0.20.0 k8s.io/component-base/cli/flag 
+k8s.io/component-base/config +k8s.io/component-base/config/v1alpha1 k8s.io/component-base/featuregate k8s.io/component-base/logs k8s.io/component-base/logs/datapol @@ -1763,7 +1766,7 @@ sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1 ## explicit sigs.k8s.io/cluster-api-provider-openstack/pkg/apis sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1 -# sigs.k8s.io/controller-runtime v0.6.2 => github.com/openshift-hive/controller-runtime v0.6.2-openshift +# sigs.k8s.io/controller-runtime v0.7.0 => github.com/openshift-hive/controller-runtime v0.7.0-openshift ## explicit sigs.k8s.io/controller-runtime sigs.k8s.io/controller-runtime/pkg/builder @@ -1773,6 +1776,8 @@ sigs.k8s.io/controller-runtime/pkg/client sigs.k8s.io/controller-runtime/pkg/client/apiutil sigs.k8s.io/controller-runtime/pkg/client/config sigs.k8s.io/controller-runtime/pkg/client/fake +sigs.k8s.io/controller-runtime/pkg/config +sigs.k8s.io/controller-runtime/pkg/config/v1alpha1 sigs.k8s.io/controller-runtime/pkg/controller sigs.k8s.io/controller-runtime/pkg/controller/controllerutil sigs.k8s.io/controller-runtime/pkg/conversion @@ -1798,7 +1803,6 @@ sigs.k8s.io/controller-runtime/pkg/ratelimiter sigs.k8s.io/controller-runtime/pkg/reconcile sigs.k8s.io/controller-runtime/pkg/recorder sigs.k8s.io/controller-runtime/pkg/runtime/inject -sigs.k8s.io/controller-runtime/pkg/runtime/signals sigs.k8s.io/controller-runtime/pkg/scheme sigs.k8s.io/controller-runtime/pkg/source sigs.k8s.io/controller-runtime/pkg/source/internal @@ -1880,4 +1884,4 @@ sigs.k8s.io/yaml # google.golang.org/grpc => google.golang.org/grpc v1.29.1 # k8s.io/client-go => k8s.io/client-go v0.20.0 # github.com/hashicorp/go-slug => github.com/hashicorp/go-slug v0.5.0 -# sigs.k8s.io/controller-runtime => github.com/openshift-hive/controller-runtime v0.6.2-openshift +# sigs.k8s.io/controller-runtime => github.com/openshift-hive/controller-runtime v0.7.0-openshift diff --git 
a/vendor/sigs.k8s.io/controller-runtime/.golangci.yml b/vendor/sigs.k8s.io/controller-runtime/.golangci.yml index 44a915409d0..97d52e4ea40 100644 --- a/vendor/sigs.k8s.io/controller-runtime/.golangci.yml +++ b/vendor/sigs.k8s.io/controller-runtime/.golangci.yml @@ -28,7 +28,6 @@ linters: - unparam - ineffassign - nakedret - - interfacer - gocyclo - lll - dupl diff --git a/vendor/sigs.k8s.io/controller-runtime/Makefile b/vendor/sigs.k8s.io/controller-runtime/Makefile index 6152a8feba7..139c6b177ba 100644 --- a/vendor/sigs.k8s.io/controller-runtime/Makefile +++ b/vendor/sigs.k8s.io/controller-runtime/Makefile @@ -39,6 +39,7 @@ TOOLS_DIR := hack/tools TOOLS_BIN_DIR := $(TOOLS_DIR)/bin GOLANGCI_LINT := $(abspath $(TOOLS_BIN_DIR)/golangci-lint) GO_APIDIFF := $(TOOLS_BIN_DIR)/go-apidiff +CONTROLLER_GEN := $(TOOLS_BIN_DIR)/controller-gen # The help will print out all targets with their descriptions organized bellow their categories. The categories are represented by `##@` and the target descriptions by `##`. # The awk commands is responsible to read the entire set of makefiles included in this invocation, looking for lines of the file as xyz: ## something, and then pretty-format the target and help. Then, if there's a line with ##@ something, that gets pretty-printed as a category. @@ -66,6 +67,9 @@ $(GOLANGCI_LINT): $(TOOLS_DIR)/go.mod # Build golangci-lint from tools folder. $(GO_APIDIFF): $(TOOLS_DIR)/go.mod # Build go-apidiff from tools folder. cd $(TOOLS_DIR) && go build -tags=tools -o bin/go-apidiff github.com/joelanford/go-apidiff +$(CONTROLLER_GEN): $(TOOLS_DIR)/go.mod # Build controller-gen from tools folder. + cd $(TOOLS_DIR) && go build -tags=tools -o bin/controller-gen sigs.k8s.io/controller-tools/cmd/controller-gen + ## -------------------------------------- ## Linting ## -------------------------------------- @@ -83,6 +87,10 @@ modules: ## Runs go mod to ensure modules are up to date. 
go mod tidy cd $(TOOLS_DIR); go mod tidy +.PHONY: generate +generate: $(CONTROLLER_GEN) ## Runs controller-gen for internal types for config file + $(CONTROLLER_GEN) object paths="./pkg/config/v1alpha1/...;./examples/configfile/custom/v1alpha1/..." + ## -------------------------------------- ## Cleanup / Verification ## -------------------------------------- @@ -98,5 +106,5 @@ clean-bin: ## Remove all generated binaries. .PHONY: verify-modules verify-modules: modules @if !(git diff --quiet HEAD -- go.sum go.mod); then \ - echo "go module files are out of date"; exit 1; \ + echo "go module files are out of date, please run 'make modules'"; exit 1; \ fi diff --git a/vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md b/vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md index 866473fb122..9ee4b2a4316 100644 --- a/vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md +++ b/vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md @@ -90,6 +90,9 @@ It's acceptable to log call `log.Error` with a nil error object. This conveys that an error occurred in some capacity, but that no actual `error` object was involved. +Errors returned by the `Reconcile` implementation of the `Reconciler` interface are commonly logged as a `Reconciler error`. +It's a developer choice to create an additional error log in the `Reconcile` implementation so a more specific file name and line for the error are returned. + ## Logging messages - Don't put variable content in your messages -- use key-value pairs for diff --git a/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md b/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md index 0d906c5a20f..18779000ec2 100644 --- a/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md +++ b/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md @@ -1,271 +1,30 @@ # Versioning and Branching in controller-runtime -*NB*: this also applies to controller-tools. +We follow the [common KubeBuilder versioning guidelines][guidelines], and +use the corresponding tooling. 
-## TL;DR: +For the purposes of the aforementioned guidelines, controller-runtime +counts as a "library project", but otherwise follows the guidelines +exactly. -### Users +[guidelines]: https://sigs.k8s.io/kubebuilder-release-tools/VERSIONING.md -- We follow [Semantic Versioning (semver)](https://semver.org) -- Use releases with your dependency management to ensure that you get - compatible code -- The master branch contains all the latest code, some of which may break - compatibility (so "normal" `go get` is not recommended) +## Compatiblity and Release Support -### Contributors +For release branches, we generally tend to support backporting one (1) +major release (`release-{X-1}` or `release-0.{Y-1}`), but may go back +further if the need arises and is very pressing (e.g. security updates). -- All code PR must be labeled with :bug: (patch fixes), :sparkles: - (backwards-compatible features), or :warning: (breaking changes) +### Dependency Support -- Breaking changes will find their way into the next major release, other - changes will go into an semi-immediate patch or minor release +Note the [guidelines on dependency versions][dep-versions]. Particularly: -- Please *try* to avoid breaking changes when you can. They make users - face difficult decisions ("when do I go through the pain of - upgrading?"), and make life hard for maintainers and contributors - (dealing with differences on stable branches). +- We **DO** guarantee Kubernetes REST API compability -- if a given + version of controller-runtime stops working with what should be + a supported version of Kubernetes, this is almost certainly a bug. -### Mantainers +- We **DO NOT** guarantee any particular compability matrix between + kubernetes library dependencies (client-go, apimachinery, etc); Such + compability is infeasible due to the way those libraries are versioned. 
-Don't be lazy, read the rest of this doc :-) - -## Overview - -controller-runtime (and friends) follow [Semantic -Versioning](https://semver.org). I'd recommend reading the aforementioned -link if you're not familiar, but essentially, for any given release X.Y.Z: - -- an X (*major*) release indicates a set of backwards-compatible code. - Changing X means there's a breaking change. - -- a Y (*minor*) release indicates a minimum feature set. Changing Y means - the addition of a backwards-compatible feature. - -- a Z (*patch*) release indicates minimum set of bugfixes. Changing - Z means a backwards-compatible change that doesn't add functionality. - -*NB*: If the major release is `0`, any minor release may contain breaking -changes. - -These guarantees extend to all code exposed in public APIs of -controller-runtime. This includes code both in controller-runtime itself, -*plus types from dependencies in public APIs*. Types and functions not in -public APIs are not considered part of the guarantee. - -In order to easily maintain the guarantees, we have a couple of processes -that we follow. - -## Branches - -controller-runtime contains two types of branches: the *master* branch and -*release-X* branches. - -The *master* branch is where development happens. All the latest and -greatest code, including breaking changes, happens on master. - -The *release-X* branches contain stable, backwards compatible code. Every -major (X) release, a new such branch is created. It is from these -branches that minor and patch releases are tagged. If some cases, it may -be necessary open PRs for bugfixes directly against stable branches, but -this should generally not be the case. - -The maintainers are responsible for updating the contents of this branch; -generally, this is done just before a release using release tooling that -filters and checks for changes tagged as breaking (see below). 
- -### Tooling - -* [release-notes.sh](hack/release/release-notes.sh): generate release notes - for a range of commits, and check for next version type (***TODO***) - -* [verify-emoji.sh](hack/release/verify-emoji.sh): check that - your PR and/or commit messages have the right versioning icon - (***TODO***). - -## PR Process - -Every PR should be annotated with an icon indicating whether it's -a: - -- Breaking change: :warning: (`:warning:`) -- Non-breaking feature: :sparkles: (`:sparkles:`) -- Patch fix: :bug: (`:bug:`) -- Docs: :book: (`:book:`) -- Infra/Tests/Other: :seedling: (`:seedling:`) -- No release note: :ghost: (`:ghost:`) - -Use :ghost: (no release note) only for the PRs that change or revert unreleased -changes, which don't deserve a release note. Please don't abuse it. - -You can also use the equivalent emoji directly, since GitHub doesn't -render the `:xyz:` aliases in PR titles. - -Individual commits should not be tagged separately, but will generally be -assumed to match the PR. For instance, if you have a bugfix in with -a breaking change, it's generally encouraged to submit the bugfix -separately, but if you must put them in one PR, mark the commit -separately. - -### Commands and Workflow - -controller-runtime follows the standard Kubernetes workflow: any PR needs -`lgtm` and `approved` labels, PRs authors must have signed the CNCF CLA, -and PRs must pass the tests before being merged. See [the contributor -docs](https://github.com/kubernetes/community/blob/master/contributors/guide/pull-requests.md#the-testing-and-merge-workflow) -for more info. - -We use the same priority and kind labels as Kubernetes. See the labels -tab in GitHub for the full list. - -The standard Kubernetes comment commands should work in -controller-runtime. See [Prow](https://prow.k8s.io/command-help) for -a command reference. 
- -## Release Process - -Minor and patch releases are generally done immediately after a feature or -bugfix is landed, or sometimes a series of features tied together. - -Minor releases will only be tagged on the *most recent* major release -branch, except in exceptional circumstances. Patches will be backported -to maintained stable versions, as needed. - -Major releases are done shortly after a breaking change is merged -- once -a breaking change is merged, the next release *must* be a major revision. -We don't intend to have a lot of these, so we may put off merging breaking -PRs until a later date. - -### Exact Steps - -Follow the release-specific steps below, then follow the general steps -after that. - -#### Minor and patch releases - -1. Update the release-X branch with the latest set of changes by calling - `git rebase master` from the release branch. - -#### Major releases - -1. Create a new release branch named `release-X` (where `X` is the new - version) off of master. - -#### General - -2. Generate release notes using the release note tooling. - -3. Add a release for controller-runtime on GitHub, using those release - notes, with a title of `vX.Y.Z`. - -4. Do a similar process for - [controller-tools](https://github.com/kubernetes-sigs/controller-tools) - -5. Announce the release in `#kubebuilder` on Slack with a pinned message. - -6. Potentially update - [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder) as well. - -### Breaking Changes - -Try to avoid breaking changes. They make life difficult for users, who -have to rewrite their code when they eventually upgrade, and for -maintainers/contributors, who have to deal with differences between master -and stable branches. - -That being said, we'll occasionally want to make breaking changes. They'll -be merged onto master, and will then trigger a major release (see [Release -Process](#release-process)). 
Because breaking changes induce a major -revision, the maintainers may delay a particular breaking change until -a later date when they are ready to make a major revision with a few -breaking changes. - -If you're going to make a breaking change, please make sure to explain in -detail why it's helpful. Is it necessary to cleanly resolve an issue? -Does it improve API ergonomics? - -Maintainers should treat breaking changes with caution, and evaluate -potential non-breaking solutions (see below). - -Note that API breakage in public APIs due to dependencies will trigger -a major revision, so you may occasionally need to have a major release -anyway, due to changes in libraries like `k8s.io/client-go` or -`k8s.io/apimachinery`. - -*NB*: Pre-1.0 releases treat breaking changes a bit more lightly. We'll -still consider carefully, but the pre-1.0 timeframe is useful for -converging on a ergonomic API. - -#### Avoiding breaking changes - -##### Solutions to avoid - -- **Confusingly duplicate methods, functions, or variables.** - - For instance, suppose we have an interface method `List(ctx - context.Context, options *ListOptions, obj runtime.Object) error`, and - we decide to switch it so that options come at the end, parametrically. - Adding a new interface method `ListParametric(ctx context.Context, obj - runtime.Object, options... ListOption)` is probably not the right - solution: - - - Users will intuitively see `List`, and use that in new projects, even - if it's marked as deprecated. - - - Users who don't notice the deprecation may be confused as to the - difference between `List` and `ListParametric`. - - - It's not immediately obvious in isolation (e.g. in surrounding code) - why the method is called `ListParametric`, and may cause confusion - when reading code that makes use of that method. - - In this case, it may be better to make the breaking change, and then - eventually do a major release. - -## Why don't we... 
- -### Use "next"-style branches - -Development branches: - -- don't win us much in terms of maintenance in the case of breaking - changes (we still have to merge/manage multiple branches for development - and stable) - -- can be confusing to contributors, who often expect master to have the - latest changes. - -### Never break compatibility - -Never doing a new major release could be an admirable goal, but gradually -leads to API cruft. - -Since one of the goals of controller-runtime is to be a friendly and -intuitive API, we want to avoid too much API cruft over time, and -occasional breaking changes in major releases help accomplish that goal. - -Furthermore, our dependency on Kubernetes libraries makes this difficult -(see below) - -### Always assume we've broken compatibility - -*a.k.a. k8s.io/client-go style* - -While this makes life easier (a bit) for maintainers, it's problematic for -users. While breaking changes arrive sooner, upgrading becomes very -painful. - -Furthermore, we still have to maintain stable branches for bugfixes, so -the maintenance burden isn't lessened by a ton. - -### Extend compatibility guarantees to all dependencies - -This is very difficult with the number of Kubernetes dependencies we have. -Kubernetes dependencies tend to either break compatibility every major -release (e.g. k8s.io/client-go, which loosely follows semver), or at -a whim (many other Kubernetes libraries). - -If we limit to the few objects we expose, we can better inform users about -how *controller-runtime itself* has changed in a given release. Then, -users can make informed decisions about how to proceed with any direct -uses of Kubernetes dependencies their controller-runtime-based application -may have. 
+[dep-versions]: https://sigs.k8s.io/kubebuilder-release-tools/VERSIONING.md#kubernetes-version-compatibility diff --git a/vendor/sigs.k8s.io/controller-runtime/alias.go b/vendor/sigs.k8s.io/controller-runtime/alias.go index 4792a67ff03..9bda96616e2 100644 --- a/vendor/sigs.k8s.io/controller-runtime/alias.go +++ b/vendor/sigs.k8s.io/controller-runtime/alias.go @@ -21,6 +21,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client/config" + cfg "sigs.k8s.io/controller-runtime/pkg/config" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -91,6 +92,11 @@ var ( // * $HOME/.kube/config if exists GetConfig = config.GetConfig + // ConfigFile returns the cfg.File function for deferred config file loading, + // this is passed into Options{}.From() to populate the Options fields for + // the manager. + ConfigFile = cfg.File + // NewControllerManagedBy returns a new controller builder that will be started by the provided Manager NewControllerManagedBy = builder.ControllerManagedBy @@ -125,10 +131,19 @@ var ( // get any actual logging. Log = log.Log - // LoggerFromContext returns a logger with predefined values from a context.Context. + // LoggerFrom returns a logger with predefined values from a context.Context. + // The logger, when used with controllers, can be expected to contain basic information about the object + // that's being reconciled like: + // - `reconciler group` and `reconciler kind` coming from the For(...) object passed in when building a controller. + // - `name` and `namespace` injected from the reconciliation request. // // This is meant to be used with the context supplied in a struct that satisfies the Reconciler interface. - LoggerFromContext = log.FromContext + LoggerFrom = log.FromContext + + // LoggerInto takes a context and sets the logger as one of its keys. 
+ // + // This is meant to be used in reconcilers to enrich the logger within a context with additional values. + LoggerInto = log.IntoContext // SetLogger sets a concrete logging implementation for all deferred Loggers. SetLogger = log.SetLogger diff --git a/vendor/sigs.k8s.io/controller-runtime/go.mod b/vendor/sigs.k8s.io/controller-runtime/go.mod index ed1c686f1b9..7eaab39b5d2 100644 --- a/vendor/sigs.k8s.io/controller-runtime/go.mod +++ b/vendor/sigs.k8s.io/controller-runtime/go.mod @@ -1,34 +1,30 @@ module sigs.k8s.io/controller-runtime -go 1.13 +go 1.15 require ( - github.com/beorn7/perks v1.0.1 // indirect - github.com/evanphx/json-patch v4.5.0+incompatible + github.com/evanphx/json-patch v4.9.0+incompatible github.com/fsnotify/fsnotify v1.4.9 - github.com/go-logr/logr v0.1.0 - github.com/go-logr/zapr v0.1.0 - github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect - github.com/googleapis/gnostic v0.3.1 // indirect + github.com/go-logr/logr v0.3.0 + github.com/go-logr/zapr v0.2.0 + github.com/google/go-cmp v0.5.2 // indirect + github.com/googleapis/gnostic v0.5.1 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect - github.com/imdario/mergo v0.3.9 // indirect - github.com/json-iterator/go v1.1.10 // indirect - github.com/onsi/ginkgo v1.12.1 - github.com/onsi/gomega v1.10.1 - github.com/prometheus/client_golang v1.0.0 + github.com/imdario/mergo v0.3.10 // indirect + github.com/onsi/ginkgo v1.14.1 + github.com/onsi/gomega v1.10.2 + github.com/prometheus/client_golang v1.7.1 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/procfs v0.0.11 // indirect - github.com/spf13/pflag v1.0.5 - go.uber.org/atomic v1.4.0 // indirect - go.uber.org/zap v1.10.0 - golang.org/x/text v0.3.3 // indirect - golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 - gomodules.xyz/jsonpatch/v2 v2.0.1 - gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect - k8s.io/api v0.18.6 - k8s.io/apiextensions-apiserver v0.18.6 - 
k8s.io/apimachinery v0.18.6 - k8s.io/client-go v0.18.6 - k8s.io/utils v0.0.0-20200603063816-c1c6865ac451 + go.uber.org/goleak v1.1.10 + go.uber.org/zap v1.15.0 + golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e + gomodules.xyz/jsonpatch/v2 v2.1.0 + google.golang.org/appengine v1.6.6 // indirect + k8s.io/api v0.19.2 + k8s.io/apiextensions-apiserver v0.19.2 + k8s.io/apimachinery v0.19.2 + k8s.io/client-go v0.19.2 + k8s.io/component-base v0.19.2 + k8s.io/utils v0.0.0-20200912215256-4140de9c8800 sigs.k8s.io/yaml v1.2.0 ) diff --git a/vendor/sigs.k8s.io/controller-runtime/go.sum b/vendor/sigs.k8s.io/controller-runtime/go.sum index 60d3796b3de..2a7ba18ae72 100644 --- a/vendor/sigs.k8s.io/controller-runtime/go.sum +++ b/vendor/sigs.k8s.io/controller-runtime/go.sum @@ -1,98 +1,115 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.51.0 h1:PvKAVQWCtlGUSlZkGW3QLelKaWq7KYv/MW1EboG8bfM= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= 
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= +github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod 
h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/coreos/bbolt v1.3.2/go.mod 
h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0 h1:w3NnFcKR5241cfmQU5ZZAsf0xcpId6mWOupTvJlUX2U= -github.com/docker/docker 
v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/color v1.7.0 
h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= -github.com/go-logr/zapr v0.1.0/go.mod 
h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs= +github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/zapr v0.2.0 h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4= +github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -105,13 +122,11 @@ github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+ github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= 
github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= @@ -134,7 +149,6 @@ github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dp github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= @@ -146,86 +160,88 @@ github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 
h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= 
-github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= -github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= -github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/golang-lru v0.5.0/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= -github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10 h1:6q5mVkdH/vYmqngx7kZQTjJ5HRsx+ImorDIEQ+beJgc= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10 
h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= @@ -236,17 +252,17 @@ github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod 
h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -259,113 +275,157 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= +github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= +github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= 
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= 
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI= -github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= 
-github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman 
v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= 
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod 
h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -377,59 +437,72 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net 
v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381 
h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 h1:HmbHVPwrPEKPGLAcHSrMe6+hqSUlvZU0rab6x5EXfGU= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e h1:LwyF2AFISC9nVbS6MgzsaQNSUsRXI49GS+YQ5KX/QH0= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4 h1:5/PjkGUjvEU5Gl6BxmvKRPpqo2uNMv4rcHBMwzk/st8= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -438,46 +511,81 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= 
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72 h1:bw9doJza/SFBEweII/rHQh338oozWyiFsBRHtrflcws= -golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 h1:HHeAlu5H9b71C+Fx0K+1dGgVFN1DM1/wz4aoGOA5qS8= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0= -gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +gomodules.xyz/jsonpatch/v2 v2.1.0 h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k= +gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc 
v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 
v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -489,46 +597,48 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools 
v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.18.6 h1:osqrAXbOQjkKIWDTjrqxWQ3w0GkKb1KA1XkUGHHYpeE= -k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI= -k8s.io/apiextensions-apiserver v0.18.6 h1:vDlk7cyFsDyfwn2rNAO2DbmUbvXy5yT5GE3rrqOzaMo= -k8s.io/apiextensions-apiserver v0.18.6/go.mod h1:lv89S7fUysXjLZO7ke783xOwVTm6lKizADfvUM/SS/M= -k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag= -k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= -k8s.io/apiserver v0.18.6/go.mod h1:Zt2XvTHuaZjBz6EFYzpp+X4hTmgWGy8AthNVnTdm3Wg= -k8s.io/client-go v0.18.6 h1:I+oWqJbibLSGsZj8Xs8F0aWVXJVIoUHWaaJV3kUN/Zw= -k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q= -k8s.io/code-generator v0.18.6/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= -k8s.io/component-base v0.18.6/go.mod h1:knSVsibPR5K6EW2XOjEHik6sdU5nCvKMrzMt2D4In14= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog/v2 v2.0.0 h1:Foj74zO6RbjjP4hBEKjnYtjjAhGg4jNynUdYF6fJrok= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +k8s.io/api v0.19.2 
h1:q+/krnHWKsL7OBZg/rxnycsl9569Pud76UJ77MvKXms= +k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= +k8s.io/apiextensions-apiserver v0.19.2 h1:oG84UwiDsVDu7dlsGQs5GySmQHCzMhknfhFExJMz9tA= +k8s.io/apiextensions-apiserver v0.19.2/go.mod h1:EYNjpqIAvNZe+svXVx9j4uBaVhTB4C94HkY3w058qcg= +k8s.io/apimachinery v0.19.2 h1:5Gy9vQpAGTKHPVOh5c4plE274X8D/6cuEiTO2zve7tc= +k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA= +k8s.io/client-go v0.19.2 h1:gMJuU3xJZs86L1oQ99R4EViAADUPMHHtS9jFshasHSc= +k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA= +k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= +k8s.io/component-base v0.19.2 h1:jW5Y9RcZTb79liEhW3XDVTW7MuvEGP0tQZnfSX6/+gs= +k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20200603063816-c1c6865ac451 h1:v8ud2Up6QK1lNOKFgiIVrZdMg7MpmSnvtrOieolJKoE= -k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= 
-sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20200912215256-4140de9c8800 h1:9ZNvfPvVIEsp/T1ez4GQuzCcCTEQWhovSofhqR73A6g= +k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go index 0d91a43a48c..11bbea3c1da 100644 --- 
a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go @@ -21,9 +21,11 @@ import ( "strings" "github.com/go-logr/logr" - "k8s.io/apimachinery/pkg/runtime" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/rest" + + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -37,6 +39,17 @@ import ( var newController = controller.New var getGvk = apiutil.GVKForObject +// project represents other forms that the we can use to +// send/receive a given resource (metadata-only, unstructured, etc) +type objectProjection int + +const ( + // projectAsNormal doesn't change the object from the form given + projectAsNormal objectProjection = iota + // projectAsMetadata turns this into an metadata-only watch + projectAsMetadata +) + // Builder builds a Controller. type Builder struct { forInput ForInput @@ -47,7 +60,6 @@ type Builder struct { config *rest.Config ctrl controller.Controller ctrlOptions controller.Options - log logr.Logger name string } @@ -56,27 +68,23 @@ func ControllerManagedBy(m manager.Manager) *Builder { return &Builder{mgr: m} } -// ForType defines the type of Object being *reconciled*, and configures the ControllerManagedBy to respond to create / delete / -// update events by *reconciling the object*. -// This is the equivalent of calling -// Watches(&source.Kind{Type: apiType}, &handler.EnqueueRequestForObject{}) -// -// Deprecated: Use For -func (blder *Builder) ForType(apiType runtime.Object) *Builder { - return blder.For(apiType) -} - // ForInput represents the information set by For method. 
type ForInput struct { - object runtime.Object - predicates []predicate.Predicate + object client.Object + predicates []predicate.Predicate + objectProjection objectProjection + err error } // For defines the type of Object being *reconciled*, and configures the ControllerManagedBy to respond to create / delete / // update events by *reconciling the object*. // This is the equivalent of calling // Watches(&source.Kind{Type: apiType}, &handler.EnqueueRequestForObject{}) -func (blder *Builder) For(object runtime.Object, opts ...ForOption) *Builder { +func (blder *Builder) For(object client.Object, opts ...ForOption) *Builder { + if blder.forInput.object != nil { + blder.forInput.err = fmt.Errorf("For(...) should only be called once, could not assign multiple objects for reconciliation") + return blder + } input := ForInput{object: object} for _, opt := range opts { opt.ApplyToFor(&input) @@ -88,14 +96,15 @@ func (blder *Builder) For(object runtime.Object, opts ...ForOption) *Builder { // OwnsInput represents the information set by Owns method. type OwnsInput struct { - object runtime.Object - predicates []predicate.Predicate + object client.Object + predicates []predicate.Predicate + objectProjection objectProjection } // Owns defines types of Objects being *generated* by the ControllerManagedBy, and configures the ControllerManagedBy to respond to // create / delete / update events by *reconciling the owner object*. This is the equivalent of calling // Watches(&source.Kind{Type: }, &handler.EnqueueRequestForOwner{OwnerType: apiType, IsController: true}) -func (blder *Builder) Owns(object runtime.Object, opts ...OwnsOption) *Builder { +func (blder *Builder) Owns(object client.Object, opts ...OwnsOption) *Builder { input := OwnsInput{object: object} for _, opt := range opts { opt.ApplyToOwns(&input) @@ -107,9 +116,10 @@ func (blder *Builder) Owns(object runtime.Object, opts ...OwnsOption) *Builder { // WatchesInput represents the information set by Watches method. 
type WatchesInput struct { - src source.Source - eventhandler handler.EventHandler - predicates []predicate.Predicate + src source.Source + eventhandler handler.EventHandler + predicates []predicate.Predicate + objectProjection objectProjection } // Watches exposes the lower-level ControllerManagedBy Watches functions through the builder. Consider using @@ -125,14 +135,6 @@ func (blder *Builder) Watches(src source.Source, eventhandler handler.EventHandl return blder } -// WithConfig sets the Config to use for configuring clients. Defaults to the in-cluster config or to ~/.kube/config. -// -// Deprecated: Use ControllerManagedBy(Manager) and this isn't needed. -func (blder *Builder) WithConfig(config *rest.Config) *Builder { - blder.config = config - return blder -} - // WithEventFilter sets the event filters, to filter which create/update/delete/generic events eventually // trigger reconciliations. For example, filtering on whether the resource version has changed. // Given predicate is added for all watched objects. @@ -148,6 +150,12 @@ func (blder *Builder) WithOptions(options controller.Options) *Builder { return blder } +// WithLogger overrides the controller options's logger used. +func (blder *Builder) WithLogger(log logr.Logger) *Builder { + blder.ctrlOptions.Log = log + return blder +} + // Named sets the name of the controller to the given name. The name shows up // in metrics, among other things, and thus should be a prometheus compatible name // (underscores and alphanumeric characters only). @@ -158,12 +166,6 @@ func (blder *Builder) Named(name string) *Builder { return blder } -// WithLogger overrides the controller options's logger used. -func (blder *Builder) WithLogger(log logr.Logger) *Builder { - blder.log = log - return blder -} - // Complete builds the Application ControllerManagedBy. 
func (blder *Builder) Complete(r reconcile.Reconciler) error { _, err := blder.Build(r) @@ -178,6 +180,13 @@ func (blder *Builder) Build(r reconcile.Reconciler) (controller.Controller, erro if blder.mgr == nil { return nil, fmt.Errorf("must provide a non-nil Manager") } + if blder.forInput.err != nil { + return nil, blder.forInput.err + } + // Checking the reconcile type exist or not + if blder.forInput.object == nil { + return nil, fmt.Errorf("must provide an object for reconciliation") + } // Set the Config blder.loadRestConfig() @@ -195,19 +204,43 @@ func (blder *Builder) Build(r reconcile.Reconciler) (controller.Controller, erro return blder.ctrl, nil } +func (blder *Builder) project(obj client.Object, proj objectProjection) (client.Object, error) { + switch proj { + case projectAsNormal: + return obj, nil + case projectAsMetadata: + metaObj := &metav1.PartialObjectMetadata{} + gvk, err := getGvk(obj, blder.mgr.GetScheme()) + if err != nil { + return nil, fmt.Errorf("unable to determine GVK of %T for a metadata-only watch: %w", obj, err) + } + metaObj.SetGroupVersionKind(gvk) + return metaObj, nil + default: + panic(fmt.Sprintf("unexpected projection type %v on type %T, should not be possible since this is an internal field", proj, obj)) + } +} + func (blder *Builder) doWatch() error { // Reconcile type - src := &source.Kind{Type: blder.forInput.object} + typeForSrc, err := blder.project(blder.forInput.object, blder.forInput.objectProjection) + if err != nil { + return err + } + src := &source.Kind{Type: typeForSrc} hdler := &handler.EnqueueRequestForObject{} allPredicates := append(blder.globalPredicates, blder.forInput.predicates...) - err := blder.ctrl.Watch(src, hdler, allPredicates...) 
- if err != nil { + if err := blder.ctrl.Watch(src, hdler, allPredicates...); err != nil { return err } // Watches the managed types for _, own := range blder.ownsInput { - src := &source.Kind{Type: own.object} + typeForSrc, err := blder.project(own.object, own.objectProjection) + if err != nil { + return err + } + src := &source.Kind{Type: typeForSrc} hdler := &handler.EnqueueRequestForOwner{ OwnerType: blder.forInput.object, IsController: true, @@ -223,10 +256,19 @@ func (blder *Builder) doWatch() error { for _, w := range blder.watchesInput { allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...) allPredicates = append(allPredicates, w.predicates...) + + // If the source of this watch is of type *source.Kind, project it. + if srckind, ok := w.src.(*source.Kind); ok { + typeForSrc, err := blder.project(srckind.Type, w.objectProjection) + if err != nil { + return err + } + srckind.Type = typeForSrc + } + if err := blder.ctrl.Watch(w.src, w.eventhandler, allPredicates...); err != nil { return err } - } return nil } @@ -261,7 +303,7 @@ func (blder *Builder) doController(r reconcile.Reconciler) error { if ctrlOptions.Log == nil { ctrlOptions.Log = blder.mgr.GetLogger() } - ctrlOptions.Log = ctrlOptions.Log.WithValues("reconcilerGroup", gvk.Group, "reconcilerKind", gvk.Kind) + ctrlOptions.Log = ctrlOptions.Log.WithValues("reconciler group", gvk.Group, "reconciler kind", gvk.Kind) // Build the controller and return. 
blder.ctrl, err = newController(blder.getControllerName(gvk), blder.mgr, ctrlOptions) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go index edd5d0156b2..7bb42730948 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go @@ -76,3 +76,42 @@ var _ OwnsOption = &Predicates{} var _ WatchesOption = &Predicates{} // }}} + +// {{{ For & Owns Dual-Type options + +// asProjection configures the projection (currently only metadata) on the input. +// Currently only metadata is supported. We might want to expand +// this to arbitrary non-special local projections in the future. +type projectAs objectProjection + +// ApplyToFor applies this configuration to the given ForInput options. +func (p projectAs) ApplyToFor(opts *ForInput) { + opts.objectProjection = objectProjection(p) +} + +// ApplyToOwns applies this configuration to the given OwnsInput options. +func (p projectAs) ApplyToOwns(opts *OwnsInput) { + opts.objectProjection = objectProjection(p) +} + +// ApplyToWatches applies this configuration to the given WatchesInput options. +func (p projectAs) ApplyToWatches(opts *WatchesInput) { + opts.objectProjection = objectProjection(p) +} + +var ( + // OnlyMetadata tells the controller to *only* cache metadata, and to watch + // the the API server in metadata-only form. This is useful when watching + // lots of objects, really big objects, or objects for which you only know + // the the GVK, but not the structure. You'll need to pass + // metav1.PartialObjectMetadata to the client when fetching objects in your + // reconciler, otherwise you'll end up with a duplicate structured or + // unstructured cache. 
+ OnlyMetadata = projectAs(projectAsMetadata) + + _ ForOption = OnlyMetadata + _ OwnsOption = OnlyMetadata + _ WatchesOption = OnlyMetadata +) + +// }}} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go index 0d7bdd849ee..71dfbd04540 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go @@ -52,18 +52,18 @@ type Cache interface { type Informers interface { // GetInformer fetches or constructs an informer for the given object that corresponds to a single // API kind and resource. - GetInformer(ctx context.Context, obj runtime.Object) (Informer, error) + GetInformer(ctx context.Context, obj client.Object) (Informer, error) // GetInformerForKind is similar to GetInformer, except that it takes a group-version-kind, instead // of the underlying object. GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) - // Start runs all the informers known to this cache until the given channel is closed. + // Start runs all the informers known to this cache until the context is closed. // It blocks. - Start(stopCh <-chan struct{}) error + Start(ctx context.Context) error // WaitForCacheSync waits for all the caches to sync. Returns false if it could not sync a cache. - WaitForCacheSync(stop <-chan struct{}) bool + WaitForCacheSync(ctx context.Context) bool // Informers knows how to add indices to the caches (informers) that it manages. 
client.FieldIndexer diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go index 043c295f0a7..8ec3b921d98 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go @@ -51,7 +51,7 @@ type informerCache struct { } // Get implements Reader -func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out runtime.Object) error { +func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out client.Object) error { gvk, err := apiutil.GVKForObject(out, ip.Scheme) if err != nil { return err @@ -69,7 +69,7 @@ func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out runt } // List implements Reader -func (ip *informerCache) List(ctx context.Context, out runtime.Object, opts ...client.ListOption) error { +func (ip *informerCache) List(ctx context.Context, out client.ObjectList, opts ...client.ListOption) error { gvk, cacheTypeObj, err := ip.objectTypeForListObject(out) if err != nil { @@ -91,7 +91,7 @@ func (ip *informerCache) List(ctx context.Context, out runtime.Object, opts ...c // objectTypeForListObject tries to find the runtime.Object and associated GVK // for a single object corresponding to the passed-in list type. We need them // because they are used as cache map key. 
-func (ip *informerCache) objectTypeForListObject(list runtime.Object) (*schema.GroupVersionKind, runtime.Object, error) { +func (ip *informerCache) objectTypeForListObject(list client.ObjectList) (*schema.GroupVersionKind, runtime.Object, error) { gvk, err := apiutil.GVKForObject(list, ip.Scheme) if err != nil { return nil, nil, err @@ -146,7 +146,7 @@ func (ip *informerCache) GetInformerForKind(ctx context.Context, gvk schema.Grou } // GetInformer returns the informer for the obj -func (ip *informerCache) GetInformer(ctx context.Context, obj runtime.Object) (Informer, error) { +func (ip *informerCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { gvk, err := apiutil.GVKForObject(obj, ip.Scheme) if err != nil { return nil, err @@ -170,7 +170,7 @@ func (ip *informerCache) NeedLeaderElection() bool { // to List. For one-to-one compatibility with "normal" field selectors, only return one value. // The values may be anything. They will automatically be prefixed with the namespace of the // given object, if present. The objects passed are guaranteed to be objects of the correct type. -func (ip *informerCache) IndexField(ctx context.Context, obj runtime.Object, field string, extractValue client.IndexerFunc) error { +func (ip *informerCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { informer, err := ip.GetInformer(ctx, obj) if err != nil { return err @@ -181,7 +181,7 @@ func (ip *informerCache) IndexField(ctx context.Context, obj runtime.Object, fie func indexByField(indexer Informer, field string, extractor client.IndexerFunc) error { indexFunc := func(objRaw interface{}) ([]string, error) { // TODO(directxman12): check if this is the correct type? 
- obj, isObj := objRaw.(runtime.Object) + obj, isObj := objRaw.(client.Object) if !isObj { return nil, fmt.Errorf("object of type %T is not an Object", objRaw) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go index 5c3fac9228c..630a4b131e5 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go @@ -45,7 +45,7 @@ type CacheReader struct { } // Get checks the indexer for the object and writes a copy of it if found -func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out runtime.Object) error { +func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Object) error { storeKey := objectKeyToStoreKey(key) // Lookup the object from the indexer cache @@ -87,7 +87,7 @@ func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out runtime.O } // List lists items out of the indexer and writes them to out -func (c *CacheReader) List(_ context.Context, out runtime.Object, opts ...client.ListOption) error { +func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...client.ListOption) error { var objs []interface{} var err error diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go index cdaf1fc21c3..02bb1919f7c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go @@ -21,6 +21,7 @@ import ( "time" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -31,10 +32,12 @@ import ( // InformersMap create and caches Informers for 
(runtime.Object, schema.GroupVersionKind) pairs. // It uses a standard parameter codec constructed based on the given generated Scheme. type InformersMap struct { - // we abstract over the details of structured vs unstructured with the specificInformerMaps + // we abstract over the details of structured/unstructured/metadata with the specificInformerMaps + // TODO(directxman12): genericize this over different projections now that we have 3 different maps structured *specificInformersMap unstructured *specificInformersMap + metadata *specificInformersMap // Scheme maps runtime.Objects to GroupVersionKinds Scheme *runtime.Scheme @@ -51,45 +54,54 @@ func NewInformersMap(config *rest.Config, return &InformersMap{ structured: newStructuredInformersMap(config, scheme, mapper, resync, namespace), unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace), + metadata: newMetadataInformersMap(config, scheme, mapper, resync, namespace), Scheme: scheme, } } -// Start calls Run on each of the informers and sets started to true. Blocks on the stop channel. -func (m *InformersMap) Start(stop <-chan struct{}) error { - go m.structured.Start(stop) - go m.unstructured.Start(stop) - <-stop +// Start calls Run on each of the informers and sets started to true. Blocks on the context. +func (m *InformersMap) Start(ctx context.Context) error { + go m.structured.Start(ctx) + go m.unstructured.Start(ctx) + go m.metadata.Start(ctx) + <-ctx.Done() return nil } // WaitForCacheSync waits until all the caches have been started and synced. -func (m *InformersMap) WaitForCacheSync(stop <-chan struct{}) bool { +func (m *InformersMap) WaitForCacheSync(ctx context.Context) bool { syncedFuncs := append([]cache.InformerSynced(nil), m.structured.HasSyncedFuncs()...) syncedFuncs = append(syncedFuncs, m.unstructured.HasSyncedFuncs()...) + syncedFuncs = append(syncedFuncs, m.metadata.HasSyncedFuncs()...) 
- if !m.structured.waitForStarted(stop) { + if !m.structured.waitForStarted(ctx) { return false } - if !m.unstructured.waitForStarted(stop) { + if !m.unstructured.waitForStarted(ctx) { return false } - return cache.WaitForCacheSync(stop, syncedFuncs...) + if !m.metadata.waitForStarted(ctx) { + return false + } + return cache.WaitForCacheSync(ctx.Done(), syncedFuncs...) } // Get will create a new Informer and add it to the map of InformersMap if none exists. Returns // the Informer from the map. func (m *InformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) { - _, isUnstructured := obj.(*unstructured.Unstructured) - _, isUnstructuredList := obj.(*unstructured.UnstructuredList) - isUnstructured = isUnstructured || isUnstructuredList - - if isUnstructured { + switch obj.(type) { + case *unstructured.Unstructured: + return m.unstructured.Get(ctx, gvk, obj) + case *unstructured.UnstructuredList: return m.unstructured.Get(ctx, gvk, obj) + case *metav1.PartialObjectMetadata: + return m.metadata.Get(ctx, gvk, obj) + case *metav1.PartialObjectMetadataList: + return m.metadata.Get(ctx, gvk, obj) + default: + return m.structured.Get(ctx, gvk, obj) } - - return m.structured.Get(ctx, gvk, obj) } // newStructuredInformersMap creates a new InformersMap for structured objects. @@ -101,3 +113,8 @@ func newStructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapp func newUnstructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, namespace string) *specificInformersMap { return newSpecificInformersMap(config, scheme, mapper, resync, namespace, createUnstructuredListWatch) } + +// newMetadataInformersMap creates a new InformersMap for metadata-only objects. 
+func newMetadataInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, namespace string) *specificInformersMap { + return newSpecificInformersMap(config, scheme, mapper, resync, namespace, createMetadataListWatch) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go index 1068a173834..27403f90ea5 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/dynamic" + "k8s.io/client-go/metadata" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" @@ -121,33 +122,33 @@ type specificInformersMap struct { namespace string } -// Start calls Run on each of the informers and sets started to true. Blocks on the stop channel. +// Start calls Run on each of the informers and sets started to true. Blocks on the context. // It doesn't return start because it can't return an error, and it's not a runnable directly. -func (ip *specificInformersMap) Start(stop <-chan struct{}) { +func (ip *specificInformersMap) Start(ctx context.Context) { func() { ip.mu.Lock() defer ip.mu.Unlock() // Set the stop channel so it can be passed to informers that are added later - ip.stop = stop + ip.stop = ctx.Done() // Start each informer for _, informer := range ip.informersByGVK { - go informer.Informer.Run(stop) + go informer.Informer.Run(ctx.Done()) } // Set started to true so we immediately start any informers added later. 
ip.started = true close(ip.startWait) }() - <-stop + <-ctx.Done() } -func (ip *specificInformersMap) waitForStarted(stop <-chan struct{}) bool { +func (ip *specificInformersMap) waitForStarted(ctx context.Context) bool { select { case <-ip.startWait: return true - case <-stop: + case <-ctx.Done(): return false } } @@ -235,7 +236,7 @@ func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformer return nil, err } - client, err := apiutil.RESTClientForGVK(gvk, ip.config, ip.codecs) + client, err := apiutil.RESTClientForGVK(gvk, false, ip.config, ip.codecs) if err != nil { return nil, err } @@ -301,6 +302,44 @@ func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInform }, nil } +func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { + // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the + // groupVersionKind to the Resource API we will use. + mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + + // grab the metadata client + client, err := metadata.NewForConfig(ip.config) + if err != nil { + return nil, err + } + + // TODO: the functions that make use of this ListWatch should be adapted to + // pass in their own contexts instead of relying on this fixed one here. 
+ ctx := context.TODO() + + // create the relevant listwaatch + return &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { + return client.Resource(mapping.Resource).Namespace(ip.namespace).List(ctx, opts) + } + return client.Resource(mapping.Resource).List(ctx, opts) + }, + // Setup the watch function + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + // Watch needs to be set to true separately + opts.Watch = true + if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { + return client.Resource(mapping.Resource).Namespace(ip.namespace).Watch(ctx, opts) + } + return client.Resource(mapping.Resource).Watch(ctx, opts) + }, + }, nil +} + // resyncPeriod returns a function which generates a duration each time it is // invoked; this is so that multiple controllers don't get into lock-step and all // hammer the apiserver with list requests simultaneously. 
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go index 175437d9bea..f0e18c09b0c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go @@ -70,7 +70,7 @@ type multiNamespaceCache struct { var _ Cache = &multiNamespaceCache{} // Methods for multiNamespaceCache to conform to the Informers interface -func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj runtime.Object) (Informer, error) { +func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { informers := map[string]Informer{} for ns, cache := range c.namespaceToCache { informer, err := cache.GetInformer(ctx, obj) @@ -94,30 +94,30 @@ func (c *multiNamespaceCache) GetInformerForKind(ctx context.Context, gvk schema return &multiNamespaceInformer{namespaceToInformer: informers}, nil } -func (c *multiNamespaceCache) Start(stopCh <-chan struct{}) error { +func (c *multiNamespaceCache) Start(ctx context.Context) error { for ns, cache := range c.namespaceToCache { go func(ns string, cache Cache) { - err := cache.Start(stopCh) + err := cache.Start(ctx) if err != nil { log.Error(err, "multinamespace cache failed to start namespaced informer", "namespace", ns) } }(ns, cache) } - <-stopCh + <-ctx.Done() return nil } -func (c *multiNamespaceCache) WaitForCacheSync(stop <-chan struct{}) bool { +func (c *multiNamespaceCache) WaitForCacheSync(ctx context.Context) bool { synced := true for _, cache := range c.namespaceToCache { - if s := cache.WaitForCacheSync(stop); !s { + if s := cache.WaitForCacheSync(ctx); !s { synced = s } } return synced } -func (c *multiNamespaceCache) IndexField(ctx context.Context, obj runtime.Object, field string, extractValue client.IndexerFunc) error { +func (c *multiNamespaceCache) IndexField(ctx context.Context, obj 
client.Object, field string, extractValue client.IndexerFunc) error { for _, cache := range c.namespaceToCache { if err := cache.IndexField(ctx, obj, field, extractValue); err != nil { return err @@ -126,7 +126,7 @@ func (c *multiNamespaceCache) IndexField(ctx context.Context, obj runtime.Object return nil } -func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj runtime.Object) error { +func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error { cache, ok := c.namespaceToCache[key.Namespace] if !ok { return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", key) @@ -135,7 +135,7 @@ func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj } // List multi namespace cache will get all the objects in the namespaces that the cache is watching if asked for all namespaces. -func (c *multiNamespaceCache) List(ctx context.Context, list runtime.Object, opts ...client.ListOption) error { +func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { listOpts := client.ListOptions{} listOpts.ApplyOptions(opts) if listOpts.Namespace != corev1.NamespaceAll { @@ -157,7 +157,7 @@ func (c *multiNamespaceCache) List(ctx context.Context, list runtime.Object, opt } var resourceVersion string for _, cache := range c.namespaceToCache { - listObj := list.DeepCopyObject() + listObj := list.DeepCopyObject().(client.ObjectList) err = cache.List(ctx, listObj, opts...) 
if err != nil { return err diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go index 9fe32b21f35..b3464c655d0 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -21,16 +21,41 @@ package apiutil import ( "fmt" + "sync" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/discovery" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/restmapper" ) +var ( + protobufScheme = runtime.NewScheme() + protobufSchemeLock sync.RWMutex +) + +func init() { + // Currently only enabled for built-in resources which are guaranteed to implement Protocol Buffers. + // For custom resources, CRDs can not support Protocol Buffers but Aggregated API can. + // See doc: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility + if err := clientgoscheme.AddToScheme(protobufScheme); err != nil { + panic(err) + } +} + +// AddToProtobufScheme add the given SchemeBuilder into protobufScheme, which should +// be additional types that do support protobuf. +func AddToProtobufScheme(addToScheme func(*runtime.Scheme) error) error { + protobufSchemeLock.Lock() + defer protobufSchemeLock.Unlock() + return addToScheme(protobufScheme) +} + // NewDiscoveryRESTMapper constructs a new RESTMapper based on discovery // information fetched by a new client with the given config. 
func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { @@ -48,6 +73,27 @@ func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { // GVKForObject finds the GroupVersionKind associated with the given object, if there is only a single such GVK. func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionKind, error) { + // TODO(directxman12): do we want to generalize this to arbitrary container types? + // I think we'd need a generalized form of scheme or something. It's a + // shame there's not a reliable "GetGVK" interface that works by default + // for unpopulated static types and populated "dynamic" types + // (unstructured, partial, etc) + + // check for PartialObjectMetadata, which is analogous to unstructured, but isn't handled by ObjectKinds + _, isPartial := obj.(*metav1.PartialObjectMetadata) + _, isPartialList := obj.(*metav1.PartialObjectMetadataList) + if isPartial || isPartialList { + // we require that the GVK be populated in order to recognize the object + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return schema.GroupVersionKind{}, runtime.NewMissingKindErr("unstructured object has no kind") + } + if len(gvk.Version) == 0 { + return schema.GroupVersionKind{}, runtime.NewMissingVersionErr("unstructured object has no version") + } + return gvk, nil + } + gvks, isUnversioned, err := scheme.ObjectKinds(obj) if err != nil { return schema.GroupVersionKind{}, err @@ -71,8 +117,8 @@ func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersi // RESTClientForGVK constructs a new rest.Interface capable of accessing the resource associated // with the given GroupVersionKind. The REST client will be configured to use the negotiated serializer from // baseConfig, if set, otherwise a default serializer will be set. 
-func RESTClientForGVK(gvk schema.GroupVersionKind, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) { - cfg := createRestConfig(gvk, baseConfig) +func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) { + cfg := createRestConfig(gvk, isUnstructured, baseConfig) if cfg.NegotiatedSerializer == nil { cfg.NegotiatedSerializer = serializer.WithoutConversionCodecFactory{CodecFactory: codecs} } @@ -80,7 +126,7 @@ func RESTClientForGVK(gvk schema.GroupVersionKind, baseConfig *rest.Config, code } //createRestConfig copies the base config and updates needed fields for a new rest config -func createRestConfig(gvk schema.GroupVersionKind, baseConfig *rest.Config) *rest.Config { +func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config) *rest.Config { gv := gvk.GroupVersion() cfg := rest.CopyConfig(baseConfig) @@ -93,5 +139,13 @@ func createRestConfig(gvk schema.GroupVersionKind, baseConfig *rest.Config) *res if cfg.UserAgent == "" { cfg.UserAgent = rest.DefaultKubernetesUserAgent() } + // TODO(FillZpp): In the long run, we want to check discovery or something to make sure that this is actually true. 
+ if cfg.ContentType == "" && !isUnstructured { + protobufSchemeLock.RLock() + if protobufScheme.Recognizes(gvk) { + cfg.ContentType = runtime.ContentTypeProtobuf + } + protobufSchemeLock.RUnlock() + } return cfg } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go index 5c34070e4bd..5e9a7b5f53d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go @@ -19,7 +19,6 @@ package apiutil import ( "errors" "sync" - "time" "golang.org/x/time/rate" "k8s.io/apimachinery/pkg/api/meta" @@ -29,34 +28,12 @@ import ( "k8s.io/client-go/restmapper" ) -// ErrRateLimited is returned by a RESTMapper method if the number of API -// calls has exceeded a limit within a certain time period. -type ErrRateLimited struct { - // Duration to wait until the next API call can be made. - Delay time.Duration -} - -func (e ErrRateLimited) Error() string { - return "too many API calls to the RESTMapper within a timeframe" -} - -// DelayIfRateLimited returns the delay time until the next API call is -// allowed and true if err is of type ErrRateLimited. The zero -// time.Duration value and false are returned if err is not a ErrRateLimited. -func DelayIfRateLimited(err error) (time.Duration, bool) { - var rlerr ErrRateLimited - if errors.As(err, &rlerr) { - return rlerr.Delay, true - } - return 0, false -} - // dynamicRESTMapper is a RESTMapper that dynamically discovers resource // types at runtime. type dynamicRESTMapper struct { mu sync.RWMutex // protects the following fields staticMapper meta.RESTMapper - limiter *dynamicLimiter + limiter *rate.Limiter newMapper func() (meta.RESTMapper, error) lazy bool @@ -70,7 +47,7 @@ type DynamicRESTMapperOption func(*dynamicRESTMapper) error // WithLimiter sets the RESTMapper's underlying limiter to lim. 
func WithLimiter(lim *rate.Limiter) DynamicRESTMapperOption { return func(drm *dynamicRESTMapper) error { - drm.limiter = &dynamicLimiter{lim} + drm.limiter = lim return nil } } @@ -103,9 +80,7 @@ func NewDynamicRESTMapper(cfg *rest.Config, opts ...DynamicRESTMapperOption) (me return nil, err } drm := &dynamicRESTMapper{ - limiter: &dynamicLimiter{ - rate.NewLimiter(rate.Limit(defaultRefillRate), defaultLimitSize), - }, + limiter: rate.NewLimiter(rate.Limit(defaultRefillRate), defaultLimitSize), newMapper: func() (meta.RESTMapper, error) { groupResources, err := restmapper.GetAPIGroupResources(client) if err != nil { @@ -161,12 +136,13 @@ func (drm *dynamicRESTMapper) init() (err error) { // checkAndReload attempts to call the given callback, which is assumed to be dependent // on the data in the restmapper. // -// If the callback returns a NoKindMatchError, it will attempt to reload +// If the callback returns an error that matches the given error, it will attempt to reload // the RESTMapper's data and re-call the callback once that's occurred. // If the callback returns any other error, the function will return immediately regardless. // -// It will take care -// ensuring that reloads are rate-limitted and that extraneous calls aren't made. +// It will take care of ensuring that reloads are rate-limited and that extraneous calls +// aren't made. If a reload would exceed the limiters rate, it returns the error return by +// the callback. // It's thread-safe, and worries about thread-safety for the callback (so the callback does // not need to attempt to lock the restmapper). func (drm *dynamicRESTMapper) checkAndReload(needsReloadErr error, checkNeedsReload func() error) error { @@ -199,7 +175,9 @@ func (drm *dynamicRESTMapper) checkAndReload(needsReloadErr error, checkNeedsRel } // we're still stale, so grab a rate-limit token if we can... 
- if err := drm.limiter.checkRate(); err != nil { + if !drm.limiter.Allow() { + // return error from static mapper here, we have refreshed often enough (exceeding rate of provided limiter) + // so that client's can handle this the same way as a "normal" NoResourceMatchError / NoKindMatchError return err } @@ -305,19 +283,3 @@ func (drm *dynamicRESTMapper) ResourceSingularizer(resource string) (string, err }) return singular, err } - -// dynamicLimiter holds a rate limiter used to throttle chatty RESTMapper users. -type dynamicLimiter struct { - *rate.Limiter -} - -// checkRate returns an ErrRateLimited if too many API calls have been made -// within the set limit. -func (b *dynamicLimiter) checkRate() error { - res := b.Reserve() - if res.Delay() == 0 { - return nil - } - res.Cancel() - return ErrRateLimited{res.Delay()} -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go index c1c4d5d6913..0af814fdf90 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -21,12 +21,15 @@ import ( "fmt" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/metadata" "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) @@ -69,11 +72,18 @@ func New(config *rest.Config, options Options) (Client, error) { } clientcache := &clientCache{ - config: config, - scheme: options.Scheme, - mapper: options.Mapper, - codecs: serializer.NewCodecFactory(options.Scheme), - resourceByType: make(map[schema.GroupVersionKind]*resourceMeta), + config: config, + scheme: options.Scheme, + mapper: options.Mapper, + codecs: 
serializer.NewCodecFactory(options.Scheme), + + structuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), + unstructuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), + } + + rawMetaClient, err := metadata.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("unable to construct metadata-only client for use as part of client: %w", err) } c := &client{ @@ -85,6 +95,12 @@ func New(config *rest.Config, options Options) (Client, error) { cache: clientcache, paramCodec: noConversionParamCodec{}, }, + metadataClient: metadataClient{ + client: rawMetaClient, + restMapper: options.Mapper, + }, + scheme: options.Scheme, + mapper: options.Mapper, } return c, nil @@ -97,6 +113,9 @@ var _ Client = &client{} type client struct { typedClient typedClient unstructuredClient unstructuredClient + metadataClient metadataClient + scheme *runtime.Scheme + mapper meta.RESTMapper } // resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object. @@ -109,69 +128,100 @@ func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersi } } +// Scheme returns the scheme this client is using. +func (c *client) Scheme() *runtime.Scheme { + return c.scheme +} + +// RESTMapper returns the scheme this client is using. +func (c *client) RESTMapper() meta.RESTMapper { + return c.mapper +} + // Create implements client.Client -func (c *client) Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error { - _, ok := obj.(*unstructured.Unstructured) - if ok { +func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + switch obj.(type) { + case *unstructured.Unstructured: return c.unstructuredClient.Create(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return fmt.Errorf("cannot create using only metadata") + default: + return c.typedClient.Create(ctx, obj, opts...) } - return c.typedClient.Create(ctx, obj, opts...) 
} // Update implements client.Client -func (c *client) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { +func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) - _, ok := obj.(*unstructured.Unstructured) - if ok { + switch obj.(type) { + case *unstructured.Unstructured: return c.unstructuredClient.Update(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return fmt.Errorf("cannot update using only metadata -- did you mean to patch?") + default: + return c.typedClient.Update(ctx, obj, opts...) } - return c.typedClient.Update(ctx, obj, opts...) } // Delete implements client.Client -func (c *client) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error { - _, ok := obj.(*unstructured.Unstructured) - if ok { +func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + switch obj.(type) { + case *unstructured.Unstructured: return c.unstructuredClient.Delete(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return c.metadataClient.Delete(ctx, obj, opts...) + default: + return c.typedClient.Delete(ctx, obj, opts...) } - return c.typedClient.Delete(ctx, obj, opts...) } // DeleteAllOf implements client.Client -func (c *client) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error { - _, ok := obj.(*unstructured.Unstructured) - if ok { +func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + switch obj.(type) { + case *unstructured.Unstructured: return c.unstructuredClient.DeleteAllOf(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return c.metadataClient.DeleteAllOf(ctx, obj, opts...) + default: + return c.typedClient.DeleteAllOf(ctx, obj, opts...) } - return c.typedClient.DeleteAllOf(ctx, obj, opts...) 
} // Patch implements client.Client -func (c *client) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { +func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) - _, ok := obj.(*unstructured.Unstructured) - if ok { + switch obj.(type) { + case *unstructured.Unstructured: return c.unstructuredClient.Patch(ctx, obj, patch, opts...) + case *metav1.PartialObjectMetadata: + return c.metadataClient.Patch(ctx, obj, patch, opts...) + default: + return c.typedClient.Patch(ctx, obj, patch, opts...) } - return c.typedClient.Patch(ctx, obj, patch, opts...) } // Get implements client.Client -func (c *client) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error { - _, ok := obj.(*unstructured.Unstructured) - if ok { +func (c *client) Get(ctx context.Context, key ObjectKey, obj Object) error { + switch obj.(type) { + case *unstructured.Unstructured: return c.unstructuredClient.Get(ctx, key, obj) + case *metav1.PartialObjectMetadata: + return c.metadataClient.Get(ctx, key, obj) + default: + return c.typedClient.Get(ctx, key, obj) } - return c.typedClient.Get(ctx, key, obj) } // List implements client.Client -func (c *client) List(ctx context.Context, obj runtime.Object, opts ...ListOption) error { - _, ok := obj.(*unstructured.UnstructuredList) - if ok { +func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + switch obj.(type) { + case *unstructured.UnstructuredList: return c.unstructuredClient.List(ctx, obj, opts...) + case *metav1.PartialObjectMetadataList: + return c.metadataClient.List(ctx, obj, opts...) + default: + return c.typedClient.List(ctx, obj, opts...) } - return c.typedClient.List(ctx, obj, opts...) 
} // Status implements client.StatusClient @@ -188,21 +238,27 @@ type statusWriter struct { var _ StatusWriter = &statusWriter{} // Update implements client.StatusWriter -func (sw *statusWriter) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { +func (sw *statusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) - _, ok := obj.(*unstructured.Unstructured) - if ok { + switch obj.(type) { + case *unstructured.Unstructured: return sw.client.unstructuredClient.UpdateStatus(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?") + default: + return sw.client.typedClient.UpdateStatus(ctx, obj, opts...) } - return sw.client.typedClient.UpdateStatus(ctx, obj, opts...) } // Patch implements client.Client -func (sw *statusWriter) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { +func (sw *statusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) - _, ok := obj.(*unstructured.Unstructured) - if ok { + switch obj.(type) { + case *unstructured.Unstructured: return sw.client.unstructuredClient.PatchStatus(ctx, obj, patch, opts...) + case *metav1.PartialObjectMetadata: + return sw.client.metadataClient.PatchStatus(ctx, obj, patch, opts...) + default: + return sw.client.typedClient.PatchStatus(ctx, obj, patch, opts...) } - return sw.client.typedClient.PatchStatus(ctx, obj, patch, opts...) 
} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go index 7741ac3c7ed..bf6ee882bba 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go @@ -22,6 +22,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -43,20 +44,22 @@ type clientCache struct { // codecs are used to create a REST client for a gvk codecs serializer.CodecFactory - // resourceByType caches type metadata - resourceByType map[schema.GroupVersionKind]*resourceMeta - mu sync.RWMutex + // structuredResourceByType caches structured type metadata + structuredResourceByType map[schema.GroupVersionKind]*resourceMeta + // unstructuredResourceByType caches unstructured type metadata + unstructuredResourceByType map[schema.GroupVersionKind]*resourceMeta + mu sync.RWMutex } // newResource maps obj to a Kubernetes Resource and constructs a client for that Resource. // If the object is a list, the resource represents the item's type instead. 
-func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList bool) (*resourceMeta, error) { +func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList, isUnstructured bool) (*resourceMeta, error) { if strings.HasSuffix(gvk.Kind, "List") && isList { // if this was a list, treat it as a request for the item's resource gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] } - client, err := apiutil.RESTClientForGVK(gvk, c.config, c.codecs) + client, err := apiutil.RESTClientForGVK(gvk, isUnstructured, c.config, c.codecs) if err != nil { return nil, err } @@ -75,10 +78,18 @@ func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { return nil, err } + _, isUnstructured := obj.(*unstructured.Unstructured) + _, isUnstructuredList := obj.(*unstructured.UnstructuredList) + isUnstructured = isUnstructured || isUnstructuredList + // It's better to do creation work twice than to not let multiple // people make requests at once c.mu.RLock() - r, known := c.resourceByType[gvk] + resourceByType := c.structuredResourceByType + if isUnstructured { + resourceByType = c.unstructuredResourceByType + } + r, known := resourceByType[gvk] c.mu.RUnlock() if known { @@ -88,11 +99,11 @@ func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { // Initialize a new Client c.mu.Lock() defer c.mu.Unlock() - r, err = c.newResource(gvk, meta.IsListType(obj)) + r, err = c.newResource(gvk, meta.IsListType(obj), isUnstructured) if err != nil { return nil, err } - c.resourceByType[gvk] = r + resourceByType[gvk] = r return r, err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go index 0a31419eff8..69f7dc6e4ad 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go @@ -30,19 +30,14 @@ import ( ) var ( - kubeconfig, apiServerURL string - log = 
logf.RuntimeLog.WithName("client").WithName("config") + kubeconfig string + log = logf.RuntimeLog.WithName("client").WithName("config") ) func init() { // TODO: Fix this to allow double vendoring this library but still register flags on behalf of users flag.StringVar(&kubeconfig, "kubeconfig", "", "Paths to a kubeconfig. Only required if out-of-cluster.") - - // This flag is deprecated, it'll be removed in a future iteration, please switch to --kubeconfig. - flag.StringVar(&apiServerURL, "master", "", - "(Deprecated: switch to `--kubeconfig`) The address of the Kubernetes API server. Overrides any value in kubeconfig. "+ - "Only required if out-of-cluster.") } // GetConfig creates a *rest.Config for talking to a Kubernetes API server. @@ -105,7 +100,7 @@ func loadConfig(context string) (*rest.Config, error) { // If a flag is specified with the config location, use that if len(kubeconfig) > 0 { - return loadConfigWithContext(apiServerURL, &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}, context) + return loadConfigWithContext("", &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}, context) } // If the recommended kubeconfig env variable is not specified, @@ -134,7 +129,7 @@ func loadConfig(context string) (*rest.Config, error) { loadingRules.Precedence = append(loadingRules.Precedence, path.Join(u.HomeDir, clientcmd.RecommendedHomeDir, clientcmd.RecommendedFileName)) } - return loadConfigWithContext(apiServerURL, loadingRules, context) + return loadConfigWithContext("", loadingRules, context) } func loadConfigWithContext(apiServerURL string, loader clientcmd.ClientConfigLoader, context string) (*rest.Config, error) { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go index ced0548b1ae..67e80e0551d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go @@ -19,6 +19,7 @@ package 
client import ( "context" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" ) @@ -35,38 +36,48 @@ type dryRunClient struct { client Client } +// Scheme returns the scheme this client is using. +func (c *dryRunClient) Scheme() *runtime.Scheme { + return c.client.Scheme() +} + +// RESTMapper returns the rest mapper this client is using. +func (c *dryRunClient) RESTMapper() meta.RESTMapper { + return c.client.RESTMapper() +} + // Create implements client.Client -func (c *dryRunClient) Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error { +func (c *dryRunClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { return c.client.Create(ctx, obj, append(opts, DryRunAll)...) } // Update implements client.Client -func (c *dryRunClient) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { +func (c *dryRunClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { return c.client.Update(ctx, obj, append(opts, DryRunAll)...) } // Delete implements client.Client -func (c *dryRunClient) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error { +func (c *dryRunClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { return c.client.Delete(ctx, obj, append(opts, DryRunAll)...) } // DeleteAllOf implements client.Client -func (c *dryRunClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error { +func (c *dryRunClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { return c.client.DeleteAllOf(ctx, obj, append(opts, DryRunAll)...) } // Patch implements client.Client -func (c *dryRunClient) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { +func (c *dryRunClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { return c.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) 
} // Get implements client.Client -func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error { +func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj Object) error { return c.client.Get(ctx, key, obj) } // List implements client.Client -func (c *dryRunClient) List(ctx context.Context, obj runtime.Object, opts ...ListOption) error { +func (c *dryRunClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { return c.client.List(ctx, obj, opts...) } @@ -85,11 +96,11 @@ type dryRunStatusWriter struct { } // Update implements client.StatusWriter -func (sw *dryRunStatusWriter) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { +func (sw *dryRunStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { return sw.client.Update(ctx, obj, append(opts, DryRunAll)...) } // Patch implements client.StatusWriter -func (sw *dryRunStatusWriter) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { +func (sw *dryRunStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { return sw.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go index e7d3c5c35b7..f5aec0a0539 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go @@ -41,6 +41,7 @@ import ( type versionedTracker struct { testing.ObjectTracker + scheme *runtime.Scheme } type fakeClient struct { @@ -58,33 +59,91 @@ const ( // NewFakeClient creates a new fake client for testing. // You can choose to initialize it with a slice of runtime.Object. -// Deprecated: use NewFakeClientWithScheme. You should always be -// passing an explicit Scheme. +// +// Deprecated: Please use NewClientBuilder instead. 
func NewFakeClient(initObjs ...runtime.Object) client.Client { - return NewFakeClientWithScheme(scheme.Scheme, initObjs...) + return NewClientBuilder().WithRuntimeObjects(initObjs...).Build() } // NewFakeClientWithScheme creates a new fake client with the given scheme // for testing. // You can choose to initialize it with a slice of runtime.Object. +// +// Deprecated: Please use NewClientBuilder instead. func NewFakeClientWithScheme(clientScheme *runtime.Scheme, initObjs ...runtime.Object) client.Client { - tracker := testing.NewObjectTracker(clientScheme, scheme.Codecs.UniversalDecoder()) - for _, obj := range initObjs { - err := tracker.Add(obj) - if err != nil { + return NewClientBuilder().WithScheme(clientScheme).WithRuntimeObjects(initObjs...).Build() +} + +// NewClientBuilder returns a new builder to create a fake client. +func NewClientBuilder() *ClientBuilder { + return &ClientBuilder{} +} + +// ClientBuilder builds a fake client. +type ClientBuilder struct { + scheme *runtime.Scheme + initObject []client.Object + initLists []client.ObjectList + initRuntimeObjects []runtime.Object +} + +// WithScheme sets this builder's internal scheme. +// If not set, defaults to client-go's global scheme.Scheme. +func (f *ClientBuilder) WithScheme(scheme *runtime.Scheme) *ClientBuilder { + f.scheme = scheme + return f +} + +// WithObjects can be optionally used to initialize this fake client with client.Object(s). +func (f *ClientBuilder) WithObjects(initObjs ...client.Object) *ClientBuilder { + f.initObject = append(f.initObject, initObjs...) + return f +} + +// WithLists can be optionally used to initialize this fake client with client.ObjectList(s). +func (f *ClientBuilder) WithLists(initLists ...client.ObjectList) *ClientBuilder { + f.initLists = append(f.initLists, initLists...) + return f +} + +// WithRuntimeObjects can be optionally used to initialize this fake client with runtime.Object(s). 
+func (f *ClientBuilder) WithRuntimeObjects(initRuntimeObjs ...runtime.Object) *ClientBuilder { + f.initRuntimeObjects = append(f.initRuntimeObjects, initRuntimeObjs...) + return f +} + +// Build builds and returns a new fake client. +func (f *ClientBuilder) Build() client.Client { + if f.scheme == nil { + f.scheme = scheme.Scheme + } + + tracker := testing.NewObjectTracker(f.scheme, scheme.Codecs.UniversalDecoder()) + for _, obj := range f.initObject { + if err := tracker.Add(obj); err != nil { panic(fmt.Errorf("failed to add object %v to fake client: %w", obj, err)) } } + for _, obj := range f.initLists { + if err := tracker.Add(obj); err != nil { + panic(fmt.Errorf("failed to add list %v to fake client: %w", obj, err)) + } + } + for _, obj := range f.initRuntimeObjects { + if err := tracker.Add(obj); err != nil { + panic(fmt.Errorf("failed to add runtime object %v to fake client: %w", obj, err)) + } + } return &fakeClient{ - tracker: versionedTracker{tracker}, - scheme: clientScheme, + tracker: versionedTracker{ObjectTracker: tracker, scheme: f.scheme}, + scheme: f.scheme, } } func (t versionedTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { accessor, err := meta.Accessor(obj) if err != nil { - return err + return fmt.Errorf("failed to get accessor for object: %v", err) } if accessor.GetName() == "" { return apierrors.NewInvalid( @@ -108,20 +167,42 @@ func (t versionedTracker) Update(gvr schema.GroupVersionResource, obj runtime.Ob if err != nil { return fmt.Errorf("failed to get accessor for object: %v", err) } + if accessor.GetName() == "" { return apierrors.NewInvalid( obj.GetObjectKind().GroupVersionKind().GroupKind(), accessor.GetName(), field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")}) } + + gvk := obj.GetObjectKind().GroupVersionKind() + if gvk.Empty() { + gvk, err = apiutil.GVKForObject(obj, t.scheme) + if err != nil { + return err + } + } + oldObject, err := 
t.ObjectTracker.Get(gvr, ns, accessor.GetName()) if err != nil { + // If the resource is not found and the resource allows create on update, issue a + // create instead. + if apierrors.IsNotFound(err) && allowsCreateOnUpdate(gvk) { + return t.Create(gvr, obj, ns) + } return err } + oldAccessor, err := meta.Accessor(oldObject) if err != nil { return err } + + // If the new object does not have the resource version set and it allows unconditional update, + // default it to the resource version of the existing resource + if accessor.GetResourceVersion() == "" && allowsUnconditionalUpdate(gvk) { + accessor.SetResourceVersion(oldAccessor.GetResourceVersion()) + } if accessor.GetResourceVersion() != oldAccessor.GetResourceVersion() { return apierrors.NewConflict(gvr.GroupResource(), accessor.GetName(), errors.New("object was modified")) } @@ -137,7 +218,7 @@ func (t versionedTracker) Update(gvr schema.GroupVersionResource, obj runtime.Ob return t.ObjectTracker.Update(gvr, obj, ns) } -func (c *fakeClient) Get(ctx context.Context, key client.ObjectKey, obj runtime.Object) error { +func (c *fakeClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error { gvr, err := getGVRFromObject(obj, c.scheme) if err != nil { return err @@ -167,7 +248,7 @@ func (c *fakeClient) Get(ctx context.Context, key client.ObjectKey, obj runtime. 
return err } -func (c *fakeClient) List(ctx context.Context, obj runtime.Object, opts ...client.ListOption) error { +func (c *fakeClient) List(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { gvk, err := apiutil.GVKForObject(obj, c.scheme) if err != nil { return err @@ -224,7 +305,16 @@ func (c *fakeClient) List(ctx context.Context, obj runtime.Object, opts ...clien return nil } -func (c *fakeClient) Create(ctx context.Context, obj runtime.Object, opts ...client.CreateOption) error { +func (c *fakeClient) Scheme() *runtime.Scheme { + return c.scheme +} + +func (c *fakeClient) RESTMapper() meta.RESTMapper { + // TODO: Implement a fake RESTMapper. + return nil +} + +func (c *fakeClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { createOptions := &client.CreateOptions{} createOptions.ApplyOptions(opts) @@ -254,7 +344,7 @@ func (c *fakeClient) Create(ctx context.Context, obj runtime.Object, opts ...cli return c.tracker.Create(gvr, obj, accessor.GetNamespace()) } -func (c *fakeClient) Delete(ctx context.Context, obj runtime.Object, opts ...client.DeleteOption) error { +func (c *fakeClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { gvr, err := getGVRFromObject(obj, c.scheme) if err != nil { return err @@ -270,7 +360,7 @@ func (c *fakeClient) Delete(ctx context.Context, obj runtime.Object, opts ...cli return c.tracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName()) } -func (c *fakeClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...client.DeleteAllOfOption) error { +func (c *fakeClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error { gvk, err := apiutil.GVKForObject(obj, c.scheme) if err != nil { return err @@ -306,7 +396,7 @@ func (c *fakeClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts . 
return nil } -func (c *fakeClient) Update(ctx context.Context, obj runtime.Object, opts ...client.UpdateOption) error { +func (c *fakeClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { updateOptions := &client.UpdateOptions{} updateOptions.ApplyOptions(opts) @@ -327,7 +417,7 @@ func (c *fakeClient) Update(ctx context.Context, obj runtime.Object, opts ...cli return c.tracker.Update(gvr, obj, accessor.GetNamespace()) } -func (c *fakeClient) Patch(ctx context.Context, obj runtime.Object, patch client.Patch, opts ...client.PatchOption) error { +func (c *fakeClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { patchOptions := &client.PatchOptions{} patchOptions.ApplyOptions(opts) @@ -396,14 +486,111 @@ type fakeStatusWriter struct { client *fakeClient } -func (sw *fakeStatusWriter) Update(ctx context.Context, obj runtime.Object, opts ...client.UpdateOption) error { +func (sw *fakeStatusWriter) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { // TODO(droot): This results in full update of the obj (spec + status). Need // a way to update status field only. return sw.client.Update(ctx, obj, opts...) } -func (sw *fakeStatusWriter) Patch(ctx context.Context, obj runtime.Object, patch client.Patch, opts ...client.PatchOption) error { +func (sw *fakeStatusWriter) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { // TODO(droot): This results in full update of the obj (spec + status). Need // a way to update status field only. return sw.client.Patch(ctx, obj, patch, opts...) 
} + +func allowsUnconditionalUpdate(gvk schema.GroupVersionKind) bool { + switch gvk.Group { + case "apps": + switch gvk.Kind { + case "ControllerRevision", "DaemonSet", "Deployment", "ReplicaSet", "StatefulSet": + return true + } + case "autoscaling": + switch gvk.Kind { + case "HorizontalPodAutoscaler": + return true + } + case "batch": + switch gvk.Kind { + case "CronJob", "Job": + return true + } + case "certificates": + switch gvk.Kind { + case "Certificates": + return true + } + case "flowcontrol": + switch gvk.Kind { + case "FlowSchema", "PriorityLevelConfiguration": + return true + } + case "networking": + switch gvk.Kind { + case "Ingress", "IngressClass", "NetworkPolicy": + return true + } + case "policy": + switch gvk.Kind { + case "PodSecurityPolicy": + return true + } + case "rbac": + switch gvk.Kind { + case "ClusterRole", "ClusterRoleBinding", "Role", "RoleBinding": + return true + } + case "scheduling": + switch gvk.Kind { + case "PriorityClass": + return true + } + case "settings": + switch gvk.Kind { + case "PodPreset": + return true + } + case "storage": + switch gvk.Kind { + case "StorageClass": + return true + } + case "": + switch gvk.Kind { + case "ConfigMap", "Endpoint", "Event", "LimitRange", "Namespace", "Node", + "PersistentVolume", "PersistentVolumeClaim", "Pod", "PodTemplate", + "ReplicationController", "ResourceQuota", "Secret", "Service", + "ServiceAccount", "EndpointSlice": + return true + } + } + + return false +} + +func allowsCreateOnUpdate(gvk schema.GroupVersionKind) bool { + switch gvk.Group { + case "coordination": + switch gvk.Kind { + case "Lease": + return true + } + case "node": + switch gvk.Kind { + case "RuntimeClass": + return true + } + case "rbac": + switch gvk.Kind { + case "ClusterRole", "ClusterRoleBinding", "Role", "RoleBinding": + return true + } + case "": + switch gvk.Kind { + case "Endpoint", "Event", "LimitRange", "Service": + return true + } + } + + return false +} diff --git 
a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go index a45d703320a..7d680690dc1 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go @@ -17,17 +17,23 @@ limitations under the License. /* Package fake provides a fake client for testing. -Deprecated: please use pkg/envtest for testing. This package will be dropped -before the v1.0.0 release. - -An fake client is backed by its simple object store indexed by GroupVersionResource. +A fake client is backed by its simple object store indexed by GroupVersionResource. You can create a fake client with optional objects. - client := NewFakeClient(initObjs...) // initObjs is a slice of runtime.Object + client := NewFakeClientWithScheme(scheme, initObjs...) // initObjs is a slice of runtime.Object You can invoke the methods defined in the Client interface. -When it doubt, it's almost always better not to use this package and instead use +When in doubt, it's almost always better not to use this package and instead use envtest.Environment with a real client and API server. + +WARNING: ⚠️ Current Limitations / Known Issues with the fake Client ⚠️ +- This client does not have a way to inject specific errors to test handled vs. unhandled errors. +- There is some support for sub resources which can cause issues with tests if you're trying to update + e.g. metadata and status in the same reconcile. +- No OpeanAPI validation is performed when creating or updating objects. +- ObjectMeta's `Generation` and `ResourceVersion` don't behave properly, Patch or Update +operations that rely on these fields will fail, or give false positives. 
+ */ package fake diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go index 9c96947f813..09636968f1c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go @@ -30,12 +30,8 @@ import ( type ObjectKey = types.NamespacedName // ObjectKeyFromObject returns the ObjectKey given a runtime.Object -func ObjectKeyFromObject(obj runtime.Object) (ObjectKey, error) { - accessor, err := meta.Accessor(obj) - if err != nil { - return ObjectKey{}, err - } - return ObjectKey{Namespace: accessor.GetNamespace(), Name: accessor.GetName()}, nil +func ObjectKeyFromObject(obj Object) ObjectKey { + return ObjectKey{Namespace: obj.GetNamespace(), Name: obj.GetName()} } // Patch is a patch that can be applied to a Kubernetes object. @@ -53,32 +49,32 @@ type Reader interface { // Get retrieves an obj for the given object key from the Kubernetes Cluster. // obj must be a struct pointer so that obj can be updated with the response // returned by the Server. - Get(ctx context.Context, key ObjectKey, obj runtime.Object) error + Get(ctx context.Context, key ObjectKey, obj Object) error // List retrieves list of objects for a given namespace and list options. On a // successful call, Items field in the list will be populated with the // result returned from the server. - List(ctx context.Context, list runtime.Object, opts ...ListOption) error + List(ctx context.Context, list ObjectList, opts ...ListOption) error } // Writer knows how to create, delete, and update Kubernetes objects. type Writer interface { // Create saves the object obj in the Kubernetes cluster. - Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error + Create(ctx context.Context, obj Object, opts ...CreateOption) error // Delete deletes the given obj from Kubernetes cluster. 
- Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error + Delete(ctx context.Context, obj Object, opts ...DeleteOption) error // Update updates the given obj in the Kubernetes cluster. obj must be a // struct pointer so that obj can be updated with the content returned by the Server. - Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error + Update(ctx context.Context, obj Object, opts ...UpdateOption) error // Patch patches the given obj in the Kubernetes cluster. obj must be a // struct pointer so that obj can be updated with the content returned by the Server. - Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error + Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error // DeleteAllOf deletes all objects of the given type matching the given options. - DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error + DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error } // StatusClient knows how to create a client which can update status subresource @@ -92,12 +88,12 @@ type StatusWriter interface { // Update updates the fields corresponding to the status subresource for the // given obj. obj must be a struct pointer so that obj can be updated // with the content returned by the Server. - Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error + Update(ctx context.Context, obj Object, opts ...UpdateOption) error // Patch patches the given object's subresource. obj must be a struct // pointer so that obj can be updated with the content returned by the // Server. - Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error + Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error } // Client knows how to perform CRUD operations on Kubernetes objects. 
@@ -105,12 +101,17 @@ type Client interface { Reader Writer StatusClient + + // Scheme returns the scheme this client is using. + Scheme() *runtime.Scheme + // RESTMapper returns the rest this client is using. + RESTMapper() meta.RESTMapper } // IndexerFunc knows how to take an object and turn it into a series // of non-namespaced keys. Namespaced objects are automatically given // namespaced and non-spaced variants, so keys do not need to include namespace. -type IndexerFunc func(runtime.Object) []string +type IndexerFunc func(Object) []string // FieldIndexer knows how to index over a particular "field" such that it // can later be used by a field selector. @@ -122,7 +123,7 @@ type FieldIndexer interface { // and "equality" in the field selector means that at least one key matches the value. // The FieldIndexer will automatically take care of indexing over namespace // and supporting efficient all-namespace queries. - IndexField(ctx context.Context, obj runtime.Object, field string, extractValue IndexerFunc) error + IndexField(ctx context.Context, obj Object, field string, extractValue IndexerFunc) error } // IgnoreNotFound returns nil on NotFound errors. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go new file mode 100644 index 00000000000..6587a194072 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go @@ -0,0 +1,193 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/metadata" +) + +// TODO(directxman12): we could rewrite this on top of the low-level REST +// client to avoid the extra shallow copy at the end, but I'm not sure it's +// worth it -- the metadata client deals with falling back to loading the whole +// object on older API servers, etc, and we'd have to reproduce that. + +// metadataClient is a client that reads & writes metadata-only requests to/from the API server. +type metadataClient struct { + client metadata.Interface + restMapper meta.RESTMapper +} + +func (mc *metadataClient) getResourceInterface(gvk schema.GroupVersionKind, ns string) (metadata.ResourceInterface, error) { + mapping, err := mc.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + if mapping.Scope.Name() == meta.RESTScopeNameRoot { + return mc.client.Resource(mapping.Resource), nil + } + return mc.client.Resource(mapping.Resource).Namespace(ns), nil +} + +// Delete implements client.Client +func (mc *metadataClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), metadata.Namespace) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + + return resInt.Delete(ctx, metadata.Name, *deleteOpts.AsDeleteOptions()) +} + +// DeleteAllOf implements client.Client +func (mc *metadataClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) 
+ if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + + resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), deleteAllOfOpts.ListOptions.Namespace) + if err != nil { + return err + } + + return resInt.DeleteCollection(ctx, *deleteAllOfOpts.AsDeleteOptions(), *deleteAllOfOpts.AsListOptions()) +} + +// Patch implements client.Client +func (mc *metadataClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + resInt, err := mc.getResourceInterface(gvk, metadata.Namespace) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions()) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +// Get implements client.Client +func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + + resInt, err := mc.getResourceInterface(gvk, key.Namespace) + if err != nil { + return err + } + + res, err := resInt.Get(ctx, key.Name, metav1.GetOptions{}) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +// List implements client.Client +func (mc *metadataClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + metadata, ok := 
obj.(*metav1.PartialObjectMetadataList) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + if strings.HasSuffix(gvk.Kind, "List") { + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + + resInt, err := mc.getResourceInterface(gvk, listOpts.Namespace) + if err != nil { + return err + } + + res, err := resInt.List(ctx, *listOpts.AsListOptions()) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +func (mc *metadataClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + resInt, err := mc.getResourceInterface(gvk, metadata.Namespace) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions(), "status") + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go new file mode 100644 index 00000000000..5ed8baca9bc --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go @@ -0,0 +1,253 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "errors" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// NewNamespacedClient wraps an existing client enforcing the namespace value. +// All functions using this client will have the same namespace declared here. +func NewNamespacedClient(c Client, ns string) Client { + return &namespacedClient{ + client: c, + namespace: ns, + } +} + +var _ Client = &namespacedClient{} + +// namespacedClient is a Client that wraps another Client in order to enforce the specified namespace value. +type namespacedClient struct { + namespace string + client Client +} + +// Scheme returns the scheme this client is using. +func (n *namespacedClient) Scheme() *runtime.Scheme { + return n.client.Scheme() +} + +// RESTMapper returns the REST mapper this client is using. +func (n *namespacedClient) RESTMapper() meta.RESTMapper { + return n.client.RESTMapper() +} + +// isNamespaced returns true if the object is namespace scoped. +// For unstructured objects the gvk is found from the object itself. 
+func isNamespaced(c Client, obj runtime.Object) (bool, error) { + var gvk schema.GroupVersionKind + var err error + + _, isUnstructured := obj.(*unstructured.Unstructured) + _, isUnstructuredList := obj.(*unstructured.UnstructuredList) + + isUnstructured = isUnstructured || isUnstructuredList + if isUnstructured { + gvk = obj.GetObjectKind().GroupVersionKind() + } else { + gvk, err = apiutil.GVKForObject(obj, c.Scheme()) + if err != nil { + return false, err + } + } + + gk := schema.GroupKind{ + Group: gvk.Group, + Kind: gvk.Kind, + } + restmapping, err := c.RESTMapper().RESTMapping(gk) + if err != nil { + return false, fmt.Errorf("failed to get restmapping: %w", err) + } + scope := restmapping.Scope.Name() + + if scope == "" { + return false, errors.New("Scope cannot be identified. Empty scope returned") + } + + if scope != meta.RESTScopeNameRoot { + return true, nil + } + return false, nil +} + +// Create implements client.Client +func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + isNamespaceScoped, err := isNamespaced(n.client, obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %v", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Create(ctx, obj, opts...) 
+} + +// Update implements client.Client +func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + isNamespaceScoped, err := isNamespaced(n.client, obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %v", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Update(ctx, obj, opts...) +} + +// Delete implements client.Client +func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + isNamespaceScoped, err := isNamespaced(n.client, obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %v", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Delete(ctx, obj, opts...) +} + +// DeleteAllOf implements client.Client +func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + isNamespaceScoped, err := isNamespaced(n.client, obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %v", err) + } + + if isNamespaceScoped { + opts = append(opts, InNamespace(n.namespace)) + } + return n.client.DeleteAllOf(ctx, obj, opts...) 
+} + +// Patch implements client.Client +func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + isNamespaceScoped, err := isNamespaced(n.client, obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %v", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Patch(ctx, obj, patch, opts...) +} + +// Get implements client.Client +func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object) error { + isNamespaceScoped, err := isNamespaced(n.client, obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %v", err) + } + if isNamespaceScoped { + if key.Namespace != "" && key.Namespace != n.namespace { + return fmt.Errorf("Namespace %s provided for the object %s does not match the namesapce %s on the client", key.Namespace, obj.GetName(), n.namespace) + } + key.Namespace = n.namespace + } + return n.client.Get(ctx, key, obj) +} + +// List implements client.Client +func (n *namespacedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + if n.namespace != "" { + opts = append(opts, InNamespace(n.namespace)) + } + return n.client.List(ctx, obj, opts...) 
+} + +// Status implements client.StatusClient +func (n *namespacedClient) Status() StatusWriter { + return &namespacedClientStatusWriter{StatusClient: n.client.Status(), namespace: n.namespace, namespacedclient: n} +} + +// ensure namespacedClientStatusWriter implements client.StatusWriter +var _ StatusWriter = &namespacedClientStatusWriter{} + +type namespacedClientStatusWriter struct { + StatusClient StatusWriter + namespace string + namespacedclient Client +} + +// Update implements client.StatusWriter +func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + isNamespaceScoped, err := isNamespaced(nsw.namespacedclient, obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %v", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != nsw.namespace && objectNamespace != "" { + return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(nsw.namespace) + } + return nsw.StatusClient.Update(ctx, obj, opts...) +} + +// Patch implements client.StatusWriter +func (nsw *namespacedClientStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + isNamespaceScoped, err := isNamespaced(nsw.namespacedclient, obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %v", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != nsw.namespace && objectNamespace != "" { + return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(nsw.namespace) + } + return nsw.StatusClient.Patch(ctx, obj, patch, opts...) 
+} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go new file mode 100644 index 00000000000..31e334d6c2d --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go @@ -0,0 +1,77 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// Object is a Kubernetes object, allows functions to work indistinctly with +// any resource that implements both Object interfaces. +// +// Semantically, these are objects which are both serializable (runtime.Object) +// and identifiable (metav1.Object) -- think any object which you could write +// as YAML or JSON, and then `kubectl create`. +// +// Code-wise, this means that any object which embeds both ObjectMeta (which +// provides metav1.Object) and TypeMeta (which provides half of runtime.Object) +// and has a `DeepCopyObject` implementation (the other half of runtime.Object) +// will implement this by default. +// +// For example, nearly all the built-in types are Objects, as well as all +// KubeBuilder-generated CRDs (unless you do something real funky to them). 
+// +// By and large, most things that implement runtime.Object also implement +// Object -- it's very rare to have *just* a runtime.Object implementation (the +// cases tend to be funky built-in types like Webhook payloads that don't have +// a `metadata` field). +// +// Notice that XYZList types are distinct: they implement ObjectList instead. +type Object interface { + metav1.Object + runtime.Object +} + +// ObjectList is a Kubernetes object list, allows functions to work +// indistinctly with any resource that implements both runtime.Object and +// metav1.ListInterface interfaces. +// +// Semantically, this is any object which may be serialized (ObjectMeta), and +// is a kubernetes list wrapper (has items, pagination fields, etc) -- think +// the wrapper used in a response from a `kubectl list --output yaml` call. +// +// Code-wise, this means that any object which embeds both ListMeta (which +// provides metav1.ListInterface) and TypeMeta (which provides half of +// runtime.Object) and has a `DeepCopyObject` implementation (the other half of +// runtime.Object) will implement this by default. +// +// For example, nearly all the built-in XYZList types are ObjectLists, as well +// as the XYZList types for all KubeBuilder-generated CRDs (unless you do +// something real funky to them). +// +// By and large, most things that are XYZList and implement runtime.Object also +// implement ObjectList -- it's very rare to have *just* a runtime.Object +// implementation (the cases tend to be funky built-in types like Webhook +// payloads that don't have a `metadata` field). +// +// This is similar to Object, which is almost always implemented by the items +// in the list themselves. 
+type ObjectList interface { + metav1.ListInterface + runtime.Object +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go index 131bdc2a04d..f2532764667 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -173,11 +173,6 @@ func (o *CreateOptions) ApplyToCreate(co *CreateOptions) { var _ CreateOption = &CreateOptions{} -// CreateDryRunAll sets the "dry run" option to "all". -// -// Deprecated: Use DryRunAll -var CreateDryRunAll = DryRunAll - // }}} // {{{ Delete Options @@ -460,14 +455,6 @@ func (m MatchingLabelsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { m.ApplyToList(&opts.ListOptions) } -// MatchingField filters the list operation on the given field selector -// (or index in the case of cached lists). -// -// Deprecated: Use MatchingFields -func MatchingField(name, val string) MatchingFields { - return MatchingFields{name: val} -} - // MatchingFields filters the list/delete operation on the given field Set // (or index in the case of cached lists). type MatchingFields fields.Set @@ -595,11 +582,6 @@ func (o *UpdateOptions) ApplyToUpdate(uo *UpdateOptions) { } } -// UpdateDryRunAll sets the "dry run" option to "all". -// -// Deprecated: Use DryRunAll -var UpdateDryRunAll = DryRunAll - // }}} // {{{ Patch Options @@ -682,11 +664,6 @@ func (forceOwnership) ApplyToPatch(opts *PatchOptions) { opts.Force = &definitelyTrue } -// PatchDryRunAll sets the "dry run" option to "all". 
-// -// Deprecated: Use DryRunAll -var PatchDryRunAll = DryRunAll - // }}} // {{{ DeleteAllOf Options diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go index 22a093cab0a..c32a06c06d5 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go @@ -56,13 +56,6 @@ func RawPatch(patchType types.PatchType, data []byte) Patch { return &patch{patchType, data} } -// ConstantPatch constructs a new Patch with the given PatchType and data. -// -// Deprecated: use RawPatch instead -func ConstantPatch(patchType types.PatchType, data []byte) Patch { - return RawPatch(patchType, data) -} - // MergeFromWithOptimisticLock can be used if clients want to make sure a patch // is being applied to the latest resource version of an object. // diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go index 47cba9576d4..90b75de37fc 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go @@ -19,42 +19,107 @@ package client import ( "context" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -// DelegatingClient forms a Client by composing separate reader, writer and +// NewDelegatingClientInput encapsulates the input parameters to create a new delegating client. +type NewDelegatingClientInput struct { + CacheReader Reader + Client Client + UncachedObjects []Object +} + +// NewDelegatingClient creates a new delegating client. +// +// A delegating client forms a Client by composing separate reader, writer and // statusclient interfaces. 
This way, you can have an Client that reads from a // cache and writes to the API server. -type DelegatingClient struct { +func NewDelegatingClient(in NewDelegatingClientInput) (Client, error) { + uncachedGVKs := map[schema.GroupVersionKind]struct{}{} + for _, obj := range in.UncachedObjects { + gvk, err := apiutil.GVKForObject(obj, in.Client.Scheme()) + if err != nil { + return nil, err + } + uncachedGVKs[gvk] = struct{}{} + } + + return &delegatingClient{ + scheme: in.Client.Scheme(), + mapper: in.Client.RESTMapper(), + Reader: &delegatingReader{ + CacheReader: in.CacheReader, + ClientReader: in.Client, + scheme: in.Client.Scheme(), + uncachedGVKs: uncachedGVKs, + }, + Writer: in.Client, + StatusClient: in.Client, + }, nil +} + +type delegatingClient struct { Reader Writer StatusClient + + scheme *runtime.Scheme + mapper meta.RESTMapper +} + +// Scheme returns the scheme this client is using. +func (d *delegatingClient) Scheme() *runtime.Scheme { + return d.scheme } -// DelegatingReader forms a Reader that will cause Get and List requests for +// RESTMapper returns the rest mapper this client is using. +func (d *delegatingClient) RESTMapper() meta.RESTMapper { + return d.mapper +} + +// delegatingReader forms a Reader that will cause Get and List requests for // unstructured types to use the ClientReader while requests for any other type // of object with use the CacheReader. This avoids accidentally caching the // entire cluster in the common case of loading arbitrary unstructured objects // (e.g. from OwnerReferences). -type DelegatingReader struct { +type delegatingReader struct { CacheReader Reader ClientReader Reader + + uncachedGVKs map[schema.GroupVersionKind]struct{} + scheme *runtime.Scheme } -// Get retrieves an obj for a given object key from the Kubernetes Cluster. 
-func (d *DelegatingReader) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error { +func (d *delegatingReader) shouldBypassCache(obj runtime.Object) (bool, error) { + gvk, err := apiutil.GVKForObject(obj, d.scheme) + if err != nil { + return false, err + } + _, isUncached := d.uncachedGVKs[gvk] _, isUnstructured := obj.(*unstructured.Unstructured) - if isUnstructured { + _, isUnstructuredList := obj.(*unstructured.UnstructuredList) + return isUncached || isUnstructured || isUnstructuredList, nil +} + +// Get retrieves an obj for a given object key from the Kubernetes Cluster. +func (d *delegatingReader) Get(ctx context.Context, key ObjectKey, obj Object) error { + if isUncached, err := d.shouldBypassCache(obj); err != nil { + return err + } else if isUncached { return d.ClientReader.Get(ctx, key, obj) } return d.CacheReader.Get(ctx, key, obj) } // List retrieves list of objects for a given namespace and list options. -func (d *DelegatingReader) List(ctx context.Context, list runtime.Object, opts ...ListOption) error { - _, isUnstructured := list.(*unstructured.UnstructuredList) - if isUnstructured { +func (d *delegatingReader) List(ctx context.Context, list ObjectList, opts ...ListOption) error { + if isUncached, err := d.shouldBypassCache(list); err != nil { + return err + } else if isUncached { return d.ClientReader.List(ctx, list, opts...) } return d.CacheReader.List(ctx, list, opts...) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go index d65f04fe9bd..a1b32653ca6 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go @@ -22,6 +22,10 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +var _ Reader = &typedClient{} +var _ Writer = &typedClient{} +var _ StatusWriter = &typedClient{} + // client is a client.Client that reads and writes directly from/to an API server. 
It lazily initializes // new clients at the time they are used, and caches the client. type typedClient struct { @@ -30,7 +34,7 @@ type typedClient struct { } // Create implements client.Client -func (c *typedClient) Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error { +func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { return err @@ -48,7 +52,7 @@ func (c *typedClient) Create(ctx context.Context, obj runtime.Object, opts ...Cr } // Update implements client.Client -func (c *typedClient) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { +func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { return err @@ -67,7 +71,7 @@ func (c *typedClient) Update(ctx context.Context, obj runtime.Object, opts ...Up } // Delete implements client.Client -func (c *typedClient) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error { +func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { return err @@ -86,7 +90,7 @@ func (c *typedClient) Delete(ctx context.Context, obj runtime.Object, opts ...De } // DeleteAllOf implements client.Client -func (c *typedClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error { +func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { return err @@ -105,7 +109,7 @@ func (c *typedClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts } // Patch implements client.Client -func (c *typedClient) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { +func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { 
o, err := c.cache.getObjMeta(obj) if err != nil { return err @@ -128,7 +132,7 @@ func (c *typedClient) Patch(ctx context.Context, obj runtime.Object, patch Patch } // Get implements client.Client -func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error { +func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object) error { r, err := c.cache.getResource(obj) if err != nil { return err @@ -140,7 +144,7 @@ func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj runtime.Object } // List implements client.Client -func (c *typedClient) List(ctx context.Context, obj runtime.Object, opts ...ListOption) error { +func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { r, err := c.cache.getResource(obj) if err != nil { return err @@ -156,7 +160,7 @@ func (c *typedClient) List(ctx context.Context, obj runtime.Object, opts ...List } // UpdateStatus used by StatusWriter to write status. -func (c *typedClient) UpdateStatus(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { +func (c *typedClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { return err @@ -177,7 +181,7 @@ func (c *typedClient) UpdateStatus(ctx context.Context, obj runtime.Object, opts } // PatchStatus used by StatusWriter to write status. 
-func (c *typedClient) PatchStatus(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { +func (c *typedClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { return err diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go index 5613791b9f7..f8fb3ccec18 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go @@ -25,6 +25,10 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +var _ Reader = &unstructuredClient{} +var _ Writer = &unstructuredClient{} +var _ StatusWriter = &unstructuredClient{} + // client is a client.Client that reads and writes directly from/to an API server. It lazily initializes // new clients at the time they are used, and caches the client. type unstructuredClient struct { @@ -33,7 +37,7 @@ type unstructuredClient struct { } // Create implements client.Client -func (uc *unstructuredClient) Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error { +func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { u, ok := obj.(*unstructured.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) @@ -61,7 +65,7 @@ func (uc *unstructuredClient) Create(ctx context.Context, obj runtime.Object, op } // Update implements client.Client -func (uc *unstructuredClient) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { +func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { u, ok := obj.(*unstructured.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) @@ -90,7 +94,7 @@ func (uc *unstructuredClient) Update(ctx 
context.Context, obj runtime.Object, op } // Delete implements client.Client -func (uc *unstructuredClient) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error { +func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { _, ok := obj.(*unstructured.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) @@ -113,7 +117,7 @@ func (uc *unstructuredClient) Delete(ctx context.Context, obj runtime.Object, op } // DeleteAllOf implements client.Client -func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error { +func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { _, ok := obj.(*unstructured.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) @@ -136,7 +140,7 @@ func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj runtime.Objec } // Patch implements client.Client -func (uc *unstructuredClient) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { +func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { _, ok := obj.(*unstructured.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) @@ -164,7 +168,7 @@ func (uc *unstructuredClient) Patch(ctx context.Context, obj runtime.Object, pat } // Get implements client.Client -func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error { +func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object) error { u, ok := obj.(*unstructured.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) @@ -190,7 +194,7 @@ func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj runtim } // List implements client.Client 
-func (uc *unstructuredClient) List(ctx context.Context, obj runtime.Object, opts ...ListOption) error { +func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { u, ok := obj.(*unstructured.UnstructuredList) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) @@ -217,7 +221,7 @@ func (uc *unstructuredClient) List(ctx context.Context, obj runtime.Object, opts Into(obj) } -func (uc *unstructuredClient) UpdateStatus(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { +func (uc *unstructuredClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error { _, ok := obj.(*unstructured.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) @@ -239,7 +243,7 @@ func (uc *unstructuredClient) UpdateStatus(ctx context.Context, obj runtime.Obje Into(obj) } -func (uc *unstructuredClient) PatchStatus(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { +func (uc *unstructuredClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { u, ok := obj.(*unstructured.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go new file mode 100644 index 00000000000..fce75d7bfbb --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go @@ -0,0 +1,114 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "fmt" + ioutil "io/ioutil" + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" +) + +// ControllerManagerConfiguration defines the functions necessary to parse a config file +// and to configure the Options struct for the ctrl.Manager +type ControllerManagerConfiguration interface { + runtime.Object + + // Complete returns the versioned configuration + Complete() (v1alpha1.ControllerManagerConfigurationSpec, error) +} + +// DeferredFileLoader is used to configure the decoder for loading controller +// runtime component config types +type DeferredFileLoader struct { + ControllerManagerConfiguration + path string + scheme *runtime.Scheme + once sync.Once + err error +} + +// File will set up the deferred file loader for the configuration +// this will also configure the defaults for the loader if nothing is +// +// Defaults: +// Path: "./config.yaml" +// Kind: GenericControllerManagerConfiguration +func File() *DeferredFileLoader { + scheme := runtime.NewScheme() + utilruntime.Must(v1alpha1.AddToScheme(scheme)) + return &DeferredFileLoader{ + path: "./config.yaml", + ControllerManagerConfiguration: &v1alpha1.ControllerManagerConfiguration{}, + scheme: scheme, + } +} + +// Complete will use sync.Once to set the scheme +func (d *DeferredFileLoader) Complete() (v1alpha1.ControllerManagerConfigurationSpec, error) { + d.once.Do(d.loadFile) + if d.err != nil { + return 
v1alpha1.ControllerManagerConfigurationSpec{}, d.err + } + return d.ControllerManagerConfiguration.Complete() +} + +// AtPath will set the path to load the file for the decoder +func (d *DeferredFileLoader) AtPath(path string) *DeferredFileLoader { + d.path = path + return d +} + +// OfKind will set the type to be used for decoding the file into +func (d *DeferredFileLoader) OfKind(obj ControllerManagerConfiguration) *DeferredFileLoader { + d.ControllerManagerConfiguration = obj + return d +} + +// InjectScheme will configure the scheme to be used for decoding the file +func (d *DeferredFileLoader) InjectScheme(scheme *runtime.Scheme) error { + d.scheme = scheme + return nil +} + +// loadFile is used from the mutex.Once to load the file +func (d *DeferredFileLoader) loadFile() { + if d.scheme == nil { + d.err = fmt.Errorf("scheme not supplied to controller configuration loader") + return + } + + content, err := ioutil.ReadFile(d.path) + if err != nil { + d.err = fmt.Errorf("could not read file at %s", d.path) + return + } + + codecs := serializer.NewCodecFactory(d.scheme) + + // Regardless of if the bytes are of any external version, + // it will be read successfully and converted into the internal version + if err = runtime.DecodeInto(codecs.UniversalDecoder(), content, d.ControllerManagerConfiguration); err != nil { + d.err = fmt.Errorf("could not decode file into runtime.Object") + } + + return +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go new file mode 100644 index 00000000000..ebd8243f32d --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package config contains functionality for interacting with ComponentConfig +// files +// +// DeferredFileLoader +// +// This uses a deferred file decoding allowing you to chain your configuration +// setup. You can pass this into manager.Options#File and it will load your +// config. +package config diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go new file mode 100644 index 00000000000..1e3adbafb86 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1alpha1 provides the ControllerManagerConfiguration used for +// configuring ctrl.Manager +// +kubebuilder:object:generate=true +package v1alpha1 diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go new file mode 100644 index 00000000000..72baa27f194 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go @@ -0,0 +1,37 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "controller-runtime.sigs.k8s.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) + +func init() { + SchemeBuilder.Register(&ControllerManagerConfiguration{}) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go new file mode 100644 index 00000000000..25c406375b6 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go @@ -0,0 +1,127 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + configv1alpha1 "k8s.io/component-base/config/v1alpha1" +) + +// ControllerManagerConfigurationSpec defines the desired state of GenericControllerManagerConfiguration +type ControllerManagerConfigurationSpec struct { + // SyncPeriod determines the minimum frequency at which watched resources are + // reconciled. A lower period will correct entropy more quickly, but reduce + // responsiveness to change if there are many watched resources. Change this + // value only if you know what you are doing. Defaults to 10 hours if unset. + // there will a 10 percent jitter between the SyncPeriod of all controllers + // so that all controllers will not send list requests simultaneously. 
+ // +optional + SyncPeriod *metav1.Duration `json:"syncPeriod,omitempty"` + + // LeaderElection is the LeaderElection config to be used when configuring + // the manager.Manager leader election + // +optional + LeaderElection *configv1alpha1.LeaderElectionConfiguration `json:"leaderElection,omitempty"` + + // CacheNamespace if specified restricts the manager's cache to watch objects in + // the desired namespace Defaults to all namespaces + // + // Note: If a namespace is specified, controllers can still Watch for a + // cluster-scoped resource (e.g Node). For namespaced resources the cache + // will only hold objects from the desired namespace. + // +optional + CacheNamespace string `json:"cacheNamespace,omitempty"` + + // GracefulShutdownTimeout is the duration given to runnable to stop before the manager actually returns on stop. + // To disable graceful shutdown, set to time.Duration(0) + // To use graceful shutdown without timeout, set to a negative duration, e.G. time.Duration(-1) + // The graceful shutdown is skipped for safety reasons in case the leadere election lease is lost. + GracefulShutdownTimeout *metav1.Duration `json:"gracefulShutDown,omitempty"` + + // Metrics contains thw controller metrics configuration + // +optional + Metrics ControllerMetrics `json:"metrics,omitempty"` + + // Health contains the controller health configuration + // +optional + Health ControllerHealth `json:"health,omitempty"` + + // Webhook contains the controllers webhook configuration + // +optional + Webhook ControllerWebhook `json:"webhook,omitempty"` +} + +// ControllerMetrics defines the metrics configs +type ControllerMetrics struct { + // BindAddress is the TCP address that the controller should bind to + // for serving prometheus metrics. + // It can be set to "0" to disable the metrics serving. 
+ // +optional + BindAddress string `json:"bindAddress,omitempty"` +} + +// ControllerHealth defines the health configs +type ControllerHealth struct { + // HealthProbeBindAddress is the TCP address that the controller should bind to + // for serving health probes + // +optional + HealthProbeBindAddress string `json:"healthProbeBindAddress,omitempty"` + + // ReadinessEndpointName, defaults to "readyz" + // +optional + ReadinessEndpointName string `json:"readinessEndpointName,omitempty"` + + // LivenessEndpointName, defaults to "healthz" + // +optional + LivenessEndpointName string `json:"livenessEndpointName,omitempty"` +} + +// ControllerWebhook defines the webhook server for the controller +type ControllerWebhook struct { + // Port is the port that the webhook server serves at. + // It is used to set webhook.Server.Port. + // +optional + Port *int `json:"port,omitempty"` + + // Host is the hostname that the webhook server binds to. + // It is used to set webhook.Server.Host. + // +optional + Host string `json:"host,omitempty"` + + // CertDir is the directory that contains the server key and certificate. + // if not set, webhook server would look up the server key and certificate in + // {TempDir}/k8s-webhook-server/serving-certs. The server key and certificate + // must be named tls.key and tls.crt, respectively. 
+ // +optional + CertDir string `json:"certDir,omitempty"` +} + +// +kubebuilder:object:root=true + +// ControllerManagerConfiguration is the Schema for the GenericControllerManagerConfigurations API +type ControllerManagerConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // ControllerManagerConfiguration returns the contfigurations for controllers + ControllerManagerConfigurationSpec `json:",inline"` +} + +// Complete returns the configuration for controller-runtime +func (c *ControllerManagerConfigurationSpec) Complete() (ControllerManagerConfigurationSpec, error) { + return *c, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..5deb12fad76 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,119 @@ +// +build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + configv1alpha1 "k8s.io/component-base/config/v1alpha1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerHealth) DeepCopyInto(out *ControllerHealth) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerHealth. +func (in *ControllerHealth) DeepCopy() *ControllerHealth { + if in == nil { + return nil + } + out := new(ControllerHealth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ControllerManagerConfiguration) DeepCopyInto(out *ControllerManagerConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ControllerManagerConfigurationSpec.DeepCopyInto(&out.ControllerManagerConfigurationSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerManagerConfiguration. +func (in *ControllerManagerConfiguration) DeepCopy() *ControllerManagerConfiguration { + if in == nil { + return nil + } + out := new(ControllerManagerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerManagerConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerManagerConfigurationSpec) DeepCopyInto(out *ControllerManagerConfigurationSpec) { + *out = *in + if in.SyncPeriod != nil { + in, out := &in.SyncPeriod, &out.SyncPeriod + *out = new(v1.Duration) + **out = **in + } + if in.LeaderElection != nil { + in, out := &in.LeaderElection, &out.LeaderElection + *out = new(configv1alpha1.LeaderElectionConfiguration) + (*in).DeepCopyInto(*out) + } + if in.GracefulShutdownTimeout != nil { + in, out := &in.GracefulShutdownTimeout, &out.GracefulShutdownTimeout + *out = new(v1.Duration) + **out = **in + } + out.Metrics = in.Metrics + out.Health = in.Health + in.Webhook.DeepCopyInto(&out.Webhook) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerManagerConfigurationSpec. 
+func (in *ControllerManagerConfigurationSpec) DeepCopy() *ControllerManagerConfigurationSpec { + if in == nil { + return nil + } + out := new(ControllerManagerConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerMetrics) DeepCopyInto(out *ControllerMetrics) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerMetrics. +func (in *ControllerMetrics) DeepCopy() *ControllerMetrics { + if in == nil { + return nil + } + out := new(ControllerMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerWebhook) DeepCopyInto(out *ControllerWebhook) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerWebhook. +func (in *ControllerWebhook) DeepCopy() *ControllerWebhook { + if in == nil { + return nil + } + out := new(ControllerWebhook) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go index b7b0f55124a..d670723e660 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go @@ -17,6 +17,7 @@ limitations under the License. package controller import ( + "context" "fmt" "github.com/go-logr/logr" @@ -43,7 +44,8 @@ type Options struct { // The overall is a token bucket and the per-item is exponential. RateLimiter ratelimiter.RateLimiter - // Log is the logger used for this controller. 
+ // Log is the logger used for this controller and passed to each reconciliation + // request via the context field. Log logr.Logger } @@ -63,9 +65,12 @@ type Controller interface { // EventHandler if all provided Predicates evaluate to true. Watch(src source.Source, eventhandler handler.EventHandler, predicates ...predicate.Predicate) error - // Start starts the controller. Start blocks until stop is closed or a + // Start starts the controller. Start blocks until the context is closed or a // controller has an error starting. - Start(stop <-chan struct{}) error + Start(ctx context.Context) error + + // GetLogger returns this controller logger prefilled with basic information. + GetLogger() logr.Logger } // New returns a new Controller registered with the Manager. The Manager will ensure that shared Caches have @@ -91,6 +96,10 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller return nil, fmt.Errorf("must specify Name for Controller") } + if options.Log == nil { + options.Log = mgr.GetLogger() + } + if options.MaxConcurrentReconciles <= 0 { options.MaxConcurrentReconciles = 1 } @@ -99,10 +108,6 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller options.RateLimiter = workqueue.DefaultControllerRateLimiter() } - if options.Log == nil { - options.Log = mgr.GetLogger() - } - // Inject dependencies into Reconciler if err := mgr.SetFields(options.Reconciler); err != nil { return nil, err @@ -117,6 +122,6 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller MaxConcurrentReconciles: options.MaxConcurrentReconciles, SetFields: mgr.SetFields, Name: name, - Log: options.Log.WithName("controller").WithValues("controller", name), + Log: options.Log.WithName("controller").WithName(name), }, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go 
index 9c8ec25768b..462781bd378 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go @@ -19,11 +19,12 @@ package controllerutil import ( "context" "fmt" + "reflect" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/utils/pointer" @@ -180,6 +181,10 @@ const ( // They should complete the sentence "Deployment default/foo has been .. OperationResultCreated OperationResult = "created" // OperationResultUpdated means that an existing resource is updated OperationResultUpdated OperationResult = "updated" + // OperationResultUpdatedStatus means that an existing resource and its status is updated + OperationResultUpdatedStatus OperationResult = "updatedStatus" + // OperationResultUpdatedStatusOnly means that only an existing status is updated + OperationResultUpdatedStatusOnly OperationResult = "updatedStatusOnly" ) // CreateOrUpdate creates or updates the given object in the Kubernetes @@ -189,12 +194,8 @@ const ( // They should complete the sentence "Deployment default/foo has been .. // The MutateFn is called regardless of creating or updating an object. // // It returns the executed operation and an error. 
-func CreateOrUpdate(ctx context.Context, c client.Client, obj runtime.Object, f MutateFn) (OperationResult, error) { - key, err := client.ObjectKeyFromObject(obj) - if err != nil { - return OperationResultNone, err - } - +func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f MutateFn) (OperationResult, error) { + key := client.ObjectKeyFromObject(obj) if err := c.Get(ctx, key, obj); err != nil { if !errors.IsNotFound(err) { return OperationResultNone, err @@ -223,12 +224,110 @@ func CreateOrUpdate(ctx context.Context, c client.Client, obj runtime.Object, f return OperationResultUpdated, nil } +// CreateOrPatch creates or patches the given object in the Kubernetes +// cluster. The object's desired state must be reconciled with the before +// state inside the passed in callback MutateFn. +// +// The MutateFn is called regardless of creating or updating an object. +// +// It returns the executed operation and an error. +func CreateOrPatch(ctx context.Context, c client.Client, obj client.Object, f MutateFn) (OperationResult, error) { + key := client.ObjectKeyFromObject(obj) + if err := c.Get(ctx, key, obj); err != nil { + if !errors.IsNotFound(err) { + return OperationResultNone, err + } + if f != nil { + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } + } + if err := c.Create(ctx, obj); err != nil { + return OperationResultNone, err + } + return OperationResultCreated, nil + } + + // Create patches for the object and its possible status. + objPatch := client.MergeFrom(obj.DeepCopyObject()) + statusPatch := client.MergeFrom(obj.DeepCopyObject()) + + // Create a copy of the original object as well as converting that copy to + // unstructured data. 
+ before, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj.DeepCopyObject()) + if err != nil { + return OperationResultNone, err + } + + // Attempt to extract the status from the resource for easier comparison later + beforeStatus, hasBeforeStatus, err := unstructured.NestedFieldCopy(before, "status") + if err != nil { + return OperationResultNone, err + } + + // If the resource contains a status then remove it from the unstructured + // copy to avoid unnecessary patching later. + if hasBeforeStatus { + unstructured.RemoveNestedField(before, "status") + } + + // Mutate the original object. + if f != nil { + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } + } + + // Convert the resource to unstructured to compare against our before copy. + after, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return OperationResultNone, err + } + + // Attempt to extract the status from the resource for easier comparison later + afterStatus, hasAfterStatus, err := unstructured.NestedFieldCopy(after, "status") + if err != nil { + return OperationResultNone, err + } + + // If the resource contains a status then remove it from the unstructured + // copy to avoid unnecessary patching later. 
+ if hasAfterStatus { + unstructured.RemoveNestedField(after, "status") + } + + result := OperationResultNone + + if !reflect.DeepEqual(before, after) { + // Only issue a Patch if the before and after resources (minus status) differ + if err := c.Patch(ctx, obj, objPatch); err != nil { + return result, err + } + result = OperationResultUpdated + } + + if (hasBeforeStatus || hasAfterStatus) && !reflect.DeepEqual(beforeStatus, afterStatus) { + // Only issue a Status Patch if the resource has a status and the beforeStatus + // and afterStatus copies differ + if err := c.Status().Patch(ctx, obj, statusPatch); err != nil { + return result, err + } + if result == OperationResultUpdated { + result = OperationResultUpdatedStatus + } else { + result = OperationResultUpdatedStatusOnly + } + } + + return result, nil +} + // mutate wraps a MutateFn and applies validation to its result -func mutate(f MutateFn, key client.ObjectKey, obj runtime.Object) error { +func mutate(f MutateFn, key client.ObjectKey, obj client.Object) error { if err := f(); err != nil { return err } - if newKey, err := client.ObjectKeyFromObject(obj); err != nil || key != newKey { + if newKey := client.ObjectKeyFromObject(obj); key != newKey { return fmt.Errorf("MutateFn cannot mutate object name and/or object namespace") } return nil @@ -238,7 +337,7 @@ func mutate(f MutateFn, key client.ObjectKey, obj runtime.Object) error { type MutateFn func() error // AddFinalizer accepts an Object and adds the provided finalizer if not present. -func AddFinalizer(o Object, finalizer string) { +func AddFinalizer(o client.Object, finalizer string) { f := o.GetFinalizers() for _, e := range f { if e == finalizer { @@ -248,21 +347,8 @@ func AddFinalizer(o Object, finalizer string) { o.SetFinalizers(append(f, finalizer)) } -// AddFinalizerWithError tries to convert a runtime object to a metav1 object and add the provided finalizer. -// It returns an error if the provided object cannot provide an accessor. 
-// -// Deprecated: Use AddFinalizer instead. Check is performing on compile time. -func AddFinalizerWithError(o runtime.Object, finalizer string) error { - m, err := meta.Accessor(o) - if err != nil { - return err - } - AddFinalizer(m.(Object), finalizer) - return nil -} - // RemoveFinalizer accepts an Object and removes the provided finalizer if present. -func RemoveFinalizer(o Object, finalizer string) { +func RemoveFinalizer(o client.Object, finalizer string) { f := o.GetFinalizers() for i := 0; i < len(f); i++ { if f[i] == finalizer { @@ -273,21 +359,8 @@ func RemoveFinalizer(o Object, finalizer string) { o.SetFinalizers(f) } -// RemoveFinalizerWithError tries to convert a runtime object to a metav1 object and remove the provided finalizer. -// It returns an error if the provided object cannot provide an accessor. -// -// Deprecated: Use RemoveFinalizer instead. Check is performing on compile time. -func RemoveFinalizerWithError(o runtime.Object, finalizer string) error { - m, err := meta.Accessor(o) - if err != nil { - return err - } - RemoveFinalizer(m.(Object), finalizer) - return nil -} - // ContainsFinalizer checks an Object that the provided finalizer is present. -func ContainsFinalizer(o Object, finalizer string) bool { +func ContainsFinalizer(o client.Object, finalizer string) bool { f := o.GetFinalizers() for _, e := range f { if e == finalizer { @@ -299,7 +372,6 @@ func ContainsFinalizer(o Object, finalizer string) bool { // Object allows functions to work indistinctly with any resource that // implements both Object interfaces. -type Object interface { - metav1.Object - runtime.Object -} +// +// Deprecated: Use client.Object instead. 
+type Object = client.Object diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go index 2266a4da984..0b002115a87 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go @@ -29,7 +29,6 @@ import ( "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" @@ -45,7 +44,7 @@ type CRDInstallOptions struct { Paths []string // CRDs is a list of CRDs to install - CRDs []runtime.Object + CRDs []client.Object // ErrorIfPathMissing will cause an error if a Path does not exist ErrorIfPathMissing bool @@ -66,7 +65,7 @@ const defaultPollInterval = 100 * time.Millisecond const defaultMaxWait = 10 * time.Second // InstallCRDs installs a collection of CRDs into a cluster by reading the crd yaml files from a directory -func InstallCRDs(config *rest.Config, options CRDInstallOptions) ([]runtime.Object, error) { +func InstallCRDs(config *rest.Config, options CRDInstallOptions) ([]client.Object, error) { defaultCRDOptions(&options) // Read the CRD yamls into options.CRDs @@ -111,7 +110,7 @@ func defaultCRDOptions(o *CRDInstallOptions) { } // WaitForCRDs waits for the CRDs to appear in discovery -func WaitForCRDs(config *rest.Config, crds []runtime.Object, options CRDInstallOptions) error { +func WaitForCRDs(config *rest.Config, crds []client.Object, options CRDInstallOptions) error { // Add each CRD to a map of GroupVersion to Resource waitingFor := map[schema.GroupVersion]*sets.String{} for _, crd := range runtimeCRDListToUnstructured(crds) { @@ -128,14 +127,17 @@ func WaitForCRDs(config *rest.Config, crds []runtime.Object, options CRDInstallO if err != nil { return err } - if 
crdVersion != "" { - gvs = append(gvs, schema.GroupVersion{Group: crdGroup, Version: crdVersion}) - } - - versions, _, err := unstructured.NestedSlice(crd.Object, "spec", "versions") + versions, found, err := unstructured.NestedSlice(crd.Object, "spec", "versions") if err != nil { return err } + + // gvs should be added here only if single version is found. If multiple version is found we will add those version + // based on the version is served or not. + if crdVersion != "" && !found { + gvs = append(gvs, schema.GroupVersion{Group: crdGroup, Version: crdVersion}) + } + for _, version := range versions { versionMap, ok := version.(map[string]interface{}) if !ok { @@ -244,7 +246,7 @@ func UninstallCRDs(config *rest.Config, options CRDInstallOptions) error { } // CreateCRDs creates the CRDs -func CreateCRDs(config *rest.Config, crds []runtime.Object) error { +func CreateCRDs(config *rest.Config, crds []client.Object) error { cs, err := client.New(config, client.Options{}) if err != nil { return err @@ -274,7 +276,7 @@ func CreateCRDs(config *rest.Config, crds []runtime.Object) error { } // renderCRDs iterate through options.Paths and extract all CRD files. 
-func renderCRDs(options *CRDInstallOptions) ([]runtime.Object, error) { +func renderCRDs(options *CRDInstallOptions) ([]client.Object, error) { var ( err error info os.FileInfo @@ -325,7 +327,7 @@ func renderCRDs(options *CRDInstallOptions) ([]runtime.Object, error) { } // Converting map to a list to return - var res []runtime.Object + var res []client.Object for _, obj := range crds { res = append(res, obj) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/helper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/helper.go index 00dfa24648c..ba222998cad 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/helper.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/helper.go @@ -5,6 +5,7 @@ import ( apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" ) var ( @@ -38,7 +39,7 @@ func mergePaths(s1, s2 []string) []string { // mergeCRDs merges two CRD slices using their names. // This function makes no guarantees about order of the merged slice. 
-func mergeCRDs(s1, s2 []runtime.Object) []runtime.Object { +func mergeCRDs(s1, s2 []client.Object) []client.Object { m := make(map[string]*unstructured.Unstructured) for _, obj := range runtimeCRDListToUnstructured(s1) { m[obj.GetName()] = obj @@ -46,7 +47,7 @@ func mergeCRDs(s1, s2 []runtime.Object) []runtime.Object { for _, obj := range runtimeCRDListToUnstructured(s2) { m[obj.GetName()] = obj } - merged := make([]runtime.Object, len(m)) + merged := make([]client.Object, len(m)) i := 0 for _, obj := range m { merged[i] = obj @@ -55,7 +56,7 @@ func mergeCRDs(s1, s2 []runtime.Object) []runtime.Object { return merged } -func runtimeCRDListToUnstructured(l []runtime.Object) []*unstructured.Unstructured { +func runtimeCRDListToUnstructured(l []client.Object) []*unstructured.Unstructured { res := []*unstructured.Unstructured{} for _, obj := range l { u := &unstructured.Unstructured{} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go index a1f84af2a3d..0bbf789a9de 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go @@ -23,8 +23,8 @@ import ( "strings" "time" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/internal/testing/integration" @@ -62,14 +62,22 @@ const ( defaultKubebuilderControlPlaneStopTimeout = 20 * time.Second ) -// Default binary path for test framework -func defaultAssetPath(binary string) string { - assetPath := os.Getenv(envKubebuilderPath) - if assetPath == "" { - assetPath = defaultKubebuilderPath +// getBinAssetPath returns a path for binary from the following list of locations, +// ordered by precedence: +// 0. KUBEBUILDER_ASSETS +// 1. Environment.BinaryAssetsDirectory +// 2. 
The default path, "/usr/local/kubebuilder/bin" +func (te *Environment) getBinAssetPath(binary string) string { + valueFromEnvVar := os.Getenv(envKubebuilderPath) + if valueFromEnvVar != "" { + return filepath.Join(valueFromEnvVar, binary) } - return filepath.Join(assetPath, binary) + if te.BinaryAssetsDirectory != "" { + return filepath.Join(te.BinaryAssetsDirectory, binary) + } + + return filepath.Join(defaultKubebuilderPath, binary) } // ControlPlane is the re-exported ControlPlane type from the internal integration package @@ -106,13 +114,17 @@ type Environment struct { // CRDs is a list of CRDs to install. // If both this field and CRDs field in CRDInstallOptions are specified, the // values are merged. - CRDs []runtime.Object + CRDs []client.Object // CRDDirectoryPaths is a list of paths containing CRD yaml or json configs. // If both this field and Paths field in CRDInstallOptions are specified, the // values are merged. CRDDirectoryPaths []string + // BinaryAssetsDirectory is the path where the binaries required for the envtest are + // located in the local environment. This field can be overridden by setting KUBEBUILDER_ASSETS. + BinaryAssetsDirectory string + // UseExisting indicates that this environments should use an // existing kubeconfig, instead of trying to stand up a new control plane. // This is useful in cases that need aggregated API servers and the like. 
@@ -217,14 +229,14 @@ func (te *Environment) Start() (*rest.Config, error) { } if os.Getenv(envKubeAPIServerBin) == "" { - te.ControlPlane.APIServer.Path = defaultAssetPath("kube-apiserver") + te.ControlPlane.APIServer.Path = te.getBinAssetPath("kube-apiserver") } if os.Getenv(envEtcdBin) == "" { - te.ControlPlane.Etcd.Path = defaultAssetPath("etcd") + te.ControlPlane.Etcd.Path = te.getBinAssetPath("etcd") } if os.Getenv(envKubectlBin) == "" { // we can't just set the path manually (it's behind a function), so set the environment variable instead - if err := os.Setenv(envKubectlBin, defaultAssetPath("kubectl")); err != nil { + if err := os.Setenv(envKubectlBin, te.getBinAssetPath("kubectl")); err != nil { return nil, err } } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go index b64b7e09e53..73431337e3b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go @@ -40,14 +40,14 @@ import ( // WebhookInstallOptions are the options for installing mutating or validating webhooks type WebhookInstallOptions struct { - // Paths is a list of paths to the directories containing the mutating or validating webhooks yaml or json configs. - DirectoryPaths []string + // Paths is a list of paths to the directories or files containing the mutating or validating webhooks yaml or json configs. 
+ Paths []string // MutatingWebhooks is a list of MutatingWebhookConfigurations to install - MutatingWebhooks []runtime.Object + MutatingWebhooks []client.Object // ValidatingWebhooks is a list of ValidatingWebhookConfigurations to install - ValidatingWebhooks []runtime.Object + ValidatingWebhooks []client.Object // IgnoreErrorIfPathMissing will ignore an error if a DirectoryPath does not exist when set to true IgnoreErrorIfPathMissing bool @@ -64,6 +64,9 @@ type WebhookInstallOptions struct { // it will be automatically populated by the local temp dir LocalServingCertDir string + // CAData is the CA that can be used to trust the serving certificates in LocalServingCertDir. + LocalServingCAData []byte + // MaxTime is the max time to wait MaxTime time.Duration @@ -143,13 +146,17 @@ func (o *WebhookInstallOptions) generateHostPort() (string, error) { return net.JoinHostPort(host, fmt.Sprintf("%d", port)), nil } -// Install installs specified webhooks to the API server -func (o *WebhookInstallOptions) Install(config *rest.Config) error { +// PrepWithoutInstalling does the setup parts of Install (populating host-port, +// setting up CAs, etc), without actually truing to do anything with webhook +// definitions. This is largely useful for internal testing of +// controller-runtime, where we need a random host-port & caData for webhook +// tests, but may be useful in similar scenarios. 
+func (o *WebhookInstallOptions) PrepWithoutInstalling() error { hookCA, err := o.setupCA() if err != nil { return err } - if err := parseWebhookDirs(o); err != nil { + if err := parseWebhook(o); err != nil { return err } @@ -158,6 +165,15 @@ func (o *WebhookInstallOptions) Install(config *rest.Config) error { return err } + return nil +} + +// Install installs specified webhooks to the API server +func (o *WebhookInstallOptions) Install(config *rest.Config) error { + if err := o.PrepWithoutInstalling(); err != nil { + return err + } + if err := createWebhooks(config, o.MutatingWebhooks, o.ValidatingWebhooks); err != nil { return err } @@ -179,8 +195,8 @@ func (o *WebhookInstallOptions) Cleanup() error { // WaitForWebhooks waits for the Webhooks to be available through API server func WaitForWebhooks(config *rest.Config, - mutatingWebhooks []runtime.Object, - validatingWebhooks []runtime.Object, + mutatingWebhooks []client.Object, + validatingWebhooks []client.Object, options WebhookInstallOptions) error { waitingFor := map[schema.GroupVersionKind]*sets.String{} @@ -273,10 +289,11 @@ func (o *WebhookInstallOptions) setupCA() ([]byte, error) { return nil, fmt.Errorf("unable to write webhook serving key to disk: %v", err) } + o.LocalServingCAData = certData return certData, nil } -func createWebhooks(config *rest.Config, mutHooks []runtime.Object, valHooks []runtime.Object) error { +func createWebhooks(config *rest.Config, mutHooks []client.Object, valHooks []client.Object) error { cs, err := client.New(config, client.Options{}) if err != nil { return err @@ -319,10 +336,10 @@ func ensureCreated(cs client.Client, obj *unstructured.Unstructured) error { return nil } -// parseWebhookDirs reads the directories of Webhooks in options.DirectoryPaths and adds the Webhook structs to options -func parseWebhookDirs(options *WebhookInstallOptions) error { - if len(options.DirectoryPaths) > 0 { - for _, path := range options.DirectoryPaths { +// parseWebhook reads the 
directories or files of Webhooks in options.Paths and adds the Webhook structs to options +func parseWebhook(options *WebhookInstallOptions) error { + if len(options.Paths) > 0 { + for _, path := range options.Paths { _, err := os.Stat(path) if options.IgnoreErrorIfPathMissing && os.IsNotExist(err) { continue // skip this path @@ -343,20 +360,28 @@ func parseWebhookDirs(options *WebhookInstallOptions) error { // readWebhooks reads the Webhooks from files and Unmarshals them into structs // returns slice of mutating and validating webhook configurations -func readWebhooks(path string) ([]runtime.Object, []runtime.Object, error) { +func readWebhooks(path string) ([]client.Object, []client.Object, error) { // Get the webhook files var files []os.FileInfo var err error log.V(1).Info("reading Webhooks from path", "path", path) - if files, err = ioutil.ReadDir(path); err != nil { + info, err := os.Stat(path) + if err != nil { return nil, nil, err } + if !info.IsDir() { + path, files = filepath.Dir(path), []os.FileInfo{info} + } else { + if files, err = ioutil.ReadDir(path); err != nil { + return nil, nil, err + } + } // file extensions that may contain Webhooks resourceExtensions := sets.NewString(".json", ".yaml", ".yml") - var mutHooks []runtime.Object - var valHooks []runtime.Object + var mutHooks []client.Object + var valHooks []client.Object for _, file := range files { // Only parse allowlisted file types if !resourceExtensions.Has(filepath.Ext(file.Name())) { @@ -408,7 +433,7 @@ func readWebhooks(path string) ([]runtime.Object, []runtime.Object, error) { return mutHooks, valHooks, nil } -func runtimeListToUnstructured(l []runtime.Object) []*unstructured.Unstructured { +func runtimeListToUnstructured(l []client.Object) []*unstructured.Unstructured { res := []*unstructured.Unstructured{} for _, obj := range l { m, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj.DeepCopyObject()) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go 
b/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go index 6aa50bf3015..271b3c00fb3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go @@ -16,45 +16,30 @@ limitations under the License. package event -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) +import "sigs.k8s.io/controller-runtime/pkg/client" // CreateEvent is an event where a Kubernetes object was created. CreateEvent should be generated // by a source.Source and transformed into a reconcile.Request by an handler.EventHandler. type CreateEvent struct { - // Meta is the ObjectMeta of the Kubernetes Type that was created - Meta metav1.Object - // Object is the object from the event - Object runtime.Object + Object client.Object } // UpdateEvent is an event where a Kubernetes object was updated. UpdateEvent should be generated // by a source.Source and transformed into a reconcile.Request by an handler.EventHandler. type UpdateEvent struct { - // MetaOld is the ObjectMeta of the Kubernetes Type that was updated (before the update) - MetaOld metav1.Object - // ObjectOld is the object from the event - ObjectOld runtime.Object - - // MetaNew is the ObjectMeta of the Kubernetes Type that was updated (after the update) - MetaNew metav1.Object + ObjectOld client.Object // ObjectNew is the object from the event - ObjectNew runtime.Object + ObjectNew client.Object } // DeleteEvent is an event where a Kubernetes object was deleted. DeleteEvent should be generated // by a source.Source and transformed into a reconcile.Request by an handler.EventHandler. type DeleteEvent struct { - // Meta is the ObjectMeta of the Kubernetes Type that was deleted - Meta metav1.Object - // Object is the object from the event - Object runtime.Object + Object client.Object // DeleteStateUnknown is true if the Delete event was missed but we identified the object // as having been deleted. 
@@ -65,9 +50,6 @@ type DeleteEvent struct { // GenericEvent should be generated by a source.Source and transformed into a reconcile.Request by an // handler.EventHandler. type GenericEvent struct { - // Meta is the ObjectMeta of a Kubernetes Type this event is for - Meta metav1.Object - // Object is the object from the event - Object runtime.Object + Object client.Object } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go index 2464c8b6714..9f72302d1c5 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go @@ -35,31 +35,31 @@ type EnqueueRequestForObject struct{} // Create implements EventHandler func (e *EnqueueRequestForObject) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { - if evt.Meta == nil { + if evt.Object == nil { enqueueLog.Error(nil, "CreateEvent received with no metadata", "event", evt) return } q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ - Name: evt.Meta.GetName(), - Namespace: evt.Meta.GetNamespace(), + Name: evt.Object.GetName(), + Namespace: evt.Object.GetNamespace(), }}) } // Update implements EventHandler func (e *EnqueueRequestForObject) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { - if evt.MetaOld != nil { + if evt.ObjectOld != nil { q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ - Name: evt.MetaOld.GetName(), - Namespace: evt.MetaOld.GetNamespace(), + Name: evt.ObjectOld.GetName(), + Namespace: evt.ObjectOld.GetNamespace(), }}) } else { enqueueLog.Error(nil, "UpdateEvent received with no old metadata", "event", evt) } - if evt.MetaNew != nil { + if evt.ObjectNew != nil { q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ - Name: evt.MetaNew.GetName(), - Namespace: evt.MetaNew.GetNamespace(), + Name: evt.ObjectNew.GetName(), + Namespace: evt.ObjectNew.GetNamespace(), }}) } else { 
enqueueLog.Error(nil, "UpdateEvent received with no new metadata", "event", evt) @@ -68,24 +68,24 @@ func (e *EnqueueRequestForObject) Update(evt event.UpdateEvent, q workqueue.Rate // Delete implements EventHandler func (e *EnqueueRequestForObject) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { - if evt.Meta == nil { + if evt.Object == nil { enqueueLog.Error(nil, "DeleteEvent received with no metadata", "event", evt) return } q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ - Name: evt.Meta.GetName(), - Namespace: evt.Meta.GetNamespace(), + Name: evt.Object.GetName(), + Namespace: evt.Object.GetNamespace(), }}) } // Generic implements EventHandler func (e *EnqueueRequestForObject) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { - if evt.Meta == nil { + if evt.Object == nil { enqueueLog.Error(nil, "GenericEvent received with no metadata", "event", evt) return } q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ - Name: evt.Meta.GetName(), - Namespace: evt.Meta.GetNamespace(), + Name: evt.Object.GetName(), + Namespace: evt.Object.GetNamespace(), }}) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go index a60790242c5..f98ec25638c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go @@ -17,15 +17,16 @@ limitations under the License. package handler import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) -var _ EventHandler = &EnqueueRequestsFromMapFunc{} +// MapFunc is the signature required for enqueueing requests from a generic function. 
+// This type is usually used with EnqueueRequestsFromMapFunc when registering an event handler. +type MapFunc func(client.Object) []reconcile.Request // EnqueueRequestsFromMapFunc enqueues Requests by running a transformation function that outputs a collection // of reconcile.Requests on each Event. The reconcile.Requests may be for an arbitrary set of objects @@ -37,34 +38,42 @@ var _ EventHandler = &EnqueueRequestsFromMapFunc{} // // For UpdateEvents which contain both a new and old object, the transformation function is run on both // objects and both sets of Requests are enqueue. -type EnqueueRequestsFromMapFunc struct { +func EnqueueRequestsFromMapFunc(fn MapFunc) EventHandler { + return &enqueueRequestsFromMapFunc{ + toRequests: fn, + } +} + +var _ EventHandler = &enqueueRequestsFromMapFunc{} + +type enqueueRequestsFromMapFunc struct { // Mapper transforms the argument into a slice of keys to be reconciled - ToRequests Mapper + toRequests MapFunc } // Create implements EventHandler -func (e *EnqueueRequestsFromMapFunc) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { - e.mapAndEnqueue(q, MapObject{Meta: evt.Meta, Object: evt.Object}) +func (e *enqueueRequestsFromMapFunc) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { + e.mapAndEnqueue(q, evt.Object) } // Update implements EventHandler -func (e *EnqueueRequestsFromMapFunc) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { - e.mapAndEnqueue(q, MapObject{Meta: evt.MetaOld, Object: evt.ObjectOld}) - e.mapAndEnqueue(q, MapObject{Meta: evt.MetaNew, Object: evt.ObjectNew}) +func (e *enqueueRequestsFromMapFunc) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { + e.mapAndEnqueue(q, evt.ObjectOld) + e.mapAndEnqueue(q, evt.ObjectNew) } // Delete implements EventHandler -func (e *EnqueueRequestsFromMapFunc) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { - e.mapAndEnqueue(q, MapObject{Meta: evt.Meta, Object: evt.Object}) +func (e 
*enqueueRequestsFromMapFunc) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { + e.mapAndEnqueue(q, evt.Object) } // Generic implements EventHandler -func (e *EnqueueRequestsFromMapFunc) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { - e.mapAndEnqueue(q, MapObject{Meta: evt.Meta, Object: evt.Object}) +func (e *enqueueRequestsFromMapFunc) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { + e.mapAndEnqueue(q, evt.Object) } -func (e *EnqueueRequestsFromMapFunc) mapAndEnqueue(q workqueue.RateLimitingInterface, object MapObject) { - for _, req := range e.ToRequests.Map(object) { +func (e *enqueueRequestsFromMapFunc) mapAndEnqueue(q workqueue.RateLimitingInterface, object client.Object) { + for _, req := range e.toRequests(object) { q.Add(req) } } @@ -72,34 +81,9 @@ func (e *EnqueueRequestsFromMapFunc) mapAndEnqueue(q workqueue.RateLimitingInter // EnqueueRequestsFromMapFunc can inject fields into the mapper. // InjectFunc implements inject.Injector. -func (e *EnqueueRequestsFromMapFunc) InjectFunc(f inject.Func) error { +func (e *enqueueRequestsFromMapFunc) InjectFunc(f inject.Func) error { if f == nil { return nil } - return f(e.ToRequests) -} - -// Mapper maps an object to a collection of keys to be enqueued -type Mapper interface { - // Map maps an object - Map(MapObject) []reconcile.Request -} - -// MapObject contains information from an event to be transformed into a Request. -type MapObject struct { - // Meta is the meta data for an object from an event. - Meta metav1.Object - - // Object is the object from an event. - Object runtime.Object -} - -var _ Mapper = ToRequestsFunc(nil) - -// ToRequestsFunc implements Mapper using a function. 
-type ToRequestsFunc func(MapObject) []reconcile.Request - -// Map implements Mapper -func (m ToRequestsFunc) Map(i MapObject) []reconcile.Request { - return m(i) + return f(e.toRequests) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go index 17d512696c5..925b9e3c2da 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go @@ -59,31 +59,31 @@ type EnqueueRequestForOwner struct { // Create implements EventHandler func (e *EnqueueRequestForOwner) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { - for _, req := range e.getOwnerReconcileRequest(evt.Meta) { + for _, req := range e.getOwnerReconcileRequest(evt.Object) { q.Add(req) } } // Update implements EventHandler func (e *EnqueueRequestForOwner) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { - for _, req := range e.getOwnerReconcileRequest(evt.MetaOld) { + for _, req := range e.getOwnerReconcileRequest(evt.ObjectOld) { q.Add(req) } - for _, req := range e.getOwnerReconcileRequest(evt.MetaNew) { + for _, req := range e.getOwnerReconcileRequest(evt.ObjectNew) { q.Add(req) } } // Delete implements EventHandler func (e *EnqueueRequestForOwner) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { - for _, req := range e.getOwnerReconcileRequest(evt.Meta) { + for _, req := range e.getOwnerReconcileRequest(evt.Object) { q.Add(req) } } // Generic implements EventHandler func (e *EnqueueRequestForOwner) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { - for _, req := range e.getOwnerReconcileRequest(evt.Meta) { + for _, req := range e.getOwnerReconcileRequest(evt.Object) { q.Add(req) } } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go index 
bb782d2fad1..113b6ff91b2 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go @@ -17,6 +17,8 @@ limitations under the License. package controller import ( + "context" + "errors" "fmt" "sync" "time" @@ -27,6 +29,7 @@ import ( "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/handler" ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/runtime/inject" @@ -69,10 +72,15 @@ type Controller struct { // Started is true if the Controller has been Started Started bool - // TODO(community): Consider initializing a logger with the Controller Name as the tag + // ctx is the context that was passed to Start() and used when starting watches. + // + // According to the docs, contexts should not be stored in a struct: https://golang.org/pkg/context, + // while we usually always strive to follow best practices, we consider this a legacy case and it should + // undergo a major refactoring and redesign to allow for context to not be stored in a struct. + ctx context.Context - // watches maintains a list of sources, handlers, and predicates to start when the controller is started. - watches []watchDescription + // startWatches maintains a list of sources, handlers, and predicates to start when the controller is started. + startWatches []watchDescription // Log is used to log messages to users during reconciliation, or for example when a watch is started. 
Log logr.Logger @@ -86,8 +94,10 @@ type watchDescription struct { } // Reconcile implements reconcile.Reconciler -func (c *Controller) Reconcile(r reconcile.Request) (reconcile.Result, error) { - return c.Do.Reconcile(r) +func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + log := c.Log.WithValues("name", req.Name, "namespace", req.Namespace) + ctx = logf.IntoContext(ctx, log) + return c.Do.Reconcile(ctx, req) } // Watch implements controller.Controller @@ -108,20 +118,29 @@ func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prc } } - c.watches = append(c.watches, watchDescription{src: src, handler: evthdler, predicates: prct}) - if c.Started { - c.Log.Info("Starting EventSource", "source", src) - return src.Start(evthdler, c.Queue, prct...) + // Controller hasn't started yet, store the watches locally and return. + // + // These watches are going to be held on the controller struct until the manager or user calls Start(...). + if !c.Started { + c.startWatches = append(c.startWatches, watchDescription{src: src, handler: evthdler, predicates: prct}) + return nil } - return nil + c.Log.Info("Starting EventSource", "source", src) + return src.Start(c.ctx, evthdler, c.Queue, prct...) } // Start implements controller.Controller -func (c *Controller) Start(stop <-chan struct{}) error { +func (c *Controller) Start(ctx context.Context) error { // use an IIFE to get proper lock handling // but lock outside to get proper handling of the queue shutdown c.mu.Lock() + if c.Started { + return errors.New("controller was started more than once. This is likely to be caused by being added to a manager multiple times") + } + + // Set the internal context. 
+ c.ctx = ctx c.Queue = c.MakeQueue() defer c.Queue.ShutDown() // needs to be outside the iife so that we shutdown after the stop channel is closed @@ -135,9 +154,9 @@ func (c *Controller) Start(stop <-chan struct{}) error { // NB(directxman12): launch the sources *before* trying to wait for the // caches to sync so that they have a chance to register their intendeded // caches. - for _, watch := range c.watches { + for _, watch := range c.startWatches { c.Log.Info("Starting EventSource", "source", watch.src) - if err := watch.src.Start(watch.handler, c.Queue, watch.predicates...); err != nil { + if err := watch.src.Start(ctx, watch.handler, c.Queue, watch.predicates...); err != nil { return err } } @@ -145,12 +164,12 @@ func (c *Controller) Start(stop <-chan struct{}) error { // Start the SharedIndexInformer factories to begin populating the SharedIndexInformer caches c.Log.Info("Starting Controller") - for _, watch := range c.watches { + for _, watch := range c.startWatches { syncingSource, ok := watch.src.(source.SyncingSource) if !ok { continue } - if err := syncingSource.WaitForSync(stop); err != nil { + if err := syncingSource.WaitForSync(ctx); err != nil { // This code is unreachable in case of kube watches since WaitForCacheSync will never return an error // Leaving it here because that could happen in the future err := fmt.Errorf("failed to wait for %s caches to sync: %w", c.Name, err) @@ -159,15 +178,26 @@ func (c *Controller) Start(stop <-chan struct{}) error { } } + // All the watches have been started, we can reset the local slice. + // + // We should never hold watches more than necessary, each watch source can hold a backing cache, + // which won't be garbage collected if we hold a reference to it. 
+ c.startWatches = nil + if c.JitterPeriod == 0 { c.JitterPeriod = 1 * time.Second } // Launch workers to process resources c.Log.Info("Starting workers", "worker count", c.MaxConcurrentReconciles) + ctrlmetrics.WorkerCount.WithLabelValues(c.Name).Set(float64(c.MaxConcurrentReconciles)) for i := 0; i < c.MaxConcurrentReconciles; i++ { - // Process work items - go wait.Until(c.worker, c.JitterPeriod, stop) + go wait.UntilWithContext(ctx, func(ctx context.Context) { + // Run a worker thread that just dequeues items, processes them, and marks them done. + // It enforces that the reconcileHandler is never invoked concurrently with the same object. + for c.processNextWorkItem(ctx) { + } + }, c.JitterPeriod) } c.Started = true @@ -177,21 +207,14 @@ func (c *Controller) Start(stop <-chan struct{}) error { return err } - <-stop + <-ctx.Done() c.Log.Info("Stopping workers") return nil } -// worker runs a worker thread that just dequeues items, processes them, and marks them done. -// It enforces that the reconcileHandler is never invoked concurrently with the same object. -func (c *Controller) worker() { - for c.processNextWorkItem() { - } -} - // processNextWorkItem will read a single work item off the workqueue and // attempt to process it, by calling the reconcileHandler. -func (c *Controller) processNextWorkItem() bool { +func (c *Controller) processNextWorkItem(ctx context.Context) bool { obj, shutdown := c.Queue.Get() if shutdown { // Stop working @@ -206,10 +229,14 @@ func (c *Controller) processNextWorkItem() bool { // period. 
defer c.Queue.Done(obj) - return c.reconcileHandler(obj) + ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Add(1) + defer ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Add(-1) + + c.reconcileHandler(ctx, obj) + return true } -func (c *Controller) reconcileHandler(obj interface{}) bool { +func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) { // Update metrics after processing each item reconcileStartTS := time.Now() defer func() { @@ -225,19 +252,20 @@ func (c *Controller) reconcileHandler(obj interface{}) bool { c.Queue.Forget(obj) c.Log.Error(nil, "Queue item was not a Request", "type", fmt.Sprintf("%T", obj), "value", obj) // Return true, don't take a break - return true + return } log := c.Log.WithValues("name", req.Name, "namespace", req.Namespace) + ctx = logf.IntoContext(ctx, log) // RunInformersAndControllers the syncHandler, passing it the namespace/Name string of the // resource to be synced. - if result, err := c.Do.Reconcile(req); err != nil { + if result, err := c.Do.Reconcile(ctx, req); err != nil { c.Queue.AddRateLimited(req) - log.Error(err, "Reconciler error") ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Inc() ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, "error").Inc() - return false + log.Error(err, "Reconciler error") + return } else if result.RequeueAfter > 0 { // The result.RequeueAfter request will be lost, if it is returned // along with a non-nil error. But this is intended as @@ -246,23 +274,23 @@ func (c *Controller) reconcileHandler(obj interface{}) bool { c.Queue.Forget(obj) c.Queue.AddAfter(req, result.RequeueAfter) ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, "requeue_after").Inc() - return true + return } else if result.Requeue { c.Queue.AddRateLimited(req) ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, "requeue").Inc() - return true + return } // Finally, if no error occurs we Forget this item so it does not // get queued again until another change happens. 
c.Queue.Forget(obj) - // TODO(directxman12): What does 1 mean? Do we want level constants? Do we want levels at all? - log.V(1).Info("Successfully Reconciled") - ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, "success").Inc() - // Return true, don't take a break - return true +} + +// GetLogger returns this controller's logger. +func (c *Controller) GetLogger() logr.Logger { + return c.Log } // InjectFunc implement SetFields.Injector diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go index 53c0bb332c6..126ded66090 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go @@ -43,6 +43,22 @@ var ( ReconcileTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Name: "controller_runtime_reconcile_time_seconds", Help: "Length of time per reconciliation per controller", + Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, + 1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50, 60}, + }, []string{"controller"}) + + // WorkerCount is a prometheus metric which holds the number of + // concurrent reconciles per controller + WorkerCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "controller_runtime_max_concurrent_reconciles", + Help: "Maximum number of concurrent reconciles per controller", + }, []string{"controller"}) + + // ActiveWorkers is a prometheus metric which holds the number + // of active workers per controller + ActiveWorkers = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "controller_runtime_active_workers", + Help: "Number of currently used workers per controller", }, []string{"controller"}) ) @@ -51,6 +67,8 @@ func init() { ReconcileTotal, ReconcileErrors, ReconcileTime, + WorkerCount, 
+ ActiveWorkers, // expose process metrics like CPU, Memory, file descriptor usage etc. prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}), // expose Go runtime metrics like GC stats, memory stats etc. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go index 0d671b73d6b..d91a0ca50cf 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go @@ -14,9 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package log contains utilities for fetching a new logger -// when one is not already available. -// Deprecated: use pkg/log package log import ( diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go index 824b0dd993b..c699f04ec04 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go @@ -17,7 +17,9 @@ limitations under the License. package recorder import ( + "context" "fmt" + "sync" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" @@ -26,35 +28,129 @@ import ( typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/recorder" ) -type provider struct { +// EventBroadcasterProducer makes an event broadcaster, returning +// whether or not the broadcaster should be stopped with the Provider, +// or not (e.g. if it's shared, it shouldn't be stopped with the Provider). +type EventBroadcasterProducer func() (caster record.EventBroadcaster, stopWithProvider bool) + +// Provider is a recorder.Provider that records events to the k8s API server +// and to a logr Logger. 
+type Provider struct { // scheme to specify when creating a recorder scheme *runtime.Scheme - // eventBroadcaster to create new recorder instance - eventBroadcaster record.EventBroadcaster // logger is the logger to use when logging diagnostic event info - logger logr.Logger + logger logr.Logger + evtClient typedcorev1.EventInterface + makeBroadcaster EventBroadcasterProducer + + broadcasterOnce sync.Once + broadcaster record.EventBroadcaster + stopBroadcaster bool +} + +// NB(directxman12): this manually implements Stop instead of Being a runnable because we need to +// stop it *after* everything else shuts down, otherwise we'll cause panics as the leader election +// code finishes up and tries to continue emitting events. + +// Stop attempts to stop this provider, stopping the underlying broadcaster +// if the broadcaster asked to be stopped. It kinda tries to honor the given +// context, but the underlying broadcaster has an indefinite wait that doesn't +// return until all queued events are flushed, so this may end up just returning +// before the underlying wait has finished instead of cancelling the wait. +// This is Very Frustrating™. +func (p *Provider) Stop(shutdownCtx context.Context) { + doneCh := make(chan struct{}) + + go func() { + // technically, this could start the broadcaster, but practically, it's + // almost certainly already been started (e.g. by leader election). We + // need to invoke this to ensure that we don't inadvertently race with + // an invocation of getBroadcaster. + broadcaster := p.getBroadcaster() + if p.stopBroadcaster { + broadcaster.Shutdown() + } + close(doneCh) + }() + + select { + case <-shutdownCtx.Done(): + case <-doneCh: + } +} + +// getBroadcaster ensures that a broadcaster is started for this +// provider, and returns it. It's threadsafe. +func (p *Provider) getBroadcaster() record.EventBroadcaster { + // NB(directxman12): this can technically still leak if something calls + // "getBroadcaster" (i.e. 
Emits an Event) but never calls Start, but if we + // create the broadcaster in start, we could race with other things that + // are started at the same time & want to emit events. The alternative is + // silently swallowing events and more locking, but that seems suboptimal. + + p.broadcasterOnce.Do(func() { + broadcaster, stop := p.makeBroadcaster() + broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: p.evtClient}) + broadcaster.StartEventWatcher( + func(e *corev1.Event) { + p.logger.V(1).Info(e.Type, "object", e.InvolvedObject, "reason", e.Reason, "message", e.Message) + }) + p.broadcaster = broadcaster + p.stopBroadcaster = stop + }) + + return p.broadcaster } // NewProvider create a new Provider instance. -func NewProvider(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, broadcaster record.EventBroadcaster) (recorder.Provider, error) { +func NewProvider(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster EventBroadcasterProducer) (*Provider, error) { clientSet, err := kubernetes.NewForConfig(config) if err != nil { return nil, fmt.Errorf("failed to init clientSet: %w", err) } - p := &provider{scheme: scheme, logger: logger, eventBroadcaster: broadcaster} - p.eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: clientSet.CoreV1().Events("")}) - p.eventBroadcaster.StartEventWatcher( - func(e *corev1.Event) { - p.logger.V(1).Info(e.Type, "object", e.InvolvedObject, "reason", e.Reason, "message", e.Message) - }) - + p := &Provider{scheme: scheme, logger: logger, makeBroadcaster: makeBroadcaster, evtClient: clientSet.CoreV1().Events("")} return p, nil } -func (p *provider) GetEventRecorderFor(name string) record.EventRecorder { - return p.eventBroadcaster.NewRecorder(p.scheme, corev1.EventSource{Component: name}) +// GetEventRecorderFor returns an event recorder that broadcasts to this provider's +// broadcaster. All events will be associated with a component of the given name. 
+func (p *Provider) GetEventRecorderFor(name string) record.EventRecorder { + return &lazyRecorder{ + prov: p, + name: name, + } +} + +// lazyRecorder is a recorder that doesn't actually instantiate any underlying +// recorder until the first event is emitted. +type lazyRecorder struct { + prov *Provider + name string + + recOnce sync.Once + rec record.EventRecorder +} + +// ensureRecording ensures that a concrete recorder is populated for this recorder. +func (l *lazyRecorder) ensureRecording() { + l.recOnce.Do(func() { + broadcaster := l.prov.getBroadcaster() + l.rec = broadcaster.NewRecorder(l.prov.scheme, corev1.EventSource{Component: l.name}) + }) +} + +func (l *lazyRecorder) Event(object runtime.Object, eventtype, reason, message string) { + l.ensureRecording() + l.rec.Event(object, eventtype, reason, message) +} +func (l *lazyRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + l.ensureRecording() + l.rec.Eventf(object, eventtype, reason, messageFmt, args...) +} +func (l *lazyRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + l.ensureRecording() + l.rec.AnnotatedEventf(object, annotations, eventtype, reason, messageFmt, args...) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/doc.go index 24654faa3cc..37a9aefab58 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ /* -Package leaderelection contains a constructors for a leader election resource lock. +Package leaderelection contains a constructor for a leader election resource lock. This is used to ensure that multiple copies of a controller manager can be run with only one active set of controllers, for active-passive HA. 
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go b/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go index ed9361e8f31..0173f6e2f47 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go @@ -37,22 +37,32 @@ type Options struct { // starting the manager. LeaderElection bool + // LeaderElectionResourceLock determines which resource lock to use for leader election, + // defaults to "configmapsleases". + LeaderElectionResourceLock string + // LeaderElectionNamespace determines the namespace in which the leader - // election configmap will be created. + // election resource will be created. LeaderElectionNamespace string - // LeaderElectionID determines the name of the configmap that leader election + // LeaderElectionID determines the name of the resource that leader election // will use for holding the leader lock. LeaderElectionID string } -// NewResourceLock creates a new config map resource lock for use in a leader -// election loop +// NewResourceLock creates a new resource lock for use in a leader election loop. func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, options Options) (resourcelock.Interface, error) { if !options.LeaderElection { return nil, nil } + // Default resource lock to "configmapsleases". We must keep this default until we are sure all controller-runtime + // users have upgraded from the original default ConfigMap lock to a controller-runtime version that has this new + // default. Many users of controller-runtime skip versions, so we should be extremely conservative here. 
+ if options.LeaderElectionResourceLock == "" { + options.LeaderElectionResourceLock = resourcelock.ConfigMapsLeasesResourceLock + } + // LeaderElectionID must be provided to prevent clashes if options.LeaderElectionID == "" { return nil, errors.New("LeaderElectionID must be configured") @@ -80,8 +90,7 @@ func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, op return nil, err } - // TODO(JoelSpeed): switch to leaderelection object in 1.12 - return resourcelock.New(resourcelock.ConfigMapsResourceLock, + return resourcelock.New(options.LeaderElectionResourceLock, options.LeaderElectionNamespace, options.LeaderElectionID, client.CoreV1(), diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go index 082e2bce31e..8923530c467 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go @@ -39,10 +39,6 @@ import ( "github.com/go-logr/logr" ) -var ( - contextKey = &struct{}{} -) - // SetLogger sets a concrete logging implementation for all deferred Loggers. func SetLogger(l logr.Logger) { Log.Fulfill(l) @@ -55,19 +51,17 @@ var Log = NewDelegatingLogger(NullLogger{}) // FromContext returns a logger with predefined values from a context.Context. func FromContext(ctx context.Context, keysAndValues ...interface{}) logr.Logger { - var log logr.Logger - if ctx == nil { - log = Log - } else { - lv := ctx.Value(contextKey) - log = lv.(logr.Logger) + var log logr.Logger = Log + if ctx != nil { + if logger := logr.FromContext(ctx); logger != nil { + log = logger + } } - log.WithValues(keysAndValues...) - return log + return log.WithValues(keysAndValues...) } // IntoContext takes a context and sets the logger as one of its keys. // Use FromContext function to retrieve the logger. 
func IntoContext(ctx context.Context, log logr.Logger) context.Context { - return context.WithValue(ctx, contextKey, log) + return logr.NewContext(ctx, log) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/client_builder.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/client_builder.go new file mode 100644 index 00000000000..cc9f0817f02 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/client_builder.go @@ -0,0 +1,61 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package manager + +import ( + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ClientBuilder builder is the interface for the client builder. +type ClientBuilder interface { + // WithUncached takes a list of runtime objects (plain or lists) that users don't want to cache + // for this client. This function can be called multiple times, it should append to an internal slice. + WithUncached(objs ...client.Object) ClientBuilder + + // Build returns a new client. + Build(cache cache.Cache, config *rest.Config, options client.Options) (client.Client, error) +} + +// NewClientBuilder returns a builder to build new clients to be passed when creating a Manager. 
+func NewClientBuilder() ClientBuilder { + return &newClientBuilder{} +} + +type newClientBuilder struct { + uncached []client.Object +} + +func (n *newClientBuilder) WithUncached(objs ...client.Object) ClientBuilder { + n.uncached = append(n.uncached, objs...) + return n +} + +func (n *newClientBuilder) Build(cache cache.Cache, config *rest.Config, options client.Options) (client.Client, error) { + // Create the Client for Write operations. + c, err := client.New(config, options) + if err != nil { + return nil, err + } + + return client.NewDelegatingClient(client.NewDelegatingClientInput{ + CacheReader: cache, + Client: c, + UncachedObjects: n.uncached, + }) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go index 0424b84a904..a9fe180d304 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go @@ -38,9 +38,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/healthz" - logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" "sigs.k8s.io/controller-runtime/pkg/metrics" - "sigs.k8s.io/controller-runtime/pkg/recorder" "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook" ) @@ -52,12 +51,12 @@ const ( defaultRetryPeriod = 2 * time.Second defaultGracefulShutdownPeriod = 30 * time.Second - defaultReadinessEndpoint = "/readyz/" - defaultLivenessEndpoint = "/healthz/" + defaultReadinessEndpoint = "/readyz" + defaultLivenessEndpoint = "/healthz" defaultMetricsEndpoint = "/metrics" ) -var log = logf.RuntimeLog.WithName("manager") +var _ Runnable = &controllerManager{} type controllerManager struct { // config is the rest.config used to talk to the apiserver. Required. 
@@ -89,11 +88,15 @@ type controllerManager struct { // recorderProvider is used to generate event recorders that will be injected into Controllers // (and EventHandlers, Sources and Predicates). - recorderProvider recorder.Provider + recorderProvider *intrec.Provider // resourceLock forms the basis for leader election resourceLock resourcelock.Interface + // leaderElectionReleaseOnCancel defines if the manager should step back from the leader lease + // on shutdown + leaderElectionReleaseOnCancel bool + // mapper is used to map resources to kind, and map kind and version. mapper meta.RESTMapper @@ -124,16 +127,6 @@ type controllerManager struct { healthzStarted bool errChan chan error - // internalStop is the stop channel *actually* used by everything involved - // with the manager as a stop channel, so that we can pass a stop channel - // to things that need it off the bat (like the Channel source). It can - // be closed via `internalStopper` (by being the same underlying channel). - internalStop <-chan struct{} - - // internalStopper is the write side of the internal stop channel, allowing us to close it. - // It and `internalStop` should point to the same channel. - internalStopper chan<- struct{} - // Logger is the logger that should be used by this manager. // If none is set, it defaults to log.Log global logger. logger logr.Logger @@ -151,7 +144,7 @@ type controllerManager struct { // election was configured. elected chan struct{} - startCache func(stop <-chan struct{}) error + startCache func(ctx context.Context) error // port is the port that the webhook server serves at. port int @@ -190,6 +183,13 @@ type controllerManager struct { // after the gracefulShutdownTimeout ended. It must not be accessed before internalStop // is closed because it will be nil. 
shutdownCtx context.Context + + internalCtx context.Context + internalCancel context.CancelFunc + + // internalProceduresStop channel is used internally to the manager when coordinating + // the proper shutdown of servers. This channel is also used for dependency injection. + internalProceduresStop chan struct{} } // Add sets dependencies on i, and adds it to the list of Runnables to start. @@ -243,13 +243,13 @@ func (cm *controllerManager) SetFields(i interface{}) error { if _, err := inject.InjectorInto(cm.SetFields, i); err != nil { return err } - if _, err := inject.StopChannelInto(cm.internalStop, i); err != nil { + if _, err := inject.StopChannelInto(cm.internalProceduresStop, i); err != nil { return err } if _, err := inject.MapperInto(cm.mapper, i); err != nil { return err } - if _, err := inject.LoggerInto(log, i); err != nil { + if _, err := inject.LoggerInto(cm.logger, i); err != nil { return err } return nil @@ -270,7 +270,7 @@ func (cm *controllerManager) AddMetricsExtraHandler(path string, handler http.Ha } cm.metricsExtraHandlers[path] = handler - log.V(2).Info("Registering metrics http server extra handler", "path", path) + cm.logger.V(2).Info("Registering metrics http server extra handler", "path", path) return nil } @@ -349,24 +349,39 @@ func (cm *controllerManager) GetAPIReader() client.Reader { } func (cm *controllerManager) GetWebhookServer() *webhook.Server { - if cm.webhookServer == nil { + server, wasNew := func() (*webhook.Server, bool) { + cm.mu.Lock() + defer cm.mu.Unlock() + + if cm.webhookServer != nil { + return cm.webhookServer, false + } + cm.webhookServer = &webhook.Server{ Port: cm.port, Host: cm.host, CertDir: cm.certDir, } - if err := cm.Add(cm.webhookServer); err != nil { - panic("unable to add webhookServer to the controller manager") + return cm.webhookServer, true + }() + + // only add the server if *we ourselves* just registered it. 
+ // Add has its own lock, so just do this separately -- there shouldn't + // be a "race" in this lock gap because the condition is the population + // of cm.webhookServer, not anything to do with Add. + if wasNew { + if err := cm.Add(server); err != nil { + panic("unable to add webhook server to the controller manager") } } - return cm.webhookServer + return server } func (cm *controllerManager) GetLogger() logr.Logger { return cm.logger } -func (cm *controllerManager) serveMetrics(stop <-chan struct{}) { +func (cm *controllerManager) serveMetrics() { handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{ ErrorHandling: promhttp.HTTPErrorOnError, }) @@ -387,8 +402,8 @@ func (cm *controllerManager) serveMetrics(stop <-chan struct{}) { Handler: mux, } // Run the server - cm.startRunnable(RunnableFunc(func(stop <-chan struct{}) error { - log.Info("starting metrics server", "path", defaultMetricsEndpoint) + cm.startRunnable(RunnableFunc(func(_ context.Context) error { + cm.logger.Info("starting metrics server", "path", defaultMetricsEndpoint) if err := server.Serve(cm.metricsListener); err != nil && err != http.ErrServerClosed { return err } @@ -396,13 +411,13 @@ func (cm *controllerManager) serveMetrics(stop <-chan struct{}) { })) // Shutdown the server when stop is closed - <-stop + <-cm.internalProceduresStop if err := server.Shutdown(cm.shutdownCtx); err != nil { cm.errChan <- err } } -func (cm *controllerManager) serveHealthProbes(stop <-chan struct{}) { +func (cm *controllerManager) serveHealthProbes() { // TODO(hypnoglow): refactor locking to use anonymous func in the similar way // it's done in serveMetrics. 
cm.mu.Lock() @@ -410,16 +425,20 @@ func (cm *controllerManager) serveHealthProbes(stop <-chan struct{}) { if cm.readyzHandler != nil { mux.Handle(cm.readinessEndpointName, http.StripPrefix(cm.readinessEndpointName, cm.readyzHandler)) + // Append '/' suffix to handle subpaths + mux.Handle(cm.readinessEndpointName+"/", http.StripPrefix(cm.readinessEndpointName, cm.readyzHandler)) } if cm.healthzHandler != nil { mux.Handle(cm.livenessEndpointName, http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler)) + // Append '/' suffix to handle subpaths + mux.Handle(cm.livenessEndpointName+"/", http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler)) } server := http.Server{ Handler: mux, } // Run server - cm.startRunnable(RunnableFunc(func(stop <-chan struct{}) error { + cm.startRunnable(RunnableFunc(func(_ context.Context) error { if err := server.Serve(cm.healthProbeListener); err != nil && err != http.ErrServerClosed { return err } @@ -429,17 +448,19 @@ func (cm *controllerManager) serveHealthProbes(stop <-chan struct{}) { cm.mu.Unlock() // Shutdown the server when stop is closed - <-stop + <-cm.internalProceduresStop if err := server.Shutdown(cm.shutdownCtx); err != nil { cm.errChan <- err } } -func (cm *controllerManager) Start(stop <-chan struct{}) (err error) { +func (cm *controllerManager) Start(ctx context.Context) (err error) { + cm.internalCtx, cm.internalCancel = context.WithCancel(ctx) + // This chan indicates that stop is complete, in other words all runnables have returned or timeout on stop request stopComplete := make(chan struct{}) defer close(stopComplete) - // This must be deferred after closing stopComplete, otherwise we deadlock + // This must be deferred after closing stopComplete, otherwise we deadlock. 
defer func() { // https://hips.hearstapps.com/hmg-prod.s3.amazonaws.com/images/gettyimages-459889618-1533579787.jpg stopErr := cm.engageStopProcedure(stopComplete) @@ -466,12 +487,12 @@ func (cm *controllerManager) Start(stop <-chan struct{}) (err error) { // (If we don't serve metrics for non-leaders, prometheus will still scrape // the pod but will get a connection refused) if cm.metricsListener != nil { - go cm.serveMetrics(cm.internalStop) + go cm.serveMetrics() } // Serve health probes if cm.healthProbeListener != nil { - go cm.serveHealthProbes(cm.internalStop) + go cm.serveHealthProbes() } go cm.startNonLeaderElectionRunnables() @@ -490,7 +511,7 @@ func (cm *controllerManager) Start(stop <-chan struct{}) (err error) { }() select { - case <-stop: + case <-ctx.Done(): // We are done return nil case err := <-cm.errChan: @@ -501,15 +522,20 @@ func (cm *controllerManager) Start(stop <-chan struct{}) (err error) { // engageStopProcedure signals all runnables to stop, reads potential errors // from the errChan and waits for them to end. It must not be called more than once. -func (cm *controllerManager) engageStopProcedure(stopComplete chan struct{}) error { - var cancel context.CancelFunc +func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) error { + // Populate the shutdown context. + var shutdownCancel context.CancelFunc if cm.gracefulShutdownTimeout > 0 { - cm.shutdownCtx, cancel = context.WithTimeout(context.Background(), cm.gracefulShutdownTimeout) + cm.shutdownCtx, shutdownCancel = context.WithTimeout(context.Background(), cm.gracefulShutdownTimeout) } else { - cm.shutdownCtx, cancel = context.WithCancel(context.Background()) + cm.shutdownCtx, shutdownCancel = context.WithCancel(context.Background()) } - defer cancel() - close(cm.internalStopper) + defer shutdownCancel() + + // Cancel the internal stop channel and wait for the procedures to stop and complete. 
+ close(cm.internalProceduresStop) + cm.internalCancel() + // Start draining the errors before acquiring the lock to make sure we don't deadlock // if something that has the lock is blocked on trying to write into the unbuffered // channel after something else already wrote into it. @@ -518,7 +544,7 @@ func (cm *controllerManager) engageStopProcedure(stopComplete chan struct{}) err select { case err, ok := <-cm.errChan: if ok { - log.Error(err, "error received after stop sequence was engaged") + cm.logger.Error(err, "error received after stop sequence was engaged") } case <-stopComplete: return @@ -531,14 +557,17 @@ func (cm *controllerManager) engageStopProcedure(stopComplete chan struct{}) err cm.mu.Lock() defer cm.mu.Unlock() cm.stopProcedureEngaged = true - return cm.waitForRunnableToEnd(cm.shutdownCtx, cancel) + + // we want to close this after the other runnables stop, because we don't + // want things like leader election to try and emit events on a closed + // channel + defer cm.recorderProvider.Stop(cm.shutdownCtx) + return cm.waitForRunnableToEnd(shutdownCancel) } // waitForRunnableToEnd blocks until all runnables ended or the // tearDownTimeout was reached. In the latter case, an error is returned. -func (cm *controllerManager) waitForRunnableToEnd(ctx context.Context, cancel context.CancelFunc) error { - defer cancel() - +func (cm *controllerManager) waitForRunnableToEnd(shutdownCancel context.CancelFunc) error { // Cancel leader election only after we waited. It will os.Exit() the app for safety. 
defer func() { if cm.leaderElectionCancel != nil { @@ -548,11 +577,11 @@ func (cm *controllerManager) waitForRunnableToEnd(ctx context.Context, cancel co go func() { cm.waitForRunnable.Wait() - cancel() + shutdownCancel() }() - <-ctx.Done() - if err := ctx.Err(); err != nil && err != context.Canceled { + <-cm.shutdownCtx.Done() + if err := cm.shutdownCtx.Err(); err != nil && err != context.Canceled { return fmt.Errorf("failed waiting for all runnables to end within grace period of %s: %w", cm.gracefulShutdownTimeout, err) } return nil @@ -562,7 +591,7 @@ func (cm *controllerManager) startNonLeaderElectionRunnables() { cm.mu.Lock() defer cm.mu.Unlock() - cm.waitForCache() + cm.waitForCache(cm.internalCtx) // Start the non-leaderelection Runnables after the cache has synced for _, c := range cm.nonLeaderElectionRunnables { @@ -576,7 +605,7 @@ func (cm *controllerManager) startLeaderElectionRunnables() { cm.mu.Lock() defer cm.mu.Unlock() - cm.waitForCache() + cm.waitForCache(cm.internalCtx) // Start the leader election Runnables after the cache has synced for _, c := range cm.leaderElectionRunnables { @@ -588,7 +617,7 @@ func (cm *controllerManager) startLeaderElectionRunnables() { cm.startedLeader = true } -func (cm *controllerManager) waitForCache() { +func (cm *controllerManager) waitForCache(ctx context.Context) { if cm.started { return } @@ -597,13 +626,13 @@ func (cm *controllerManager) waitForCache() { if cm.startCache == nil { cm.startCache = cm.cache.Start } - cm.startRunnable(RunnableFunc(func(stop <-chan struct{}) error { - return cm.startCache(stop) + cm.startRunnable(RunnableFunc(func(ctx context.Context) error { + return cm.startCache(ctx) })) // Wait for the caches to sync. 
// TODO(community): Check the return value and write a test - cm.cache.WaitForCacheSync(cm.internalStop) + cm.cache.WaitForCacheSync(ctx) // TODO: This should be the return value of cm.cache.WaitForCacheSync but we abuse // cm.started as check if we already started the cache so it must always become true. // Making sure that the cache doesn't get started twice is needed to not get a "close @@ -640,6 +669,7 @@ func (cm *controllerManager) startLeaderElection() (err error) { }, OnStoppedLeading: cm.onStoppedLeading, }, + ReleaseOnCancel: cm.leaderElectionReleaseOnCancel, }) if err != nil { return err @@ -658,7 +688,7 @@ func (cm *controllerManager) startRunnable(r Runnable) { cm.waitForRunnable.Add(1) go func() { defer cm.waitForRunnable.Done() - if err := r.Start(cm.internalStop); err != nil { + if err := r.Start(cm.internalCtx); err != nil { cm.errChan <- err } }() diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go index c55399d02b5..f69030391cd 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go @@ -17,14 +17,16 @@ limitations under the License. 
package manager import ( + "context" "fmt" "net" "net/http" + "reflect" "time" "github.com/go-logr/logr" - "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" @@ -33,12 +35,15 @@ import ( "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/config" + "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" "sigs.k8s.io/controller-runtime/pkg/healthz" - internalrecorder "sigs.k8s.io/controller-runtime/pkg/internal/recorder" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" "sigs.k8s.io/controller-runtime/pkg/leaderelection" - logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/metrics" "sigs.k8s.io/controller-runtime/pkg/recorder" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook" ) @@ -74,12 +79,13 @@ type Manager interface { // AddReadyzCheck allows you to add Readyz checker AddReadyzCheck(name string, check healthz.Checker) error - // Start starts all registered Controllers and blocks until the Stop channel is closed. + // Start starts all registered Controllers and blocks until the context is cancelled. // Returns an error if there is an error starting any controller. + // // If LeaderElection is used, the binary must be exited immediately after this returns, // otherwise components that need leader election might continue to run after the leader // lock was lost. - Start(<-chan struct{}) error + Start(ctx context.Context) error // GetConfig returns an initialized Config GetConfig() *rest.Config @@ -143,11 +149,26 @@ type Options struct { // starting the manager. 
LeaderElection bool + // LeaderElectionResourceLock determines which resource lock to use for leader election, + // defaults to "configmapsleases". Change this value only if you know what you are doing. + // Otherwise, users of your controller might end up with multiple running instances that + // each acquired leadership through different resource locks during upgrades and thus + // act on the same resources concurrently. + // If you want to migrate to the "leases" resource lock, you might do so by migrating to the + // respective multilock first ("configmapsleases" or "endpointsleases"), which will acquire a + // leader lock on both resources. After all your users have migrated to the multilock, you can + // go ahead and migrate to "leases". Please also keep in mind, that users might skip versions + // of your controller. + // + // Note: before controller-runtime version v0.7, the resource lock was set to "configmaps". + // Please keep this in mind, when planning a proper migration path for your controller. + LeaderElectionResourceLock string + // LeaderElectionNamespace determines the namespace in which the leader - // election configmap will be created. + // election resource will be created. LeaderElectionNamespace string - // LeaderElectionID determines the name of the configmap that leader election + // LeaderElectionID determines the name of the resource that leader election // will use for holding the leader lock. LeaderElectionID string @@ -155,6 +176,13 @@ type Options struct { // that is used to build the leader election client. LeaderElectionConfig *rest.Config + // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily + // when the Manager ends. This requires the binary to immediately end when the + // Manager is stopped, otherwise this setting is unsafe. Setting this significantly + // speeds up voluntary leader transitions as the new leader doesn't have to wait + // LeaseDuration time first. 
+ LeaderElectionReleaseOnCancel bool + // LeaseDuration is the duration that non-leader candidates will // wait to force acquire leadership. This is measured against time of // last observed ack. Default is 15 seconds. @@ -207,10 +235,14 @@ type Options struct { // by the manager. If not set this will use the default new cache function. NewCache cache.NewCacheFunc - // NewClient will create the client to be used by the manager. + // ClientBuilder is the builder that creates the client to be used by the manager. // If not set this will create the default DelegatingClient that will // use the cache for reads and the client for writes. - NewClient NewClientFunc + ClientBuilder ClientBuilder + + // ClientDisableCacheFor tells the client that, if any cache is used, to bypass it + // for the given objects. + ClientDisableCacheFor []client.Object // DryRunClient specifies whether the client should be configured to enforce // dryRun mode. @@ -218,42 +250,48 @@ type Options struct { // EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API // Use this to customize the event correlator and spam filter + // + // Deprecated: using this may cause goroutine leaks if the lifetime of your manager or controllers + // is shorter than the lifetime of your process. EventBroadcaster record.EventBroadcaster // GracefulShutdownTimeout is the duration given to runnable to stop before the manager actually returns on stop. // To disable graceful shutdown, set to time.Duration(0) // To use graceful shutdown without timeout, set to a negative duration, e.G. time.Duration(-1) - // The graceful shutdown is skipped for safety reasons in case the leadere election lease is lost. + // The graceful shutdown is skipped for safety reasons in case the leader election lease is lost. GracefulShutdownTimeout *time.Duration + // makeBroadcaster allows deferring the creation of the broadcaster to + // avoid leaking goroutines if we never call Start on this manager. 
It also + // returns whether or not this is a "owned" broadcaster, and as such should be + // stopped with the manager. + makeBroadcaster intrec.EventBroadcasterProducer + // Dependency injection for testing - newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, broadcaster record.EventBroadcaster) (recorder.Provider, error) + newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) newResourceLock func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) newMetricsListener func(addr string) (net.Listener, error) newHealthProbeListener func(addr string) (net.Listener, error) } -// NewClientFunc allows a user to define how to create a client -type NewClientFunc func(cache cache.Cache, config *rest.Config, options client.Options) (client.Client, error) - // Runnable allows a component to be started. // It's very important that Start blocks until // it's done running. type Runnable interface { // Start starts running the component. The component will stop running - // when the channel is closed. Start blocks until the channel is closed or + // when the context is closed. Start blocks until the context is closed or // an error occurs. - Start(<-chan struct{}) error + Start(context.Context) error } // RunnableFunc implements Runnable using a function. // It's very important that the given function block // until it's done running. -type RunnableFunc func(<-chan struct{}) error +type RunnableFunc func(context.Context) error // Start implements Runnable -func (r RunnableFunc) Start(s <-chan struct{}) error { - return r(s) +func (r RunnableFunc) Start(ctx context.Context) error { + return r(ctx) } // LeaderElectionRunnable knows if a Runnable needs to be run in the leader election mode. 
@@ -276,7 +314,7 @@ func New(config *rest.Config, options Options) (Manager, error) { // Create the mapper provider mapper, err := options.MapperProvider(config) if err != nil { - log.Error(err, "Failed to get API Group-Resources") + options.Logger.Error(err, "Failed to get API Group-Resources") return nil, err } @@ -286,12 +324,16 @@ func New(config *rest.Config, options Options) (Manager, error) { return nil, err } - apiReader, err := client.New(config, client.Options{Scheme: options.Scheme, Mapper: mapper}) + clientOptions := client.Options{Scheme: options.Scheme, Mapper: mapper} + + apiReader, err := client.New(config, clientOptions) if err != nil { return nil, err } - writeObj, err := options.NewClient(cache, config, client.Options{Scheme: options.Scheme, Mapper: mapper}) + writeObj, err := options.ClientBuilder. + WithUncached(options.ClientDisableCacheFor...). + Build(cache, config, clientOptions) if err != nil { return nil, err } @@ -303,7 +345,7 @@ func New(config *rest.Config, options Options) (Manager, error) { // Create the recorder provider to inject event recorders for the components. // TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific // to the particular controller that it's being injected into, rather than a generic one like is here. 
- recorderProvider, err := options.newRecorderProvider(config, options.Scheme, log.WithName("events"), options.EventBroadcaster) + recorderProvider, err := options.newRecorderProvider(config, options.Scheme, options.Logger.WithName("events"), options.makeBroadcaster) if err != nil { return nil, err } @@ -314,9 +356,10 @@ func New(config *rest.Config, options Options) (Manager, error) { leaderConfig = options.LeaderElectionConfig } resourceLock, err := options.newResourceLock(leaderConfig, recorderProvider, leaderelection.Options{ - LeaderElection: options.LeaderElection, - LeaderElectionID: options.LeaderElectionID, - LeaderElectionNamespace: options.LeaderElectionNamespace, + LeaderElection: options.LeaderElection, + LeaderElectionResourceLock: options.LeaderElectionResourceLock, + LeaderElectionID: options.LeaderElectionID, + LeaderElectionNamespace: options.LeaderElectionNamespace, }) if err != nil { return nil, err @@ -339,8 +382,6 @@ func New(config *rest.Config, options Options) (Manager, error) { return nil, err } - stop := make(chan struct{}) - return &controllerManager{ config: config, scheme: options.Scheme, @@ -354,8 +395,6 @@ func New(config *rest.Config, options Options) (Manager, error) { metricsListener: metricsListener, metricsExtraHandlers: metricsExtraHandlers, logger: options.Logger, - internalStop: stop, - internalStopper: stop, elected: make(chan struct{}), port: options.Port, host: options.Host, @@ -367,25 +406,106 @@ func New(config *rest.Config, options Options) (Manager, error) { readinessEndpointName: options.ReadinessEndpointName, livenessEndpointName: options.LivenessEndpointName, gracefulShutdownTimeout: *options.GracefulShutdownTimeout, + internalProceduresStop: make(chan struct{}), }, nil } -// DefaultNewClient creates the default caching client -func DefaultNewClient(cache cache.Cache, config *rest.Config, options client.Options) (client.Client, error) { - // Create the Client for Write operations. 
- c, err := client.New(config, options) +// AndFrom will use a supplied type and convert to Options +// any options already set on Options will be ignored, this is used to allow +// cli flags to override anything specified in the config file +func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, error) { + if inj, wantsScheme := loader.(inject.Scheme); wantsScheme { + err := inj.InjectScheme(o.Scheme) + if err != nil { + return o, err + } + } + + newObj, err := loader.Complete() if err != nil { - return nil, err + return o, err } - return &client.DelegatingClient{ - Reader: &client.DelegatingReader{ - CacheReader: cache, - ClientReader: c, - }, - Writer: c, - StatusClient: c, - }, nil + o = o.setLeaderElectionConfig(newObj) + + if o.SyncPeriod == nil && newObj.SyncPeriod != nil { + o.SyncPeriod = &newObj.SyncPeriod.Duration + } + + if o.Namespace == "" && newObj.CacheNamespace != "" { + o.Namespace = newObj.CacheNamespace + } + + if o.MetricsBindAddress == "" && newObj.Metrics.BindAddress != "" { + o.MetricsBindAddress = newObj.Metrics.BindAddress + } + + if o.HealthProbeBindAddress == "" && newObj.Health.HealthProbeBindAddress != "" { + o.HealthProbeBindAddress = newObj.Health.HealthProbeBindAddress + } + + if o.ReadinessEndpointName == "" && newObj.Health.ReadinessEndpointName != "" { + o.ReadinessEndpointName = newObj.Health.ReadinessEndpointName + } + + if o.LivenessEndpointName == "" && newObj.Health.LivenessEndpointName != "" { + o.LivenessEndpointName = newObj.Health.LivenessEndpointName + } + + if o.Port == 0 && newObj.Webhook.Port != nil { + o.Port = *newObj.Webhook.Port + } + + if o.Host == "" && newObj.Webhook.Host != "" { + o.Host = newObj.Webhook.Host + } + + if o.CertDir == "" && newObj.Webhook.CertDir != "" { + o.CertDir = newObj.Webhook.CertDir + } + + return o, nil +} + +// AndFromOrDie will use options.AndFrom() and will panic if there are errors +func (o Options) AndFromOrDie(loader 
config.ControllerManagerConfiguration) Options { + o, err := o.AndFrom(loader) + if err != nil { + panic(fmt.Sprintf("could not parse config file: %v", err)) + } + return o +} + +func (o Options) setLeaderElectionConfig(obj v1alpha1.ControllerManagerConfigurationSpec) Options { + if o.LeaderElection == false && obj.LeaderElection.LeaderElect != nil { + o.LeaderElection = *obj.LeaderElection.LeaderElect + } + + if o.LeaderElectionResourceLock == "" && obj.LeaderElection.ResourceLock != "" { + o.LeaderElectionResourceLock = obj.LeaderElection.ResourceLock + } + + if o.LeaderElectionNamespace == "" && obj.LeaderElection.ResourceNamespace != "" { + o.LeaderElectionNamespace = obj.LeaderElection.ResourceNamespace + } + + if o.LeaderElectionID == "" && obj.LeaderElection.ResourceName != "" { + o.LeaderElectionID = obj.LeaderElection.ResourceName + } + + if o.LeaseDuration == nil && !reflect.DeepEqual(obj.LeaderElection.LeaseDuration, metav1.Duration{}) { + o.LeaseDuration = &obj.LeaderElection.LeaseDuration.Duration + } + + if o.RenewDeadline == nil && !reflect.DeepEqual(obj.LeaderElection.RenewDeadline, metav1.Duration{}) { + o.RenewDeadline = &obj.LeaderElection.RenewDeadline.Duration + } + + if o.RetryPeriod == nil && !reflect.DeepEqual(obj.LeaderElection.RetryPeriod, metav1.Duration{}) { + o.RetryPeriod = &obj.LeaderElection.RetryPeriod.Duration + } + + return o } // defaultHealthProbeListener creates the default health probes listener bound to the given address @@ -414,9 +534,9 @@ func setOptionsDefaults(options Options) Options { } } - // Allow newClient to be mocked - if options.NewClient == nil { - options.NewClient = DefaultNewClient + // Allow the client builder to be mocked + if options.ClientBuilder == nil { + options.ClientBuilder = NewClientBuilder() } // Allow newCache to be mocked @@ -426,7 +546,7 @@ func setOptionsDefaults(options Options) Options { // Allow newRecorderProvider to be mocked if options.newRecorderProvider == nil { - 
options.newRecorderProvider = internalrecorder.NewProvider + options.newRecorderProvider = intrec.NewProvider } // Allow newResourceLock to be mocked @@ -451,7 +571,14 @@ func setOptionsDefaults(options Options) Options { } if options.EventBroadcaster == nil { - options.EventBroadcaster = record.NewBroadcaster() + // defer initialization to avoid leaking by default + options.makeBroadcaster = func() (record.EventBroadcaster, bool) { + return record.NewBroadcaster(), true + } + } else { + options.makeBroadcaster = func() (record.EventBroadcaster, bool) { + return options.EventBroadcaster, false + } } if options.ReadinessEndpointName == "" { @@ -472,7 +599,7 @@ func setOptionsDefaults(options Options) Options { } if options.Logger == nil { - options.Logger = logf.Log + options.Logger = logf.RuntimeLog.WithName("manager") } return options diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal.go index 08eaef7b420..9a85558f82d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal.go @@ -17,6 +17,7 @@ limitations under the License. package signals import ( + "context" "os" "os/signal" ) @@ -26,18 +27,19 @@ var onlyOneSignalHandler = make(chan struct{}) // SetupSignalHandler registers for SIGTERM and SIGINT. A stop channel is returned // which is closed on one of these signals. If a second signal is caught, the program // is terminated with exit code 1. -func SetupSignalHandler() (stopCh <-chan struct{}) { +func SetupSignalHandler() context.Context { close(onlyOneSignalHandler) // panics when called twice - stop := make(chan struct{}) + ctx, cancel := context.WithCancel(context.Background()) + c := make(chan os.Signal, 2) signal.Notify(c, shutdownSignals...) go func() { <-c - close(stop) + cancel() <-c os.Exit(1) // second signal. Exit directly. 
}() - return stop + return ctx } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go index 792e225f7cc..71862f51a6d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go @@ -47,7 +47,6 @@ const ( var ( // client metrics - requestResult = prometheus.NewCounterVec(prometheus.CounterOpts{ Subsystem: RestClientSubsystem, Name: ResultKey, diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go index 66f3e431be6..cdc4d8efdbf 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go @@ -17,9 +17,11 @@ limitations under the License. package predicate import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" + "reflect" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" ) @@ -44,6 +46,7 @@ type Predicate interface { var _ Predicate = Funcs{} var _ Predicate = ResourceVersionChangedPredicate{} var _ Predicate = GenerationChangedPredicate{} +var _ Predicate = AnnotationChangedPredicate{} var _ Predicate = or{} var _ Predicate = and{} @@ -97,19 +100,19 @@ func (p Funcs) Generic(e event.GenericEvent) bool { // NewPredicateFuncs returns a predicate funcs that applies the given filter function // on CREATE, UPDATE, DELETE and GENERIC events. For UPDATE events, the filter is applied // to the new object. 
-func NewPredicateFuncs(filter func(meta metav1.Object, object runtime.Object) bool) Funcs { +func NewPredicateFuncs(filter func(object client.Object) bool) Funcs { return Funcs{ CreateFunc: func(e event.CreateEvent) bool { - return filter(e.Meta, e.Object) + return filter(e.Object) }, UpdateFunc: func(e event.UpdateEvent) bool { - return filter(e.MetaNew, e.ObjectNew) + return filter(e.ObjectNew) }, DeleteFunc: func(e event.DeleteEvent) bool { - return filter(e.Meta, e.Object) + return filter(e.Object) }, GenericFunc: func(e event.GenericEvent) bool { - return filter(e.Meta, e.Object) + return filter(e.Object) }, } } @@ -121,23 +124,16 @@ type ResourceVersionChangedPredicate struct { // Update implements default UpdateEvent filter for validating resource version change func (ResourceVersionChangedPredicate) Update(e event.UpdateEvent) bool { - if e.MetaOld == nil { - log.Error(nil, "UpdateEvent has no old metadata", "event", e) - return false - } if e.ObjectOld == nil { - log.Error(nil, "GenericEvent has no old runtime object to update", "event", e) + log.Error(nil, "Update event has no old object to update", "event", e) return false } if e.ObjectNew == nil { - log.Error(nil, "GenericEvent has no new runtime object for update", "event", e) + log.Error(nil, "Update event has no new object to update", "event", e) return false } - if e.MetaNew == nil { - log.Error(nil, "UpdateEvent has no new metadata", "event", e) - return false - } - return e.MetaNew.GetResourceVersion() != e.MetaOld.GetResourceVersion() + + return e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion() } // GenerationChangedPredicate implements a default update predicate function on Generation change. 
@@ -162,23 +158,46 @@ type GenerationChangedPredicate struct { // Update implements default UpdateEvent filter for validating generation change func (GenerationChangedPredicate) Update(e event.UpdateEvent) bool { - if e.MetaOld == nil { - log.Error(nil, "Update event has no old metadata", "event", e) - return false - } if e.ObjectOld == nil { - log.Error(nil, "Update event has no old runtime object to update", "event", e) + log.Error(nil, "Update event has no old object to update", "event", e) return false } if e.ObjectNew == nil { - log.Error(nil, "Update event has no new runtime object for update", "event", e) + log.Error(nil, "Update event has no new object for update", "event", e) + return false + } + + return e.ObjectNew.GetGeneration() != e.ObjectOld.GetGeneration() +} + +// AnnotationChangedPredicate implements a default update predicate function on annotation change. +// +// This predicate will skip update events that have no change in the object's annotation. +// It is intended to be used in conjunction with the GenerationChangedPredicate, as in the following example: +// +// Controller.Watch( +// &source.Kind{Type: v1.MyCustomKind}, +// &handler.EnqueueRequestForObject{}, +// predicate.Or(predicate.GenerationChangedPredicate{}, predicate.AnnotationChangedPredicate{})) +// +// This is mostly useful for controllers that needs to trigger both when the resource's generation is incremented +// (i.e., when the resource' .spec changes), or an annotation changes (e.g., for a staging/alpha API). 
+type AnnotationChangedPredicate struct { + Funcs +} + +// Update implements default UpdateEvent filter for validating annotation change +func (AnnotationChangedPredicate) Update(e event.UpdateEvent) bool { + if e.ObjectOld == nil { + log.Error(nil, "Update event has no old object to update", "event", e) return false } - if e.MetaNew == nil { - log.Error(nil, "Update event has no new metadata", "event", e) + if e.ObjectNew == nil { + log.Error(nil, "Update event has no new object for update", "event", e) return false } - return e.MetaNew.GetGeneration() != e.MetaOld.GetGeneration() + + return !reflect.DeepEqual(e.ObjectNew.GetAnnotations(), e.ObjectOld.GetAnnotations()) } // And returns a composite predicate that implements a logical AND of the predicates passed to it. @@ -270,3 +289,15 @@ func (o or) Generic(e event.GenericEvent) bool { } return false } + +// LabelSelectorPredicate constructs a Predicate from a LabelSelector. +// Only objects matching the LabelSelector will be admitted. +func LabelSelectorPredicate(s metav1.LabelSelector) (Predicate, error) { + selector, err := metav1.LabelSelectorAsSelector(&s) + if err != nil { + return Funcs{}, err + } + return NewPredicateFuncs(func(o client.Object) bool { + return selector.Matches(labels.Set(o.GetLabels())) + }), nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go index c6f7f64a65e..b2159c531f6 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go @@ -17,6 +17,7 @@ limitations under the License. package reconcile import ( + "context" "time" "k8s.io/apimachinery/pkg/types" @@ -89,13 +90,13 @@ type Reconciler interface { // Reconciler performs a full reconciliation for the object referred to by the Request. 
// The Controller will requeue the Request to be processed again if an error is non-nil or // Result.Requeue is true, otherwise upon completion it will remove the work from the queue. - Reconcile(Request) (Result, error) + Reconcile(context.Context, Request) (Result, error) } // Func is a function that implements the reconcile interface. -type Func func(Request) (Result, error) +type Func func(context.Context, Request) (Result, error) var _ Reconciler = Func(nil) // Reconcile implements Reconciler. -func (r Func) Reconcile(o Request) (Result, error) { return r(o) } +func (r Func) Reconcile(ctx context.Context, o Request) (Result, error) { return r(ctx, o) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/signals/signal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/signals/signal.go deleted file mode 100644 index 64bbcef3688..00000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/signals/signal.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package signals contains libraries for handling signals to gracefully -// shutdown the manager in combination with Kubernetes pod graceful termination -// policy. -// -// Deprecated: use pkg/manager/signals instead. -package signals - -import ( - "sigs.k8s.io/controller-runtime/pkg/manager/signals" -) - -var ( - // SetupSignalHandler registers for SIGTERM and SIGINT. 
A stop channel is returned - // which is closed on one of these signals. If a second signal is caught, the program - // is terminated with exit code 1. - SetupSignalHandler = signals.SetupSignalHandler -) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go b/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go index 871cd441f9b..9dc93a9b213 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go @@ -46,7 +46,7 @@ limitations under the License. // } // // func main() { -// mgr := controllers.NewManager(controllers.GetConfigOrDie(), manager.Options{ +// mgr := controllers.NewManager(context.Background(), controllers.GetConfigOrDie(), manager.Options{ // Scheme: scheme, // }) // // ... @@ -66,7 +66,7 @@ type Builder struct { runtime.SchemeBuilder } -// Register adds one or objects to the SchemeBuilder so they can be added to a Scheme. Register mutates bld. +// Register adds one or more objects to the SchemeBuilder so they can be added to a Scheme. Register mutates bld. func (bld *Builder) Register(object ...runtime.Object) *Builder { bld.SchemeBuilder.Register(func(scheme *runtime.Scheme) error { scheme.AddKnownTypes(bld.GroupVersion, object...) 
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go b/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go index e5963079dea..33c4c413482 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go @@ -19,15 +19,13 @@ package internal import ( "fmt" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/predicate" ) @@ -46,20 +44,11 @@ type EventHandler struct { func (e EventHandler) OnAdd(obj interface{}) { c := event.CreateEvent{} - // Pull metav1.Object out of the object - if o, err := meta.Accessor(obj); err == nil { - c.Meta = o - } else { - log.Error(err, "OnAdd missing Meta", - "object", obj, "type", fmt.Sprintf("%T", obj)) - return - } - - // Pull the runtime.Object out of the object - if o, ok := obj.(runtime.Object); ok { + // Pull Object out of the object + if o, ok := obj.(client.Object); ok { c.Object = o } else { - log.Error(nil, "OnAdd missing runtime.Object", + log.Error(nil, "OnAdd missing Object", "object", obj, "type", fmt.Sprintf("%T", obj)) return } @@ -78,17 +67,7 @@ func (e EventHandler) OnAdd(obj interface{}) { func (e EventHandler) OnUpdate(oldObj, newObj interface{}) { u := event.UpdateEvent{} - // Pull metav1.Object out of the object - if o, err := meta.Accessor(oldObj); err == nil { - u.MetaOld = o - } else { - log.Error(err, "OnUpdate missing MetaOld", - "object", oldObj, "type", fmt.Sprintf("%T", oldObj)) - return - } - - // Pull the runtime.Object out of the object - if o, ok := oldObj.(runtime.Object); ok { + if o, ok := 
oldObj.(client.Object); ok { u.ObjectOld = o } else { log.Error(nil, "OnUpdate missing ObjectOld", @@ -96,21 +75,12 @@ func (e EventHandler) OnUpdate(oldObj, newObj interface{}) { return } - // Pull metav1.Object out of the object - if o, err := meta.Accessor(newObj); err == nil { - u.MetaNew = o - } else { - log.Error(err, "OnUpdate missing MetaNew", - "object", newObj, "type", fmt.Sprintf("%T", newObj)) - return - } - - // Pull the runtime.Object out of the object - if o, ok := newObj.(runtime.Object); ok { + // Pull Object out of the object + if o, ok := newObj.(client.Object); ok { u.ObjectNew = o } else { log.Error(nil, "OnUpdate missing ObjectNew", - "object", oldObj, "type", fmt.Sprintf("%T", oldObj)) + "object", newObj, "type", fmt.Sprintf("%T", newObj)) return } @@ -134,7 +104,7 @@ func (e EventHandler) OnDelete(obj interface{}) { // This should never happen if we aren't missing events, which we have concluded that we are not // and made decisions off of this belief. Maybe this shouldn't be here? 
var ok bool - if _, ok = obj.(metav1.Object); !ok { + if _, ok = obj.(client.Object); !ok { // If the object doesn't have Metadata, assume it is a tombstone object of type DeletedFinalStateUnknown tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { @@ -148,20 +118,11 @@ func (e EventHandler) OnDelete(obj interface{}) { obj = tombstone.Obj } - // Pull metav1.Object out of the object - if o, err := meta.Accessor(obj); err == nil { - d.Meta = o - } else { - log.Error(err, "OnDelete missing Meta", - "object", obj, "type", fmt.Sprintf("%T", obj)) - return - } - - // Pull the runtime.Object out of the object - if o, ok := obj.(runtime.Object); ok { + // Pull Object out of the object + if o, ok := obj.(client.Object); ok { d.Object = o } else { - log.Error(nil, "OnDelete missing runtime.Object", + log.Error(nil, "OnDelete missing Object", "object", obj, "type", fmt.Sprintf("%T", obj)) return } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go index b2c6b2bbc31..fe0e47150ad 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go @@ -23,8 +23,8 @@ import ( "sync" "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" @@ -54,20 +54,20 @@ const ( type Source interface { // Start is internal and should be called only by the Controller to register an EventHandler with the Informer // to enqueue reconcile.Requests. 
- Start(handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error + Start(context.Context, handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error } // SyncingSource is a source that needs syncing prior to being usable. The controller // will call its WaitForSync prior to starting workers. type SyncingSource interface { Source - WaitForSync(stop <-chan struct{}) error + WaitForSync(ctx context.Context) error } // NewKindWithCache creates a Source without InjectCache, so that it is assured that the given cache is used // and not overwritten. It can be used to watch objects in a different cluster by passing the cache // from that other cluster -func NewKindWithCache(object runtime.Object, cache cache.Cache) SyncingSource { +func NewKindWithCache(object client.Object, cache cache.Cache) SyncingSource { return &kindWithCache{kind: Kind{Type: object, cache: cache}} } @@ -75,19 +75,19 @@ type kindWithCache struct { kind Kind } -func (ks *kindWithCache) Start(handler handler.EventHandler, queue workqueue.RateLimitingInterface, +func (ks *kindWithCache) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, prct ...predicate.Predicate) error { - return ks.kind.Start(handler, queue, prct...) + return ks.kind.Start(ctx, handler, queue, prct...) } -func (ks *kindWithCache) WaitForSync(stop <-chan struct{}) error { - return ks.kind.WaitForSync(stop) +func (ks *kindWithCache) WaitForSync(ctx context.Context) error { + return ks.kind.WaitForSync(ctx) } // Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create) type Kind struct { // Type is the type of object to watch. e.g. 
&v1.Pod{} - Type runtime.Object + Type client.Object // cache used to watch APIs cache cache.Cache @@ -97,7 +97,7 @@ var _ SyncingSource = &Kind{} // Start is internal and should be called only by the Controller to register an EventHandler with the Informer // to enqueue reconcile.Requests. -func (ks *Kind) Start(handler handler.EventHandler, queue workqueue.RateLimitingInterface, +func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, prct ...predicate.Predicate) error { // Type should have been specified by the user. @@ -111,7 +111,7 @@ func (ks *Kind) Start(handler handler.EventHandler, queue workqueue.RateLimiting } // Lookup the Informer from the Cache and add an EventHandler which populates the Queue - i, err := ks.cache.GetInformer(context.TODO(), ks.Type) + i, err := ks.cache.GetInformer(ctx, ks.Type) if err != nil { if kindMatchErr, ok := err.(*meta.NoKindMatchError); ok { log.Error(err, "if kind is a CRD, it should be installed before calling Start", @@ -132,8 +132,8 @@ func (ks *Kind) String() string { // WaitForSync implements SyncingSource to allow controllers to wait with starting // workers until the cache is synced. -func (ks *Kind) WaitForSync(stop <-chan struct{}) error { - if !ks.cache.WaitForCacheSync(stop) { +func (ks *Kind) WaitForSync(ctx context.Context) error { + if !ks.cache.WaitForCacheSync(ctx) { // Would be great to return something more informative here return errors.New("cache did not sync") } @@ -195,6 +195,7 @@ func (cs *Channel) InjectStopChannel(stop <-chan struct{}) error { // Start implements Source and should only be called by the Controller. 
func (cs *Channel) Start( + ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, prct ...predicate.Predicate) error { @@ -213,12 +214,14 @@ func (cs *Channel) Start( cs.DestBufferSize = defaultBufferSize } + dst := make(chan event.GenericEvent, cs.DestBufferSize) + cs.dest = append(cs.dest, dst) + cs.once.Do(func() { // Distribute GenericEvents to all EventHandler / Queue pairs Watching this source - go cs.syncLoop() + go cs.syncLoop(ctx) }) - dst := make(chan event.GenericEvent, cs.DestBufferSize) go func() { for evt := range dst { shouldHandle := true @@ -238,8 +241,6 @@ func (cs *Channel) Start( cs.destLock.Lock() defer cs.destLock.Unlock() - cs.dest = append(cs.dest, dst) - return nil } @@ -266,10 +267,10 @@ func (cs *Channel) distribute(evt event.GenericEvent) { } } -func (cs *Channel) syncLoop() { +func (cs *Channel) syncLoop(ctx context.Context) { for { select { - case <-cs.stop: + case <-ctx.Done(): // Close destination channels cs.doStop() return @@ -289,7 +290,7 @@ var _ Source = &Informer{} // Start is internal and should be called only by the Controller to register an EventHandler with the Informer // to enqueue reconcile.Requests. -func (is *Informer) Start(handler handler.EventHandler, queue workqueue.RateLimitingInterface, +func (is *Informer) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, prct ...predicate.Predicate) error { // Informer should have been specified by the user. 
@@ -308,12 +309,12 @@ func (is *Informer) String() string { var _ Source = Func(nil) // Func is a function that implements Source -type Func func(handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error +type Func func(context.Context, handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error // Start implements Source -func (f Func) Start(evt handler.EventHandler, queue workqueue.RateLimitingInterface, +func (f Func) Start(ctx context.Context, evt handler.EventHandler, queue workqueue.RateLimitingInterface, pr ...predicate.Predicate) error { - return f(evt, queue, pr...) + return f(ctx, evt, queue, pr...) } func (f Func) String() string { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go index 0a8de064245..6899011c9a7 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go @@ -24,9 +24,10 @@ import ( "io/ioutil" "net/http" + v1 "k8s.io/api/admission/v1" "k8s.io/api/admission/v1beta1" - admissionv1beta1 "k8s.io/api/admission/v1beta1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) @@ -35,7 +36,8 @@ var admissionScheme = runtime.NewScheme() var admissionCodecs = serializer.NewCodecFactory(admissionScheme) func init() { - utilruntime.Must(admissionv1beta1.AddToScheme(admissionScheme)) + utilruntime.Must(v1.AddToScheme(admissionScheme)) + utilruntime.Must(v1beta1.AddToScheme(admissionScheme)) } var _ http.Handler = &Webhook{} @@ -70,12 +72,19 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } + // Both v1 and v1beta1 AdmissionReview types are exactly the same, so the v1beta1 type can + // be decoded into the v1 type. 
However the runtime codec's decoder guesses which type to + // decode into by type name if an Object's TypeMeta isn't set. By setting TypeMeta of an + // unregistered type to the v1 GVK, the decoder will coerce a v1beta1 AdmissionReview to v1. + // The actual AdmissionReview GVK will be used to write a typed response in case the + // webhook config permits multiple versions, otherwise this response will fail. req := Request{} - ar := v1beta1.AdmissionReview{ - // avoid an extra copy - Request: &req.AdmissionRequest, - } - if _, _, err := admissionCodecs.UniversalDeserializer().Decode(body, nil, &ar); err != nil { + ar := unversionedAdmissionReview{} + // avoid an extra copy + ar.Request = &req.AdmissionRequest + ar.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("AdmissionReview")) + _, actualAdmRevGVK, err := admissionCodecs.UniversalDeserializer().Decode(body, nil, &ar) + if err != nil { wh.log.Error(err, "unable to decode the request") reviewResponse = Errored(http.StatusBadRequest, err) wh.writeResponse(w, reviewResponse) @@ -85,20 +94,39 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { // TODO: add panic-recovery for Handle reviewResponse = wh.Handle(r.Context(), req) - wh.writeResponse(w, reviewResponse) + wh.writeResponseTyped(w, reviewResponse, actualAdmRevGVK) } +// writeResponse writes response to w generically, i.e. without encoding GVK information. func (wh *Webhook) writeResponse(w io.Writer, response Response) { - encoder := json.NewEncoder(w) - responseAdmissionReview := v1beta1.AdmissionReview{ + wh.writeAdmissionResponse(w, v1.AdmissionReview{Response: &response.AdmissionResponse}) +} + +// writeResponseTyped writes response to w with GVK set to admRevGVK, which is necessary +// if multiple AdmissionReview versions are permitted by the webhook. 
+func (wh *Webhook) writeResponseTyped(w io.Writer, response Response, admRevGVK *schema.GroupVersionKind) { + ar := v1.AdmissionReview{ Response: &response.AdmissionResponse, } - err := encoder.Encode(responseAdmissionReview) + // Default to a v1 AdmissionReview, otherwise the API server may not recognize the request + // if multiple AdmissionReview versions are permitted by the webhook config. + // TODO(estroz): this should be configurable since older API servers won't know about v1. + if admRevGVK == nil || *admRevGVK == (schema.GroupVersionKind{}) { + ar.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("AdmissionReview")) + } else { + ar.SetGroupVersionKind(*admRevGVK) + } + wh.writeAdmissionResponse(w, ar) +} + +// writeAdmissionResponse writes ar to w. +func (wh *Webhook) writeAdmissionResponse(w io.Writer, ar v1.AdmissionReview) { + err := json.NewEncoder(w).Encode(ar) if err != nil { wh.log.Error(err, "unable to encode the response") wh.writeResponse(w, Errored(http.StatusInternalServerError, err)) } else { - res := responseAdmissionReview.Response + res := ar.Response if log := wh.log; log.V(1).Enabled() { if res.Result != nil { log = log.WithValues("code", res.Result.Code, "reason", res.Result.Reason) @@ -107,3 +135,10 @@ func (wh *Webhook) writeResponse(w io.Writer, response Response) { } } } + +// unversionedAdmissionReview is used to decode both v1 and v1beta1 AdmissionReview types. 
+type unversionedAdmissionReview struct { + v1.AdmissionReview +} + +var _ runtime.Object = &unversionedAdmissionReview{} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go index 3b1625c69b2..e6179d37290 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go @@ -22,9 +22,10 @@ import ( "fmt" "net/http" - "gomodules.xyz/jsonpatch/v2" - admissionv1beta1 "k8s.io/api/admission/v1beta1" + jsonpatch "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) @@ -37,10 +38,10 @@ func (hs multiMutating) Handle(ctx context.Context, req Request) Response { if !resp.Allowed { return resp } - if resp.PatchType != nil && *resp.PatchType != admissionv1beta1.PatchTypeJSONPatch { + if resp.PatchType != nil && *resp.PatchType != admissionv1.PatchTypeJSONPatch { return Errored(http.StatusInternalServerError, fmt.Errorf("unexpected patch type returned by the handler: %v, only allow: %v", - resp.PatchType, admissionv1beta1.PatchTypeJSONPatch)) + resp.PatchType, admissionv1.PatchTypeJSONPatch)) } patches = append(patches, resp.Patches...) 
} @@ -50,13 +51,13 @@ func (hs multiMutating) Handle(ctx context.Context, req Request) Response { return Errored(http.StatusBadRequest, fmt.Errorf("error when marshaling the patch: %w", err)) } return Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: true, Result: &metav1.Status{ Code: http.StatusOK, }, Patch: marshaledPatch, - PatchType: func() *admissionv1beta1.PatchType { pt := admissionv1beta1.PatchTypeJSONPatch; return &pt }(), + PatchType: func() *admissionv1.PatchType { pt := admissionv1.PatchTypeJSONPatch; return &pt }(), }, } } @@ -94,7 +95,7 @@ func (hs multiValidating) Handle(ctx context.Context, req Request) Response { } } return Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: true, Result: &metav1.Status{ Code: http.StatusOK, diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go index da8e3367889..541118498ba 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go @@ -19,9 +19,8 @@ package admission import ( "net/http" - "gomodules.xyz/jsonpatch/v2" - - admissionv1beta1 "k8s.io/api/admission/v1beta1" + jsonpatch "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -50,7 +49,7 @@ func Patched(reason string, patches ...jsonpatch.JsonPatchOperation) Response { // Errored creates a new Response for error-handling a request. 
func Errored(code int32, err error) Response { return Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ Code: code, @@ -67,7 +66,7 @@ func ValidationResponse(allowed bool, reason string) Response { code = http.StatusOK } resp := Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: allowed, Result: &metav1.Status{ Code: int32(code), @@ -90,9 +89,27 @@ func PatchResponseFromRaw(original, current []byte) Response { } return Response{ Patches: patches, - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: true, - PatchType: func() *admissionv1beta1.PatchType { pt := admissionv1beta1.PatchTypeJSONPatch; return &pt }(), + PatchType: func() *admissionv1.PatchType { pt := admissionv1.PatchTypeJSONPatch; return &pt }(), + }, + } +} + +// validationResponseFromStatus returns a response for admitting a request with provided Status object. +func validationResponseFromStatus(allowed bool, status metav1.Status) Response { + resp := Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: allowed, + Result: &status, }, } + return resp +} + +// WithWarnings adds the given warnings to the Response. +// If any warnings were already given, they will not be overwritten. +func (r Response) WithWarnings(warnings ...string) Response { + r.AdmissionResponse.Warnings = append(r.AdmissionResponse.Warnings, warnings...) 
+ return r } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go index 6defcfe92fe..926d4a5bd1a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go @@ -18,9 +18,11 @@ package admission import ( "context" + goerrors "errors" "net/http" - "k8s.io/api/admission/v1beta1" + v1 "k8s.io/api/admission/v1" + "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" ) @@ -60,7 +62,7 @@ func (h *validatingHandler) Handle(ctx context.Context, req Request) Response { // Get the object in the request obj := h.validator.DeepCopyObject().(Validator) - if req.Operation == v1beta1.Create { + if req.Operation == v1.Create { err := h.decoder.Decode(req, obj) if err != nil { return Errored(http.StatusBadRequest, err) @@ -68,11 +70,15 @@ func (h *validatingHandler) Handle(ctx context.Context, req Request) Response { err = obj.ValidateCreate() if err != nil { + var apiStatus errors.APIStatus + if goerrors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()) + } return Denied(err.Error()) } } - if req.Operation == v1beta1.Update { + if req.Operation == v1.Update { oldObj := obj.DeepCopyObject() err := h.decoder.DecodeRaw(req.Object, obj) @@ -86,11 +92,15 @@ func (h *validatingHandler) Handle(ctx context.Context, req Request) Response { err = obj.ValidateUpdate(oldObj) if err != nil { + var apiStatus errors.APIStatus + if goerrors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()) + } return Denied(err.Error()) } } - if req.Operation == v1beta1.Delete { + if req.Operation == v1.Delete { // In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346 // OldObject contains the object being deleted err := h.decoder.DecodeRaw(req.OldObject, obj) @@ -100,6 +110,10 @@ func (h 
*validatingHandler) Handle(ctx context.Context, req Request) Response { err = obj.ValidateDelete() if err != nil { + var apiStatus errors.APIStatus + if goerrors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()) + } return Denied(err.Error()) } } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go index 8f3430efbf5..485f1758a42 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go @@ -22,8 +22,8 @@ import ( "net/http" "github.com/go-logr/logr" - "gomodules.xyz/jsonpatch/v2" - admissionv1beta1 "k8s.io/api/admission/v1beta1" + jsonpatch "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/json" @@ -41,7 +41,7 @@ var ( // name, namespace), as well as the operation in question // (e.g. Get, Create, etc), and the object itself. type Request struct { - admissionv1beta1.AdmissionRequest + admissionv1.AdmissionRequest } // Response is the output of an admission handler. @@ -57,7 +57,7 @@ type Response struct { Patches []jsonpatch.JsonPatchOperation // AdmissionResponse is the raw admission response. // The Patch field in it will be overwritten by the listed patches. 
- admissionv1beta1.AdmissionResponse + admissionv1.AdmissionResponse } // Complete populates any fields that are yet to be set in @@ -84,7 +84,7 @@ func (r *Response) Complete(req Request) error { if err != nil { return err } - patchType := admissionv1beta1.PatchTypeJSONPatch + patchType := admissionv1.PatchTypeJSONPatch r.PatchType = &patchType return nil diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher/certwatcher.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher/certwatcher.go index bd797fd738e..d681ef2a6bd 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher/certwatcher.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher/certwatcher.go @@ -17,6 +17,7 @@ limitations under the License. package certwatcher import ( + "context" "crypto/tls" "sync" @@ -69,7 +70,7 @@ func (cw *CertWatcher) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, } // Start starts the watch on the certificate and key files. -func (cw *CertWatcher) Start(stopCh <-chan struct{}) error { +func (cw *CertWatcher) Start(ctx context.Context) error { files := []string{cw.certPath, cw.keyPath} for _, f := range files { @@ -82,8 +83,8 @@ func (cw *CertWatcher) Start(stopCh <-chan struct{}) error { log.Info("Starting certificate watcher") - // Block until the stop channel is closed. - <-stopCh + // Block until the context is done. + <-ctx.Done() return cw.watcher.Close() } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go index e771a936cbf..721df490a04 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go @@ -37,7 +37,7 @@ import ( ) // DefaultPort is the default port that the webhook server serves. 
-var DefaultPort = 443 +var DefaultPort = 9443 // Server is an admission webhook server that can serve traffic and // generates related k8s resources for deploying. @@ -47,7 +47,7 @@ type Server struct { Host string // Port is the port number that the server will serve. - // It will be defaulted to 443 if unspecified. + // It will be defaulted to 9443 if unspecified. Port int // CertDir is the directory that contains the server key and certificate. The @@ -76,6 +76,9 @@ type Server struct { // defaultingOnce ensures that the default fields are only ever set once. defaultingOnce sync.Once + + // mu protects access to the webhook map & setFields for Start, Register, etc + mu sync.Mutex } // setDefaults does defaulting for the Server. @@ -111,6 +114,9 @@ func (*Server) NeedLeaderElection() bool { // Register marks the given webhook as being served at the given path. // It panics if two hooks are registered on the same path. func (s *Server) Register(path string, hook http.Handler) { + s.mu.Lock() + defer s.mu.Unlock() + s.defaultingOnce.Do(s.setDefaults) _, found := s.webhooks[path] if found { @@ -119,7 +125,28 @@ func (s *Server) Register(path string, hook http.Handler) { // TODO(directxman12): call setfields if we've already started the server s.webhooks[path] = hook s.WebhookMux.Handle(path, instrumentedHook(path, hook)) - log.Info("registering webhook", "path", path) + + regLog := log.WithValues("path", path) + regLog.Info("registering webhook") + + // we've already been "started", inject dependencies here. + // Otherwise, InjectFunc will do this for us later. 
+ if s.setFields != nil { + if err := s.setFields(hook); err != nil { + // TODO(directxman12): swallowing this error isn't great, but we'd have to + // change the signature to fix that + regLog.Error(err, "unable to inject fields into webhook during registration") + } + + baseHookLog := log.WithName("webhooks") + + // NB(directxman12): we don't propagate this further by wrapping setFields because it's + // unclear if this is how we want to deal with log propagation. In this specific instance, + // we want to be able to pass a logger to webhooks because they don't know their own path. + if _, err := inject.LoggerInto(baseHookLog.WithValues("webhook", path), hook); err != nil { + regLog.Error(err, "unable to logger into webhook during registration") + } + } } // instrumentedHook adds some instrumentation on top of the given webhook. @@ -145,27 +172,12 @@ func instrumentedHook(path string, hookRaw http.Handler) http.Handler { // Start runs the server. // It will install the webhook related resources depend on the server configuration. -func (s *Server) Start(stop <-chan struct{}) error { +func (s *Server) Start(ctx context.Context) error { s.defaultingOnce.Do(s.setDefaults) baseHookLog := log.WithName("webhooks") baseHookLog.Info("starting webhook server") - // inject fields here as opposed to in Register so that we're certain to have our setFields - // function available. - for hookPath, webhook := range s.webhooks { - if err := s.setFields(webhook); err != nil { - return err - } - - // NB(directxman12): we don't propagate this further by wrapping setFields because it's - // unclear if this is how we want to deal with log propagation. In this specific instance, - // we want to be able to pass a logger to webhooks because they don't know their own path. 
- if _, err := inject.LoggerInto(baseHookLog.WithValues("webhook", hookPath), webhook); err != nil { - return err - } - } - certPath := filepath.Join(s.CertDir, s.CertName) keyPath := filepath.Join(s.CertDir, s.KeyName) @@ -175,7 +187,7 @@ func (s *Server) Start(stop <-chan struct{}) error { } go func() { - if err := certWatcher.Start(stop); err != nil { + if err := certWatcher.Start(ctx); err != nil { log.Error(err, "certificate watcher error") } }() @@ -215,7 +227,7 @@ func (s *Server) Start(stop <-chan struct{}) error { idleConnsClosed := make(chan struct{}) go func() { - <-stop + <-ctx.Done() log.Info("shutting down webhook server") // TODO: use a context with reasonable timeout @@ -226,8 +238,7 @@ func (s *Server) Start(stop <-chan struct{}) error { close(idleConnsClosed) }() - err = srv.Serve(listener) - if err != nil && err != http.ErrServerClosed { + if err := srv.Serve(listener); err != nil && err != http.ErrServerClosed { return err } @@ -238,5 +249,20 @@ func (s *Server) Start(stop <-chan struct{}) error { // InjectFunc injects the field setter into the server. func (s *Server) InjectFunc(f inject.Func) error { s.setFields = f + + // inject fields here that weren't injected in Register because we didn't have setFields yet. + baseHookLog := log.WithName("webhooks") + for hookPath, webhook := range s.webhooks { + if err := s.setFields(webhook); err != nil { + return err + } + + // NB(directxman12): we don't propagate this further by wrapping setFields because it's + // unclear if this is how we want to deal with log propagation. In this specific instance, + // we want to be able to pass a logger to webhooks because they don't know their own path. + if _, err := inject.LoggerInto(baseHookLog.WithValues("webhook", hookPath), webhook); err != nil { + return err + } + } return nil }