diff --git a/Makefile b/Makefile
index 2979ae1..8a22487 100644
--- a/Makefile
+++ b/Makefile
@@ -68,7 +68,9 @@ docker-image:
 		--build-arg git_sha="$(GIT_SHA)" \
 		--build-arg version="$(VERSION)" \
 		.
-build-ttl.sh:
+
+.PHONY: build-ttl.sh
+build-ttl.sh: ## Build the EKCO Docker container and deploy it to ttl.sh for use in the development environment
 	docker build \
 		-t ttl.sh/${CURRENT_USER}/ekco:12h \
 		-f deploy/Dockerfile \
@@ -76,3 +78,8 @@ build-ttl.sh:
 		--build-arg version=dev \
 		.
 	docker push ttl.sh/${CURRENT_USER}/ekco:12h
+
+.PHONY: generate-mocks
+generate-mocks: ## Generate mocks for tests. More info: https://github.com/golang/mock
+	go install github.com/golang/mock/mockgen@v1.6.0
+	mockgen -source=pkg/k8s/exec.go -destination=pkg/k8s/mock/mock_exec.go
diff --git a/go.mod b/go.mod
index 8f62478..7c59527 100644
--- a/go.mod
+++ b/go.mod
@@ -6,21 +6,22 @@ require (
 	github.com/blang/semver v3.5.1+incompatible
 	github.com/coreos/go-systemd/v22 v22.4.0
 	github.com/gin-gonic/gin v1.8.1
+	github.com/golang/mock v1.6.0
 	github.com/google/martian v2.1.0+incompatible
 	github.com/hashicorp/go-multierror v1.1.1
 	github.com/pkg/errors v0.9.1
 	github.com/projectcontour/contour v1.19.1
-	github.com/rook/rook v1.9.12
-	github.com/spf13/cobra v1.4.0
+	github.com/rook/rook v1.10.6
+	github.com/spf13/cobra v1.6.0
 	github.com/spf13/viper v1.8.1
 	github.com/stretchr/testify v1.8.0
 	go.etcd.io/etcd/client/v3 v3.5.5
 	go.uber.org/zap v1.23.0
 	go.undefinedlabs.com/scopeagent v0.1.12
 	gopkg.in/yaml.v2 v2.4.0
-	k8s.io/api v0.25.2
-	k8s.io/apimachinery v0.25.2
-	k8s.io/client-go v0.25.2
+	k8s.io/api v0.25.3
+	k8s.io/apimachinery v0.25.3
+	k8s.io/client-go v0.25.3
 	k8s.io/kubernetes v0.0.0-00010101000000-000000000000
 	k8s.io/utils v0.0.0-20221012122500-cfd413dd9e85
 )
@@ -40,7 +41,6 @@ require (
 	github.com/armon/go-metrics v0.4.1 // indirect
 	github.com/armon/go-radix v1.0.0 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
-	github.com/aws/aws-sdk-go v1.44.114 // indirect
 	github.com/beevik/ntp v0.2.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/boombuler/barcode v1.0.1 // indirect
@@ -49,17 +49,17 @@ require (
 	github.com/circonus-labs/circonusllhist v0.1.5 // indirect
 	github.com/coreos/go-semver v0.3.0 // indirect
 	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
-	github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
+	github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/denverdino/aliyungo v0.0.0-20220929054937-e3c8bf5ad947 // indirect
 	github.com/dgryski/go-metro v0.0.0-20211217172704-adc40b04c140 // indirect
 	github.com/digitalocean/godo v1.86.0 // indirect
 	github.com/duosecurity/duo_api_golang v0.0.0-20220927171823-f4576e85b96c // indirect
 	github.com/emicklei/go-restful/v3 v3.9.0 // indirect
-	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
+	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
 	github.com/evanphx/json-patch/v5 v5.6.0 // indirect
 	github.com/fatih/color v1.13.0 // indirect
-	github.com/fsnotify/fsnotify v1.5.4 // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
 	github.com/gin-contrib/sse v0.1.0 // indirect
 	github.com/go-errors/errors v1.4.2 // indirect
 	github.com/go-logr/logr v1.2.3 // indirect
@@ -111,20 +111,21 @@ require (
 	github.com/hashicorp/raft v1.3.11 // indirect
 	github.com/hashicorp/raft-boltdb/v2 v2.2.2 // indirect
 	github.com/hashicorp/vault v1.12.0 // indirect
-	github.com/hashicorp/vault/api v1.8.0 // indirect
+	github.com/hashicorp/vault/api v1.8.1 // indirect
+	github.com/hashicorp/vault/api/auth/approle v0.3.0 // indirect
 	github.com/hashicorp/vault/sdk v0.6.1-0.20221010215534-6545e24b6023 // indirect
 	github.com/hashicorp/yamux v0.1.1 // indirect
 	github.com/imdario/mergo v0.3.13 // indirect
-	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/inconshreveable/mousetrap v1.0.1 // indirect
 	github.com/jefferai/jsonx v1.0.1 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/joyent/triton-go v1.8.5 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/keybase/go-crypto v0.0.0-20200123153347-de78d2cb44f4 // indirect
 	github.com/kr/pretty v0.3.1 // indirect
-	github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20220811192603-abc54caa34ac // indirect
+	github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20221103224036-07dde5c41fdd // indirect
 	github.com/leodido/go-urn v1.2.1 // indirect
-	github.com/libopenstorage/secrets v0.0.0-20210709082113-dde442ea20ec // indirect
+	github.com/libopenstorage/secrets v0.0.0-20220823020833-2ecadaf59d8a // indirect
 	github.com/linode/linodego v1.9.3 // indirect
 	github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect
 	github.com/magiconair/properties v1.8.6 // indirect
@@ -157,13 +158,14 @@ require (
 	github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect
 	github.com/pquerna/otp v1.3.0 // indirect
 	github.com/prometheus/client_golang v1.13.0 // indirect
-	github.com/prometheus/client_model v0.2.0 // indirect
+	github.com/prometheus/client_model v0.3.0 // indirect
 	github.com/prometheus/common v0.37.0 // indirect
 	github.com/prometheus/procfs v0.8.0 // indirect
 	github.com/ryanuber/go-glob v1.0.0 // indirect
 	github.com/sasha-s/go-deadlock v0.3.1 // indirect
 	github.com/shirou/gopsutil/v3 v3.22.9 // indirect
 	github.com/shopspring/decimal v1.3.1 // indirect
+	github.com/sirupsen/logrus v1.9.0 // indirect
 	github.com/softlayer/softlayer-go v1.0.6 // indirect
 	github.com/sony/gobreaker v0.5.0 // indirect
 	github.com/spf13/afero v1.9.2 // indirect
@@ -183,18 +185,17 @@ require (
 	go.uber.org/atomic v1.10.0 // indirect
 	go.uber.org/multierr v1.8.0 // indirect
 	golang.org/x/crypto v0.0.0-20221012134737-56aed061732a // indirect
-	golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458 // indirect
-	golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1 // indirect
-	golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 // indirect
-	golang.org/x/sys v0.0.0-20221010170243-090e33056c14 // indirect
-	golang.org/x/term v0.0.0-20220919170432-7a66f970e087 // indirect
-	golang.org/x/text v0.3.8 // indirect
-	golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
+	golang.org/x/net v0.0.0-20221017152216-f25eb7ecb193 // indirect
+	golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 // indirect
+	golang.org/x/sys v0.1.0 // indirect
+	golang.org/x/term v0.0.0-20221017184919-83659145692c // indirect
+	golang.org/x/text v0.4.0 // indirect
+	golang.org/x/time v0.1.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
 	google.golang.org/api v0.98.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e // indirect
-	google.golang.org/grpc v1.50.0 // indirect
+	google.golang.org/genproto v0.0.0-20221018160656-63c7b68cfc55 // indirect
+	google.golang.org/grpc v1.50.1 // indirect
 	google.golang.org/protobuf v1.28.1 // indirect
 	gopkg.in/inf.v0
v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect @@ -202,7 +203,7 @@ require ( gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/cluster-bootstrap v0.0.0 // indirect - k8s.io/component-base v0.25.2 // indirect + k8s.io/component-base v0.25.3 // indirect k8s.io/klog/v2 v2.80.1 // indirect k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect sigs.k8s.io/controller-runtime v0.13.0 // indirect @@ -212,7 +213,7 @@ require ( ) replace ( - // from https://github.com/rook/rook/blob/v1.9.12/go.mod + // from https://github.com/rook/rook/blob/v1.10.6/go.mod github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.4.1 github.com/kubernetes-incubator/external-storage => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc3 github.com/portworx/sched-ops => github.com/portworx/sched-ops v0.20.4-openstorage-rc3 @@ -251,7 +252,7 @@ replace ( k8s.io/sample-controller => k8s.io/sample-controller v0.25.2 ) -// from https://github.com/rook/rook/blob/v1.9.12/go.mod +// from https://github.com/rook/rook/blob/v1.10.6/go.mod exclude ( // This tag doesn't exist, but is imported by github.com/portworx/sched-ops. github.com/kubernetes-incubator/external-storage v0.20.4-openstorage-rc2 diff --git a/go.sum b/go.sum index 540a91e..0974131 100644 --- a/go.sum +++ b/go.sum @@ -216,6 +216,7 @@ github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQh github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= github.com/armon/go-metrics v0.3.1/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-metrics v0.3.11/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= @@ -239,8 +240,7 @@ github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.40.14/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= -github.com/aws/aws-sdk-go v1.44.114 h1:plIkWc/RsHr3DXBj4MEw9sEW4CcL/e2ryokc+CKyq1I= -github.com/aws/aws-sdk-go v1.44.114/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.118 h1:FJOqIRTukf7+Ulp047/k7JB6eqMXNnj7eb+coORThHQ= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/axiomhq/hyperloglog v0.0.0-20220105174342-98591331716a h1:eqjiAL3qooftPm8b9C1GsSSRcmlw7iOva8vdBTmV2PY= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= @@ -355,13 +355,15 @@ github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/coreos/go-systemd/v22 v22.4.0 h1:y9YHcjnjynCd/DVbg5j9L/33jQM3MxJlbj/zWskzfGU= github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f 
h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf h1:GOPo6vn/vTN+3IwZBvXX0y5doJfSC7My0cdzelyOCsQ= +github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpu/goacmedns v0.1.1/go.mod h1:MuaouqEhPAHxsbqjgnck5zeghuwBP1dLnPoobeGqugQ= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -441,8 +443,10 @@ github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMi github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= @@ -463,11 +467,12 @@ github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2 github.com/frankban/quicktest v1.4.0/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= 
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7/go.mod h1:GeIq9qoE43YdGnDXURnmKTnGg15pQz4mYkXSTChbneI= @@ -687,6 +692,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -861,6 +867,7 @@ github.com/hashicorp/go-hclog v0.10.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39 github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.3.1 h1:vDwF1DFNZhntP4DAjuTpOw3uEgMUpXh1pB5fW9DqHpo= github.com/hashicorp/go-hclog v1.3.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -868,11 +875,12 @@ github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjh github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-kms-wrapping v0.0.0-20191129225826-634facde9f88/go.mod h1:Pm+Umb/6Gij6ZG534L7QDyvkauaOQWGb+arj9aFjCE0= +github.com/hashicorp/go-kms-wrapping v0.5.1 h1:Ed6Z5gV3LY3J9Ora4cwxVmV8Hyt6CPOTrQoGIPry2Ew= github.com/hashicorp/go-kms-wrapping v0.5.1/go.mod h1:cGIibZmMx9qlxS1pZTUrEgGqA+7u3zJyvVYMhjU2bDs= -github.com/hashicorp/go-kms-wrapping v0.7.0 h1:UBagVJn4nSNOSjjtpkR370VOEBLnGMXfQcIlE/WL/7o= github.com/hashicorp/go-kms-wrapping/entropy v0.1.0 h1:xuTi5ZwjimfpvpL09jDE71smCBRpnF5xfo871BSX4gs= github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 h1:pSjQfW3vPtrOTcasTUKgCTQT7OGPPTTMVRrOfU6FJD8= +github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= github.com/hashicorp/go-kms-wrapping/v2 v2.0.5 h1:rOFDv+3k05mnW0oaDLffhVUwg03Csn0mvfO98Wdd2bE= github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.4 h1:ws2CPDuXMKwaBb2z/duBCdnB9pSxlN2nuDZWXcVj6RU= github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.1 h1:ydUCtmr8f9F+mHZ1iCsvzqFTXqNVpewX3s9zcYipMKI= @@ -892,6 +900,7 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.0.0/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= github.com/hashicorp/go-plugin 
v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= github.com/hashicorp/go-plugin v1.4.5 h1:oTE/oQR4eghggRg8VY7PAz3dr++VwDNBGCcOfIvHpBo= github.com/hashicorp/go-plugin v1.4.5/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a h1:FmnBDwGwlTgugDGbVxwV8UavqSMACbGrUpfc98yFLR4= @@ -907,16 +916,22 @@ github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR3 github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 h1:W9WN8p6moV1fjKLkeqEgkAMu5rauy9QeYDAmIaPuuiA= +github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 h1:ET4pqyjiGmY09R5y+rSd70J2w45CtbWDNvGqWp/R3Ng= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 h1:p4AKXPPS24tO8Wc8i1gLvSKdmkiSY5xuju57czJ/IJQ= github.com/hashicorp/go-secure-stdlib/mlock v0.1.2/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/password v0.1.1 h1:6JzmBqXprakgFEHwBgdchsjaA9x3GyjdI568bXKxa60= +github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= github.com/hashicorp/go-secure-stdlib/reloadutil v0.1.1 h1:SMGUnbpAcat8rIKHkBPjfv81yC46a8eCNZ2hsR2l1EI= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 h1:phcbL8urUzF/kxA/Oj6awENaRwfWsjP59GW7u2qlDyY= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= @@ -995,10 +1010,14 @@ github.com/hashicorp/vault/api v1.0.5-0.20191122173911-80fcc7907c78/go.mod h1:Uf github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o= github.com/hashicorp/vault/api v1.0.5-0.20200317185738-82f498082f02/go.mod h1:3f12BMfgDGjTsTtIUj+ZKZwSobQpZtYGFIEehOv5z1o= github.com/hashicorp/vault/api v1.0.5-0.20200519221902-385fac77e20f/go.mod h1:euTFbi2YJgwcju3imEt919lhJKF68nN1cQPq3aA+kBE= -github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk= github.com/hashicorp/vault/api v1.1.1/go.mod h1:29UXcn/1cLOPHQNMWA7bCz2By4PSd0VKPAydKXS5yN0= -github.com/hashicorp/vault/api 
v1.8.0 h1:7765sW1XBt+qf4XKIYE4ebY9qc/yi9V2/egzGSUNMZU= +github.com/hashicorp/vault/api v1.3.0/go.mod h1:EabNQLI0VWbWoGlA+oBLC8PXmR9D60aUVgQGvangFWQ= github.com/hashicorp/vault/api v1.8.0/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= +github.com/hashicorp/vault/api v1.8.1 h1:bMieWIe6dAlqAAPReZO/8zYtXaWUg/21umwqGZpEjCI= +github.com/hashicorp/vault/api v1.8.1/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= +github.com/hashicorp/vault/api/auth/approle v0.1.1/go.mod h1:mHOLgh//xDx4dpqXoq6tS8Ob0FoCFWLU2ibJ26Lfmag= +github.com/hashicorp/vault/api/auth/approle v0.3.0 h1:Ib0oCNXsCq/QZhPYtXPzJEbGS5WR/KoZf8c84QoFdkU= +github.com/hashicorp/vault/api/auth/approle v0.3.0/go.mod h1:hm51TbjzUkPO0Y17wkrpwOpvyyMRpXJNueTHiG04t3k= github.com/hashicorp/vault/sdk v0.1.8/go.mod h1:tHZfc6St71twLizWNHvnnbiGFo1aq0eD2jGPLtP8kAU= github.com/hashicorp/vault/sdk v0.1.14-0.20190730042320-0dc007d98cc8/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU= @@ -1009,8 +1028,9 @@ github.com/hashicorp/vault/sdk v0.1.14-0.20200317185738-82f498082f02/go.mod h1:W github.com/hashicorp/vault/sdk v0.1.14-0.20200427170607-03332aaf8d18/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= github.com/hashicorp/vault/sdk v0.1.14-0.20200429182704-29fce8f27ce4/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= github.com/hashicorp/vault/sdk v0.1.14-0.20200519221530-14615acda45f/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= -github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= github.com/hashicorp/vault/sdk v0.2.1/go.mod h1:WfUiO1vYzfBkz1TmoE4ZGU7HD0T0Cl/rZwaxjBkgN4U= +github.com/hashicorp/vault/sdk v0.3.0/go.mod h1:aZ3fNuL5VNydQk8GcLJ2TV8YCRVvyaakYkhZRoVuhj0= +github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= github.com/hashicorp/vault/sdk v0.6.1-0.20221010215534-6545e24b6023 h1:8fzBxqzx59FBVyvUAXmDsMhdOf8NQO+ZJO8chMWHHno= github.com/hashicorp/vault/sdk v0.6.1-0.20221010215534-6545e24b6023/go.mod h1:h25xhm657j/WX0QYIK43fGeEzaQ4zG/A55vRe+09Q2U= github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 h1:O/pT5C1Q3mVXMyuqg7yuAWUg/jMZR1/0QTzTRdNR6Uw= @@ -1034,8 +1054,9 @@ github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= +github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= @@ -1057,6 +1078,7 @@ github.com/jefferai/jsonx v1.0.1/go.mod h1:yFo3l2fcm7cZVHGq3HKLXE+Pd4RWuRjNBDHks github.com/jessevdk/go-flags 
v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jetstack/cert-manager v1.5.1/go.mod h1:YGW5O4iuy9SvAfnXCjZOu0B5Upsvg/FaWaqm5UuwkdI= github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -1120,8 +1142,8 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20220811192603-abc54caa34ac h1:7B0uHL8ttX9Az1vDaHK8JVVC+SY4riK6qgGyo+3e2D8= -github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20220811192603-abc54caa34ac/go.mod h1:my+EVjOJLeQ9lUR9uVkxRvNNkhO2saSGIgzV8GZT9HY= +github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20221103224036-07dde5c41fdd h1:w9t8utABD/pa2t1+J3KZH8zyNhJpDFUcIWcAxpcSbVI= +github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20221103224036-07dde5c41fdd/go.mod h1:my+EVjOJLeQ9lUR9uVkxRvNNkhO2saSGIgzV8GZT9HY= github.com/kubernetes-csi/external-snapshotter/client/v4 v4.0.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= @@ -1133,8 +1155,8 @@ github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libopenstorage/autopilot-api v0.6.1-0.20210128210103-5fbb67948648/go.mod h1:6JLrPbR3ZJQFbUY/+QJMl/aF00YdIrLf8/GWAplgvJs= github.com/libopenstorage/openstorage v8.0.0+incompatible/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= github.com/libopenstorage/operator v0.0.0-20200725001727-48d03e197117/go.mod h1:Qh+VXOB6hj60VmlgsmY+R1w+dFuHK246UueM4SAqZG0= -github.com/libopenstorage/secrets v0.0.0-20210709082113-dde442ea20ec h1:ezv9ybzCRb86E8aMgG7/GcNSRU/72D0BVEhkNjnCEz8= -github.com/libopenstorage/secrets v0.0.0-20210709082113-dde442ea20ec/go.mod h1:gE8rSd6lwLNXNbiW3DrRZjFMs+y4fDHy/6uiOO9cdzY= +github.com/libopenstorage/secrets v0.0.0-20220823020833-2ecadaf59d8a h1:dHCYranrn+6LzONAnhB3YPHBpMz4vP1IN8BsZNaY+IY= +github.com/libopenstorage/secrets v0.0.0-20220823020833-2ecadaf59d8a/go.mod h1:JqaGrr4zerBaTqX04dajFE14AHcDDrxvCq8nZ5/r4AU= github.com/libopenstorage/stork v1.3.0-beta1.0.20200630005842-9255e7a98775/go.mod h1:qBSzYTJVHlOMg5RINNiHD1kBzlasnrc2uKLPZLgu1Qs= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= @@ -1227,6 +1249,7 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.3.3/go.mod 
h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= @@ -1445,8 +1468,9 @@ github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1: github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= @@ -1490,8 +1514,8 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rook/rook v1.9.12 h1:WoDIDOj8ee4JqckZE+zP4BvAhtnkgqHgboMSnwDmHHo= -github.com/rook/rook v1.9.12/go.mod h1:yR0lQLOFQRKjItcigOLFIUWEhECZZL+ChCiWPKHCAWY= +github.com/rook/rook v1.10.6 h1:cWf5Gmv3Iyc13uei5w3PbAeXh+ue8QnLoGR5/ZXTQnY= +github.com/rook/rook v1.10.6/go.mod h1:JJRQmu8r4BbyX+Qn28L1A+Zufj0KFTICi4NJUPPuSwE= github.com/rs/zerolog v1.4.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351/go.mod h1:DCgfY80j8GYL7MLEfvcpSFvjD0L5yZq/aZUJmhZklyg= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= @@ -1532,6 +1556,7 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180725160413-e900ae048470/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= @@ -1566,8 +1591,9 @@ 
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHN github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= -github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= +github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= +github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -1841,6 +1867,7 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1916,8 +1943,8 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458 h1:MgJ6t2zo8v0tbmLCueaCbF1RM+TtB0rs3Lv8DGtOIpY= -golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221017152216-f25eb7ecb193 h1:3Moaxt4TfzNcQH6DWvlYKraN1ozhBXQHcgvXjRGeim0= +golang.org/x/net v0.0.0-20221017152216-f25eb7ecb193/go.mod h1:RpDiru2p0u2F0lLpEoqnP2+7xs0ifAuOcJ442g6GU2s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1944,8 +1971,8 @@ golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1 h1:3VPzK7eqH25j7GYw5w6g/GzNRc0/fYtrxz27z1gD4W0= -golang.org/x/oauth2 
v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1960,8 +1987,7 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 h1:cu5kTvlzcw1Q5S9f5ip1/cpiB4nXvw1XYzFPGgzLUOY= -golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2077,17 +2103,19 @@ golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20221010170243-090e33056c14 h1:k5II8e6QD8mITdi+okbbmR/cIyEbeXLBhy5Ha4nevyc= -golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220919170432-7a66f970e087 h1:tPwmk4vmvVCMdr98VgL4JH+qZxPL8fqlUOHnyOM8N3w= -golang.org/x/term 
v0.0.0-20220919170432-7a66f970e087/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20221017184919-83659145692c h1:dveknrit5futqEmXAvd2I1BbZIDhxRijsyWHM86NlcA= +golang.org/x/term v0.0.0-20221017184919-83659145692c/go.mod h1:VTIZ7TEbF0BS9Sv9lPTvGbtW8i4z6GGbJBCM37uMCzY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2098,8 +2126,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2110,8 +2138,8 @@ golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y= -golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2269,6 +2297,7 @@ google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 
@@ -2360,9 +2389,10 @@ google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP
 google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
 google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
 google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
-google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e h1:halCgTFuLWDRD61piiNSxPsARANGD3Xl16hPrLgLiIg=
-google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U=
+google.golang.org/genproto v0.0.0-20221018160656-63c7b68cfc55 h1:U1u4KB2kx6KR/aJDjQ97hZ15wQs8ZPvDcGcRynBhkvg=
+google.golang.org/genproto v0.0.0-20221018160656-63c7b68cfc55/go.mod h1:45EK0dUbEZ2NHjCeAd2LXmyjAgGUGrpGROgjhC3ADck=
 google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
 google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
@@ -2404,8 +2434,8 @@ google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11
 google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
 google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
 google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
-google.golang.org/grpc v1.50.0 h1:fPVVDxY9w++VjTZsYvXWqEf9Rqar/e+9zYfxKK+W+YU=
-google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY=
+google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
diff --git a/pkg/cluster/constants.go b/pkg/cluster/constants.go
index f291663..a6f0f65 100644
--- a/pkg/cluster/constants.go
+++ b/pkg/cluster/constants.go
@@ -3,11 +3,12 @@ package cluster
 
 import "k8s.io/apimachinery/pkg/labels"
 
 const (
-	RookCephNS                   = "rook-ceph"
-	CephClusterName              = "rook-ceph"
-	RookCephSharedFSMetadataPool = "rook-shared-fs-metadata"
-	RookCephSharedFSDataPool     = "rook-shared-fs-data0"
-	CephDeviceHealthMetricsPool  = "device_health_metrics"
+	RookCephNS                        = "rook-ceph"
+	CephClusterName                   = "rook-ceph"
+	RookCephSharedFSMetadataPool      = "rook-shared-fs-metadata"
+	RookCephSharedFSDataPool          = "rook-shared-fs-data0"
+	CephDeviceHealthMetricsPool       = "device_health_metrics"
+	CephDeviceHealthMetricsPoolQuincy = ".mgr"
 
 	RookCephObjectStoreRootPool = ".rgw.root"
diff --git a/pkg/cluster/controller.go b/pkg/cluster/controller.go
index 9bb73c2..95122ed 100644
--- a/pkg/cluster/controller.go
+++ b/pkg/cluster/controller.go
@@ -7,6 +7,7 @@ import (
 	"k8s.io/client-go/dynamic"
 
 	"github.com/blang/semver"
+	"github.com/replicatedhq/ekco/pkg/k8s"
 	cephv1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1"
 	"go.uber.org/zap"
 	"k8s.io/client-go/kubernetes"
@@ -23,7 +24,7 @@ var Rookv19 = semver.MustParse("1.9.0")
 type ControllerConfig struct {
 	Client          kubernetes.Interface
 	ClientConfig    *restclient.Config
-	CephV1          *cephv1.CephV1Client
+	CephV1          cephv1.CephV1Interface
 	AlertManagerV1  dynamic.NamespaceableResourceInterface
 	PrometheusV1    dynamic.NamespaceableResourceInterface
 	CertificatesDir string
@@ -52,12 +53,18 @@ type ControllerConfig struct {
 }
 
 type Controller struct {
-	Config ControllerConfig
-	Log    *zap.SugaredLogger
+	Config       ControllerConfig
+	SyncExecutor k8s.SyncExecutorInterface
+	Log          *zap.SugaredLogger
 
 	sync.Mutex
 }
 
 func NewController(config ControllerConfig, log *zap.SugaredLogger) *Controller {
-	return &Controller{Config: config, Log: log}
+	syncExecutor := k8s.NewSyncExecutor(config.Client.CoreV1(), config.ClientConfig)
+	return &Controller{
+		Config:       config,
+		SyncExecutor: syncExecutor,
+		Log:          log,
+	}
 }
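Note on testability: NewController now wires a k8s.SyncExecutorInterface seam, and the new generate-mocks Makefile target produces a gomock double for it. A minimal sketch of a test that injects that mock — assuming mockgen's default package name mock_k8s for pkg/k8s and the (exitCode, stdout, stderr, error) return shape visible at the internallb.go call site below — could look like:

package cluster_test

import (
	"testing"

	"github.com/golang/mock/gomock"
	mock_k8s "github.com/replicatedhq/ekco/pkg/k8s/mock"
)

func TestExecSeam(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// The generated constructor and EXPECT() API follow standard gomock
	// conventions; exact names depend on the interface in pkg/k8s/exec.go.
	exec := mock_k8s.NewMockSyncExecutorInterface(ctrl)
	exec.EXPECT().
		ExecContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
		Return(0, "", "", nil).
		AnyTimes()

	// A Controller built for the test would then swap the mock in, e.g.:
	// c := cluster.NewController(cfg, logger)
	// c.SyncExecutor = exec
	_ = exec
}

The switch from *cephv1.CephV1Client to the cephv1.CephV1Interface type serves the same purpose: fakes and mocks can now stand in for the Ceph clientset.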
"go.uber.org/zap" "k8s.io/client-go/kubernetes" @@ -23,7 +24,7 @@ var Rookv19 = semver.MustParse("1.9.0") type ControllerConfig struct { Client kubernetes.Interface ClientConfig *restclient.Config - CephV1 *cephv1.CephV1Client + CephV1 cephv1.CephV1Interface AlertManagerV1 dynamic.NamespaceableResourceInterface PrometheusV1 dynamic.NamespaceableResourceInterface CertificatesDir string @@ -52,12 +53,18 @@ type ControllerConfig struct { } type Controller struct { - Config ControllerConfig - Log *zap.SugaredLogger + Config ControllerConfig + SyncExecutor k8s.SyncExecutorInterface + Log *zap.SugaredLogger sync.Mutex } func NewController(config ControllerConfig, log *zap.SugaredLogger) *Controller { - return &Controller{Config: config, Log: log} + syncExecutor := k8s.NewSyncExecutor(config.Client.CoreV1(), config.ClientConfig) + return &Controller{ + Config: config, + SyncExecutor: syncExecutor, + Log: log, + } } diff --git a/pkg/cluster/internallb.go b/pkg/cluster/internallb.go index 6fbd892..ffbe9c5 100644 --- a/pkg/cluster/internallb.go +++ b/pkg/cluster/internallb.go @@ -9,7 +9,6 @@ import ( "time" "github.com/pkg/errors" - "github.com/replicatedhq/ekco/pkg/k8s" "github.com/replicatedhq/ekco/pkg/util" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -145,7 +144,7 @@ func (c *Controller) sighupPods(namespace string, selector labels.Selector, cont pod := pod go func() { defer wg.Done() - exitCode, _, stderr, err := k8s.SyncExec(c.Config.Client.CoreV1(), c.Config.ClientConfig, namespace, pod.Name, container, cmd...) + exitCode, _, stderr, err := c.SyncExecutor.ExecContainer(context.TODO(), namespace, pod.Name, container, cmd...) if err != nil { errs <- errors.Wrapf(err, "exec pod %s", pod.Name) } else if exitCode != 0 { diff --git a/pkg/cluster/rook_ceph.go b/pkg/cluster/rook_ceph.go index 4ea835e..c640032 100644 --- a/pkg/cluster/rook_ceph.go +++ b/pkg/cluster/rook_ceph.go @@ -5,9 +5,9 @@ import ( "bytes" "context" _ "embed" + "encoding/json" "fmt" "log" - "reflect" "regexp" "strconv" "strings" @@ -24,6 +24,24 @@ import ( "k8s.io/apimachinery/pkg/util/yaml" ) +const ( + // maxMonCount is the maximum number of mon replicas that should be + // deployed to a cluster + maxMonCount = 3 + + // minMonCount is the minimum number of mon replicas that should be + // deployed to a cluster + minMonCount = 1 + + // maxMgrCount is the maximum number of mgr replicas that should be + // deployed to a cluster + maxMgrCount = 2 + + // minMgrCount is the minimum number of mgr replicas that should be + // deployed to a cluster + minMgrCount = 1 +) + var cephErrENOENT = errors.New("Ceph ENOENT") var cephOSDStatusRX = regexp.MustCompile(`^\s*\d\s+(?P\S+)`) @@ -41,31 +59,44 @@ func init() { } // returns the number of nodes used for storage, which may be higher than the number of names passed in -// if a node is currenlty not ready but has not been purged +// if a node is currently not ready but has not been purged func (c *Controller) UseNodesForStorage(rookVersion semver.Version, names []string) (int, error) { cluster, err := c.GetCephCluster(context.TODO()) if err != nil { return 0, errors.Wrapf(err, "get CephCluster config") } + var next []cephv1.Node storageNodes := make(map[string]bool, len(cluster.Spec.Storage.Nodes)) for _, storageNode := range cluster.Spec.Storage.Nodes { + next = append(next, storageNode) storageNodes[storageNode.Name] = true } changed := false for _, name := range names { if !storageNodes[name] { c.Log.Infof("Adding node %q to CephCluster node storage list", 
@@ -41,31 +59,44 @@ func init() {
 }
 
 // returns the number of nodes used for storage, which may be higher than the number of names passed in
-// if a node is currenlty not ready but has not been purged
+// if a node is currently not ready but has not been purged
 func (c *Controller) UseNodesForStorage(rookVersion semver.Version, names []string) (int, error) {
 	cluster, err := c.GetCephCluster(context.TODO())
 	if err != nil {
 		return 0, errors.Wrapf(err, "get CephCluster config")
 	}
+	var next []cephv1.Node
 	storageNodes := make(map[string]bool, len(cluster.Spec.Storage.Nodes))
 	for _, storageNode := range cluster.Spec.Storage.Nodes {
+		next = append(next, storageNode)
 		storageNodes[storageNode.Name] = true
 	}
 	changed := false
 	for _, name := range names {
 		if !storageNodes[name] {
 			c.Log.Infof("Adding node %q to CephCluster node storage list", name)
-			cluster.Spec.Storage.Nodes = append(cluster.Spec.Storage.Nodes, cephv1.Node{
+			next = append(next, cephv1.Node{
 				Name: name,
 			})
 			changed = true
 		}
 	}
 	if changed {
-		cluster.Spec.Storage.UseAllNodes = false
-		_, err := c.Config.CephV1.CephClusters("rook-ceph").Update(context.TODO(), cluster, metav1.UpdateOptions{})
+		patches := []k8s.JSONPatchOperation{}
+		patches = append(patches, k8s.JSONPatchOperation{
+			Op:    k8s.JSONPatchOpReplace,
+			Path:  "/spec/storage/nodes",
+			Value: next,
+		})
+		patches = append(patches, k8s.JSONPatchOperation{
+			Op:    k8s.JSONPatchOpReplace,
+			Path:  "/spec/storage/useAllNodes",
+			Value: false,
+		})
+
+		_, err = c.JSONPatchCephCluster(context.TODO(), patches)
 		if err != nil {
-			return 0, errors.Wrap(err, "update CephCluster with new storage node list")
+			return 0, errors.Wrap(err, "patch CephCluster with new storage node list")
 		}
 	}
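The patch-based updates above and below rely on two helpers this diff never shows: k8s.JSONPatchOperation and Controller.JSONPatchCephCluster. A hedged sketch reconstructed purely from the call sites (field names and the helper body are assumptions, not the PR's actual code):

package k8s

// JSONPatchOperation models one RFC 6902 (JSON Patch) operation. Callers
// marshal a []JSONPatchOperation with encoding/json and submit it with
// apitypes.JSONPatchType.
type JSONPatchOperation struct {
	Op    JSONPatchOp `json:"op"`
	Path  string      `json:"path"`
	Value interface{} `json:"value,omitempty"`
}

type JSONPatchOp string

// Only "replace" appears in this diff; the other verbs are listed for
// completeness and may not exist in the real package.
const (
	JSONPatchOpAdd     JSONPatchOp = "add"
	JSONPatchOpRemove  JSONPatchOp = "remove"
	JSONPatchOpReplace JSONPatchOp = "replace"
)

The controller-side helper, in package cluster, plausibly amounts to:

func (c *Controller) JSONPatchCephCluster(ctx context.Context, patches []k8s.JSONPatchOperation) (*cephv1.CephCluster, error) {
	data, err := json.Marshal(patches)
	if err != nil {
		return nil, errors.Wrap(err, "marshal json patch")
	}
	return c.Config.CephV1.CephClusters(RookCephNS).Patch(ctx, CephClusterName, apitypes.JSONPatchType, data, metav1.PatchOptions{})
}

For the storage-node change above, the marshalled document would be e.g. [{"op":"replace","path":"/spec/storage/nodes","value":[...]},{"op":"replace","path":"/spec/storage/useAllNodes","value":false}].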
"marshal patch data") + } + c.Log.Debugf("Patching CephBlockPool %s with %s", pool.Name, string(patchData)) + _, err = c.Config.CephV1.CephBlockPools(RookCephNS).Patch(context.TODO(), pool.Name, apitypes.JSONPatchType, patchData, metav1.PatchOptions{}) + if err != nil { + return false, errors.Wrapf(err, "patch CephBlockPool %s", name) } if rookVersion.LT(Rookv14) { // Changing the replicated size of the pool in the CephBlockPool does not set the min_size on @@ -181,7 +230,7 @@ func (c *Controller) SetBlockPoolReplication(rookVersion semver.Version, name st return true, nil } -func (c *Controller) SetDeviceHealthMetricsReplication(rookVersion semver.Version, cephBlockPoolName string, level int, cephcluster *cephv1.CephCluster, doFullReconcile bool) (bool, error) { +func (c *Controller) SetDeviceHealthMetricsReplication(rookVersion semver.Version, cephVersion string, cephBlockPoolName string, level int, cephcluster *cephv1.CephCluster, doFullReconcile bool) (bool, error) { if rookVersion.LT(Rookv14) { return false, nil } @@ -196,26 +245,39 @@ func (c *Controller) SetDeviceHealthMetricsReplication(rookVersion semver.Versio minSize = 2 } - c.Log.Debugf("Ensuring %s replication level is %d", CephDeviceHealthMetricsPool, level) + poolName := CephDeviceHealthMetricsPool + if cephVersion != "" { + cephVersionSemver, err := semver.Parse(cephVersion) + if err != nil { + return false, errors.Wrapf(err, "parse ceph version %q", cephVersion) + } + if cephVersionSemver.Major >= 17 { + poolName = CephDeviceHealthMetricsPoolQuincy + } + } - err := c.cephOSDPoolSetSize(rookVersion, CephDeviceHealthMetricsPool, level, cephcluster) + c.Log.Debugf("Ensuring %s replication level is %d", poolName, level) + + err := c.cephOSDPoolSetSize(rookVersion, poolName, level, cephcluster) if err != nil { - return false, errors.Wrapf(err, "scale %s pool size", CephDeviceHealthMetricsPool) + return false, errors.Wrapf(err, "scale %s pool size", poolName) } - err = c.cephOSDPoolSetMinSize(rookVersion, CephDeviceHealthMetricsPool, minSize) + err = c.cephOSDPoolSetMinSize(rookVersion, poolName, minSize) if err != nil { - return false, errors.Wrapf(err, "scale %s pool min_size", CephDeviceHealthMetricsPool) + return false, errors.Wrapf(err, "scale %s pool min_size", poolName) } return true, nil } -func (c *Controller) ReconcileMonCount(ctx context.Context, count int) error { +// ReconcileMonCount ensures the CephCluster has the desired number of mons. +// A single mon for clusters with 1 or 2 nodes, and 3 mons for all other +// clusters. 
@@ -223,34 +285,41 @@
 		return errors.Wrapf(err, "get CephCluster config")
 	}
 
-	if cluster.Spec.Mon.Count == count {
+	if cluster.Spec.Mon.Count == desiredMonCount {
 		return nil
 	}
 
-	if cluster.Spec.Mon.Count > count {
-		c.Log.Debugf("Will not reduce mon count from %s to %s", cluster.Spec.Mon.Count, count)
+	if cluster.Spec.Mon.Count > desiredMonCount {
+		c.Log.Debugf("Will not reduce mon count from %d to %d", cluster.Spec.Mon.Count, desiredMonCount)
 		return nil
 	}
 
-	c.Log.Infof("Increasing mon count from %d to %d", cluster.Spec.Mon.Count, count)
-	cluster.Spec.Mon.Count = count
-	_, err = c.Config.CephV1.CephClusters("rook-ceph").Update(ctx, cluster, metav1.UpdateOptions{})
+	c.Log.Infof("Increasing mon count from %d to %d", cluster.Spec.Mon.Count, desiredMonCount)
+
+	patches := []k8s.JSONPatchOperation{{
+		Op:    k8s.JSONPatchOpReplace,
+		Path:  "/spec/mon/count",
+		Value: desiredMonCount,
+	}}
+
+	_, err = c.JSONPatchCephCluster(ctx, patches)
 	if err != nil {
-		return errors.Wrap(err, "update CephCluster with new mon count")
+		return errors.Wrap(err, "patch CephCluster with new mon count")
 	}
 
 	return nil
 }
 
-func (c *Controller) ReconcileMgrCount(ctx context.Context, rookVersion semver.Version, count int) error {
+// ReconcileMgrCount ensures the CephCluster has the desired number of mgrs.
+// A single mgr for clusters with 1 node, and 2 mgrs for all other clusters.
+func (c *Controller) ReconcileMgrCount(ctx context.Context, rookVersion semver.Version, nodeCount int) error {
 	if rookVersion.LT(Rookv19) {
 		return nil
 	}
 
 	// single mgr for 1 node cluster, 2 mgrs for all other clusters
-	if count < 2 {
-		count = 1
-	} else {
-		count = 2
+	desiredMgrCount := maxMgrCount
+	if nodeCount < maxMgrCount {
+		desiredMgrCount = minMgrCount
 	}
 
 	cluster, err := c.GetCephCluster(ctx)
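Restated, the sizing rule both reconcilers implement: replicas stay at the minimum until the node count reaches the cap, and neither mons nor mgrs are ever scaled back down. As a pure function (illustrative only; ekco keeps this logic inline):

func desiredReplicas(nodeCount, min, max int) int {
	if nodeCount < max {
		return min
	}
	return max
}

// mons: desiredReplicas(2, minMonCount, maxMonCount) == 1; desiredReplicas(3, minMonCount, maxMonCount) == 3
// mgrs: desiredReplicas(1, minMgrCount, maxMgrCount) == 1; desiredReplicas(2, minMgrCount, maxMgrCount) == 2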
+func (c *Controller) ReconcileMgrCount(ctx context.Context, rookVersion semver.Version, nodeCount int) error { if rookVersion.LT(Rookv19) { return nil } // single mgr for 1 node cluster, 2 mgrs for all other clusters - if count < 2 { - count = 1 - } else { - count = 2 + desiredMgrCount := maxMgrCount + if nodeCount < maxMgrCount { + desiredMgrCount = minMgrCount } cluster, err := c.GetCephCluster(ctx) @@ -258,19 +327,25 @@ func (c *Controller) ReconcileMgrCount(ctx context.Context, rookVersion semver.V return errors.Wrapf(err, "get CephCluster config") } - if cluster.Spec.Mgr.Count == count { + if cluster.Spec.Mgr.Count == desiredMgrCount { return nil } - if cluster.Spec.Mgr.Count > count { - c.Log.Debugf("Will not reduce mgr count from %s to %s", cluster.Spec.Mgr.Count, count) + if cluster.Spec.Mgr.Count > desiredMgrCount { + c.Log.Debugf("Will not reduce mgr count from %d to %d", cluster.Spec.Mgr.Count, desiredMgrCount) return nil } - c.Log.Infof("Increasing mgr count from %d to %d", cluster.Spec.Mgr.Count, count) - cluster.Spec.Mgr.Count = count - _, err = c.Config.CephV1.CephClusters("rook-ceph").Update(ctx, cluster, metav1.UpdateOptions{}) + c.Log.Infof("Increasing mgr count from %d to %d", cluster.Spec.Mgr.Count, desiredMgrCount) + + patches := []k8s.JSONPatchOperation{{ + Op: k8s.JSONPatchOpReplace, + Path: "/spec/mgr/count", + Value: desiredMgrCount, + }} + + _, err = c.JSONPatchCephCluster(ctx, patches) if err != nil { - return errors.Wrap(err, "update CephCluster with new mgr count") + return errors.Wrap(err, "patch CephCluster with new mgr count") } return nil @@ -315,7 +390,7 @@ func (c *Controller) SetCephCSIResources(ctx context.Context, rookVersion semver return false, nil } - configMap, err := c.Config.Client.CoreV1().ConfigMaps("rook-ceph").Get(ctx, "rook-ceph-operator-config", metav1.GetOptions{}) + configMap, err := c.Config.Client.CoreV1().ConfigMaps(RookCephNS).Get(ctx, "rook-ceph-operator-config", metav1.GetOptions{}) if err != nil { return false, errors.Wrap(err, "get rook-ceph-operator-config configmap") } @@ -326,7 +401,7 @@ func (c *Controller) SetCephCSIResources(ctx context.Context, rookVersion semver c.Log.Infof("Setting Ceph CSI plugin and provisioner resources") - _, err = c.Config.Client.CoreV1().ConfigMaps("rook-ceph").Patch(ctx, "rook-ceph-operator-config", apitypes.MergePatchType, cephCSIResourcesPatch, metav1.PatchOptions{}) + _, err = c.Config.Client.CoreV1().ConfigMaps(RookCephNS).Patch(ctx, "rook-ceph-operator-config", apitypes.MergePatchType, cephCSIResourcesPatch, metav1.PatchOptions{}) if err != nil { return false, errors.Wrap(err, "patch rook-ceph-operator-config configmap") } @@ -342,7 +417,8 @@ func cephCSIResourcesNeedsUpdate(data map[string]string) bool { data["CSI_NFS_PLUGIN_RESOURCE"] != cephCSINfsPluginResource } -// SetSharedFilesystemReplication ignores NotFound errors. +// SetFilesystemReplication will set the shared filesystem replication to +// the number of OSDs in the cluster. NotFound errors are ignored. Returns +// true if the resource was updated.
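+// Replication is only ever increased; pools already at or above the requested +// level are left unchanged.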
func (c *Controller) SetFilesystemReplication(rookVersion semver.Version, name string, level int, cephcluster *cephv1.CephCluster, doFullReconcile bool) (bool, error) { if name == "" { return false, nil @@ -355,32 +431,47 @@ func (c *Controller) SetFilesystemReplication(rookVersion semver.Version, name s } return false, errors.Wrapf(err, "get Filesystem %s", name) } - changed := false + patches := []k8s.JSONPatchOperation{} for i, pool := range fs.Spec.DataPools { current := int(pool.Replicated.Size) if current < level { fs.Spec.DataPools[i].Replicated.Size = uint(level) - changed = true + patches = append(patches, k8s.JSONPatchOperation{ + Op: k8s.JSONPatchOpReplace, + Path: fmt.Sprintf("/spec/dataPools/%d/replicated/size", i), + Value: uint(level), + }) } } current := int(fs.Spec.MetadataPool.Replicated.Size) if current < level { fs.Spec.MetadataPool.Replicated.Size = uint(level) - changed = true + patches = append(patches, k8s.JSONPatchOperation{ + Op: k8s.JSONPatchOpReplace, + Path: "/spec/metadataPool/replicated/size", + Value: uint(level), + }) } - if !(changed || doFullReconcile) { + if !(len(patches) > 0 || doFullReconcile) { return false, nil } - if changed { + if len(patches) > 0 { c.Log.Infof("Changing CephFilesystem pool replication level from %d to %d", current, level) } else { c.Log.Debugf("Ensuring CephFilesystem pool replication level is %d", level) } - _, err = c.Config.CephV1.CephFilesystems("rook-ceph").Update(context.TODO(), fs, metav1.UpdateOptions{}) + + patchData, err := json.Marshal(patches) if err != nil { - return false, errors.Wrapf(err, "update Filesystem %s", name) + return false, errors.Wrap(err, "json marshal patches") + } + + c.Log.Debugf("Patching CephFilesystem %s with %s", fs.Name, string(patchData)) + _, err = c.Config.CephV1.CephFilesystems(RookCephNS).Patch(context.TODO(), fs.Name, apitypes.JSONPatchType, patchData, metav1.PatchOptions{}) + if err != nil { + return false, errors.Wrapf(err, "patch Filesystem %s", name) } if rookVersion.LT(Rookv14) { minSize := 1 @@ -453,6 +544,9 @@ func (c *Controller) PatchFilesystemMDSPlacementMultinode(name string, numNodes } -// SetObjectStoreReplication ignores NotFound errors. +// SetObjectStoreReplication will set the object store pool replication to the +// number of OSDs in the cluster. NotFound errors are ignored. Returns true if +// the resource was updated.
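+// As with SetFilesystemReplication, pool replication is only ever increased, +// never reduced.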
func (c *Controller) SetObjectStoreReplication(rookVersion semver.Version, name string, level int, cephcluster *cephv1.CephCluster, doFullReconcile bool) (bool, error) { if name == "" { return false, nil @@ -465,21 +559,28 @@ func (c *Controller) SetObjectStoreReplication(rookVersion semver.Version, name } return false, errors.Wrapf(err, "get CephObjectStore %s", name) } - changed := false + + patches := []k8s.JSONPatchOperation{} current := int(os.Spec.DataPool.Replicated.Size) if current < level { - os.Spec.DataPool.Replicated.Size = uint(level) - changed = true + patches = append(patches, k8s.JSONPatchOperation{ + Op: k8s.JSONPatchOpReplace, + Path: "/spec/dataPool/replicated/size", + Value: uint(level), + }) } current = int(os.Spec.MetadataPool.Replicated.Size) if current < level { - os.Spec.MetadataPool.Replicated.Size = uint(level) - changed = true + patches = append(patches, k8s.JSONPatchOperation{ + Op: k8s.JSONPatchOpReplace, + Path: "/spec/metadataPool/replicated/size", + Value: uint(level), + }) } - if !(changed || doFullReconcile) { + if !(len(patches) > 0 || doFullReconcile) { return false, nil } @@ -487,14 +588,21 @@ func (c *Controller) SetObjectStoreReplication(rookVersion semver.Version, name if level > 1 { minSize = 2 } - if changed { + if len(patches) > 0 { c.Log.Infof("Changing CephObjectStore pool replication level from %d to %d", current, level) } else { + c.Log.Debugf("Ensuring CephObjectStore pool replication level is %d", level) } - _, err = c.Config.CephV1.CephObjectStores(RookCephNS).Update(context.TODO(), os, metav1.UpdateOptions{}) + + patchData, err := json.Marshal(patches) if err != nil { - return false, errors.Wrapf(err, "update CephObjectStore %s", name) + return false, errors.Wrap(err, "json marshal patches") + } + + c.Log.Debugf("Patching CephObjectStore %s with %s", os.Name, string(patchData)) + _, err = c.Config.CephV1.CephObjectStores(RookCephNS).Patch(context.TODO(), os.Name, apitypes.JSONPatchType, patchData, metav1.PatchOptions{}) + if err != nil { + return false, errors.Wrapf(err, "patch CephObjectStore %s", name) } // Changing the size in the CephObjectStore has no effect in Rook 1.0 so it needs to be set // manually https://github.com/rook/rook/issues/4341 @@ -542,7 +650,7 @@ func (c *Controller) rookCephExec(rookVersion semver.Version, cmd ...string) err opts := metav1.ListOptions{ LabelSelector: rookLabels, } - pods, err := c.Config.Client.CoreV1().Pods("rook-ceph").List(context.TODO(), opts) + pods, err := c.Config.Client.CoreV1().Pods(RookCephNS).List(context.TODO(), opts) if err != nil { return errors.Wrap(err, "list Rook pods") } @@ -550,7 +658,7 @@ func (c *Controller) rookCephExec(rookVersion semver.Version, cmd ...string) err return errors.Wrap(err, "found no Rook pods for executing ceph commands") } - exitCode, stdout, stderr, err := k8s.SyncExec(c.Config.Client.CoreV1(), c.Config.ClientConfig, "rook-ceph", pods.Items[0].Name, container, cmd...) + exitCode, stdout, stderr, err := c.SyncExecutor.ExecContainer(context.TODO(), RookCephNS, pods.Items[0].Name, container, cmd...)
if err != nil { return err } @@ -574,7 +682,7 @@ func (c *Controller) execCephOSDPurge(rookVersion semver.Version, osdID string, opts := metav1.ListOptions{ LabelSelector: rookLabels, } - pods, err := c.Config.Client.CoreV1().Pods("rook-ceph").List(context.TODO(), opts) + pods, err := c.Config.Client.CoreV1().Pods(RookCephNS).List(context.TODO(), opts) if err != nil { return errors.Wrap(err, "list Rook tools pods") } @@ -582,9 +690,9 @@ func (c *Controller) execCephOSDPurge(rookVersion semver.Version, osdID string, return errors.Wrapf(err, "found no Rook pods for executing ceph commands") } // ignore error - OSD is probably already down - _, _, _, _ = k8s.SyncExec(c.Config.Client.CoreV1(), c.Config.ClientConfig, "rook-ceph", pods.Items[0].Name, container, "ceph", "osd", "down", osdID) + _, _, _, _ = c.SyncExecutor.ExecContainer(context.TODO(), RookCephNS, pods.Items[0].Name, container, "ceph", "osd", "down", osdID) - exitCode, stdout, stderr, err := k8s.SyncExec(c.Config.Client.CoreV1(), c.Config.ClientConfig, "rook-ceph", pods.Items[0].Name, container, "ceph", "osd", "purge", osdID, "--yes-i-really-mean-it") + exitCode, stdout, stderr, err := c.SyncExecutor.ExecContainer(context.TODO(), RookCephNS, pods.Items[0].Name, container, "ceph", "osd", "purge", osdID, "--yes-i-really-mean-it") if exitCode != 0 { c.Log.Debugf("`ceph osd purge %s` stdout: %s", osdID, stdout) return fmt.Errorf("failed to purge OSD: %s", stderr) @@ -594,7 +702,7 @@ func (c *Controller) execCephOSDPurge(rookVersion semver.Version, osdID string, } // This removes the phantom OSD from the output of `ceph osd tree` - exitCode, stdout, stderr, err = k8s.SyncExec(c.Config.Client.CoreV1(), c.Config.ClientConfig, "rook-ceph", pods.Items[0].Name, container, "ceph", "osd", "crush", "rm", hostname) + exitCode, stdout, stderr, err = c.SyncExecutor.ExecContainer(context.TODO(), RookCephNS, pods.Items[0].Name, container, "ceph", "osd", "crush", "rm", hostname) if exitCode != 0 { c.Log.Debugf("`ceph osd crush rm %s` stdout: %s", hostname, stdout) return fmt.Errorf("failed to rm %s from crush map: %s", hostname, stderr) @@ -611,7 +719,7 @@ func (c *Controller) CephFilesystemOK(rookVersion semver.Version, name string) ( opts := metav1.ListOptions{ LabelSelector: rookLabels, } - pods, err := c.Config.Client.CoreV1().Pods("rook-ceph").List(context.TODO(), opts) + pods, err := c.Config.Client.CoreV1().Pods(RookCephNS).List(context.TODO(), opts) if err != nil { return false, errors.Wrap(err, "list Rook tools pods") } @@ -620,7 +728,7 @@ func (c *Controller) CephFilesystemOK(rookVersion semver.Version, name string) ( } // The filesystem will appear in `ceph fs ls` before it's ready to use. 
`ceph mds metadata` is // better because it waits for the mds daemons to be running - exitCode, stdout, stderr, err := k8s.SyncExec(c.Config.Client.CoreV1(), c.Config.ClientConfig, "rook-ceph", pods.Items[0].Name, container, "ceph", "mds", "metadata") + exitCode, stdout, stderr, err := c.SyncExecutor.ExecContainer(context.TODO(), RookCephNS, pods.Items[0].Name, container, "ceph", "mds", "metadata") if err != nil { return false, errors.Wrap(err, "running 'ceph mds metadata'") } @@ -686,7 +794,7 @@ func (c *Controller) countUniqueHostsWithOSD(rookVersion semver.Version) (int, e opts := metav1.ListOptions{ LabelSelector: rookLabels, } - pods, err := c.Config.Client.CoreV1().Pods("rook-ceph").List(context.TODO(), opts) + pods, err := c.Config.Client.CoreV1().Pods(RookCephNS).List(context.TODO(), opts) if err != nil { return 0, errors.Wrap(err, "list Rook pods") } @@ -695,7 +803,7 @@ func (c *Controller) countUniqueHostsWithOSD(rookVersion semver.Version) (int, e } cmd := []string{"ceph", "osd", "status"} - exitCode, stdout, stderr, err := k8s.SyncExec(c.Config.Client.CoreV1(), c.Config.ClientConfig, "rook-ceph", pods.Items[0].Name, container, cmd...) + exitCode, stdout, stderr, err := c.SyncExecutor.ExecContainer(context.TODO(), RookCephNS, pods.Items[0].Name, container, cmd...) if err != nil { return 0, errors.Wrap(err, "exec ceph osd status") } @@ -749,7 +857,7 @@ func (c *Controller) PrioritizeRook() error { } for _, selector := range selectors { c.Log.Debugf("Setting priority class for rook-ceph deployments with label %s", selector) - if err := c.prioritizeRookDeployments("rook-ceph", selector); err != nil { + if err := c.prioritizeRookDeployments(RookCephNS, selector); err != nil { return err } } @@ -758,7 +866,7 @@ func (c *Controller) PrioritizeRook() error { } func (c *Controller) prioritizeRookAgent() error { - dsClient := c.Config.Client.AppsV1().DaemonSets("rook-ceph") + dsClient := c.Config.Client.AppsV1().DaemonSets(RookCephNS) agentDS, err := dsClient.Get(context.TODO(), "rook-ceph-agent", metav1.GetOptions{}) if err != nil { if util.IsNotFoundErr(err) { @@ -810,6 +918,17 @@ func (c *Controller) GetCephCluster(ctx context.Context) (*cephv1.CephCluster, e return c.Config.CephV1.CephClusters(RookCephNS).Get(ctx, CephClusterName, metav1.GetOptions{}) } +// JSONPatchCephCluster patches the "rook-ceph" CephCluster with the given JSON +// patches. +func (c *Controller) JSONPatchCephCluster(ctx context.Context, patches []k8s.JSONPatchOperation) (*cephv1.CephCluster, error) { + patchData, err := json.Marshal(patches) + if err != nil { + return nil, errors.Wrap(err, "marshal json patch") + } + c.Log.Debugf("Patching CephCluster %s with %s", CephClusterName, string(patchData)) + return c.Config.CephV1.CephClusters(RookCephNS).Patch(ctx, CephClusterName, apitypes.JSONPatchType, patchData, metav1.PatchOptions{}) +} + func (c *Controller) cephOSDPoolSetSize(rookVersion semver.Version, name string, size int, cephcluster *cephv1.CephCluster) error { args := []string{"ceph", "osd", "pool", "set", name, "size", strconv.Itoa(size)} if size == 1 { @@ -835,12 +954,12 @@ func (c *Controller) cephOSDPoolSetMinSize(rookVersion semver.Version, name stri // GetRookVersion gets the Rook version from the container image tag of the rook-ceph-operator // deployment in the rook-ceph namespace.
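+// For example, an operator image tag such as rook/ceph:v1.9.12 is assumed to +// yield version 1.9.12.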
func (c *Controller) GetRookVersion(ctx context.Context) (*semver.Version, error) { - _, err := c.Config.Client.CoreV1().Namespaces().Get(ctx, "rook-ceph", metav1.GetOptions{}) + _, err := c.Config.Client.CoreV1().Namespaces().Get(ctx, RookCephNS, metav1.GetOptions{}) if err != nil { return nil, errors.Wrap(err, "get rook-ceph namespace") } - deploy, err := c.Config.Client.AppsV1().Deployments("rook-ceph").Get(ctx, "rook-ceph-operator", metav1.GetOptions{}) + deploy, err := c.Config.Client.AppsV1().Deployments(RookCephNS).Get(ctx, "rook-ceph-operator", metav1.GetOptions{}) if err != nil { return nil, errors.Wrap(err, "get rook-ceph-operator deployment") } diff --git a/pkg/cluster/rook_ceph_test.go b/pkg/cluster/rook_ceph_test.go index c2a65ce..91b21e8 100644 --- a/pkg/cluster/rook_ceph_test.go +++ b/pkg/cluster/rook_ceph_test.go @@ -3,11 +3,17 @@ package cluster import ( "context" _ "embed" + "fmt" + "reflect" "testing" "github.com/blang/semver" + "github.com/golang/mock/gomock" + mock_k8s "github.com/replicatedhq/ekco/pkg/k8s/mock" "github.com/replicatedhq/ekco/pkg/logger" "github.com/replicatedhq/ekco/pkg/util" + cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + rookfake "github.com/rook/rook/pkg/client/clientset/versioned/fake" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -264,3 +270,1056 @@ func TestController_GetRookVersion(t *testing.T) { }) } } + +func TestController_UseNodesForStorage(t *testing.T) { + type args struct { + rookVersion semver.Version + names []string + } + tests := []struct { + name string + nodes []string + rookResources []runtime.Object + args args + want int + wantStorageScopeSpec cephv1.StorageScopeSpec + wantErr bool + }{ + { + name: "storage nodes should change from useAllNodes to 1, rook version 1.9.12", + nodes: []string{"node1"}, + rookResources: []runtime.Object{ + &cephv1.CephCluster{ + TypeMeta: metav1.TypeMeta{Kind: "CephCluster", APIVersion: "ceph.rook.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "rook-ceph", + Namespace: "rook-ceph", + }, + Spec: cephv1.ClusterSpec{ + Storage: cephv1.StorageScopeSpec{ + UseAllNodes: true, + }, + }, + }, + }, + args: args{ + rookVersion: semver.MustParse("1.9.12"), + names: []string{"node1"}, + }, + want: 1, + wantStorageScopeSpec: cephv1.StorageScopeSpec{ + UseAllNodes: false, + Nodes: []cephv1.Node{ + { + Name: "node1", + }, + }, + }, + wantErr: false, + }, + { + name: "storage nodes should stay at 1, rook version 1.9.12", + nodes: []string{"node1"}, + rookResources: []runtime.Object{ + &cephv1.CephCluster{ + TypeMeta: metav1.TypeMeta{Kind: "CephCluster", APIVersion: "ceph.rook.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "rook-ceph", + Namespace: "rook-ceph", + }, + Spec: cephv1.ClusterSpec{ + Storage: cephv1.StorageScopeSpec{ + UseAllNodes: false, + Nodes: []cephv1.Node{ + { + Name: "node1", + }, + }, + }, + }, + }, + }, + args: args{ + rookVersion: semver.MustParse("1.9.12"), + names: []string{"node1"}, + }, + want: 1, + wantStorageScopeSpec: cephv1.StorageScopeSpec{ + UseAllNodes: false, + Nodes: []cephv1.Node{ + { + Name: "node1", + }, + }, + }, + wantErr: false, + }, + { + name: "storage nodes should increase from 1 to 3, rook version 1.9.12", + nodes: []string{"node1", "node2", "node3"}, + rookResources: []runtime.Object{ + &cephv1.CephCluster{ + TypeMeta: metav1.TypeMeta{Kind: "CephCluster", APIVersion: "ceph.rook.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "rook-ceph", + Namespace: "rook-ceph", + }, + Spec: cephv1.ClusterSpec{ 
+ Storage: cephv1.StorageScopeSpec{ + UseAllNodes: false, + Nodes: []cephv1.Node{ + { + Name: "node1", + }, + }, + }, + }, + }, + }, + args: args{ + rookVersion: semver.MustParse("1.9.12"), + names: []string{"node1", "node2", "node3"}, + }, + want: 3, + wantStorageScopeSpec: cephv1.StorageScopeSpec{ + Nodes: []cephv1.Node{ + { + Name: "node1", + }, + { + Name: "node2", + }, + { + Name: "node3", + }, + }, + }, + wantErr: false, + }, + // TODO: rookVersion 1.0.4 + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // TODO: deploy/rook-ceph-operator for rookVersion 1.0.4 + resources := []runtime.Object{ + &corev1.Pod{ + TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "rook-ceph-tools-5b8b8b8b8b-5b8b8", + Namespace: "rook-ceph", + Labels: map[string]string{ + "app": "rook-ceph-tools", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "rook-ceph-tools", + }, + }, + }, + }, + } + cephOSDStatusOut := "ID HOST USED AVAIL WR OPS WR DATA RD OPS RD DATA STATE" + for _, node := range tt.nodes { + resources = append(resources, &corev1.Node{ + TypeMeta: metav1.TypeMeta{Kind: "Node", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: node, + }, + }) + cephOSDStatusOut += fmt.Sprintf("\n0 %s 128M 99.8G 0 0 2 84 exists,up", node) + } + + m := mock_k8s.NewMockSyncExecutorInterface(ctrl) + m.EXPECT().ExecContainer(gomock.Any(), "rook-ceph", "rook-ceph-tools-5b8b8b8b8b-5b8b8", "rook-ceph-tools", "ceph", "osd", "status"). + Return(0, cephOSDStatusOut, "", nil) + + clientset := fake.NewSimpleClientset(resources...) + rookClientset := rookfake.NewSimpleClientset(tt.rookResources...) + + c := &Controller{ + Config: ControllerConfig{ + Client: clientset, + CephV1: rookClientset.CephV1(), + }, + SyncExecutor: m, + Log: logger.NewDiscardLogger(), + } + got, err := c.UseNodesForStorage(tt.args.rookVersion, tt.args.names) + if (err != nil) != tt.wantErr { + t.Errorf("Controller.UseNodesForStorage() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("Controller.UseNodesForStorage() = %v, want %v", got, tt.want) + } + cephCluster, err := rookClientset.CephV1().CephClusters("rook-ceph").Get(context.Background(), "rook-ceph", metav1.GetOptions{}) + if err != nil { + t.Errorf("CephClusters.Get(\"rook-ceph\") error = %v", err) + return + } + if !reflect.DeepEqual(cephCluster.Spec.Storage, tt.wantStorageScopeSpec) { + t.Errorf("Controller.UseNodesForStorage() = %v, want %v", cephCluster.Spec.Storage, tt.wantStorageScopeSpec) + } + }) + } +} + +func TestController_removeCephClusterStorageNode(t *testing.T) { + type args struct { + name string + } + tests := []struct { + name string + nodes []string + rookResources []runtime.Object + args args + wantStorageScopeSpec cephv1.StorageScopeSpec + wantErr bool + }{ + { + name: "storage nodes should decrease from 4 to 3, rook version 1.9.12", + nodes: []string{"node1", "node2", "node3", "node4"}, + rookResources: []runtime.Object{ + &cephv1.CephCluster{ + TypeMeta: metav1.TypeMeta{Kind: "CephCluster", APIVersion: "ceph.rook.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "rook-ceph", + Namespace: "rook-ceph", + }, + Spec: cephv1.ClusterSpec{ + Storage: cephv1.StorageScopeSpec{ + UseAllNodes: false, + Nodes: []cephv1.Node{ + { + Name: "node1", + }, + { + Name: "node2", + }, + { + Name: "node3", + }, + { + Name: "node4", + }, + }, + }, + }, + }, + }, + args: args{ + name: "node2", + 
}, + wantStorageScopeSpec: cephv1.StorageScopeSpec{ + UseAllNodes: false, + Nodes: []cephv1.Node{ + { + Name: "node1", + }, + { + Name: "node3", + }, + { + Name: "node4", + }, + }, + }, + wantErr: false, + }, + { + name: "storage node not found should not change storage spec and not result in an error, rook version 1.9.12", + nodes: []string{"node1", "node2", "node3"}, + rookResources: []runtime.Object{ + &cephv1.CephCluster{ + TypeMeta: metav1.TypeMeta{Kind: "CephCluster", APIVersion: "ceph.rook.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "rook-ceph", + Namespace: "rook-ceph", + }, + Spec: cephv1.ClusterSpec{ + Storage: cephv1.StorageScopeSpec{ + UseAllNodes: false, + Nodes: []cephv1.Node{ + { + Name: "node1", + }, + { + Name: "node2", + }, + { + Name: "node3", + }, + }, + }, + }, + }, + }, + args: args{ + name: "node4", + }, + wantStorageScopeSpec: cephv1.StorageScopeSpec{ + UseAllNodes: false, + Nodes: []cephv1.Node{ + { + Name: "node1", + }, + { + Name: "node2", + }, + { + Name: "node3", + }, + }, + }, + wantErr: false, + }, + { + name: "useAllNodes true should not change storage spec and not result in an error, rook version 1.9.12", + nodes: []string{"node1", "node2", "node3"}, + rookResources: []runtime.Object{ + &cephv1.CephCluster{ + TypeMeta: metav1.TypeMeta{Kind: "CephCluster", APIVersion: "ceph.rook.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "rook-ceph", + Namespace: "rook-ceph", + }, + Spec: cephv1.ClusterSpec{ + Storage: cephv1.StorageScopeSpec{ + UseAllNodes: true, + }, + }, + }, + }, + args: args{ + name: "node4", + }, + wantStorageScopeSpec: cephv1.StorageScopeSpec{ + UseAllNodes: true, + }, + wantErr: false, + }, + // TODO: rookVersion 1.0.4 + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resources := []runtime.Object{} + for _, node := range tt.nodes { + resources = append(resources, &corev1.Node{ + TypeMeta: metav1.TypeMeta{Kind: "Node", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: node, + }, + }) + } + + clientset := fake.NewSimpleClientset(resources...) + rookClientset := rookfake.NewSimpleClientset(tt.rookResources...) 
+ + c := &Controller{ + Config: ControllerConfig{ + Client: clientset, + CephV1: rookClientset.CephV1(), + }, + Log: logger.NewDiscardLogger(), + } + err := c.removeCephClusterStorageNode(tt.args.name) + if (err != nil) != tt.wantErr { + t.Errorf("Controller.removeCephClusterStorageNode() error = %v, wantErr %v", err, tt.wantErr) + return + } + cephCluster, err := rookClientset.CephV1().CephClusters("rook-ceph").Get(context.Background(), "rook-ceph", metav1.GetOptions{}) + if err != nil { + t.Errorf("CephClusters.Get(\"rook-ceph\") error = %v", err) + return + } + if !reflect.DeepEqual(cephCluster.Spec.Storage, tt.wantStorageScopeSpec) { + t.Errorf("Controller.removeCephClusterStorageNode() = %v, want %v", cephCluster.Spec.Storage, tt.wantStorageScopeSpec) + } + }) + } +} + +func TestController_SetBlockPoolReplication(t *testing.T) { + type args struct { + rookVersion semver.Version + name string + level int + cephcluster *cephv1.CephCluster + doFullReconcile bool + } + tests := []struct { + name string + rookResources []runtime.Object + args args + want bool + wantLevel uint + wantErr bool + }{ + { + name: "blockpool replication should stay at 1, rook version 1.9.12", + rookResources: []runtime.Object{ + &cephv1.CephBlockPool{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "ceph.rook.io/v1", + Kind: "CephBlockPool", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "replicapool", + Namespace: "rook-ceph", + }, + Spec: cephv1.NamedBlockPoolSpec{ + Name: "replicapool", + PoolSpec: cephv1.PoolSpec{ + Replicated: cephv1.ReplicatedSpec{ + Size: 1, + }, + }, + }, + }, + }, + args: args{ + rookVersion: semver.MustParse("1.9.12"), + name: "replicapool", + level: 1, + cephcluster: nil, + doFullReconcile: false, + }, + want: false, + wantLevel: 1, + wantErr: false, + }, + { + name: "blockpool replication should stay at 1, do full reconcile, rook version 1.9.12", + rookResources: []runtime.Object{ + &cephv1.CephBlockPool{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "ceph.rook.io/v1", + Kind: "CephBlockPool", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "replicapool", + Namespace: "rook-ceph", + }, + Spec: cephv1.NamedBlockPoolSpec{ + Name: "replicapool", + PoolSpec: cephv1.PoolSpec{ + Replicated: cephv1.ReplicatedSpec{ + Size: 1, + }, + }, + }, + }, + }, + args: args{ + rookVersion: semver.MustParse("1.9.12"), + name: "replicapool", + level: 1, + cephcluster: nil, + doFullReconcile: true, + }, + want: true, + wantLevel: 1, + wantErr: false, + }, + { + name: "blockpool replication should increase from 1 to 3, rook version 1.9.12", + rookResources: []runtime.Object{ + &cephv1.CephBlockPool{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "ceph.rook.io/v1", + Kind: "CephBlockPool", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "replicapool", + Namespace: "rook-ceph", + }, + Spec: cephv1.NamedBlockPoolSpec{ + Name: "replicapool", + PoolSpec: cephv1.PoolSpec{ + Replicated: cephv1.ReplicatedSpec{ + Size: 1, + }, + }, + }, + }, + }, + args: args{ + rookVersion: semver.MustParse("1.9.12"), + name: "replicapool", + level: 3, + cephcluster: nil, + doFullReconcile: false, + }, + want: true, + wantLevel: 3, + wantErr: false, + }, + // TODO: rookVersion 1.0.4 + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rookClientset := rookfake.NewSimpleClientset(tt.rookResources...)
+ c := &Controller{ + Config: ControllerConfig{ + CephV1: rookClientset.CephV1(), + }, + Log: logger.NewDiscardLogger(), + } + got, err := c.SetBlockPoolReplication(tt.args.rookVersion, tt.args.name, tt.args.level, tt.args.cephcluster, tt.args.doFullReconcile) + if (err != nil) != tt.wantErr { + t.Errorf("Controller.SetBlockPoolReplication() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("Controller.SetBlockPoolReplication() = %v, want %v", got, tt.want) + } + cephBP, err := rookClientset.CephV1().CephBlockPools("rook-ceph").Get(context.Background(), "replicapool", metav1.GetOptions{}) + if err != nil { + t.Errorf("CephBlockPools.Get(\"replicapool\") error = %v", err) + return + } + if cephBP.Spec.Replicated.Size != tt.wantLevel { + t.Errorf("CephBlockPool.Spec.Replicated.Size = %d, want %d", cephBP.Spec.Replicated.Size, tt.wantLevel) + } + }) + } +} + +func TestController_ReconcileMonCount(t *testing.T) { + type args struct { + nodeCount int + } + tests := []struct { + name string + rookResources []runtime.Object + args args + wantMonCount int + wantErr bool + }{ + { + name: "mon count should stay at 1", + rookResources: []runtime.Object{ + &cephv1.CephCluster{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "ceph.rook.io/v1", + Kind: "CephCluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "rook-ceph", + Namespace: "rook-ceph", + }, + Spec: cephv1.ClusterSpec{ + Mon: cephv1.MonSpec{ + Count: 1, + }, + }, + }, + }, + args: args{ + nodeCount: 1, + }, + wantMonCount: 1, + }, + { + name: "mon count should increase from 1 to 3 with 6 nodes", + rookResources: []runtime.Object{ + &cephv1.CephCluster{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "ceph.rook.io/v1", + Kind: "CephCluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "rook-ceph", + Namespace: "rook-ceph", + }, + Spec: cephv1.ClusterSpec{ + Mon: cephv1.MonSpec{ + Count: 1, + }, + }, + }, + }, + args: args{ + nodeCount: 6, + }, + wantMonCount: 3, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rookClientset := rookfake.NewSimpleClientset(tt.rookResources...)
+ c := &Controller{ + Config: ControllerConfig{ + CephV1: rookClientset.CephV1(), + }, + Log: logger.NewDiscardLogger(), + } + if err := c.ReconcileMonCount(context.Background(), tt.args.nodeCount); (err != nil) != tt.wantErr { + t.Errorf("Controller.ReconcileMonCount() error = %v, wantErr %v", err, tt.wantErr) + } + cephCluster, err := rookClientset.CephV1().CephClusters("rook-ceph").Get(context.Background(), "rook-ceph", metav1.GetOptions{}) + if err != nil { + t.Errorf("CephClusters.Get(\"rook-ceph\") error = %v", err) + return + } + if cephCluster.Spec.Mon.Count != tt.wantMonCount { + t.Errorf("CephCluster.Spec.Mon.Count = %d, want %d", cephCluster.Spec.Mon.Count, tt.wantMonCount) + } + }) + } +} + +func TestController_ReconcileMgrCount(t *testing.T) { + type args struct { + rookVersion semver.Version + nodeCount int + } + tests := []struct { + name string + rookResources []runtime.Object + args args + wantMgrCount int + wantErr bool + }{ + { + name: "mgr count should stay at 1, rook version 1.9.12", + rookResources: []runtime.Object{ + &cephv1.CephCluster{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "ceph.rook.io/v1", + Kind: "CephCluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "rook-ceph", + Namespace: "rook-ceph", + }, + Spec: cephv1.ClusterSpec{ + Mgr: cephv1.MgrSpec{ + Count: 1, + }, + }, + }, + }, + args: args{ + rookVersion: semver.MustParse("1.9.12"), + nodeCount: 1, + }, + wantMgrCount: 1, + }, + { + name: "mgr count should increase from 1 to 2, rook version 1.9.12", + rookResources: []runtime.Object{ + &cephv1.CephCluster{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "ceph.rook.io/v1", + Kind: "CephCluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "rook-ceph", + Namespace: "rook-ceph", + }, + Spec: cephv1.ClusterSpec{ + Mgr: cephv1.MgrSpec{ + Count: 1, + }, + }, + }, + }, + args: args{ + rookVersion: semver.MustParse("1.9.12"), + nodeCount: 3, + }, + wantMgrCount: 2, + }, + { + name: "mgr count should not increase beyond 1, rook version 1.8.10", + rookResources: []runtime.Object{ + &cephv1.CephCluster{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "ceph.rook.io/v1", + Kind: "CephCluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "rook-ceph", + Namespace: "rook-ceph", + }, + Spec: cephv1.ClusterSpec{ + Mgr: cephv1.MgrSpec{ + Count: 1, + }, + }, + }, + }, + args: args{ + rookVersion: semver.MustParse("1.8.10"), + nodeCount: 3, + }, + wantMgrCount: 1, + }, + // TODO: rookVersion 1.0.4 + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rookClientset := rookfake.NewSimpleClientset(tt.rookResources...) 
+ c := &Controller{ + Config: ControllerConfig{ + CephV1: rookClientset.CephV1(), + }, + Log: logger.NewDiscardLogger(), + } + if err := c.ReconcileMgrCount(context.Background(), tt.args.rookVersion, tt.args.nodeCount); (err != nil) != tt.wantErr { + t.Errorf("Controller.ReconcileMgrCount() error = %v, wantErr %v", err, tt.wantErr) + } + cephCluster, err := rookClientset.CephV1().CephClusters("rook-ceph").Get(context.Background(), "rook-ceph", metav1.GetOptions{}) + if err != nil { + t.Errorf("CephClusters.Get(\"rook-ceph\") error = %v", err) + return + } + if cephCluster.Spec.Mgr.Count != tt.wantMgrCount { + t.Errorf("CephCluster.Spec.Mgr.Count = %d, want %d", cephCluster.Spec.Mgr.Count, tt.wantMgrCount) + } + }) + } +} + +func TestController_SetFilesystemReplication(t *testing.T) { + type args struct { + rookVersion semver.Version + name string + level int + cephcluster *cephv1.CephCluster + doFullReconcile bool + } + tests := []struct { + name string + rookResources []runtime.Object + args args + want bool + wantLevel uint + wantErr bool + }{ + { + name: "filesystem replication should stay at 1, rook version 1.9.12", + rookResources: []runtime.Object{ + &cephv1.CephFilesystem{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "ceph.rook.io/v1", + Kind: "CephFilesystem", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "myfs", + Namespace: "rook-ceph", + }, + Spec: cephv1.FilesystemSpec{ + MetadataPool: cephv1.PoolSpec{ + Replicated: cephv1.ReplicatedSpec{ + Size: 1, + }, + }, + DataPools: []cephv1.NamedPoolSpec{ + { + Name: "myfs-data0", + PoolSpec: cephv1.PoolSpec{ + Replicated: cephv1.ReplicatedSpec{ + Size: 1, + }, + }, + }, + }, + }, + }, + }, + args: args{ + rookVersion: semver.MustParse("1.9.12"), + name: "myfs", + level: 1, + cephcluster: nil, + doFullReconcile: false, + }, + want: false, + wantLevel: 1, + wantErr: false, + }, + { + name: "filesystem replication should stay at 1, do full reconcile, rook version 1.9.12", + rookResources: []runtime.Object{ + &cephv1.CephFilesystem{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "ceph.rook.io/v1", + Kind: "CephFilesystem", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "myfs", + Namespace: "rook-ceph", + }, + Spec: cephv1.FilesystemSpec{ + MetadataPool: cephv1.PoolSpec{ + Replicated: cephv1.ReplicatedSpec{ + Size: 1, + }, + }, + DataPools: []cephv1.NamedPoolSpec{ + { + Name: "myfs-data0", + PoolSpec: cephv1.PoolSpec{ + Replicated: cephv1.ReplicatedSpec{ + Size: 1, + }, + }, + }, + }, + }, + }, + }, + args: args{ + rookVersion: semver.MustParse("1.9.12"), + name: "myfs", + level: 1, + cephcluster: nil, + doFullReconcile: true, + }, + want: true, + wantLevel: 1, + wantErr: false, + }, + { + name: "filesystem replication should increase from 1 to 3, rook version 1.9.12", + rookResources: []runtime.Object{ + &cephv1.CephFilesystem{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "ceph.rook.io/v1", + Kind: "CephFilesystem", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "myfs", + Namespace: "rook-ceph", + }, + Spec: cephv1.FilesystemSpec{ + MetadataPool: cephv1.PoolSpec{ + Replicated: cephv1.ReplicatedSpec{ + Size: 1, + }, + }, + DataPools: []cephv1.NamedPoolSpec{ + { + Name: "myfs-data0", + PoolSpec: cephv1.PoolSpec{ + Replicated: cephv1.ReplicatedSpec{ + Size: 1, + }, + }, + }, + }, + }, + }, + }, + args: args{ + rookVersion: semver.MustParse("1.9.12"), + name: "myfs", + level: 3, + cephcluster: nil, + doFullReconcile: false, + }, + want: true, + wantLevel: 3, + wantErr: false, + }, + // TODO: rookVersion 1.0.4 + } + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + rookClientset := rookfake.NewSimpleClientset(tt.rookResources...) + c := &Controller{ + Config: ControllerConfig{ + CephV1: rookClientset.CephV1(), + }, + Log: logger.NewDiscardLogger(), + } + got, err := c.SetFilesystemReplication(tt.args.rookVersion, tt.args.name, tt.args.level, tt.args.cephcluster, tt.args.doFullReconcile) + if (err != nil) != tt.wantErr { + t.Errorf("Controller.SetFilesystemReplication() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("Controller.SetFilesystemReplication() = %v, want %v", got, tt.want) + } + cephFs, err := rookClientset.CephV1().CephFilesystems("rook-ceph").Get(context.Background(), "myfs", metav1.GetOptions{}) + if err != nil { + t.Errorf("CephFilesystems.Get(\"myfs\") error = %v", err) + return + } + if cephFs.Spec.MetadataPool.Replicated.Size != tt.wantLevel { + t.Errorf("CephFilesystem.Spec.MetadataPool.Replicated.Size = %d, want %d", cephFs.Spec.MetadataPool.Replicated.Size, tt.wantLevel) + } + if cephFs.Spec.DataPools[0].Replicated.Size != tt.wantLevel { + t.Errorf("CephFilesystem.Spec.DataPools[0].Replicated.Size = %d, want %d", cephFs.Spec.DataPools[0].Replicated.Size, tt.wantLevel) + } + }) + } +} + +func TestController_SetObjectStoreReplication(t *testing.T) { + type args struct { + rookVersion semver.Version + name string + level int + cephcluster *cephv1.CephCluster + doFullReconcile bool + } + tests := []struct { + name string + rookResources []runtime.Object + args args + want bool + wantLevel uint + wantErr bool + }{ + { + name: "objectstore replication should stay at 1, rook version 1.9.12", + rookResources: []runtime.Object{ + &cephv1.CephObjectStore{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "ceph.rook.io/v1", + Kind: "CephObjectStore", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-store", + Namespace: "rook-ceph", + }, + Spec: cephv1.ObjectStoreSpec{ + MetadataPool: cephv1.PoolSpec{ + Replicated: cephv1.ReplicatedSpec{ + Size: 1, + }, + }, + DataPool: cephv1.PoolSpec{ + Replicated: cephv1.ReplicatedSpec{ + Size: 1, + }, + }, + }, + }, + }, + args: args{ + rookVersion: semver.MustParse("1.9.12"), + name: "my-store", + level: 1, + cephcluster: nil, + doFullReconcile: false, + }, + want: false, + wantLevel: 1, + wantErr: false, + }, + { + name: "objectstore replication should stay at 1, do full reconcile, rook version 1.9.12", + rookResources: []runtime.Object{ + &cephv1.CephObjectStore{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "ceph.rook.io/v1", + Kind: "CephObjectStore", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-store", + Namespace: "rook-ceph", + }, + Spec: cephv1.ObjectStoreSpec{ + MetadataPool: cephv1.PoolSpec{ + Replicated: cephv1.ReplicatedSpec{ + Size: 1, + }, + }, + DataPool: cephv1.PoolSpec{ + Replicated: cephv1.ReplicatedSpec{ + Size: 1, + }, + }, + }, + }, + }, + args: args{ + rookVersion: semver.MustParse("1.9.12"), + name: "my-store", + level: 1, + cephcluster: nil, + doFullReconcile: true, + }, + want: true, + wantLevel: 1, + wantErr: false, + }, + { + name: "objectstore replication should increase from 1 to 3, rook version 1.9.12", + rookResources: []runtime.Object{ + &cephv1.CephObjectStore{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "ceph.rook.io/v1", + Kind: "CephObjectStore", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-store", + Namespace: "rook-ceph", + }, + Spec: cephv1.ObjectStoreSpec{ + MetadataPool: cephv1.PoolSpec{ + Replicated: cephv1.ReplicatedSpec{ + Size: 1, + }, + }, + DataPool: cephv1.PoolSpec{ + Replicated: 
cephv1.ReplicatedSpec{ + Size: 1, + }, + }, + }, + }, + }, + args: args{ + rookVersion: semver.MustParse("1.9.12"), + name: "my-store", + level: 3, + cephcluster: nil, + doFullReconcile: false, + }, + want: true, + wantLevel: 3, + wantErr: false, + }, + // TODO: rookVersion 1.0.4 + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rookClientset := rookfake.NewSimpleClientset(tt.rookResources...) + c := &Controller{ + Config: ControllerConfig{ + CephV1: rookClientset.CephV1(), + }, + Log: logger.NewDiscardLogger(), + } + got, err := c.SetObjectStoreReplication(tt.args.rookVersion, tt.args.name, tt.args.level, tt.args.cephcluster, tt.args.doFullReconcile) + if (err != nil) != tt.wantErr { + t.Errorf("Controller.SetObjectStoreReplication() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("Controller.SetObjectStoreReplication() = %v, want %v", got, tt.want) + } + cephOS, err := rookClientset.CephV1().CephObjectStores("rook-ceph").Get(context.Background(), "my-store", metav1.GetOptions{}) + if err != nil { + t.Errorf("CephObjectStores.Get(\"my-store\") error = %v", err) + return + } + if cephOS.Spec.MetadataPool.Replicated.Size != tt.wantLevel { + t.Errorf("CephObjectStore.Spec.MetadataPool.Replicated.Size = %d, want %d", cephOS.Spec.MetadataPool.Replicated.Size, tt.wantLevel) + } + if cephOS.Spec.DataPool.Replicated.Size != tt.wantLevel { + t.Errorf("CephObjectStore.Spec.DataPool.Replicated.Size = %d, want %d", cephOS.Spec.DataPool.Replicated.Size, tt.wantLevel) + } + }) + } +} diff --git a/pkg/ekcoops/operator.go b/pkg/ekcoops/operator.go index 051eb69..c652a34 100644 --- a/pkg/ekcoops/operator.go +++ b/pkg/ekcoops/operator.go @@ -255,9 +255,9 @@ func (o *Operator) adjustPoolReplicationLevels(rookVersion semver.Version, numNo // There is no CR to compare the desired and current level. // Assume that if cephblockpool replication level has not yet been set then we need to do the same for device_health_metrics.
- _, err = o.controller.SetDeviceHealthMetricsReplication(rookVersion, o.config.CephBlockPool, factor, cephcluster, doFullReconcile || didUpdate) + _, err = o.controller.SetDeviceHealthMetricsReplication(rookVersion, cephcluster.Status.CephVersion.Version, o.config.CephBlockPool, factor, cephcluster, doFullReconcile || didUpdate) if err != nil { - multiErr = multierror.Append(multiErr, errors.Wrapf(err, "set health_device_metrics replication to %d", factor)) + multiErr = multierror.Append(multiErr, errors.Wrapf(err, "set device_health_metrics replication to %d", factor)) } return multiErr diff --git a/pkg/ekcoops/prometheus.go b/pkg/ekcoops/prometheus.go index fce1963..88a0a55 100644 --- a/pkg/ekcoops/prometheus.go +++ b/pkg/ekcoops/prometheus.go @@ -4,17 +4,12 @@ import ( "context" "github.com/pkg/errors" + "github.com/replicatedhq/ekco/pkg/k8s" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" ) -type patchInt64Value struct { - Op string `json:"op"` - Path string `json:"path"` - Value int64 `json:"value"` -} - func (o *Operator) ReconcilePrometheus(nodeCount int) error { prometheus, _ := o.controller.Config.PrometheusV1.Namespace("monitoring").Get(context.TODO(), "k8s", metav1.GetOptions{}) alertManager, _ := o.controller.Config.AlertManagerV1.Namespace("monitoring").Get(context.TODO(), "prometheus-alertmanager", metav1.GetOptions{}) @@ -31,8 +26,8 @@ func (o *Operator) ReconcilePrometheus(nodeCount int) error { o.log.Debugf("Ensuring k8s prometheus replicas are set to %d", desiredPrometheusReplicas) if currentPrometheusReplicas != desiredPrometheusReplicas { - prometheusPatch := []patchInt64Value{{ - Op: "replace", + prometheusPatch := []k8s.JSONPatchOperation{{ + Op: k8s.JSONPatchOpReplace, Path: "/spec/replicas", Value: desiredPrometheusReplicas, }} @@ -55,8 +50,8 @@ func (o *Operator) ReconcilePrometheus(nodeCount int) error { o.log.Debugf("Ensuring prometheus alert manager replicas are set to %d", desiredAlertManagerReplicas) if currentAlertManagerReplicas != desiredAlertManagerReplicas { - alertManagersPatch := []patchInt64Value{{ - Op: "replace", + alertManagersPatch := []k8s.JSONPatchOperation{{ + Op: k8s.JSONPatchOpReplace, Path: "/spec/replicas", Value: desiredAlertManagerReplicas, }} diff --git a/pkg/k8s/exec.go b/pkg/k8s/exec.go index dee9747..6223ba9 100644 --- a/pkg/k8s/exec.go +++ b/pkg/k8s/exec.go @@ -34,6 +34,60 @@ type StreamOptions struct { Err io.Writer } +// SyncExecutorInterface is an interface for executing synchronous commands in +// a container. +type SyncExecutorInterface interface { + ExecContainer(ctx context.Context, namespace, pod, container string, command ...string) (exitCode int, stdout string, stderr string, err error) +} + +// SyncExecutor is a wrapper around k8s.io/client-go/util/exec that provides +// an interface for executing synchronous commands in a container. +type SyncExecutor struct { + coreClient corev1client.CoreV1Interface + restConfig *restclient.Config +} + +// NewSyncExecutor returns a new SyncExecutor for executing commands in a +// container. +func NewSyncExecutor(coreClient corev1client.CoreV1Interface, restConfig *restclient.Config) *SyncExecutor { + return &SyncExecutor{ + coreClient: coreClient, + restConfig: restConfig, + } +} + +// ExecContainer executes a remote execution against a pod. Returns exit code, +// standard out and standard error strings and an error. A non-zero exit code +// from the command is not considered an error.
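+// A typical call looks like (names illustrative): +// code, out, errOut, err := executor.ExecContainer(ctx, "rook-ceph", podName, "rook-ceph-tools", "ceph", "status")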
+func (e *SyncExecutor) ExecContainer(ctx context.Context, namespace, pod, container string, command ...string) (exitCode int, stdout string, stderr string, err error) { + return SyncExec(ctx, e.coreClient, e.restConfig, namespace, pod, container, command...) +} + +// SyncExec returns exitcode, stdout, stderr. A non-zero exit code from the command is not considered an error. +func SyncExec(ctx context.Context, coreClient corev1client.CoreV1Interface, clientConfig *restclient.Config, ns, pod, container string, command ...string) (int, string, string, error) { + var stdout bytes.Buffer + var stderr bytes.Buffer + + opts := ExecOptions{ + CoreClient: coreClient, + Config: clientConfig, + Command: command, + StreamOptions: StreamOptions{ + Namespace: ns, + PodName: pod, + ContainerName: container, + Out: &stdout, + Err: &stderr, + }, + } + exitCode, err := ExecContainer(ctx, opts, nil) + if exitCode != 0 { + err = nil + } + + return exitCode, stdout.String(), stderr.String(), err +} + // ExecContainer executes a remote execution against a pod. Returns exit code // and error. The error will be non-nil if exit code is not 0. func ExecContainer(ctx context.Context, opts ExecOptions, terminalSizeQueue remotecommand.TerminalSizeQueue) (int, error) { @@ -75,28 +129,3 @@ func ExecContainer(ctx context.Context, opts ExecOptions, terminalSizeQueue remo } return 0, nil } - -// SyncExec returns exitcode, stdout, stderr. A non-zero exit code from the command is not considered an error. -func SyncExec(coreClient corev1client.CoreV1Interface, clientConfig *restclient.Config, ns, pod, container string, command ...string) (int, string, string, error) { - var stdout bytes.Buffer - var stderr bytes.Buffer - - opts := ExecOptions{ - CoreClient: coreClient, - Config: clientConfig, - Command: command, - StreamOptions: StreamOptions{ - Namespace: ns, - PodName: pod, - ContainerName: container, - Out: &stdout, - Err: &stderr, - }, - } - exitCode, err := ExecContainer(context.TODO(), opts, nil) - if exitCode != 0 { - err = nil - } - - return exitCode, stdout.String(), stderr.String(), err -} diff --git a/pkg/k8s/mock/mock_exec.go b/pkg/k8s/mock/mock_exec.go new file mode 100644 index 0000000..d826bad --- /dev/null +++ b/pkg/k8s/mock/mock_exec.go @@ -0,0 +1,57 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: pkg/k8s/exec.go + +// Package mock_k8s is a generated GoMock package. +package mock_k8s + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockSyncExecutorInterface is a mock of SyncExecutorInterface interface. +type MockSyncExecutorInterface struct { + ctrl *gomock.Controller + recorder *MockSyncExecutorInterfaceMockRecorder +} + +// MockSyncExecutorInterfaceMockRecorder is the mock recorder for MockSyncExecutorInterface. +type MockSyncExecutorInterfaceMockRecorder struct { + mock *MockSyncExecutorInterface +} + +// NewMockSyncExecutorInterface creates a new mock instance. +func NewMockSyncExecutorInterface(ctrl *gomock.Controller) *MockSyncExecutorInterface { + mock := &MockSyncExecutorInterface{ctrl: ctrl} + mock.recorder = &MockSyncExecutorInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSyncExecutorInterface) EXPECT() *MockSyncExecutorInterfaceMockRecorder { + return m.recorder +} + +// ExecContainer mocks base method. 
+func (m *MockSyncExecutorInterface) ExecContainer(ctx context.Context, namespace, pod, container string, command ...string) (int, string, string, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, namespace, pod, container} + for _, a := range command { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ExecContainer", varargs...) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(string) + ret2, _ := ret[2].(string) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// ExecContainer indicates an expected call of ExecContainer. +func (mr *MockSyncExecutorInterfaceMockRecorder) ExecContainer(ctx, namespace, pod, container interface{}, command ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, namespace, pod, container}, command...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecContainer", reflect.TypeOf((*MockSyncExecutorInterface)(nil).ExecContainer), varargs...) +} diff --git a/pkg/k8s/patch.go b/pkg/k8s/patch.go new file mode 100644 index 0000000..63c2a12 --- /dev/null +++ b/pkg/k8s/patch.go @@ -0,0 +1,17 @@ +package k8s + +const ( + // JSONPatchOpAdd constant op "add" + JSONPatchOpAdd = "add" + // JSONPatchOpRemove constant op "remove" + JSONPatchOpRemove = "remove" + // JSONPatchOpReplace constant op "replace" + JSONPatchOpReplace = "replace" +) + +// JSONPatchOperation specifies a single JSON patch operation. +type JSONPatchOperation struct { + Op string `json:"op"` + Path string `json:"path"` + Value interface{} `json:"value,omitempty"` +} diff --git a/pkg/webhook/rook-priority.go b/pkg/webhook/rook-priority.go index fd34497..84454cb 100644 --- a/pkg/webhook/rook-priority.go +++ b/pkg/webhook/rook-priority.go @@ -11,6 +11,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const ( + rookCephNS = "rook-ceph" +) + func (s *Server) rookPriority(c *gin.Context) { request := admissionv1.AdmissionReview{} response := admissionv1.AdmissionReview{ @@ -34,7 +38,7 @@ func (s *Server) rookPriority(c *gin.Context) { } else if request.Request != nil && request.Request.Resource.Group == "apps" && (request.Request.Resource.Resource == "deployments" || request.Request.Resource.Resource == "daemonsets") && - request.Request.Namespace == "rook-ceph" { + request.Request.Namespace == rookCephNS { log.Infof("Admission webhook mutating priority class for %s/%s", request.Request.Namespace, request.Request.Name) response.Response.Allowed = true
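Taken together, the new pkg/k8s/patch.go helpers replace ad hoc patch structs such as the removed patchInt64Value. A minimal sketch of how JSONPatchOperation serializes into an RFC 6902 JSON Patch document (a hypothetical standalone snippet for illustration, not part of this diff):

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/replicatedhq/ekco/pkg/k8s"
	)

	func main() {
		// One "replace" operation, like the one built by ReconcileMonCount above.
		patches := []k8s.JSONPatchOperation{{
			Op:    k8s.JSONPatchOpReplace,
			Path:  "/spec/mon/count",
			Value: 3,
		}}
		patchData, _ := json.Marshal(patches)
		// Prints: [{"op":"replace","path":"/spec/mon/count","value":3}]
		fmt.Println(string(patchData))
	}

This marshaled payload is what JSONPatchCephCluster submits to the API server with apitypes.JSONPatchType.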