diff --git a/go.mod b/go.mod index 6513b2b8e3f3..94a9887058c4 100644 --- a/go.mod +++ b/go.mod @@ -69,38 +69,39 @@ require ( k8s.io/kubelet v0.21.1 k8s.io/kubernetes v1.21.1 k8s.io/legacy-cloud-providers v0.21.1 - k8s.io/utils v0.0.0-20201110183641-67b214c5f920 + k8s.io/utils v0.0.0-20210521133846-da695404a2bc sigs.k8s.io/yaml v1.2.0 ) replace ( github.com/google/cadvisor => github.com/openshift/google-cadvisor v0.33.2-0.20210610135131-57b941c7657a github.com/onsi/ginkgo => github.com/openshift/onsi-ginkgo v4.7.0-origin.0+incompatible - k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210603185452-2dfc46b23003 - k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210603185452-2dfc46b23003 - k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210603185452-2dfc46b23003 - k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210603185452-2dfc46b23003 - k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210603185452-2dfc46b23003 - k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210603185452-2dfc46b23003 - k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210603185452-2dfc46b23003 - k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210603185452-2dfc46b23003 - k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20210603185452-2dfc46b23003 - k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210603185452-2dfc46b23003 - k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210603185452-2dfc46b23003 - k8s.io/controller-manager => 
github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20210603185452-2dfc46b23003 - k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210603185452-2dfc46b23003 - k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210603185452-2dfc46b23003 - k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20210603185452-2dfc46b23003 - k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20210603185452-2dfc46b23003 - k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210603185452-2dfc46b23003 - k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210603185452-2dfc46b23003 - k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210603185452-2dfc46b23003 - k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210603185452-2dfc46b23003 - k8s.io/kubernetes => github.com/openshift/kubernetes v1.21.2-0.20210603185452-2dfc46b23003 - k8s.io/legacy-cloud-providers => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210603185452-2dfc46b23003 - k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210603185452-2dfc46b23003 - k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210603185452-2dfc46b23003 - k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210603185452-2dfc46b23003 - k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20210603185452-2dfc46b23003 - k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20210603185452-2dfc46b23003 + 
github.com/opencontainers/runc => github.com/openshift/opencontainers-runc v1.0.0-rc95.0.20210608002938-1f5126fe967e + k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210624185839-c6914a80ec2e + k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210624185839-c6914a80ec2e + k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210624185839-c6914a80ec2e + k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210624185839-c6914a80ec2e + k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210624185839-c6914a80ec2e + k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210624185839-c6914a80ec2e + k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210624185839-c6914a80ec2e + k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210624185839-c6914a80ec2e + k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20210624185839-c6914a80ec2e + k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210624185839-c6914a80ec2e + k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210624185839-c6914a80ec2e + k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20210624185839-c6914a80ec2e + k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210624185839-c6914a80ec2e + k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210624185839-c6914a80ec2e + k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator 
v0.0.0-20210624185839-c6914a80ec2e + k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20210624185839-c6914a80ec2e + k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210624185839-c6914a80ec2e + k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210624185839-c6914a80ec2e + k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210624185839-c6914a80ec2e + k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210624185839-c6914a80ec2e + k8s.io/kubernetes => github.com/openshift/kubernetes v1.21.2-0.20210624185839-c6914a80ec2e + k8s.io/legacy-cloud-providers => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210624185839-c6914a80ec2e + k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210624185839-c6914a80ec2e + k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210624185839-c6914a80ec2e + k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210624185839-c6914a80ec2e + k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20210624185839-c6914a80ec2e + k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20210624185839-c6914a80ec2e ) diff --git a/go.sum b/go.sum index 076c8a065303..583adc1bb79c 100644 --- a/go.sum +++ b/go.sum @@ -619,76 +619,83 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod 
h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc95 h1:RMuWVfY3E1ILlVsC3RhIq38n4sJtlOFwU9gfFZSqrd0= -github.com/opencontainers/runc v1.0.0-rc95/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.8.0 h1:+77ba4ar4jsCbL1GLbFL8fFM57w6suPfSS9PDLDY7KM= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/openshift/api v0.0.0-20210331162552-3e31249e6a55/go.mod h1:dZ4kytOo3svxJHNYd0J55hwe/6IQG5gAUHUE0F3Jkio= +github.com/openshift/api v0.0.0-20210331193751-3acddb19d360/go.mod h1:dZ4kytOo3svxJHNYd0J55hwe/6IQG5gAUHUE0F3Jkio= +github.com/openshift/api v0.0.0-20210422150128-d8a48168c81c/go.mod h1:dZ4kytOo3svxJHNYd0J55hwe/6IQG5gAUHUE0F3Jkio= github.com/openshift/api v0.0.0-20210521075222-e273a339932a h1:aBPwLqCg66SbQd+HrjB1GhgTfPtqSY4aeB022tEYmE0= github.com/openshift/api v0.0.0-20210521075222-e273a339932a/go.mod h1:izBmoXbUu3z5kUa4FjZhvekTsyzIWiOoaIgJiZBBMQs= +github.com/openshift/apiserver-library-go v0.0.0-20210426120049-59b0e972bfb7/go.mod h1:nqn2IWld2A+Q9Lp/xGsbmUr2RyDCQixRU83yqAbymUM= github.com/openshift/apiserver-library-go v0.0.0-20210521113822-91c23a9a7ddf h1:b1YLQ5SAbjb/GmWpfmS5z6bVTWCJF+ywpd673LqbScc= github.com/openshift/apiserver-library-go v0.0.0-20210521113822-91c23a9a7ddf/go.mod h1:lhfpWyUaEs2xLx+eTgz012fNzRKiG7XYJ5QcQAgtyRQ= +github.com/openshift/build-machinery-go 
v0.0.0-20210209125900-0da259a2c359/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/openshift/build-machinery-go v0.0.0-20210423112049-9415d7ebd33e h1:F7rBobgSjtYL3/zsgDUjlTVx3Z06hdgpoldpDcn7jzc= github.com/openshift/build-machinery-go v0.0.0-20210423112049-9415d7ebd33e/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= +github.com/openshift/client-go v0.0.0-20210331195552-cf6c2669e01f/go.mod h1:hHaRJ6vp2MRd/CpuZ1oJkqnMGy5eEnoAkQmKPZKcUPI= +github.com/openshift/client-go v0.0.0-20210422153130-25c8450d1535/go.mod h1:v5/AYttPCjfqMGC1Ed/vutuDpuXmgWc5O+W9nwQ7EtE= github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 h1:ZHRIMCFIJN1p9LsJt4HQ+akDrys4PrYnXzOWI5LK03I= github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142/go.mod h1:fjS8r9mqDVsPb5td3NehsNOAWa4uiFkYEfVZioQ2gH0= github.com/openshift/google-cadvisor v0.33.2-0.20210610135131-57b941c7657a h1:h1tsWVPFkxJKmbkG9XcS1I5I12pTFY/7zYsry3NXxiY= github.com/openshift/google-cadvisor v0.33.2-0.20210610135131-57b941c7657a/go.mod h1:kN93gpdevu+bpS227TyHVZyCU5bbqCzTj5T9drl34MI= -github.com/openshift/kubernetes v1.21.2-0.20210603185452-2dfc46b23003 h1:/wohdGu/T8OvblPY6W+jf/fN1VaQr3+bBaMFe/51rNc= -github.com/openshift/kubernetes v1.21.2-0.20210603185452-2dfc46b23003/go.mod h1:8HxCe4o8GMnqPPtYrd8ExDxXQ6Y7+d+mFWfW1YQofyk= -github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210603185452-2dfc46b23003 h1:oPkefa04tZaPRSfv0n/bg2UU7kJ8ASWm5mVR3bDS9B4= -github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210603185452-2dfc46b23003/go.mod h1:UaWmhLNhSCazQTJiX0vb8ncMktiBru0kczWzUSXiZxk= -github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210603185452-2dfc46b23003 h1:sdMJmaqibWH1/z/WeeAXVrvNiaMLJFG+NH1EJB2u3uk= -github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210603185452-2dfc46b23003/go.mod h1:gmpO3kMpGm2VhnRfh09rrjWaOJPXIxpOeSBw/qhdTrA= -github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery 
v0.0.0-20210603185452-2dfc46b23003 h1:s4WzLbs+4jBw0GoEK0dw9e0NvCjKrm1nVD10PJbFUOc= -github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210603185452-2dfc46b23003/go.mod h1:+9NIFJkht9qnam6CoZabYqGs1X6zR24wZHCUhYGa8XM= -github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210603185452-2dfc46b23003 h1:QhZCdxA0ST2zCgIur6xmRVTLVP7WM0FB1xdEezj6ZZ8= -github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210603185452-2dfc46b23003/go.mod h1:FC4ZuNakh2Dy0kyfEax9TKp4xqJFJIXgphhOt+TpiwM= -github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210603185452-2dfc46b23003 h1:9DoaQ01EEs9pA2DIRStbApSnVMBR+MYRLy5U1DyYfbs= -github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210603185452-2dfc46b23003/go.mod h1:TvxGyzn8qNXzA+cPT3SP87Yvhul54cBQY4wgoZoDzno= -github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210603185452-2dfc46b23003 h1:mi7Qeu2vetnFw7ETMJR2G+pYeGZu5QdXVIjVD+V+raA= -github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210603185452-2dfc46b23003/go.mod h1:K95Y8RL4GRqrL2qKnE188qHIwNrcBMhO0hsloSE6Fhw= -github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210603185452-2dfc46b23003 h1:UF9PRwo85KQ/sLrVIGOtzzTcyCRR7PFLmh2MBTnFYD4= -github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210603185452-2dfc46b23003/go.mod h1:ypYqyXdHXZZBnY3Jr0Eie9EDr7qWCk+n7zxkmDEkkc0= -github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210603185452-2dfc46b23003 h1:5WGwIpkJ5UgNJt7KJBawvEYXsUUW3B2j21u18ZzjhFk= -github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210603185452-2dfc46b23003/go.mod h1:60txAiFf+HzsdIr3xT3hvqn+hwKz0cU3hEkeuLmKLQM= -github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20210603185452-2dfc46b23003/go.mod h1:+2XbIeiOA7U4ptBBhf8CzvC/VllM/i8fj+f/Q6au5vA= -github.com/openshift/kubernetes/staging/src/k8s.io/component-base 
v0.0.0-20210603185452-2dfc46b23003 h1:qJNWhHajXlG1hgFFJmPhZDElHOrkDRTuBuWZ0Fu7jIA= -github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210603185452-2dfc46b23003/go.mod h1:X4R74iUuoKWsI0KrLOiVZD2VHrocQ46QBvcQcb/4knI= -github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210603185452-2dfc46b23003 h1:hvENMl0FyjYslGbpcdjsRJ0MgKnxVAm/gg9vzaYI1/o= -github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210603185452-2dfc46b23003/go.mod h1:RfuOpRwJvkv/P8XGJ4SePJvdR9n90njFUvWlvBbXM3A= -github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20210603185452-2dfc46b23003/go.mod h1:2lRqZqhK5ZngSAF8xAR8iPc0JGIzkf88Vw+kNyidvLM= -github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210603185452-2dfc46b23003 h1:bPJJtz/BVXz3BXiv/vd/g0VUbF0/wSKbA/G8kgiprfQ= -github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210603185452-2dfc46b23003/go.mod h1:8a9+wxOscdSWUhL3k9ZL59Q/DmUJ0wlOAMalnNTNDSs= -github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210603185452-2dfc46b23003 h1:r9tXm3F12e4Gc0Y2zojDypK3Bc0IXz6mLW3zKi7HJZI= -github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210603185452-2dfc46b23003/go.mod h1:juK8ItOGGiCVV3KZh7/Jm/+QJHkzpLEmPg15ovsC5nw= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20210603185452-2dfc46b23003 h1:KoTb/uscBGdNzvxvgZv0RoCj06kEtOGOhWFdXWLRqA0= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20210603185452-2dfc46b23003/go.mod h1:1HHjyKteq4z+QOajDfGn13rVBVQAIvVGgGwVOqzr4Ek= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20210603185452-2dfc46b23003/go.mod h1:xONYhboWRO+jBbcHH8QFU3s19PyEFF/RfsvhtKHa3eI= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210603185452-2dfc46b23003 h1:U+qGtsbaMj5A2rl/RGED3pht0S1wclKxxuQNQbrUVjU= 
-github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210603185452-2dfc46b23003/go.mod h1:Orfp0jnRIGgoOmRs40Bz9NEGiK0r4FdL3RrnlyAC65o= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210603185452-2dfc46b23003 h1:1X8lnh05eGGHtsjdR0W5i/2q4QoFHjkrdUi3K3S6v2o= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210603185452-2dfc46b23003/go.mod h1:EBlODbbqjii99HCfAGTHvptjj3CwlzO9XXf7W1p4zNg= -github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210603185452-2dfc46b23003 h1:kRauw8eRNZXAX39mt0FpFVdo5yTYVjtVoNQLwO+MnqY= -github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210603185452-2dfc46b23003/go.mod h1:DJfglO3Qszn8Hu4V+NOXyVpvTCGNnl98a4ekZBhSOE4= -github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210603185452-2dfc46b23003 h1:Sso2Tec7WG8kmLTAfnwzoJocFr3sZiVTFsuhKdGqdes= -github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210603185452-2dfc46b23003/go.mod h1:ubZB7tOrDLJIcsCvtA15lyNAjLPMml0LUeMm1WVNi80= -github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210603185452-2dfc46b23003 h1:Dv/bFa+nNuyRk2E9V3Imce1bYBuWCbzyUx3mULpY6mA= -github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210603185452-2dfc46b23003/go.mod h1:/UVGDbOAe3X1rjBVvor7EuiiprzU6LDKGf6C3/m8kck= -github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210603185452-2dfc46b23003 h1:0JXdo/lB2md8pcaqjmnbOrXGAWWfAwT3BgQNVVmbioM= -github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210603185452-2dfc46b23003/go.mod h1:R3N6WaOJSt9L2VSYsKFYaz6keliSzDbCbw03STRdEQI= -github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210603185452-2dfc46b23003 h1:xXfb1zxLUdk7D7WJdsee0WskiMjGzyNUvXENl1Ab3aQ= -github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210603185452-2dfc46b23003/go.mod h1:99KFJSKqMgMvpCWJr4w6ooLZgR+2usWp5GPaILFNq9k= 
-github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210603185452-2dfc46b23003 h1:T/6UPUhAGeIfpodyXws8MPtBV8WSgWy7+85oBtaSUvA= -github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210603185452-2dfc46b23003/go.mod h1:tWKWmokpVN3JsAvixr41tCj0vywHYtX0oXmVCvCYPkw= +github.com/openshift/kubernetes v1.21.2-0.20210624185839-c6914a80ec2e h1:smlOVtuXwwvWpl+aHSBrQFPGngwnT8m4Cuv2Muzzx+Y= +github.com/openshift/kubernetes v1.21.2-0.20210624185839-c6914a80ec2e/go.mod h1:BfvpPHeUwHN9X0gvquIOoeGaJSq/mgkjLOVBHFEp7I0= +github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210624185839-c6914a80ec2e h1:YkrFTWrKq5IvWdxNmMHK+gh6rVF3E4/n+7xdz9HiPlc= +github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:SnWgEf0wrKpyTr19ZGEuc1r6YVq56uw56wMQbb1Agw0= +github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210624185839-c6914a80ec2e h1:alsJXyy7iDSbCINpEEalQZ2ArofUvBf5kA+GDVnPqlw= +github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:1yWiW8qTxfkFNvFfBkHkbVf2QaR39hLDrRaFg81NZOI= +github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210624185839-c6914a80ec2e h1:tVNyQbQNQ8oRtQC/f0P7qbHBFa4enZe01GnDHS3uHIc= +github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:+9NIFJkht9qnam6CoZabYqGs1X6zR24wZHCUhYGa8XM= +github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210624185839-c6914a80ec2e h1:1MBaIrpcF+KhAr1FM9PLt6QKYdq9c8dxdGvdVWHhGSg= +github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:GyyrtZJPiIAPqHeg7q2jkVVQuwNRI2wjmdPJy6Aq5WM= +github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210624185839-c6914a80ec2e h1:OiHTH4y5lze1FOfQeUAOpYvfV4woqijnrrtnlqXTpPk= +github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime 
v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:6OC79mn6YWVmifl1gFMrgExtBXITk0KB/mcmyZPElP4= +github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210624185839-c6914a80ec2e h1:5g4f84CSI/8FiKVAPM2UM9rnZWwwMKkGJiI+12UpYxU= +github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:s4qyeR7ffm9Vm04y1Cmal04ZY9FzdOamjk/lMq4gLeE= +github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210624185839-c6914a80ec2e h1:cImi1l1qrp49Zt9uNjl/0zO+MDNRbR2pFq3vPDG7Qdc= +github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:gm0LqJzsDkxj6NPk3UIwtADjFbUI2rlE5ntwPR4ZHMM= +github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210624185839-c6914a80ec2e h1:ZLfnJrESwi4T4FMu9y/gLyCPyELoIfVGtbpYDcahf3c= +github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:60txAiFf+HzsdIr3xT3hvqn+hwKz0cU3hEkeuLmKLQM= +github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:+2XbIeiOA7U4ptBBhf8CzvC/VllM/i8fj+f/Q6au5vA= +github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210624185839-c6914a80ec2e h1:nvgUZLVj0D3HFcj1XO7ZkT/m972wT5a6A1YvJ1ToDMc= +github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:ZsYj+LAOMewPhu0swN6I2O4cmmN2GkqudDNMXv4mj60= +github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210624185839-c6914a80ec2e h1:8+Dl5Oly+wuCeg8LYy9qflSIAOSZEZ9M1dIgU71pjCI= +github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:If3eXdFPvI+A7qLvMR+D4a6/5ft6VgbCs+rx51QobqM= +github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:nC6S9LLLJSZMkhgYWN324CUpIVILNJE1w4OwkYI7nz8= 
+github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210624185839-c6914a80ec2e h1:9Ms+sFib+puCHtXez207W5bEdBob37mZ0GiabjMhY04= +github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:8a9+wxOscdSWUhL3k9ZL59Q/DmUJ0wlOAMalnNTNDSs= +github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210624185839-c6914a80ec2e h1:vUrkmb23Wr14yEOPYhbP/eHffn4q7VIjy5bZdPBUL8k= +github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:juK8ItOGGiCVV3KZh7/Jm/+QJHkzpLEmPg15ovsC5nw= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20210624185839-c6914a80ec2e h1:lAvfSl7D8hnzVU/BHgR59M5U/MA2oJAzuJm8RJyR20A= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:KiO+CjoBfZvD/nQerJmXX8LVVhzTitAVsGzjDvwdAk4= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:+m6UVCDbs8U3U5/Dcp0MuW4KStrCVhQmw4saP1UI4F4= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210624185839-c6914a80ec2e h1:dK3WptI+F8K8iGz5Io6dqHiw+IsGyUQWlO2di+awDw0= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:Orfp0jnRIGgoOmRs40Bz9NEGiK0r4FdL3RrnlyAC65o= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210624185839-c6914a80ec2e h1:8/MUNNhgGEfWa70P5b8CuK150Cmj6UFJROrR1fFM7zY= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:EBlODbbqjii99HCfAGTHvptjj3CwlzO9XXf7W1p4zNg= +github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210624185839-c6914a80ec2e h1:r9nItu7VQ7CX9jzLFCrnuLmprk3+/u9WDVsCqgRzYJY= +github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:0q2QnUSyCDMQchTq6QF71Rmgo0zY7xuE1e0ysZ2OOtU= 
+github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210624185839-c6914a80ec2e h1:C9f59t+OAdOHF0S7kPQW4amZeK2EJTE4nNpzUwQnLnk= +github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:ubZB7tOrDLJIcsCvtA15lyNAjLPMml0LUeMm1WVNi80= +github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210624185839-c6914a80ec2e h1:BDwYsnlrS+uVVSkeK3hWCgfRkhnr+IFw4UH8wR2kPqg= +github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:ATg4LAjgccn9gRL4RTWmfO7lPngHPT87xUTbsK+WQFo= +github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210624185839-c6914a80ec2e h1:Ez3lNFYP5ei4S4lud6hFJ0bAOXvjCc90ACsMUmVHyMk= +github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:R3N6WaOJSt9L2VSYsKFYaz6keliSzDbCbw03STRdEQI= +github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210624185839-c6914a80ec2e h1:vCcmBcMYDv8deGPwiw4fE3f8O5NGL7Nrc2sjHUxtp+c= +github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:5O5i/CheboeAm/bnttfTi3dcpL5KBRjYjFQQVYaaP28= +github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210624185839-c6914a80ec2e h1:QIyWV7eNQ3YSiohWeqWSrxzdisAG4hn5suW6HEg1GkI= +github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210624185839-c6914a80ec2e/go.mod h1:uRHcGzksxeAsANIOCE+K5vWMaqY9f2Jv68ZkqY0oqRM= +github.com/openshift/library-go v0.0.0-20210331235027-66936e2fcc52/go.mod h1:pnz961veImKsbn7pQcuFbcVpCQosYiC1fUOjzEDeOLU= +github.com/openshift/library-go v0.0.0-20210407092538-7021fda6f427/go.mod h1:pnz961veImKsbn7pQcuFbcVpCQosYiC1fUOjzEDeOLU= github.com/openshift/library-go v0.0.0-20210521084623-7392ea9b02ca h1:NtRAdQTnE4B+UESOUaCSX3dw1uc+PpI1h2X7hUmE/5A= github.com/openshift/library-go v0.0.0-20210521084623-7392ea9b02ca/go.mod 
h1:87ZYjEncF0YNUKNzncb8Fiw8yFNevpIWZW83C/etzpw= github.com/openshift/onsi-ginkgo v4.7.0-origin.0+incompatible h1:6XSBotNi58b4MwVV4F9o/jd4BaQd+uJyz+s5TR0/ot8= github.com/openshift/onsi-ginkgo v4.7.0-origin.0+incompatible/go.mod h1:azqkkH4Vpp9A579CC26hicol/wViXag9rOwElif6v9E= +github.com/openshift/opencontainers-runc v1.0.0-rc95.0.20210608002938-1f5126fe967e h1:jaVXoepwhg7wXGjfOZRtbWg45IIp3BVwWVKTZgsNcrE= +github.com/openshift/opencontainers-runc v1.0.0-rc95.0.20210608002938-1f5126fe967e/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= @@ -1249,8 +1256,9 @@ k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/system-validators v1.4.0/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210521133846-da695404a2bc h1:dx6VGe+PnOW/kD/2UV4aUSsRfJGd7+lcqgJ6Xg0HwUs= +k8s.io/utils v0.0.0-20210521133846-da695404a2bc/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/test/extended/util/annotate/generated/zz_generated.annotations.go 
b/test/extended/util/annotate/generated/zz_generated.annotations.go index 75d935f6f7af..daec747a0321 100644 --- a/test/extended/util/annotate/generated/zz_generated.annotations.go +++ b/test/extended/util/annotate/generated/zz_generated.annotations.go @@ -12035,7 +12035,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] Mounted volume expand Should verify mounted devices can be resized": "Should verify mounted devices can be resized [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] Multi-AZ Cluster Volumes should only be allowed to provision PDs in zones where nodes exist": "should only be allowed to provision PDs in zones where nodes exist [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] Multi-AZ Cluster Volumes should only be allowed to provision PDs in zones where nodes exist": "should only be allowed to provision PDs in zones where nodes exist [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] Multi-AZ Cluster Volumes should schedule pods in the same zones as statically provisioned PVs": "should schedule pods in the same zones as statically provisioned PVs [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -12543,7 +12543,9 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] [Serial] Volume metrics should create volume metrics in Volume Manager": "should create volume metrics in Volume Manager [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] [Serial] Volume metrics should create volume metrics with the correct PVC ref": "should create volume metrics with the correct PVC ref [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] [Serial] Volume metrics should create volume metrics with the correct BlockMode PVC ref": "should create volume metrics with the correct BlockMode PVC ref [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[Top Level] [sig-storage] 
[Serial] Volume metrics should create volume metrics with the correct FilesystemMode PVC ref": "should create volume metrics with the correct FilesystemMode PVC ref [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] vcp at scale [Feature:vsphere] vsphere scale tests": "vsphere scale tests [Suite:openshift/conformance/parallel] [Suite:k8s]", diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go index 91c314e09eaa..24ef91ea2e74 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go @@ -306,7 +306,7 @@ func getUnitName(c *configs.Cgroup) string { // isDbusError returns true if the error is a specific dbus error. func isDbusError(err error, name string) bool { if err != nil { - var derr *dbus.Error + var derr dbus.Error if errors.As(err, &derr) { return strings.Contains(derr.Name, name) } diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go index ece834e92925..623870c4f61e 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go @@ -57,6 +57,50 @@ const ( ResponseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID" ) +const ( + // AutoUpdateAnnotationKey is the name of an annotation that enables + // automatic update of the spec of the bootstrap configuration + // object(s), if set to 'true'. + // + // On a fresh install, all bootstrap configuration objects will have auto + // update enabled with the following annotation key: + // apf.kubernetes.io/autoupdate-spec: 'true' + // + // The kube-apiserver periodically checks the bootstrap configuration + // objects on the cluster and applies update if necessary. 
+ // + // kube-apiserver enforces an 'always auto-update' policy for the + // mandatory configuration object(s). This implies: + // - the auto-update annotation key is added with a value of 'true' + // if it is missing. + // - the auto-update annotation key is set to 'true' if its current value + // is a boolean false or has an invalid boolean representation + // (if the cluster operator sets it to 'false' it will be stomped) + // - any changes to the spec made by the cluster operator will be + // stomped. + // + // The kube-apiserver will apply update on the suggested configuration if: + // - the cluster operator has enabled auto-update by setting the annotation + // (apf.kubernetes.io/autoupdate-spec: 'true') or + // - the annotation key is missing but the generation is 1 + // + // If the suggested configuration object is missing the annotation key, + // kube-apiserver will update the annotation appropriately: + // - it is set to 'true' if generation of the object is '1' which usually + // indicates that the spec of the object has not been changed. + // - it is set to 'false' if generation of the object is greater than 1. + // + // The goal is to enable the kube-apiserver to apply update on suggested + // configuration objects installed by previous releases but not overwrite + // changes made by the cluster operators. + // Note that this distinction is imperfectly detected: in the case where an + // operator deletes a suggested configuration object and later creates it + // but with a variant spec and then does no updates of the object + // (generation is 1), the technique outlined above will incorrectly + // determine that the object should be auto-updated. 
+ AutoUpdateAnnotationKey = "apf.kubernetes.io/autoupdate-spec" +) + // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go index b264b5ab5596..185de828cf32 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go @@ -774,7 +774,7 @@ func (r *crdHandler) getOrCreateServingInfoFor(uid types.UID, name string) (*crd utilruntime.HandleError(err) return nil, fmt.Errorf("the server could not properly serve the CR columns") } - table, err := tableconvertor.New(columns) + table, err := tableconvertor.New(columns, schema.GroupVersionKind{crd.Spec.Group, v.Name, crd.Spec.Names.Kind}) if err != nil { klog.V(2).Infof("The CRD for %v has an invalid printer specification, falling back to default printing: %v", kind, err) } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/patch_clusteroperators.go b/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/patch_clusteroperators.go new file mode 100644 index 000000000000..9538eb79a8af --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/patch_clusteroperators.go @@ -0,0 +1,94 @@ +package tableconvertor + +import ( + "encoding/json" + "io" + "reflect" + + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/registry/rest" +) + +var clusterOperatorGVK = schema.GroupVersionKind{configv1.GroupName, "v1", "ClusterOperator"} + +func withClusterOperatorColumns(c *convertor, gvk schema.GroupVersionKind) rest.TableConvertor { 
+ if gvk != clusterOperatorGVK { + return c + } + + c.headers = append(c.headers, metav1.TableColumnDefinition{ + Name: "Message", + Type: "string", + Description: "A message describing the status of the operator", + Priority: 0, + }) + c.additionalColumns = append(c.additionalColumns, clusterOperatorConditionMessage{}) + + return c +} + +type clusterOperatorConditionMessage struct { +} + +func (c clusterOperatorConditionMessage) FindResults(data interface{}) ([][]reflect.Value, error) { + obj := data.(map[string]interface{}) + unstructuredConds, _, _ := unstructured.NestedFieldNoCopy(obj, "status", "conditions") + var conds []configv1.ClusterOperatorStatusCondition + bs, err := json.Marshal(unstructuredConds) + if err != nil { + return nil, err + } + if err := json.Unmarshal(bs, &conds); err != nil { + return nil, err + } + + var available, degraded, progressing *configv1.ClusterOperatorStatusCondition + for i := range conds { + cond := &conds[i] + switch { + case cond.Type == configv1.OperatorAvailable && cond.Status == configv1.ConditionFalse: + available = cond + case cond.Type == configv1.OperatorDegraded && cond.Status == configv1.ConditionTrue: + degraded = cond + case cond.Type == configv1.OperatorProgressing && cond.Status == configv1.ConditionTrue: + progressing = cond + } + } + + mostCritical := progressing + if degraded != nil { + mostCritical = degraded + } + if available != nil { + mostCritical = available + } + + if mostCritical != nil { + if len(mostCritical.Message) > 0 { + return [][]reflect.Value{{reflect.ValueOf(mostCritical.Message)}}, nil + } + if len(mostCritical.Reason) > 0 { + return [][]reflect.Value{{reflect.ValueOf(mostCritical.Reason)}}, nil + } + } + + return nil, nil +} + +func (c clusterOperatorConditionMessage) PrintResults(wr io.Writer, results []reflect.Value) error { + first := true + for _, r := range results { + if !first { + wr.Write([]byte("; ")) // should never happen as we only return one result + } + if _, err := 
wr.Write([]byte(r.String())); err != nil { + return err + } + first = false + } + + return nil +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/tableconvertor.go b/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/tableconvertor.go index ba807103cde3..6fbc83b0a4c3 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/tableconvertor.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/tableconvertor.go @@ -21,13 +21,15 @@ import ( "context" "encoding/json" "fmt" + "io" "reflect" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/meta" metatable "k8s.io/apimachinery/pkg/api/meta/table" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" + runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/client-go/util/jsonpath" ) @@ -36,7 +38,7 @@ var swaggerMetadataDescriptions = metav1.ObjectMeta{}.SwaggerDoc() // New creates a new table convertor for the provided CRD column definition. If the printer definition cannot be parsed, // error will be returned along with a default table convertor. 
-func New(crdColumns []apiextensionsv1.CustomResourceColumnDefinition) (rest.TableConvertor, error) { +func New(crdColumns []apiextensionsv1.CustomResourceColumnDefinition, gvk schema.GroupVersionKind) (rest.TableConvertor, error) { headers := []metav1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name", Description: swaggerMetadataDescriptions["name"]}, } @@ -66,12 +68,17 @@ func New(crdColumns []apiextensionsv1.CustomResourceColumnDefinition) (rest.Tabl }) } - return c, nil + return withClusterOperatorColumns(c, gvk), nil +} + +type column interface { + FindResults(data interface{}) ([][]reflect.Value, error) + PrintResults(wr io.Writer, results []reflect.Value) error } type convertor struct { headers []metav1.TableColumnDefinition - additionalColumns []*jsonpath.JSONPath + additionalColumns []column } func (c *convertor) ConvertToTable(ctx context.Context, obj runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) { diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go index 32f075782a9a..00ce5f785c8b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go @@ -78,8 +78,6 @@ type Connection interface { // SetIdleTimeout sets the amount of time the connection may remain idle before // it is automatically closed. SetIdleTimeout(timeout time.Duration) - // RemoveStreams can be used to remove a set of streams from the Connection. 
- RemoveStreams(streams ...Stream) } // Stream represents a bidirectional communications channel that is part of an diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go index 3da7457f4827..21b2568d9003 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go @@ -31,7 +31,7 @@ import ( // streams. type connection struct { conn *spdystream.Connection - streams map[uint32]httpstream.Stream + streams []httpstream.Stream streamLock sync.Mutex newStreamHandler httpstream.NewStreamHandler ping func() (time.Duration, error) @@ -85,12 +85,7 @@ func NewServerConnectionWithPings(conn net.Conn, newStreamHandler httpstream.New // will be invoked when the server receives a newly created stream from the // client. func newConnection(conn *spdystream.Connection, newStreamHandler httpstream.NewStreamHandler, pingPeriod time.Duration, pingFn func() (time.Duration, error)) httpstream.Connection { - c := &connection{ - conn: conn, - newStreamHandler: newStreamHandler, - ping: pingFn, - streams: make(map[uint32]httpstream.Stream), - } + c := &connection{conn: conn, newStreamHandler: newStreamHandler, ping: pingFn} go conn.Serve(c.newSpdyStream) if pingPeriod > 0 && pingFn != nil { go c.sendPings(pingPeriod) @@ -110,7 +105,7 @@ func (c *connection) Close() error { // calling Reset instead of Close ensures that all streams are fully torn down s.Reset() } - c.streams = make(map[uint32]httpstream.Stream, 0) + c.streams = make([]httpstream.Stream, 0) c.streamLock.Unlock() // now that all streams are fully torn down, it's safe to call close on the underlying connection, @@ -119,15 +114,6 @@ func (c *connection) Close() error { return c.conn.Close() } -// RemoveStreams can be used to removes a set of streams from the Connection. 
-func (c *connection) RemoveStreams(streams ...httpstream.Stream) { - c.streamLock.Lock() - for _, stream := range streams { - delete(c.streams, stream.Identifier()) - } - c.streamLock.Unlock() -} - // CreateStream creates a new stream with the specified headers and registers // it with the connection. func (c *connection) CreateStream(headers http.Header) (httpstream.Stream, error) { @@ -147,7 +133,7 @@ func (c *connection) CreateStream(headers http.Header) (httpstream.Stream, error // it owns. func (c *connection) registerStream(s httpstream.Stream) { c.streamLock.Lock() - c.streams[s.Identifier()] = s + c.streams = append(c.streams, s) c.streamLock.Unlock() } diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index 3dea7fe7f9eb..afb24876adfe 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -205,10 +205,29 @@ var ErrWaitTimeout = errors.New("timed out waiting for the condition") // if the loop should be aborted. type ConditionFunc func() (done bool, err error) +// ConditionWithContextFunc returns true if the condition is satisfied, or an error +// if the loop should be aborted. +// +// The caller passes along a context that can be used by the condition function. +type ConditionWithContextFunc func(context.Context) (done bool, err error) + +// WithContext converts a ConditionFunc into a ConditionWithContextFunc +func (cf ConditionFunc) WithContext() ConditionWithContextFunc { + return func(context.Context) (done bool, err error) { + return cf() + } +} + // runConditionWithCrashProtection runs a ConditionFunc with crash protection func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) { + return runConditionWithCrashProtectionWithContext(context.TODO(), condition.WithContext()) +} + +// runConditionWithCrashProtectionWithContext runs a +// ConditionWithContextFunc with crash protection. 
+func runConditionWithCrashProtectionWithContext(ctx context.Context, condition ConditionWithContextFunc) (bool, error) { defer runtime.HandleCrash() - return condition() + return condition(ctx) } // Backoff holds parameters applied to a Backoff function. @@ -418,38 +437,42 @@ func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { // // If you want to Poll something forever, see PollInfinite. func Poll(interval, timeout time.Duration, condition ConditionFunc) error { - return pollInternal(poller(interval, timeout), condition) -} - -func pollInternal(wait WaitFunc, condition ConditionFunc) error { - done := make(chan struct{}) - defer close(done) - return WaitFor(wait, condition, done) + return PollWithContext(context.Background(), interval, timeout, condition.WithContext()) } -// PollImmediate tries a condition func until it returns true, an error, or the timeout -// is reached. +// PollWithContext tries a condition func until it returns true, an error, +// or when the context expires or the timeout is reached, whichever +// happens first. // -// PollImmediate always checks 'condition' before waiting for the interval. 'condition' -// will always be invoked at least once. +// PollWithContext always waits the interval before the run of 'condition'. +// 'condition' will always be invoked at least once. // // Some intervals may be missed if the condition takes too long or the time // window is too short. // -// If you want to immediately Poll something forever, see PollImmediateInfinite. -func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error { - return pollImmediateInternal(poller(interval, timeout), condition) +// If you want to Poll something forever, see PollInfinite. 
+func PollWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, false, poller(interval, timeout), condition) } -func pollImmediateInternal(wait WaitFunc, condition ConditionFunc) error { - done, err := runConditionWithCrashProtection(condition) - if err != nil { - return err - } - if done { - return nil - } - return pollInternal(wait, condition) +// PollUntil tries a condition func until it returns true, an error or stopCh is +// closed. +// +// PollUntil always waits interval before the first run of 'condition'. +// 'condition' will always be invoked at least once. +func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { + ctx, cancel := contextForChannel(stopCh) + defer cancel() + return PollUntilWithContext(ctx, interval, condition.WithContext()) +} + +// PollUntilWithContext tries a condition func until it returns true, +// an error or the specified context is cancelled or expired. +// +// PollUntilWithContext always waits interval before the first run of 'condition'. +// 'condition' will always be invoked at least once. +func PollUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, false, poller(interval, 0), condition) } // PollInfinite tries a condition func until it returns true or an error @@ -459,37 +482,45 @@ func pollImmediateInternal(wait WaitFunc, condition ConditionFunc) error { // Some intervals may be missed if the condition takes too long or the time // window is too short. 
func PollInfinite(interval time.Duration, condition ConditionFunc) error { - done := make(chan struct{}) - defer close(done) - return PollUntil(interval, condition, done) + return PollInfiniteWithContext(context.Background(), interval, condition.WithContext()) } -// PollImmediateInfinite tries a condition func until it returns true or an error +// PollInfiniteWithContext tries a condition func until it returns true or an error // -// PollImmediateInfinite runs the 'condition' before waiting for the interval. +// PollInfiniteWithContext always waits the interval before the run of 'condition'. // // Some intervals may be missed if the condition takes too long or the time // window is too short. -func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error { - done, err := runConditionWithCrashProtection(condition) - if err != nil { - return err - } - if done { - return nil - } - return PollInfinite(interval, condition) +func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, false, poller(interval, 0), condition) } -// PollUntil tries a condition func until it returns true, an error or stopCh is -// closed. +// PollImmediate tries a condition func until it returns true, an error, or the timeout +// is reached. // -// PollUntil always waits interval before the first run of 'condition'. +// PollImmediate always checks 'condition' before waiting for the interval. 'condition' +// will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to immediately Poll something forever, see PollImmediateInfinite. 
+func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error { + return PollImmediateWithContext(context.Background(), interval, timeout, condition.WithContext()) +} + +// PollImmediateWithContext tries a condition func until it returns true, an error, +// or the timeout is reached or the specified context expires, whichever happens first. +// +// PollImmediateWithContext always checks 'condition' before waiting for the interval. // 'condition' will always be invoked at least once. -func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - ctx, cancel := contextForChannel(stopCh) - defer cancel() - return WaitFor(poller(interval, 0), condition, ctx.Done()) +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to immediately Poll something forever, see PollImmediateInfinite. +func PollImmediateWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, true, poller(interval, timeout), condition) } // PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed. @@ -497,18 +528,67 @@ func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan st // PollImmediateUntil runs the 'condition' before waiting for the interval. // 'condition' will always be invoked at least once. func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - done, err := condition() - if err != nil { - return err - } - if done { - return nil + ctx, cancel := contextForChannel(stopCh) + defer cancel() + return PollImmediateUntilWithContext(ctx, interval, condition.WithContext()) +} + +// PollImmediateUntilWithContext tries a condition func until it returns true, +// an error or the specified context is cancelled or expired. 
+// +// PollImmediateUntilWithContext runs the 'condition' before waiting for the interval. +// 'condition' will always be invoked at least once. +func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, true, poller(interval, 0), condition) +} + +// PollImmediateInfinite tries a condition func until it returns true or an error +// +// PollImmediateInfinite runs the 'condition' before waiting for the interval. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error { + return PollImmediateInfiniteWithContext(context.Background(), interval, condition.WithContext()) +} + +// PollImmediateInfiniteWithContext tries a condition func until it returns true +// or an error or the specified context gets cancelled or expired. +// +// PollImmediateInfiniteWithContext runs the 'condition' before waiting for the interval. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, true, poller(interval, 0), condition) +} + +// Internally used, each of the the public 'Poll*' function defined in this +// package should invoke this internal function with appropriate parameters. +// ctx: the context specified by the caller, for infinite polling pass +// a context that never gets cancelled or expired. +// immediate: if true, the 'condition' will be invoked before waiting for the interval, +// in this case 'condition' will always be invoked at least once. +// wait: user specified WaitFunc function that controls at what interval the condition +// function should be invoked periodically and whether it is bound by a timeout. 
+// condition: user specified ConditionWithContextFunc function. +func poll(ctx context.Context, immediate bool, wait WaitWithContextFunc, condition ConditionWithContextFunc) error { + if immediate { + done, err := runConditionWithCrashProtectionWithContext(ctx, condition) + if err != nil { + return err + } + if done { + return nil + } } + select { - case <-stopCh: + case <-ctx.Done(): + // returning ctx.Err() will break backward compatibility return ErrWaitTimeout default: - return PollUntil(interval, condition, stopCh) + return WaitForWithContext(ctx, wait, condition) } } @@ -516,6 +596,20 @@ func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh // should be executed and is closed when the last test should be invoked. type WaitFunc func(done <-chan struct{}) <-chan struct{} +// WithContext converts the WaitFunc to an equivalent WaitWithContextFunc +func (w WaitFunc) WithContext() WaitWithContextFunc { + return func(ctx context.Context) <-chan struct{} { + return w(ctx.Done()) + } +} + +// WaitWithContextFunc creates a channel that receives an item every time a test +// should be executed and is closed when the last test should be invoked. +// +// When the specified context gets cancelled or expires the function +// stops sending item and returns immediately. +type WaitWithContextFunc func(ctx context.Context) <-chan struct{} + // WaitFor continually checks 'fn' as driven by 'wait'. // // WaitFor gets a channel from 'wait()'', and then invokes 'fn' once for every value @@ -532,13 +626,35 @@ type WaitFunc func(done <-chan struct{}) <-chan struct{} // "uniform pseudo-random", the `fn` might still run one or multiple time, // though eventually `WaitFor` will return. 
func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { - stopCh := make(chan struct{}) - defer close(stopCh) - c := wait(stopCh) + ctx, cancel := contextForChannel(done) + defer cancel() + return WaitForWithContext(ctx, wait.WithContext(), fn.WithContext()) +} + +// WaitForWithContext continually checks 'fn' as driven by 'wait'. +// +// WaitForWithContext gets a channel from 'wait()'', and then invokes 'fn' +// once for every value placed on the channel and once more when the +// channel is closed. If the channel is closed and 'fn' +// returns false without error, WaitForWithContext returns ErrWaitTimeout. +// +// If 'fn' returns an error the loop ends and that error is returned. If +// 'fn' returns true the loop ends and nil is returned. +// +// context.Canceled will be returned if the ctx.Done() channel is closed +// without fn ever returning true. +// +// When the ctx.Done() channel is closed, because the golang `select` statement is +// "uniform pseudo-random", the `fn` might still run one or multiple times, +// though eventually `WaitForWithContext` will return. +func WaitForWithContext(ctx context.Context, wait WaitWithContextFunc, fn ConditionWithContextFunc) error { + waitCtx, cancel := context.WithCancel(context.Background()) + defer cancel() + c := wait(waitCtx) for { select { case _, open := <-c: - ok, err := runConditionWithCrashProtection(fn) + ok, err := runConditionWithCrashProtectionWithContext(ctx, fn) if err != nil { return err } @@ -548,7 +664,8 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { if !open { return ErrWaitTimeout } - case <-done: + case <-ctx.Done(): + // returning ctx.Err() will break backward compatibility return ErrWaitTimeout } } @@ -564,8 +681,8 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { // // Output ticks are not buffered. If the channel is not ready to receive an // item, the tick is skipped. 
-func poller(interval, timeout time.Duration) WaitFunc { - return WaitFunc(func(done <-chan struct{}) <-chan struct{} { +func poller(interval, timeout time.Duration) WaitWithContextFunc { + return WaitWithContextFunc(func(ctx context.Context) <-chan struct{} { ch := make(chan struct{}) go func() { @@ -595,7 +712,7 @@ func poller(interval, timeout time.Duration) WaitFunc { } case <-after: return - case <-done: + case <-ctx.Done(): return } } diff --git a/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go b/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go index a3a1dddc6ff0..61bbca8dc0b5 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go +++ b/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go @@ -412,8 +412,14 @@ var ( func newPriorityLevelConfiguration(name string, spec flowcontrol.PriorityLevelConfigurationSpec) *flowcontrol.PriorityLevelConfiguration { return &flowcontrol.PriorityLevelConfiguration{ - ObjectMeta: metav1.ObjectMeta{Name: name}, - Spec: spec} + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Annotations: map[string]string{ + flowcontrol.AutoUpdateAnnotationKey: "true", + }, + }, + Spec: spec, + } } func newFlowSchema(name, plName string, matchingPrecedence int32, dmType flowcontrol.FlowDistinguisherMethodType, rules ...flowcontrol.PolicyRulesWithSubjects) *flowcontrol.FlowSchema { @@ -422,7 +428,12 @@ func newFlowSchema(name, plName string, matchingPrecedence int32, dmType flowcon dm = &flowcontrol.FlowDistinguisherMethod{Type: dmType} } return &flowcontrol.FlowSchema{ - ObjectMeta: metav1.ObjectMeta{Name: name}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Annotations: map[string]string{ + flowcontrol.AutoUpdateAnnotationKey: "true", + }, + }, Spec: flowcontrol.FlowSchemaSpec{ PriorityLevelConfiguration: flowcontrol.PriorityLevelConfigurationReference{ Name: plName, diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go 
b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go index 92acf6381d40..09612984c90e 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go @@ -32,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/types" utilsets "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/audit" - "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/features" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -78,7 +77,7 @@ var ( Help: "Counter of apiserver requests broken out for each verb, dry run value, group, version, resource, scope, component, and HTTP response code.", StabilityLevel: compbasemetrics.STABLE, }, - []string{"verb", "dry_run", "group", "version", "resource", "subresource", "scope", "component", "code"}, + []string{"verb", "dry_run", "group", "version", "resource", "subresource", "scope", "component", "code", "system_client"}, ) longRunningRequestGauge = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ @@ -438,12 +437,19 @@ func MonitorRequest(req *http.Request, verb, group, version, resource, subresour dryRun := cleanDryRun(req.URL) elapsedSeconds := elapsed.Seconds() - requestCounter.WithContext(req.Context()).WithLabelValues(reportedVerb, dryRun, group, version, resource, subresource, scope, component, codeToString(httpCode)).Inc() - // MonitorRequest happens after authentication, so we can trust the username given by the request - info, ok := request.UserFrom(req.Context()) - if ok && info.GetName() == user.APIServerUser { - apiSelfRequestCounter.WithContext(req.Context()).WithLabelValues(reportedVerb, resource, subresource).Inc() + + systemClient := "" + if uas := strings.SplitN(req.UserAgent(), "/", 2); len(uas) > 1 { + switch uas[0] { + case "kube-apiserver": + apiSelfRequestCounter.WithContext(req.Context()).WithLabelValues(reportedVerb, resource, subresource).Inc() + fallthrough + case "kube-controller-manager", 
"kube-scheduler", "cluster-policy-controller": + systemClient = uas[0] + } } + requestCounter.WithContext(req.Context()).WithLabelValues(reportedVerb, dryRun, group, version, resource, subresource, scope, component, codeToString(httpCode), systemClient).Inc() + if deprecated { deprecatedRequestGauge.WithContext(req.Context()).WithLabelValues(group, version, resource, subresource, removedRelease).Set(1) audit.AddAuditAnnotation(req.Context(), deprecatedAnnotationKey, "true") diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go b/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go index 2484bfc76c8f..70c8d8b855c4 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go +++ b/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go @@ -47,7 +47,10 @@ const ( observationMaintenancePeriod = 10 * time.Second ) -var nonMutatingRequestVerbs = sets.NewString("get", "list", "watch") +var ( + nonMutatingRequestVerbs = sets.NewString("get", "list", "watch") + watchVerbs = sets.NewString("watch") +) func handleError(w http.ResponseWriter, r *http.Request, err error) { errorMsg := fmt.Sprintf("Internal Server Error: %#v", r.RequestURI) diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go b/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go index 186824e2f265..23ea5b7287ad 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go +++ b/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go @@ -17,9 +17,9 @@ limitations under the License. 
package filters import ( - "context" "fmt" "net/http" + "runtime" "sync/atomic" flowcontrol "k8s.io/api/flowcontrol/v1beta1" @@ -31,10 +31,6 @@ import ( "k8s.io/klog/v2" ) -type priorityAndFairnessKeyType int - -const priorityAndFairnessKey priorityAndFairnessKeyType = iota - // PriorityAndFairnessClassification identifies the results of // classification for API Priority and Fairness type PriorityAndFairnessClassification struct { @@ -44,12 +40,6 @@ type PriorityAndFairnessClassification struct { PriorityLevelUID apitypes.UID } -// GetClassification returns the classification associated with the -// given context, if any, otherwise nil -func GetClassification(ctx context.Context) *PriorityAndFairnessClassification { - return ctx.Value(priorityAndFairnessKey).(*PriorityAndFairnessClassification) -} - // waitingMark tracks requests waiting rather than being executed var waitingMark = &requestWatermark{ phase: epmetrics.WaitingPhase, @@ -60,6 +50,9 @@ var waitingMark = &requestWatermark{ var atomicMutatingExecuting, atomicReadOnlyExecuting int32 var atomicMutatingWaiting, atomicReadOnlyWaiting int32 +// newInitializationSignal is defined for testing purposes. +var newInitializationSignal = utilflowcontrol.NewInitializationSignal + // WithPriorityAndFairness limits the number of in-flight // requests in a fine-grained way. func WithPriorityAndFairness( @@ -84,8 +77,10 @@ func WithPriorityAndFairness( return } - // Skip tracking long running requests. - if longRunningRequestCheck != nil && longRunningRequestCheck(r, requestInfo) { + isWatchRequest := watchVerbs.Has(requestInfo.Verb) + + // Skip tracking long running non-watch requests. 
+ if longRunningRequestCheck != nil && longRunningRequestCheck(r, requestInfo) && !isWatchRequest { klog.V(6).Infof("Serving RequestInfo=%#+v, user.Info=%#+v as longrunning\n", requestInfo, user) handler.ServeHTTP(w, r) return @@ -116,15 +111,53 @@ func WithPriorityAndFairness( waitingMark.recordReadOnly(int(atomic.AddInt32(&atomicReadOnlyWaiting, delta))) } } + var resultCh chan interface{} + if isWatchRequest { + resultCh = make(chan interface{}) + } execute := func() { noteExecutingDelta(1) defer noteExecutingDelta(-1) served = true - innerCtx := context.WithValue(ctx, priorityAndFairnessKey, classification) - innerReq := r.Clone(innerCtx) + + innerCtx := ctx + innerReq := r + + var watchInitializationSignal utilflowcontrol.InitializationSignal + if isWatchRequest { + watchInitializationSignal = newInitializationSignal() + innerCtx = utilflowcontrol.WithInitializationSignal(ctx, watchInitializationSignal) + innerReq = r.Clone(innerCtx) + } setResponseHeaders(classification, w) - handler.ServeHTTP(w, innerReq) + if isWatchRequest { + go func() { + defer func() { + err := recover() + // do not wrap the sentinel ErrAbortHandler panic value + if err != nil && err != http.ErrAbortHandler { + // Same as stdlib http server code. Manually allocate stack + // trace buffer size to prevent excessively large logs + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + err = fmt.Sprintf("%v\n%s", err, buf) + } + resultCh <- err + }() + + // Protect from the situations when request will not reach storage layer + // and the initialization signal will not be send. 
+ defer watchInitializationSignal.Signal() + + handler.ServeHTTP(w, innerReq) + }() + + watchInitializationSignal.Wait() + } else { + handler.ServeHTTP(w, innerReq) + } } digest := utilflowcontrol.RequestDigest{RequestInfo: requestInfo, User: user} fcIfc.Handle(ctx, digest, note, func(inQueue bool) { @@ -143,9 +176,23 @@ func WithPriorityAndFairness( epmetrics.DroppedRequests.WithContext(ctx).WithLabelValues(epmetrics.ReadOnlyKind).Inc() } epmetrics.RecordRequestTermination(r, requestInfo, epmetrics.APIServerComponent, http.StatusTooManyRequests) + if isWatchRequest { + close(resultCh) + } tooManyRequests(r, w) } + // For watch requests, from the APF point of view the request is already + // finished at this point. However, that doesn't mean it is already finished + // from the non-APF point of view. So we need to wait here until the request is: + // 1) finished being processed or + // 2) rejected + if isWatchRequest { + err := <-resultCh + if err != nil { + panic(err) + } + } }) } diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go index 23ffc3ae31e5..39e8db0b7673 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -38,6 +38,7 @@ import ( "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/storage" utilfeature "k8s.io/apiserver/pkg/util/feature" + utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" utiltrace "k8s.io/utils/trace" @@ -1413,6 +1414,14 @@ func (c *cacheWatcher) process(ctx context.Context, initEvents []*watchCacheEven klog.V(2).Infof("processing %d initEvents of %s (%s) took %v", len(initEvents), objType, c.identifier, processingTime) } + // At this point we already start processing incoming watch events. + // However, the init events can still be processed because their serialization + // and sending to the client happens asynchronously. 
+ // TODO: As described in the KEP, we would like to estimate that by delaying + // the initialization signal proportionally to the number of events to + // process, but we're leaving this to the tuning phase. + utilflowcontrol.WatchInitialized(ctx) + defer close(c.result) defer c.Stop() for { diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go index bd87382e83f1..0a6f4bc3c15c 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go @@ -32,6 +32,7 @@ import ( "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/etcd3/metrics" "k8s.io/apiserver/pkg/storage/value" + utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" "go.etcd.io/etcd/clientv3" "k8s.io/klog/v2" @@ -120,6 +121,14 @@ func (w *watcher) Watch(ctx context.Context, key string, rev int64, recursive, p } wc := w.createWatchChan(ctx, key, rev, recursive, progressNotify, pred) go wc.run() + + // For etcd watch we don't have an easy way to answer whether the watch + // has already caught up. So in the initial version (given that watchcache + // is by default enabled for all resources but Events), we just deliver + // the initialization signal immediately. Improving this will be explored + // in the future. + utilflowcontrol.WatchInitialized(ctx) + return wc, nil } diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_context.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_context.go new file mode 100644 index 000000000000..6497e3fff5ef --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_context.go @@ -0,0 +1,82 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flowcontrol + +import ( + "context" + "sync" +) + +type priorityAndFairnessKeyType int + +const ( + // priorityAndFairnessInitializationSignalKey is a key under which + // initialization signal function for watch requests is stored + // in the context. + priorityAndFairnessInitializationSignalKey priorityAndFairnessKeyType = iota +) + +// WithInitializationSignal creates a copy of parent context with +// priority and fairness initialization signal value. +func WithInitializationSignal(ctx context.Context, signal InitializationSignal) context.Context { + return context.WithValue(ctx, priorityAndFairnessInitializationSignalKey, signal) +} + +// initializationSignalFrom returns an initialization signal function +// which when called signals that watch initialization has already finished +// to priority and fairness dispatcher. +func initializationSignalFrom(ctx context.Context) (InitializationSignal, bool) { + signal, ok := ctx.Value(priorityAndFairnessInitializationSignalKey).(InitializationSignal) + return signal, ok && signal != nil +} + +// WatchInitialized sends a signal to priority and fairness dispatcher +// that a given watch request has already been initialized. +func WatchInitialized(ctx context.Context) { + if signal, ok := initializationSignalFrom(ctx); ok { + signal.Signal() + } +} + +// InitializationSignal is an interface that allows sending and handling +// initialization signals. +type InitializationSignal interface { + // Signal notifies the dispatcher about finished initialization. + Signal() + // Wait waits for the initialization signal. 
+ Wait() +} + +type initializationSignal struct { + once sync.Once + done chan struct{} +} + +func NewInitializationSignal() InitializationSignal { + return &initializationSignal{ + once: sync.Once{}, + done: make(chan struct{}), + } +} + +func (i *initializationSignal) Signal() { + i.once.Do(func() { close(i.done) }) +} + +func (i *initializationSignal) Wait() { + <-i.done +} diff --git a/vendor/k8s.io/cloud-provider/go.mod b/vendor/k8s.io/cloud-provider/go.mod index 1873a8f04a72..3e7fa732ac12 100644 --- a/vendor/k8s.io/cloud-provider/go.mod +++ b/vendor/k8s.io/cloud-provider/go.mod @@ -16,18 +16,16 @@ require ( k8s.io/component-base v0.21.0-rc.0 k8s.io/controller-manager v0.0.0 k8s.io/klog/v2 v2.8.0 - k8s.io/utils v0.0.0-20201110183641-67b214c5f920 + k8s.io/utils v0.0.0-20210521133846-da695404a2bc ) replace ( github.com/go-bindata/go-bindata => github.com/go-bindata/go-bindata v3.1.1+incompatible + github.com/google/cadvisor => github.com/openshift/google-cadvisor v0.33.2-0.20210610135131-57b941c7657a github.com/imdario/mergo => github.com/imdario/mergo v0.3.5 github.com/mattn/go-colorable => github.com/mattn/go-colorable v0.0.9 github.com/onsi/ginkgo => github.com/openshift/ginkgo v4.7.0-origin.0+incompatible - github.com/openshift/api => github.com/openshift/api v0.0.0-20210422150128-d8a48168c81c - github.com/openshift/apiserver-library-go => github.com/openshift/apiserver-library-go v0.0.0-20210426120049-59b0e972bfb7 - github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20210422153130-25c8450d1535 - github.com/openshift/library-go => github.com/openshift/library-go v0.0.0-20210407092538-7021fda6f427 + github.com/opencontainers/runc => github.com/openshift/opencontainers-runc v1.0.0-rc95.0.20210608002938-1f5126fe967e github.com/robfig/cron => github.com/robfig/cron v1.1.0 go.uber.org/multierr => go.uber.org/multierr v1.1.0 k8s.io/api => ../api diff --git a/vendor/k8s.io/cloud-provider/go.sum b/vendor/k8s.io/cloud-provider/go.sum index 
262855749a32..74a240799dd7 100644 --- a/vendor/k8s.io/cloud-provider/go.sum +++ b/vendor/k8s.io/cloud-provider/go.sum @@ -96,7 +96,6 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= -github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0= @@ -105,7 +104,6 @@ github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:z github.com/container-storage-interface/spec v1.3.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= @@ -130,7 +128,6 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= 
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= @@ -297,7 +294,6 @@ github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/cadvisor v0.39.0/go.mod h1:rjQFmK4jPCpxeUdLq9bYhNFFsjgGOtpnDmDeap0+nsw= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -450,7 +446,6 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 
h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= @@ -485,21 +480,23 @@ github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1 github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= -github.com/opencontainers/runc v1.0.0-rc95/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/openshift/api v0.0.0-20210331162552-3e31249e6a55/go.mod h1:dZ4kytOo3svxJHNYd0J55hwe/6IQG5gAUHUE0F3Jkio= +github.com/openshift/api v0.0.0-20210331193751-3acddb19d360/go.mod h1:dZ4kytOo3svxJHNYd0J55hwe/6IQG5gAUHUE0F3Jkio= github.com/openshift/api v0.0.0-20210422150128-d8a48168c81c/go.mod h1:dZ4kytOo3svxJHNYd0J55hwe/6IQG5gAUHUE0F3Jkio= github.com/openshift/apiserver-library-go v0.0.0-20210426120049-59b0e972bfb7/go.mod h1:nqn2IWld2A+Q9Lp/xGsbmUr2RyDCQixRU83yqAbymUM= github.com/openshift/build-machinery-go v0.0.0-20210209125900-0da259a2c359/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= +github.com/openshift/client-go 
v0.0.0-20210331195552-cf6c2669e01f/go.mod h1:hHaRJ6vp2MRd/CpuZ1oJkqnMGy5eEnoAkQmKPZKcUPI= github.com/openshift/client-go v0.0.0-20210422153130-25c8450d1535/go.mod h1:v5/AYttPCjfqMGC1Ed/vutuDpuXmgWc5O+W9nwQ7EtE= github.com/openshift/ginkgo v4.7.0-origin.0+incompatible h1:2qD1n/RAnycWMPjYS6MEAUzRmVoF0ql7ozk1ANv8dcM= github.com/openshift/ginkgo v4.7.0-origin.0+incompatible/go.mod h1:8METQ1gDhl0KW+pGH4c0DIJYEN/ksVCL6hOuHPmXGnk= +github.com/openshift/google-cadvisor v0.33.2-0.20210610135131-57b941c7657a/go.mod h1:kN93gpdevu+bpS227TyHVZyCU5bbqCzTj5T9drl34MI= +github.com/openshift/library-go v0.0.0-20210331235027-66936e2fcc52/go.mod h1:pnz961veImKsbn7pQcuFbcVpCQosYiC1fUOjzEDeOLU= github.com/openshift/library-go v0.0.0-20210407092538-7021fda6f427 h1:/6Xf107BJIzdfRe9xfuU4xnx7TUHQ7vzDMWiNYPmxfM= github.com/openshift/library-go v0.0.0-20210407092538-7021fda6f427/go.mod h1:pnz961veImKsbn7pQcuFbcVpCQosYiC1fUOjzEDeOLU= +github.com/openshift/opencontainers-runc v1.0.0-rc95.0.20210608002938-1f5126fe967e/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= @@ -790,7 +787,6 @@ golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -800,7 +796,6 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1004,8 +999,9 @@ k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7Br k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kubernetes v1.21.0-rc.0/go.mod h1:Yx6XZ8zalyqEk7but+j4+5SvLzdyH1eeqZ4cwO+5dD4= k8s.io/system-validators v1.4.0/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210521133846-da695404a2bc h1:dx6VGe+PnOW/kD/2UV4aUSsRfJGd7+lcqgJ6Xg0HwUs= +k8s.io/utils v0.0.0-20210521133846-da695404a2bc/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod 
h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= diff --git a/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go b/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go index e0db751b7ab8..ee7b39ed88b6 100644 --- a/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go +++ b/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go @@ -145,7 +145,7 @@ var ( }, "[Skipped:gce]": { // Requires creation of a different compute instance in a different zone and is not compatible with volumeBindingMode of WaitForFirstConsumer which we use in 4.x - `\[sig-scheduling\] Multi-AZ Cluster Volumes \[sig-storage\] should only be allowed to provision PDs in zones where nodes exist`, + `\[sig-storage\] Multi-AZ Cluster Volumes should only be allowed to provision PDs in zones where nodes exist`, // The following tests try to ssh directly to a node. None of our nodes have external IPs `\[k8s.io\] \[sig-node\] crictl should be able to run crictl on the node`, diff --git a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission.go index 8de4a00251e7..7885ebe6aab6 100644 --- a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission.go +++ b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission.go @@ -239,6 +239,13 @@ func (a *managementCPUsOverride) Admit(ctx context.Context, attr admission.Attri return nil } + // we should skip mutation of the pod that has container with both CPU limit and request because once we will remove + // the request, the defaulter will set the request back with the CPU limit value + if podHasBothCPULimitAndRequest(allContainers) { + pod.Annotations[workloadAdmissionWarning] = "skip pod CPUs 
requests modifications because pod container has both CPU limit and request" + return nil + } + // before we update the pod available under admission attributes, we need to verify that deletion of the CPU request // will not change the pod QoS class, otherwise skip pod mutation // 1. Copy the pod @@ -408,6 +415,19 @@ func getPodQoSClass(containers []coreapi.Container) coreapi.PodQOSClass { return coreapi.PodQOSBurstable } +func podHasBothCPULimitAndRequest(containers []coreapi.Container) bool { + for _, c := range containers { + _, cpuRequestExists := c.Resources.Requests[coreapi.ResourceCPU] + _, cpuLimitExists := c.Resources.Limits[coreapi.ResourceCPU] + + if cpuRequestExists && cpuLimitExists { + return true + } + } + + return false +} + func isManagementResourceAvailableForAllNodes(nodes []*corev1.Node, workloadType string) error { managedResource := fmt.Sprintf("%s.%s", workloadType, containerWorkloadResourceSuffix) for _, node := range nodes { diff --git a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/attributes.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/attributes.go index f72b89a63526..671b2ad6e524 100644 --- a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/attributes.go +++ b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/attributes.go @@ -7,6 +7,7 @@ import ( authorizationv1 "github.com/openshift/api/authorization/v1" configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" quotav1 "github.com/openshift/api/quota/v1" securityv1 "github.com/openshift/api/security/v1" ) @@ -47,6 +48,7 @@ var supportedObjectsScheme = runtime.NewScheme() func init() { utilruntime.Must(configv1.Install(supportedObjectsScheme)) + utilruntime.Must(operatorv1.Install(supportedObjectsScheme)) utilruntime.Must(quotav1.Install(supportedObjectsScheme)) 
utilruntime.Must(securityv1.Install(supportedObjectsScheme)) utilruntime.Must(authorizationv1.Install(supportedObjectsScheme)) diff --git a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration/cr_validation_registration.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration/cr_validation_registration.go index 2495d4eb67b6..c95cf9b35885 100644 --- a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration/cr_validation_registration.go +++ b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration/cr_validation_registration.go @@ -8,6 +8,7 @@ import ( "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota" "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/config" "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/console" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/dns" "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/features" "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/image" "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/network" @@ -24,6 +25,7 @@ var AllCustomResourceValidators = []string{ authentication.PluginName, features.PluginName, console.PluginName, + dns.PluginName, image.PluginName, oauth.PluginName, project.PluginName, @@ -43,6 +45,7 @@ func RegisterCustomResourceValidation(plugins *admission.Plugins) { authentication.Register(plugins) features.Register(plugins) console.Register(plugins) + dns.Register(plugins) image.Register(plugins) oauth.Register(plugins) project.Register(plugins) diff --git 
a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/dns/validate_dns.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/dns/validate_dns.go new file mode 100644 index 000000000000..75bfe89c98d4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/dns/validate_dns.go @@ -0,0 +1,144 @@ +package dns + +import ( + "fmt" + "io" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/validation" + unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + api "k8s.io/kubernetes/pkg/apis/core" + k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" + apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" + + operatorv1 "github.com/openshift/api/operator/v1" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "operator.openshift.io/ValidateDNS" + +// Register registers the DNS validation plugin. +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return crvalidation.NewValidator( + map[schema.GroupResource]bool{ + operatorv1.GroupVersion.WithResource("dnses").GroupResource(): true, + }, + map[schema.GroupVersionKind]crvalidation.ObjectValidator{ + operatorv1.GroupVersion.WithKind("DNS"): dnsV1{}, + }) + }) +} + +// toDNSV1 converts a runtime object to a versioned DNS. 
+func toDNSV1(uncastObj runtime.Object) (*operatorv1.DNS, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + obj, ok := uncastObj.(*operatorv1.DNS) + if !ok { + return nil, field.ErrorList{ + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"DNS"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"operator.openshift.io/v1"}), + } + } + + return obj, nil +} + +// dnsV1 is runtime object that is validated as a versioned DNS. +type dnsV1 struct{} + +// ValidateCreate validates a DNS that is being created. +func (dnsV1) ValidateCreate(uncastObj runtime.Object) field.ErrorList { + obj, errs := toDNSV1(uncastObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, validation.NameIsDNSSubdomain, field.NewPath("metadata"))...) + errs = append(errs, validateDNSSpecCreate(obj.Spec)...) + + return errs +} + +// ValidateUpdate validates a DNS that is being updated. +func (dnsV1) ValidateUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toDNSV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toDNSV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateDNSSpecUpdate(obj.Spec, oldObj.Spec)...) + + return errs +} + +// ValidateStatusUpdate validates a DNS status that is being updated. +func (dnsV1) ValidateStatusUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toDNSV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toDNSV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) 
+ + return errs +} + +// validateDNSSpecCreate validates the spec of a DNS that is being created. +func validateDNSSpecCreate(spec operatorv1.DNSSpec) field.ErrorList { + return validateDNSSpec(spec) +} + +// validateDNSSpecUpdate validates the spec of a DNS that is being updated. +func validateDNSSpecUpdate(newspec, oldspec operatorv1.DNSSpec) field.ErrorList { + return validateDNSSpec(newspec) +} + +// validateDNSSpec validates the spec of a DNS. +func validateDNSSpec(spec operatorv1.DNSSpec) field.ErrorList { + var errs field.ErrorList + specField := field.NewPath("spec") + errs = append(errs, validateDNSNodePlacement(spec.NodePlacement, specField.Child("nodePlacement"))...) + return errs +} + +// validateDNSSpec validates the spec.nodePlacement field of a DNS. +func validateDNSNodePlacement(nodePlacement operatorv1.DNSNodePlacement, fldPath *field.Path) field.ErrorList { + var errs field.ErrorList + if len(nodePlacement.NodeSelector) != 0 { + errs = append(errs, unversionedvalidation.ValidateLabels(nodePlacement.NodeSelector, fldPath.Child("nodeSelector"))...) + } + if len(nodePlacement.Tolerations) != 0 { + errs = append(errs, validateTolerations(nodePlacement.Tolerations, fldPath.Child("tolerations"))...) + } + return errs +} + +// validateTolerations validates a slice of corev1.Toleration. +func validateTolerations(versionedTolerations []corev1.Toleration, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + unversionedTolerations := make([]api.Toleration, len(versionedTolerations)) + for i := range versionedTolerations { + if err := k8s_api_v1.Convert_v1_Toleration_To_core_Toleration(&versionedTolerations[i], &unversionedTolerations[i], nil); err != nil { + allErrors = append(allErrors, field.Invalid(fldPath.Index(i), unversionedTolerations[i], err.Error())) + } + } + allErrors = append(allErrors, apivalidation.ValidateTolerations(unversionedTolerations, fldPath)...) 
+ return allErrors +} diff --git a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest/apiaccess_count_controller.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest/apiaccess_count_controller.go index cd0002dea7a0..6b0990170015 100644 --- a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest/apiaccess_count_controller.go +++ b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest/apiaccess_count_controller.go @@ -2,6 +2,7 @@ package deprecatedapirequest import ( "context" + "math/rand" "strings" "sync" "time" @@ -93,7 +94,7 @@ func (c *controller) Start(stop <-chan struct{}) { }() // write out logs every c.updatePeriod - go wait.UntilWithContext(ctx, c.persistRequestCountForAllResources, c.updatePeriod) + go wait.NonSlidingUntilWithContext(ctx, c.persistRequestCountForAllResources, c.updatePeriod) } func (c *controller) persistRequestCountForAllResources(ctx context.Context) { @@ -115,7 +116,10 @@ func (c *controller) persistRequestCountForAllResources(ctx context.Context) { for gvr := range countsToPersist.resourceToRequestCount { resourceCount := countsToPersist.Resource(gvr) wg.Add(1) - go c.persistRequestCountForResource(ctx, &wg, currentHour, expiredHour, resourceCount) + go func() { + time.Sleep(time.Duration(rand.Int63n(int64(c.updatePeriod / 5 * 4)))) // smear out over the interval to avoid resource spikes + c.persistRequestCountForResource(ctx, &wg, currentHour, expiredHour, resourceCount) + }() } wg.Wait() } diff --git a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver/patch_handlerchain.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver/patch_handlerchain.go index 6a8cbc50a658..85bdeb932c31 100644 --- a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver/patch_handlerchain.go +++ 
b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver/patch_handlerchain.go @@ -1,27 +1,16 @@ package openshiftkubeapiserver import ( - "context" - "fmt" "net/http" "strings" - "time" - authorizationv1 "github.com/openshift/api/authorization/v1" - "github.com/openshift/library-go/pkg/apiserver/httprequest" authenticationv1 "k8s.io/api/authentication/v1" - "k8s.io/apimachinery/pkg/util/clock" - "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/apiserver/pkg/audit" - "k8s.io/apiserver/pkg/authentication/user" - "k8s.io/apiserver/pkg/authorization/authorizer" - "k8s.io/apiserver/pkg/endpoints/filters" - "k8s.io/apiserver/pkg/endpoints/metrics" - "k8s.io/apiserver/pkg/endpoints/request" genericapiserver "k8s.io/apiserver/pkg/server" - "k8s.io/client-go/util/flowcontrol" patchfilters "k8s.io/kubernetes/openshift-kube-apiserver/filters" "k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest" + + authorizationv1 "github.com/openshift/api/authorization/v1" + "github.com/openshift/library-go/pkg/apiserver/httprequest" ) // TODO switch back to taking a kubeapiserver config. For now make it obviously safe for 3.11 @@ -40,9 +29,6 @@ func BuildHandlerChain(consolePublicURL string, oauthMetadataFile string, deprec // well-known comes after the normal handling chain. 
This shows where to connect for oauth information handler := withOAuthInfo(apiHandler, oAuthMetadata) - // we rate limit watches after building the regular handler chain so we have the context information - handler = withWatchRateLimit(handler) - // after normal chain, so that user is in context handler = patchfilters.WithDeprecatedApiRequestLogging(handler, deprecatedAPIRequestController) @@ -115,129 +101,3 @@ func translateLegacyScopeImpersonation(handler http.Handler) http.Handler { handler.ServeHTTP(w, req) }) } - -type authorizerAttributesFunc func(ctx context.Context) (authorizer.Attributes, error) - -type watchRateLimit struct { - delegate http.Handler - - earlyRateLimiter flowcontrol.RateLimiter - middleRateLimiter flowcontrol.RateLimiter - - authorizerAttributesFn authorizerAttributesFunc - clock clock.Clock - earlyEndTime time.Time - middleEndTime time.Time -} - -// ServeHTTP rate limits the establishment of watches to keep kube-apiservers from crashing. -// Rate limiting watches effectively creates an upper bound on secret and configmap mounts of 10*QPS because of a -// ten minute watch timeout and kubelets use watches to get the content for the mount. -// We will break the rate limiting into three timespans -// 1. first ten minutes: this is the most restrictive 10000 total mounted secrets and configmaps, 1000 per minute. -// We're trying to break up the slug of kubelet traffic and -// we want to be sure that operators can make progress during this time if we need to recover a cluster in -// a bad state. -// 2. second ten minutes: this is less restrictive 20000 total mounted secrets and configmaps, 2000 per minute. -// This lets us start to ramp up during a relative steady state. -// 3. no limit. We have this to handle cases of large clusters with more than 20000 mounted secrets and configmaps. -// I honestly don't know how common this is, but I don't want to break on it. 
-// Recall that we observed more than the the 30,000 per minute observed during some disruptive events on a cluster. -// In addition, we special case watches in the platform operator namespaces, cluster scope, and kube-system. -// We have not observed large numbers of these and they are required in order to make progress when trying to correct -// some kinds of cluster failures. -func (h watchRateLimit) ServeHTTP(w http.ResponseWriter, req *http.Request) { - var effectiveRateLimiter flowcontrol.RateLimiter - now := h.clock.Now() - switch { - case now.After(h.middleEndTime): - // we are past our rate limiting time - h.delegate.ServeHTTP(w, req) - return - case now.After(h.earlyEndTime): - effectiveRateLimiter = h.middleRateLimiter - default: - effectiveRateLimiter = h.earlyRateLimiter - } - - ctx := req.Context() - - attributes, err := h.authorizerAttributesFn(ctx) - if err != nil { - // if we cannot get attributes, don't fail the request - h.delegate.ServeHTTP(w, req) - return - } - if attributes.GetUser() == nil { - // if we cannot get user, don't fail the request - h.delegate.ServeHTTP(w, req) - return - } - for _, group := range attributes.GetUser().GetGroups() { - if group == user.SystemPrivilegedGroup { - // system:masters always have the power! 
- h.delegate.ServeHTTP(w, req) - return - } - } - - if attributes.GetVerb() != "watch" { - // only throttle watch establishment - h.delegate.ServeHTTP(w, req) - return - } - namespace := attributes.GetNamespace() - switch { - case len(namespace) == 0: - // don't rate limit cluster scoped watches because operators that need to make progress may use those - // if we have to restrict further we can - h.delegate.ServeHTTP(w, req) - return - - case strings.HasPrefix("kube-", namespace): - // don't rate limit kube- because some operators use and we use this for delegated authn - h.delegate.ServeHTTP(w, req) - return - - case strings.HasPrefix("openshift-", namespace): - // don't rate limit openshift- because we need operators to make progress, so we need openshift- mounts - // to succeed in order to progress - h.delegate.ServeHTTP(w, req) - return - - } - - if !effectiveRateLimiter.TryAccept() { - // add a metric for us to observe - if requestInfo, ok := request.RequestInfoFrom(ctx); ok { - metrics.RecordRequestTermination(req, requestInfo, "apiserver-watch", http.StatusTooManyRequests) - } - - ae := request.AuditEventFrom(ctx) - audit.LogAnnotation(ae, "apiserver.openshift.io/watch-rate-limit", "rate-limited") - retryAfter := rand.Intn(15) + 5 // evenly weighted from 5-20 second wait - // Return a 429 status indicating "Too Many Requests", but make sure its recognizeable - w.Header().Set("Retry-After", fmt.Sprintf("%d", retryAfter)) - http.Error(w, "Too many WATCH requests, please try again later.", http.StatusTooManyRequests) - return - } - h.delegate.ServeHTTP(w, req) -} - -func newWatchRateLimit(handler http.Handler, theClock clock.Clock) watchRateLimit { - startTime := theClock.Now() - - return watchRateLimit{ - delegate: handler, - earlyRateLimiter: flowcontrol.NewTokenBucketRateLimiterWithClock(16.6, 100, theClock), - middleRateLimiter: flowcontrol.NewTokenBucketRateLimiterWithClock(33.3, 100, theClock), - authorizerAttributesFn: filters.GetAuthorizerAttributes, - 
clock: theClock, - earlyEndTime: startTime.Add(10 * time.Minute), - middleEndTime: startTime.Add(20 * time.Minute), - } -} - -func withWatchRateLimit(handler http.Handler) http.Handler { - return newWatchRateLimit(handler, clock.RealClock{}) -} diff --git a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go index bdb3588f760c..05dee386236a 100644 --- a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go +++ b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go @@ -786,7 +786,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS StorageObjectInUseProtection: {Default: true, PreRelease: featuregate.GA}, SupportPodPidsLimit: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.23 SupportNodePidsLimit: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.23 - BoundServiceAccountTokenVolume: {Default: false, PreRelease: featuregate.Beta}, // TODO(auth): investigate the impact of enabling this feature (https://bugzilla.redhat.com/show_bug.cgi?id=1946479) + BoundServiceAccountTokenVolume: {Default: true, PreRelease: featuregate.Beta}, ServiceAccountIssuerDiscovery: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.22 CRIContainerLogRotation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.22 CSIMigration: {Default: true, PreRelease: featuregate.Beta}, diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/DOWNSTREAM_OWNERS b/vendor/k8s.io/kubernetes/pkg/kubelet/DOWNSTREAM_OWNERS index d484fa4fc246..592f771c89da 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/DOWNSTREAM_OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/DOWNSTREAM_OWNERS @@ -5,6 +5,7 @@ reviewers: - rphillips - sjenning - mrunalp + - ehashman # Sub-package approvers from upstream with permission to approve downstream backports following these rules: # - they MUST be approvers upstream (here 
compare https://github.com/kubernetes/kubernetes/blob/17bb2fc050ec786b60db7d8d6d4d3ac8eeac205b/pkg/kubelet/OWNERS#L10-L11) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward/httpstream.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward/httpstream.go index 5b0016c3c2f5..bd707decb16c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward/httpstream.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward/httpstream.go @@ -163,10 +163,6 @@ func (h *httpStreamHandler) removeStreamPair(requestID string) { h.streamPairsLock.Lock() defer h.streamPairsLock.Unlock() - if h.conn != nil { - pair := h.streamPairs[requestID] - h.conn.RemoveStreams(pair.dataStream, pair.errorStream) - } delete(h.streamPairs, requestID) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go index 386d8d2eb126..9396a5c62c47 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go @@ -50,6 +50,26 @@ func (kl *Kubelet) ListVolumesForPod(podUID types.UID) (map[string]volume.Volume return volumesToReturn, len(volumesToReturn) > 0 } +// ListBlockVolumesForPod returns a map of the mounted volumes for the given +// pod. The key in the map is the OuterVolumeSpecName (i.e. +// pod.Spec.Volumes[x].Name) +func (kl *Kubelet) ListBlockVolumesForPod(podUID types.UID) (map[string]volume.BlockVolume, bool) { + volumesToReturn := make(map[string]volume.BlockVolume) + podVolumes := kl.volumeManager.GetMountedVolumesForPod( + volumetypes.UniquePodName(podUID)) + for outerVolumeSpecName, volume := range podVolumes { + // TODO: volume.Mounter could be nil if volume object is recovered + // from reconciler's sync state process. PR 33616 will fix this problem + // to create Mounter object when recovering volume state. 
+ if volume.BlockVolumeMapper == nil { + continue + } + volumesToReturn[outerVolumeSpecName] = volume.BlockVolumeMapper + } + + return volumesToReturn, len(volumesToReturn) > 0 +} + // podVolumesExist checks with the volume manager and returns true any of the // pods for the specified volume are mounted. func (kl *Kubelet) podVolumesExist(podUID types.UID) bool { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/handler.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/handler.go index c3781cf9b210..77e4fe7268cd 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/handler.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/handler.go @@ -88,6 +88,9 @@ type Provider interface { // ListVolumesForPod returns the stats of the volume used by the pod with // the podUID. ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool) + // ListBlockVolumesForPod returns the stats of the volume used by the + // pod with the podUID. + ListBlockVolumesForPod(podUID types.UID) (map[string]volume.BlockVolume, bool) // GetPods returns the specs of all the pods running on this node. 
GetPods() []*v1.Pod diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/volume_stat_calculator.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/volume_stat_calculator.go index b12349a1e5a1..89cf5fd4c2a7 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/volume_stat_calculator.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/volume_stat_calculator.go @@ -96,10 +96,32 @@ func (s *volumeStatCalculator) GetLatest() (PodVolumeStats, bool) { func (s *volumeStatCalculator) calcAndStoreStats() { // Find all Volumes for the Pod volumes, found := s.statsProvider.ListVolumesForPod(s.pod.UID) - if !found { + blockVolumes, bvFound := s.statsProvider.ListBlockVolumesForPod(s.pod.UID) + if !found && !bvFound { return } + metricVolumes := make(map[string]volume.MetricsProvider) + + if found { + for name, v := range volumes { + metricVolumes[name] = v + } + } + if bvFound { + for name, v := range blockVolumes { + // Only add the blockVolume if it implements the MetricsProvider interface + if _, ok := v.(volume.MetricsProvider); ok { + // Some drivers inherit the MetricsProvider interface from Filesystem + // mode volumes, but do not implement it for Block mode. Checking + // SupportsMetrics() will prevent panics in that case. 
+ if v.SupportsMetrics() { + metricVolumes[name] = v + } + } + } + } + // Get volume specs for the pod - key'd by volume name volumesSpec := make(map[string]v1.Volume) for _, v := range s.pod.Spec.Volumes { @@ -109,7 +131,7 @@ func (s *volumeStatCalculator) calcAndStoreStats() { // Call GetMetrics on each Volume and copy the result to a new VolumeStats.FsStats var ephemeralStats []stats.VolumeStats var persistentStats []stats.VolumeStats - for name, v := range volumes { + for name, v := range metricVolumes { metric, err := v.GetMetrics() if err != nil { // Expected for Volumes that don't support Metrics diff --git a/vendor/k8s.io/kubernetes/pkg/registry/flowcontrol/ensurer/flowschema.go b/vendor/k8s.io/kubernetes/pkg/registry/flowcontrol/ensurer/flowschema.go new file mode 100644 index 000000000000..366bcd5acd8a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/flowcontrol/ensurer/flowschema.go @@ -0,0 +1,198 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ensurer + +import ( + "context" + "errors" + "fmt" + + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" + flowcontrolapisv1beta1 "k8s.io/kubernetes/pkg/apis/flowcontrol/v1beta1" +) + +var ( + errObjectNotFlowSchema = errors.New("object is not a FlowSchema type") +) + +// FlowSchemaEnsurer ensures the specified bootstrap configuration objects +type FlowSchemaEnsurer interface { + Ensure([]*flowcontrolv1beta1.FlowSchema) error +} + +// FlowSchemaRemover removes the specified bootstrap configuration objects +type FlowSchemaRemover interface { + Remove([]string) error +} + +// NewSuggestedFlowSchemaEnsurer returns a FlowSchemaEnsurer instance that +// can be used to ensure a set of suggested FlowSchema configuration objects. +func NewSuggestedFlowSchemaEnsurer(client flowcontrolclient.FlowSchemaInterface) FlowSchemaEnsurer { + wrapper := &flowSchemaWrapper{ + client: client, + } + return &fsEnsurer{ + strategy: newSuggestedEnsureStrategy(wrapper), + wrapper: wrapper, + } +} + +// NewMandatoryFlowSchemaEnsurer returns a FlowSchemaEnsurer instance that +// can be used to ensure a set of mandatory FlowSchema configuration objects. +func NewMandatoryFlowSchemaEnsurer(client flowcontrolclient.FlowSchemaInterface) FlowSchemaEnsurer { + wrapper := &flowSchemaWrapper{ + client: client, + } + return &fsEnsurer{ + strategy: newMandatoryEnsureStrategy(wrapper), + wrapper: wrapper, + } +} + +// NewFlowSchemaRemover returns a FlowSchemaRemover instance that +// can be used to remove a set of FlowSchema configuration objects. 
+func NewFlowSchemaRemover(client flowcontrolclient.FlowSchemaInterface) FlowSchemaRemover { + return &fsEnsurer{ + wrapper: &flowSchemaWrapper{ + client: client, + }, + } +} + +// GetFlowSchemaRemoveCandidate returns a list of FlowSchema object +// names that are candidates for deletion from the cluster. +// bootstrap: a set of hard coded FlowSchema configuration objects +// kube-apiserver maintains in-memory. +func GetFlowSchemaRemoveCandidate(client flowcontrolclient.FlowSchemaInterface, bootstrap []*flowcontrolv1beta1.FlowSchema) ([]string, error) { + // TODO(101667): Use a lister here to avoid periodic LIST calls + fsList, err := client.List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to list FlowSchema - %w", err) + } + + bootstrapNames := sets.String{} + for i := range bootstrap { + bootstrapNames.Insert(bootstrap[i].GetName()) + } + + currentObjects := make([]metav1.Object, len(fsList.Items)) + for i := range fsList.Items { + currentObjects[i] = &fsList.Items[i] + } + + return getRemoveCandidate(bootstrapNames, currentObjects), nil +} + +type fsEnsurer struct { + strategy ensureStrategy + wrapper configurationWrapper +} + +func (e *fsEnsurer) Ensure(flowSchemas []*flowcontrolv1beta1.FlowSchema) error { + for _, flowSchema := range flowSchemas { + if err := ensureConfiguration(e.wrapper, e.strategy, flowSchema); err != nil { + return err + } + } + + return nil +} + +func (e *fsEnsurer) Remove(flowSchemas []string) error { + for _, flowSchema := range flowSchemas { + if err := removeConfiguration(e.wrapper, flowSchema); err != nil { + return err + } + } + + return nil +} + +// flowSchemaWrapper abstracts all FlowSchema specific logic, with this +// we can manage all boiler plate code in one place. 
+type flowSchemaWrapper struct { + client flowcontrolclient.FlowSchemaInterface +} + +func (fs *flowSchemaWrapper) TypeName() string { + return "FlowSchema" +} + +func (fs *flowSchemaWrapper) Create(object runtime.Object) (runtime.Object, error) { + fsObject, ok := object.(*flowcontrolv1beta1.FlowSchema) + if !ok { + return nil, errObjectNotFlowSchema + } + + return fs.client.Create(context.TODO(), fsObject, metav1.CreateOptions{FieldManager: fieldManager}) +} + +func (fs *flowSchemaWrapper) Update(object runtime.Object) (runtime.Object, error) { + fsObject, ok := object.(*flowcontrolv1beta1.FlowSchema) + if !ok { + return nil, errObjectNotFlowSchema + } + + return fs.client.Update(context.TODO(), fsObject, metav1.UpdateOptions{FieldManager: fieldManager}) +} + +func (fs *flowSchemaWrapper) Get(name string) (configurationObject, error) { + return fs.client.Get(context.TODO(), name, metav1.GetOptions{}) +} + +func (fs *flowSchemaWrapper) Delete(name string) error { + return fs.client.Delete(context.TODO(), name, metav1.DeleteOptions{}) +} + +func (fs *flowSchemaWrapper) CopySpec(bootstrap, current runtime.Object) error { + bootstrapFS, ok := bootstrap.(*flowcontrolv1beta1.FlowSchema) + if !ok { + return errObjectNotFlowSchema + } + currentFS, ok := current.(*flowcontrolv1beta1.FlowSchema) + if !ok { + return errObjectNotFlowSchema + } + + specCopy := bootstrapFS.Spec.DeepCopy() + currentFS.Spec = *specCopy + return nil +} + +func (fs *flowSchemaWrapper) HasSpecChanged(bootstrap, current runtime.Object) (bool, error) { + bootstrapFS, ok := bootstrap.(*flowcontrolv1beta1.FlowSchema) + if !ok { + return false, errObjectNotFlowSchema + } + currentFS, ok := current.(*flowcontrolv1beta1.FlowSchema) + if !ok { + return false, errObjectNotFlowSchema + } + + return flowSchemaSpecChanged(bootstrapFS, currentFS), nil +} + +func flowSchemaSpecChanged(expected, actual *flowcontrolv1beta1.FlowSchema) bool { + copiedExpectedFlowSchema := expected.DeepCopy() + 
flowcontrolapisv1beta1.SetObjectDefaults_FlowSchema(copiedExpectedFlowSchema) + return !equality.Semantic.DeepEqual(copiedExpectedFlowSchema.Spec, actual.Spec) +} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/flowcontrol/ensurer/prioritylevelconfiguration.go b/vendor/k8s.io/kubernetes/pkg/registry/flowcontrol/ensurer/prioritylevelconfiguration.go new file mode 100644 index 000000000000..9af7bab8f900 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/flowcontrol/ensurer/prioritylevelconfiguration.go @@ -0,0 +1,198 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ensurer + +import ( + "context" + "errors" + "fmt" + + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" + flowcontrolapisv1beta1 "k8s.io/kubernetes/pkg/apis/flowcontrol/v1beta1" +) + +var ( + errObjectNotPriorityLevel = errors.New("object is not a PriorityLevelConfiguration type") +) + +// PriorityLevelEnsurer ensures the specified bootstrap configuration objects +type PriorityLevelEnsurer interface { + Ensure([]*flowcontrolv1beta1.PriorityLevelConfiguration) error +} + +// PriorityLevelRemover removes the specified bootstrap configuration objects +type PriorityLevelRemover interface { + Remove([]string) error +} + +// NewSuggestedPriorityLevelEnsurerEnsurer returns a PriorityLevelEnsurer instance that +// can be used to ensure a set of suggested PriorityLevelConfiguration configuration objects. +func NewSuggestedPriorityLevelEnsurerEnsurer(client flowcontrolclient.PriorityLevelConfigurationInterface) PriorityLevelEnsurer { + wrapper := &priorityLevelConfigurationWrapper{ + client: client, + } + return &plEnsurer{ + strategy: newSuggestedEnsureStrategy(wrapper), + wrapper: wrapper, + } +} + +// NewMandatoryPriorityLevelEnsurer returns a PriorityLevelEnsurer instance that +// can be used to ensure a set of mandatory PriorityLevelConfiguration configuration objects. +func NewMandatoryPriorityLevelEnsurer(client flowcontrolclient.PriorityLevelConfigurationInterface) PriorityLevelEnsurer { + wrapper := &priorityLevelConfigurationWrapper{ + client: client, + } + return &plEnsurer{ + strategy: newMandatoryEnsureStrategy(wrapper), + wrapper: wrapper, + } +} + +// NewPriorityLevelRemover returns a PriorityLevelRemover instance that +// can be used to remove a set of PriorityLevelConfiguration configuration objects. 
+func NewPriorityLevelRemover(client flowcontrolclient.PriorityLevelConfigurationInterface) PriorityLevelRemover { + return &plEnsurer{ + wrapper: &priorityLevelConfigurationWrapper{ + client: client, + }, + } +} + +// GetPriorityLevelRemoveCandidate returns a list of PriorityLevelConfiguration +// names that are candidates for removal from the cluster. +// bootstrap: a set of hard coded PriorityLevelConfiguration configuration +// objects kube-apiserver maintains in-memory. +func GetPriorityLevelRemoveCandidate(client flowcontrolclient.PriorityLevelConfigurationInterface, bootstrap []*flowcontrolv1beta1.PriorityLevelConfiguration) ([]string, error) { + // TODO(101667): Use a lister here to avoid periodic LIST calls + plList, err := client.List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to list PriorityLevelConfiguration - %w", err) + } + + bootstrapNames := sets.String{} + for i := range bootstrap { + bootstrapNames.Insert(bootstrap[i].GetName()) + } + + currentObjects := make([]metav1.Object, len(plList.Items)) + for i := range plList.Items { + currentObjects[i] = &plList.Items[i] + } + + return getRemoveCandidate(bootstrapNames, currentObjects), nil +} + +type plEnsurer struct { + strategy ensureStrategy + wrapper configurationWrapper +} + +func (e *plEnsurer) Ensure(priorityLevels []*flowcontrolv1beta1.PriorityLevelConfiguration) error { + for _, priorityLevel := range priorityLevels { + if err := ensureConfiguration(e.wrapper, e.strategy, priorityLevel); err != nil { + return err + } + } + + return nil +} + +func (e *plEnsurer) Remove(priorityLevels []string) error { + for _, priorityLevel := range priorityLevels { + if err := removeConfiguration(e.wrapper, priorityLevel); err != nil { + return err + } + } + + return nil +} + +// priorityLevelConfigurationWrapper abstracts all PriorityLevelConfiguration specific logic, +// with this we can manage all boiler plate code in one place. 
+type priorityLevelConfigurationWrapper struct { + client flowcontrolclient.PriorityLevelConfigurationInterface +} + +func (fs *priorityLevelConfigurationWrapper) TypeName() string { + return "PriorityLevelConfiguration" +} + +func (fs *priorityLevelConfigurationWrapper) Create(object runtime.Object) (runtime.Object, error) { + plObject, ok := object.(*flowcontrolv1beta1.PriorityLevelConfiguration) + if !ok { + return nil, errObjectNotPriorityLevel + } + + return fs.client.Create(context.TODO(), plObject, metav1.CreateOptions{FieldManager: fieldManager}) +} + +func (fs *priorityLevelConfigurationWrapper) Update(object runtime.Object) (runtime.Object, error) { + fsObject, ok := object.(*flowcontrolv1beta1.PriorityLevelConfiguration) + if !ok { + return nil, errObjectNotPriorityLevel + } + + return fs.client.Update(context.TODO(), fsObject, metav1.UpdateOptions{FieldManager: fieldManager}) +} + +func (fs *priorityLevelConfigurationWrapper) Get(name string) (configurationObject, error) { + return fs.client.Get(context.TODO(), name, metav1.GetOptions{}) +} + +func (fs *priorityLevelConfigurationWrapper) Delete(name string) error { + return fs.client.Delete(context.TODO(), name, metav1.DeleteOptions{}) +} + +func (fs *priorityLevelConfigurationWrapper) CopySpec(bootstrap, current runtime.Object) error { + bootstrapFS, ok := bootstrap.(*flowcontrolv1beta1.PriorityLevelConfiguration) + if !ok { + return errObjectNotPriorityLevel + } + currentFS, ok := current.(*flowcontrolv1beta1.PriorityLevelConfiguration) + if !ok { + return errObjectNotPriorityLevel + } + + specCopy := bootstrapFS.Spec.DeepCopy() + currentFS.Spec = *specCopy + return nil +} + +func (fs *priorityLevelConfigurationWrapper) HasSpecChanged(bootstrap, current runtime.Object) (bool, error) { + bootstrapFS, ok := bootstrap.(*flowcontrolv1beta1.PriorityLevelConfiguration) + if !ok { + return false, errObjectNotPriorityLevel + } + currentFS, ok := current.(*flowcontrolv1beta1.PriorityLevelConfiguration) + if 
!ok { + return false, errObjectNotPriorityLevel + } + + return priorityLevelSpecChanged(bootstrapFS, currentFS), nil +} + +func priorityLevelSpecChanged(expected, actual *flowcontrolv1beta1.PriorityLevelConfiguration) bool { + copiedExpectedPriorityLevel := expected.DeepCopy() + flowcontrolapisv1beta1.SetObjectDefaults_PriorityLevelConfiguration(copiedExpectedPriorityLevel) + return !equality.Semantic.DeepEqual(copiedExpectedPriorityLevel.Spec, actual.Spec) +} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/flowcontrol/ensurer/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/flowcontrol/ensurer/strategy.go new file mode 100644 index 000000000000..32fce862e779 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/flowcontrol/ensurer/strategy.go @@ -0,0 +1,321 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ensurer + +import ( + "errors" + "fmt" + "strconv" + + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" + + "github.com/google/go-cmp/cmp" +) + +const ( + fieldManager = "api-priority-and-fairness-config-producer-v1" +) + +// ensureStrategy provides a strategy for ensuring apf bootstrap configurationWrapper. 
+// We have two types of configurationWrapper objects: +// - mandatory: the mandatory configurationWrapper objects are about ensuring that the P&F +// system itself won't crash; we have to be sure there's 'catch-all' place for +// everything to go. Any changes made by the cluster operators to these +// configurationWrapper objects will be stomped by the apiserver. +// +// - suggested: additional configurationWrapper objects for initial behavior. +// the cluster operators have an option to edit or delete these configurationWrapper objects. +type ensureStrategy interface { + // Name of the strategy, for now we have two: 'mandatory' and 'suggested'. + // This comes handy in logging. + Name() string + + // ShouldUpdate accepts the current and the bootstrap configuration and determines + // whether an update is necessary. + // current is the existing in-cluster configuration object. + // bootstrap is the configuration the kube-apiserver maintains in-memory. + // + // ok: true if auto update is required, otherwise false + // object: the new object represents the new configuration to be stored in-cluster. + // err: err is set when the function runs into an error and can not + // determine if auto update is needed. + ShouldUpdate(current, bootstrap configurationObject) (object runtime.Object, ok bool, err error) +} + +// this internal interface provides abstraction for dealing with the `Spec` +// of both 'FlowSchema' and 'PriorityLevelConfiguration' objects. +// Since the ensure logic for both types is common, we use a few internal interfaces +// to abstract out the differences of these two types. +type specCopier interface { + // HasSpecChanged returns true if the spec of both the bootstrap and + // the current configuration object is same, otherwise false. + HasSpecChanged(bootstrap, current runtime.Object) (bool, error) + + // CopySpec makes a deep copy the spec of the bootstrap object + // and copies it to that of the current object. 
+ // CopySpec assumes that the current object is safe to mutate, so it + // rests with the caller to make a deep copy of the current. + CopySpec(bootstrap, current runtime.Object) error +} + +// this internal interface provides abstraction for CRUD operation +// related to both 'FlowSchema' and 'PriorityLevelConfiguration' objects. +// Since the ensure logic for both types is common, we use a few internal interfaces +// to abstract out the differences of these two types. +type configurationClient interface { + Create(object runtime.Object) (runtime.Object, error) + Update(object runtime.Object) (runtime.Object, error) + Get(name string) (configurationObject, error) + Delete(name string) error +} + +type configurationWrapper interface { + // TypeName returns the type of the configuration that this interface deals with. + // We use it to log the type name of the configuration object being ensured. + // It is either 'PriorityLevelConfiguration' or 'FlowSchema' + TypeName() string + + configurationClient + specCopier +} + +// A convenient wrapper interface that is used by the ensure logic. 
+type configurationObject interface { + metav1.Object + runtime.Object +} + +func newSuggestedEnsureStrategy(copier specCopier) ensureStrategy { + return &strategy{ + copier: copier, + alwaysAutoUpdateSpec: false, + name: "suggested", + } +} + +func newMandatoryEnsureStrategy(copier specCopier) ensureStrategy { + return &strategy{ + copier: copier, + alwaysAutoUpdateSpec: true, + name: "mandatory", + } +} + +// auto-update strategy for the configuration objects +type strategy struct { + copier specCopier + alwaysAutoUpdateSpec bool + name string +} + +func (s *strategy) Name() string { + return s.name +} + +func (s *strategy) ShouldUpdate(current, bootstrap configurationObject) (runtime.Object, bool, error) { + if current == nil || bootstrap == nil { + return nil, false, nil + } + + autoUpdateSpec := s.alwaysAutoUpdateSpec + if !autoUpdateSpec { + autoUpdateSpec = shouldUpdateSpec(current) + } + updateAnnotation := shouldUpdateAnnotation(current, autoUpdateSpec) + + var specChanged bool + if autoUpdateSpec { + changed, err := s.copier.HasSpecChanged(bootstrap, current) + if err != nil { + return nil, false, fmt.Errorf("failed to compare spec - %w", err) + } + specChanged = changed + } + + if !(updateAnnotation || specChanged) { + // the annotation key is up to date and the spec has not changed, no update is necessary + return nil, false, nil + } + + // if we are here, either we need to update the annotation key or the spec. + copy, ok := current.DeepCopyObject().(configurationObject) + if !ok { + // we should never be here + return nil, false, errors.New("incompatible object type") + } + + if updateAnnotation { + setAutoUpdateAnnotation(copy, autoUpdateSpec) + } + if specChanged { + s.copier.CopySpec(bootstrap, copy) + } + + return copy, true, nil +} + +// shouldUpdateSpec inspects the auto-update annotation key and generation field to determine +// whether the configurationWrapper object should be auto-updated. 
+func shouldUpdateSpec(accessor metav1.Object) bool { + value, _ := accessor.GetAnnotations()[flowcontrolv1beta1.AutoUpdateAnnotationKey] + if autoUpdate, err := strconv.ParseBool(value); err == nil { + return autoUpdate + } + + // We are here because of either a or b: + // a. the annotation key is missing. + // b. the annotation key is present but the value does not represent a boolean. + // In either case, if the operator hasn't changed the spec, we can safely auto update. + // Please note that we can't protect the changes made by the operator in the following scenario: + // - The operator deletes and recreates the same object with a variant spec (generation resets to 1). + if accessor.GetGeneration() == 1 { + return true + } + return false +} + +// shouldUpdateAnnotation determines whether the current value of the auto-update annotation +// key matches the desired value. +func shouldUpdateAnnotation(accessor metav1.Object, desired bool) bool { + if value, ok := accessor.GetAnnotations()[flowcontrolv1beta1.AutoUpdateAnnotationKey]; ok { + if current, err := strconv.ParseBool(value); err == nil && current == desired { + return false + } + } + + return true +} + +// setAutoUpdateAnnotation sets the auto-update annotation key to the specified value. +func setAutoUpdateAnnotation(accessor metav1.Object, autoUpdate bool) { + if accessor.GetAnnotations() == nil { + accessor.SetAnnotations(map[string]string{}) + } + + accessor.GetAnnotations()[flowcontrolv1beta1.AutoUpdateAnnotationKey] = strconv.FormatBool(autoUpdate) +} + +// ensureConfiguration ensures the boostrap configurationWrapper on the cluster based on the specified strategy. 
+func ensureConfiguration(wrapper configurationWrapper, strategy ensureStrategy, bootstrap configurationObject) error { + name := bootstrap.GetName() + configurationType := strategy.Name() + + current, err := wrapper.Get(bootstrap.GetName()) + if err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to retrieve %s type=%s name=%q error=%w", wrapper.TypeName(), configurationType, name, err) + } + + // we always re-create a missing configuration object + if _, err := wrapper.Create(bootstrap); err != nil { + return fmt.Errorf("cannot create %s type=%s name=%q error=%w", wrapper.TypeName(), configurationType, name, err) + } + + klog.V(2).InfoS(fmt.Sprintf("Successfully created %s", wrapper.TypeName()), "type", configurationType, "name", name) + return nil + } + + klog.V(5).InfoS(fmt.Sprintf("The %s already exists, checking whether it is up to date", wrapper.TypeName()), "type", configurationType, "name", name) + newObject, update, err := strategy.ShouldUpdate(current, bootstrap) + if err != nil { + return fmt.Errorf("failed to determine whether auto-update is required for %s type=%s name=%q error=%w", wrapper.TypeName(), configurationType, name, err) + } + if !update { + if klog.V(5).Enabled() { + // TODO: if we use structured logging here the diff gets escaped and very awkward to read in the log + klog.Infof("No update required for the %s type=%s name=%q diff: %s", wrapper.TypeName(), configurationType, name, cmp.Diff(current, bootstrap)) + } + return nil + } + + if _, err := wrapper.Update(newObject); err != nil { + return fmt.Errorf("failed to update the %s, will retry later type=%s name=%q error=%w", wrapper.TypeName(), configurationType, name, err) + } + + klog.V(2).Infof("Updated the %s type=%s name=%q diff: %s", wrapper.TypeName(), configurationType, name, cmp.Diff(current, newObject)) + return nil +} + +func removeConfiguration(wrapper configurationWrapper, name string) error { + current, err := wrapper.Get(name) + if err != nil { + if 
apierrors.IsNotFound(err) { + return nil + } + + return fmt.Errorf("failed to retrieve the %s, will retry later name=%q error=%w", wrapper.TypeName(), name, err) + } + + value := current.GetAnnotations()[flowcontrolv1beta1.AutoUpdateAnnotationKey] + autoUpdate, err := strconv.ParseBool(value) + if err != nil { + klog.ErrorS(err, fmt.Sprintf("Skipping deletion of the %s", wrapper.TypeName()), "name", name) + + // This may need manual intervention, in case the annotation value is malformed, + // so don't return an error, that might trigger futile retry loop. + return nil + } + if !autoUpdate { + klog.V(5).InfoS(fmt.Sprintf("Skipping deletion of the %s", wrapper.TypeName()), "name", name) + return nil + } + + if err := wrapper.Delete(name); err != nil { + if apierrors.IsNotFound(err) { + return nil + } + + return fmt.Errorf("failed to delete the %s, will retry later name=%q error=%w", wrapper.TypeName(), name, err) + } + + klog.V(2).InfoS(fmt.Sprintf("Successfully deleted the %s", wrapper.TypeName()), "name", name) + return nil +} + +// getRemoveCandidate returns a list of configuration objects we should delete +// from the cluster given a set of bootstrap and current configuration. +// bootstrap: a set of hard coded configuration kube-apiserver maintains in-memory. +// current: a set of configuration objects that exist on the cluster +// Any object present in current is a candidate for removal if both a and b are true: +// a. the object in current is missing from the bootstrap configuration +// b. the object has the designated auto-update annotation key +// This function shares the common logic for both FlowSchema and PriorityLevelConfiguration +// type and hence it accepts metav1.Object only. 
+func getRemoveCandidate(bootstrap sets.String, current []metav1.Object) []string { + if len(current) == 0 { + return nil + } + + candidates := make([]string, 0) + for i := range current { + object := current[i] + if _, ok := object.GetAnnotations()[flowcontrolv1beta1.AutoUpdateAnnotationKey]; !ok { + // the configuration object does not have the annotation key + continue + } + + if _, ok := bootstrap[object.GetName()]; !ok { + candidates = append(candidates, object.GetName()) + } + } + return candidates +} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/flowcontrol/rest/storage_flowcontrol.go b/vendor/k8s.io/kubernetes/pkg/registry/flowcontrol/rest/storage_flowcontrol.go index f968068b46e6..c49eb6cf4112 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/flowcontrol/rest/storage_flowcontrol.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/flowcontrol/rest/storage_flowcontrol.go @@ -21,10 +21,6 @@ import ( "fmt" "time" - flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" - "k8s.io/apimachinery/pkg/api/equality" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" flowcontrolbootstrap "k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap" "k8s.io/apiserver/pkg/registry/generic" @@ -37,6 +33,7 @@ import ( "k8s.io/kubernetes/pkg/apis/flowcontrol" flowcontrolapisv1alpha1 "k8s.io/kubernetes/pkg/apis/flowcontrol/v1alpha1" flowcontrolapisv1beta1 "k8s.io/kubernetes/pkg/apis/flowcontrol/v1beta1" + "k8s.io/kubernetes/pkg/registry/flowcontrol/ensurer" flowschemastore "k8s.io/kubernetes/pkg/registry/flowcontrol/flowschema/storage" prioritylevelconfigurationstore "k8s.io/kubernetes/pkg/registry/flowcontrol/prioritylevelconfiguration/storage" ) @@ -101,179 +98,151 @@ func (p RESTStorageProvider) GroupName() string { // PostStartHook returns the hook func that launches the config provider func (p RESTStorageProvider) PostStartHook() (string, genericapiserver.PostStartHookFunc, error) { - return 
PostStartHookName, func(hookContext genericapiserver.PostStartHookContext) error { - flowcontrolClientSet := flowcontrolclient.NewForConfigOrDie(hookContext.LoopbackClientConfig) - go func() { - const retryCreatingSuggestedSettingsInterval = time.Second - err := wait.PollImmediateUntil( - retryCreatingSuggestedSettingsInterval, - func() (bool, error) { - should, err := shouldEnsureSuggested(flowcontrolClientSet) - if err != nil { - klog.Errorf("failed getting exempt flow-schema, will retry later: %v", err) - return false, nil - } - if !should { - return true, nil - } - err = ensure( - flowcontrolClientSet, - flowcontrolbootstrap.SuggestedFlowSchemas, - flowcontrolbootstrap.SuggestedPriorityLevelConfigurations) - if err != nil { - klog.Errorf("failed ensuring suggested settings, will retry later: %v", err) - return false, nil - } - return true, nil - }, - hookContext.StopCh) - if err != nil { - klog.ErrorS(err, "Ensuring suggested configuration failed") - - // We should not attempt creation of mandatory objects if ensuring the suggested - // configuration resulted in an error. - // This only happens when the stop channel is closed. - // We rely on the presence of the "exempt" priority level configuration object in the cluster - // to indicate whether we should ensure suggested configuration. - return + return PostStartHookName, ensureAPFBootstrapConfiguration, nil +} + +func ensureAPFBootstrapConfiguration(hookContext genericapiserver.PostStartHookContext) error { + clientset, err := flowcontrolclient.NewForConfig(hookContext.LoopbackClientConfig) + if err != nil { + return fmt.Errorf("failed to initialize clientset for APF - %w", err) + } + + // get a derived context that gets cancelled after 5m or + // when the StopCh gets closed, whichever happens first. 
+ ctx, cancel := contextFromChannelAndMaxWaitDuration(hookContext.StopCh, 5*time.Minute) + defer cancel() + + err = wait.PollImmediateUntilWithContext( + ctx, + time.Second, + func(context.Context) (bool, error) { + if err := ensure(clientset); err != nil { + klog.ErrorS(err, "APF bootstrap ensurer ran into error, will retry later") + return false, nil } + return true, nil + }) + if err != nil { + return fmt.Errorf("unable to initialize APF bootstrap configuration") + } - const retryCreatingMandatorySettingsInterval = time.Minute - _ = wait.PollImmediateUntil( - retryCreatingMandatorySettingsInterval, - func() (bool, error) { - if err := upgrade( - flowcontrolClientSet, - flowcontrolbootstrap.MandatoryFlowSchemas, - // Note: the "exempt" priority-level is supposed to be the last item in the pre-defined - // list, so that a crash in the midst of the first kube-apiserver startup does not prevent - // the full initial set of objects from being created. - flowcontrolbootstrap.MandatoryPriorityLevelConfigurations, - ); err != nil { - klog.Errorf("failed creating mandatory flowcontrol settings: %v", err) - return false, nil - } - return false, nil // always retry - }, - hookContext.StopCh) - }() - return nil - }, nil + // we have successfully initialized the bootstrap configuration, now we + // spin up a goroutine which reconciles the bootstrap configuration periodically. + go func() { + err := wait.PollImmediateUntil( + time.Minute, + func() (bool, error) { + if err := ensure(clientset); err != nil { + klog.ErrorS(err, "APF bootstrap ensurer ran into error, will retry later") + } + // always auto update both suggested and mandatory configuration + return false, nil + }, hookContext.StopCh) + if err != nil { + klog.ErrorS(err, "APF bootstrap ensurer is exiting") + } + }() + return nil } -// shouldEnsureSuggested checks if the exempt priority level exists and returns -// whether the suggested flow schemas and priority levels should be ensured. 
-func shouldEnsureSuggested(flowcontrolClientSet flowcontrolclient.FlowcontrolV1beta1Interface) (bool, error) { - if _, err := flowcontrolClientSet.PriorityLevelConfigurations().Get(context.TODO(), flowcontrol.PriorityLevelConfigurationNameExempt, metav1.GetOptions{}); err != nil { - if apierrors.IsNotFound(err) { - return true, nil - } - return false, err +func ensure(clientset flowcontrolclient.FlowcontrolV1beta1Interface) error { + if err := ensureSuggestedConfiguration(clientset); err != nil { + // We should not attempt creation of mandatory objects if ensuring the suggested + // configuration resulted in an error. + // This only happens when the stop channel is closed. + return fmt.Errorf("failed ensuring suggested settings - %w", err) } - return false, nil + + if err := ensureMandatoryConfiguration(clientset); err != nil { + return fmt.Errorf("failed ensuring mandatory settings - %w", err) + } + + if err := removeConfiguration(clientset); err != nil { + return fmt.Errorf("failed to delete removed settings - %w", err) + } + + return nil } -const thisFieldManager = "api-priority-and-fairness-config-producer-v1" +func ensureSuggestedConfiguration(clientset flowcontrolclient.FlowcontrolV1beta1Interface) error { + fsEnsurer := ensurer.NewSuggestedFlowSchemaEnsurer(clientset.FlowSchemas()) + if err := fsEnsurer.Ensure(flowcontrolbootstrap.SuggestedFlowSchemas); err != nil { + return err + } -func ensure(flowcontrolClientSet flowcontrolclient.FlowcontrolV1beta1Interface, flowSchemas []*flowcontrolv1beta1.FlowSchema, priorityLevels []*flowcontrolv1beta1.PriorityLevelConfiguration) error { - for _, flowSchema := range flowSchemas { - _, err := flowcontrolClientSet.FlowSchemas().Create(context.TODO(), flowSchema, metav1.CreateOptions{FieldManager: thisFieldManager}) - if apierrors.IsAlreadyExists(err) { - klog.V(3).Infof("Suggested FlowSchema %s already exists, skipping creating", flowSchema.Name) - continue - } - if err != nil { - return fmt.Errorf("cannot create 
suggested FlowSchema %s due to %v", flowSchema.Name, err) - } - klog.V(3).Infof("Created suggested FlowSchema %s", flowSchema.Name) + plEnsurer := ensurer.NewSuggestedPriorityLevelEnsurerEnsurer(clientset.PriorityLevelConfigurations()) + return plEnsurer.Ensure(flowcontrolbootstrap.SuggestedPriorityLevelConfigurations) +} + +func ensureMandatoryConfiguration(clientset flowcontrolclient.FlowcontrolV1beta1Interface) error { + fsEnsurer := ensurer.NewMandatoryFlowSchemaEnsurer(clientset.FlowSchemas()) + if err := fsEnsurer.Ensure(flowcontrolbootstrap.MandatoryFlowSchemas); err != nil { + return err } - for _, priorityLevelConfiguration := range priorityLevels { - _, err := flowcontrolClientSet.PriorityLevelConfigurations().Create(context.TODO(), priorityLevelConfiguration, metav1.CreateOptions{FieldManager: thisFieldManager}) - if apierrors.IsAlreadyExists(err) { - klog.V(3).Infof("Suggested PriorityLevelConfiguration %s already exists, skipping creating", priorityLevelConfiguration.Name) - continue - } - if err != nil { - return fmt.Errorf("cannot create suggested PriorityLevelConfiguration %s due to %v", priorityLevelConfiguration.Name, err) - } - klog.V(3).Infof("Created suggested PriorityLevelConfiguration %s", priorityLevelConfiguration.Name) + + plEnsurer := ensurer.NewMandatoryPriorityLevelEnsurer(clientset.PriorityLevelConfigurations()) + return plEnsurer.Ensure(flowcontrolbootstrap.MandatoryPriorityLevelConfigurations) +} + +func removeConfiguration(clientset flowcontrolclient.FlowcontrolV1beta1Interface) error { + if err := removeFlowSchema(clientset.FlowSchemas()); err != nil { + return err } - return nil + + return removePriorityLevel(clientset.PriorityLevelConfigurations()) } -func upgrade(flowcontrolClientSet flowcontrolclient.FlowcontrolV1beta1Interface, flowSchemas []*flowcontrolv1beta1.FlowSchema, priorityLevels []*flowcontrolv1beta1.PriorityLevelConfiguration) error { - for _, expectedFlowSchema := range flowSchemas { - actualFlowSchema, err := 
flowcontrolClientSet.FlowSchemas().Get(context.TODO(), expectedFlowSchema.Name, metav1.GetOptions{}) - if err == nil { - // TODO(yue9944882): extract existing version from label and compare - // TODO(yue9944882): create w/ version string attached - wrongSpec, err := flowSchemaHasWrongSpec(expectedFlowSchema, actualFlowSchema) - if err != nil { - return fmt.Errorf("failed checking if mandatory FlowSchema %s is up-to-date due to %v, will retry later", expectedFlowSchema.Name, err) - } - if wrongSpec { - if _, err := flowcontrolClientSet.FlowSchemas().Update(context.TODO(), expectedFlowSchema, metav1.UpdateOptions{FieldManager: thisFieldManager}); err != nil { - return fmt.Errorf("failed upgrading mandatory FlowSchema %s due to %v, will retry later", expectedFlowSchema.Name, err) - } - klog.V(3).Infof("Updated mandatory FlowSchema %s because its spec was %#+v but it must be %#+v", expectedFlowSchema.Name, actualFlowSchema.Spec, expectedFlowSchema.Spec) - } - continue - } - if !apierrors.IsNotFound(err) { - return fmt.Errorf("failed getting mandatory FlowSchema %s due to %v, will retry later", expectedFlowSchema.Name, err) - } - _, err = flowcontrolClientSet.FlowSchemas().Create(context.TODO(), expectedFlowSchema, metav1.CreateOptions{FieldManager: thisFieldManager}) - if apierrors.IsAlreadyExists(err) { - klog.V(3).Infof("Mandatory FlowSchema %s already exists, skipping creating", expectedFlowSchema.Name) - continue - } - if err != nil { - return fmt.Errorf("cannot create mandatory FlowSchema %s due to %v", expectedFlowSchema.Name, err) - } - klog.V(3).Infof("Created mandatory FlowSchema %s", expectedFlowSchema.Name) +func removeFlowSchema(client flowcontrolclient.FlowSchemaInterface) error { + bootstrap := append(flowcontrolbootstrap.MandatoryFlowSchemas, flowcontrolbootstrap.SuggestedFlowSchemas...) 
+ candidates, err := ensurer.GetFlowSchemaRemoveCandidate(client, bootstrap) + if err != nil { + return err } - for _, expectedPriorityLevelConfiguration := range priorityLevels { - actualPriorityLevelConfiguration, err := flowcontrolClientSet.PriorityLevelConfigurations().Get(context.TODO(), expectedPriorityLevelConfiguration.Name, metav1.GetOptions{}) - if err == nil { - // TODO(yue9944882): extract existing version from label and compare - // TODO(yue9944882): create w/ version string attached - wrongSpec, err := priorityLevelHasWrongSpec(expectedPriorityLevelConfiguration, actualPriorityLevelConfiguration) - if err != nil { - return fmt.Errorf("failed checking if mandatory PriorityLevelConfiguration %s is up-to-date due to %v, will retry later", expectedPriorityLevelConfiguration.Name, err) - } - if wrongSpec { - if _, err := flowcontrolClientSet.PriorityLevelConfigurations().Update(context.TODO(), expectedPriorityLevelConfiguration, metav1.UpdateOptions{FieldManager: thisFieldManager}); err != nil { - return fmt.Errorf("failed upgrading mandatory PriorityLevelConfiguration %s due to %v, will retry later", expectedPriorityLevelConfiguration.Name, err) - } - klog.V(3).Infof("Updated mandatory PriorityLevelConfiguration %s because its spec was %#+v but must be %#+v", expectedPriorityLevelConfiguration.Name, actualPriorityLevelConfiguration.Spec, expectedPriorityLevelConfiguration.Spec) - } - continue - } - if !apierrors.IsNotFound(err) { - return fmt.Errorf("failed getting PriorityLevelConfiguration %s due to %v, will retry later", expectedPriorityLevelConfiguration.Name, err) - } - _, err = flowcontrolClientSet.PriorityLevelConfigurations().Create(context.TODO(), expectedPriorityLevelConfiguration, metav1.CreateOptions{FieldManager: thisFieldManager}) - if apierrors.IsAlreadyExists(err) { - klog.V(3).Infof("Mandatory PriorityLevelConfiguration %s already exists, skipping creating", expectedPriorityLevelConfiguration.Name) - continue - } - if err != nil { - 
return fmt.Errorf("cannot create mandatory PriorityLevelConfiguration %s due to %v", expectedPriorityLevelConfiguration.Name, err) - } - klog.V(3).Infof("Created mandatory PriorityLevelConfiguration %s", expectedPriorityLevelConfiguration.Name) + if len(candidates) == 0 { + return nil } - return nil + + fsRemover := ensurer.NewFlowSchemaRemover(client) + return fsRemover.Remove(candidates) } -func flowSchemaHasWrongSpec(expected, actual *flowcontrolv1beta1.FlowSchema) (bool, error) { - copiedExpectedFlowSchema := expected.DeepCopy() - flowcontrolapisv1beta1.SetObjectDefaults_FlowSchema(copiedExpectedFlowSchema) - return !equality.Semantic.DeepEqual(copiedExpectedFlowSchema.Spec, actual.Spec), nil +func removePriorityLevel(client flowcontrolclient.PriorityLevelConfigurationInterface) error { + bootstrap := append(flowcontrolbootstrap.MandatoryPriorityLevelConfigurations, flowcontrolbootstrap.SuggestedPriorityLevelConfigurations...) + candidates, err := ensurer.GetPriorityLevelRemoveCandidate(client, bootstrap) + if err != nil { + return err + } + if len(candidates) == 0 { + return nil + } + + plRemover := ensurer.NewPriorityLevelRemover(client) + return plRemover.Remove(candidates) } -func priorityLevelHasWrongSpec(expected, actual *flowcontrolv1beta1.PriorityLevelConfiguration) (bool, error) { - copiedExpectedPriorityLevel := expected.DeepCopy() - flowcontrolapisv1beta1.SetObjectDefaults_PriorityLevelConfiguration(copiedExpectedPriorityLevel) - return !equality.Semantic.DeepEqual(copiedExpectedPriorityLevel.Spec, actual.Spec), nil +// contextFromChannelAndMaxWaitDuration returns a Context that is bound to the +// specified channel and the wait duration. The derived context will be +// cancelled when the specified channel stopCh is closed or the maximum wait +// duration specified in maxWait elapses, whichever happens first. +// +// Note the caller must *always* call the CancelFunc, otherwise resources may be leaked. 
+func contextFromChannelAndMaxWaitDuration(stopCh <-chan struct{}, maxWait time.Duration) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + defer cancel() + + select { + case <-stopCh: + case <-time.After(maxWait): + + // the caller can explicitly cancel the context which is an + // indication to us to exit the goroutine immediately. + // Note that we are calling cancel more than once when we are here, + // CancelFunc is idempotent and we expect no ripple effects here. + case <-ctx.Done(): + } + }() + return ctx, cancel } diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go index 1405714f8bbc..e2fbf54b181e 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go @@ -260,7 +260,8 @@ func IsOnlyServiceAccountTokenSources(v *api.ProjectedVolumeSource) bool { return false } - if s.ConfigMap != nil && s.ConfigMap.LocalObjectReference.Name != "kube-root-ca.crt" { + // Permit mounting of service ca from a local configmap + if s.ConfigMap != nil && !(s.ConfigMap.LocalObjectReference.Name == "kube-root-ca.crt" || s.ConfigMap.LocalObjectReference.Name == "openshift-service-ca.crt") { return false } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_ebs_block.go b/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_ebs_block.go index d09a69ca027a..34c4e2869726 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_ebs_block.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_ebs_block.go @@ -98,7 +98,7 @@ func (plugin *awsElasticBlockStorePlugin) newBlockVolumeMapperInternal(spec *vol partition = strconv.Itoa(int(ebs.Partition)) } - return &awsElasticBlockStoreMapper{ + mapper := &awsElasticBlockStoreMapper{ awsElasticBlockStore: &awsElasticBlockStore{ podUID: podUID, volName: spec.Name(), @@ -108,7 
+108,16 @@ func (plugin *awsElasticBlockStorePlugin) newBlockVolumeMapperInternal(spec *vol mounter: mounter, plugin: plugin, }, - readOnly: readOnly}, nil + readOnly: readOnly, + } + + blockPath, err := mapper.GetGlobalMapPath(spec) + if err != nil { + return nil, fmt.Errorf("failed to get device path: %v", err) + } + mapper.MetricsProvider = volume.NewMetricsBlock(filepath.Join(blockPath, string(podUID))) + + return mapper, nil } func (plugin *awsElasticBlockStorePlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) { @@ -156,3 +165,9 @@ func (ebs *awsElasticBlockStore) GetPodDeviceMapPath() (string, string) { name := awsElasticBlockStorePluginName return ebs.plugin.host.GetPodVolumeDeviceDir(ebs.podUID, utilstrings.EscapeQualifiedName(name)), ebs.volName } + +// SupportsMetrics returns true for awsElasticBlockStore as it initializes the +// MetricsProvider. +func (ebs *awsElasticBlockStore) SupportsMetrics() bool { + return true +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azuredd/azure_dd_block.go b/vendor/k8s.io/kubernetes/pkg/volume/azuredd/azure_dd_block.go index b13618290a43..1217e4a01961 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/azuredd/azure_dd_block.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/azuredd/azure_dd_block.go @@ -104,10 +104,18 @@ func (plugin *azureDataDiskPlugin) newBlockVolumeMapperInternal(spec *volume.Spe disk := makeDataDisk(spec.Name(), podUID, volumeSource.DiskName, plugin.host, plugin) - return &azureDataDiskMapper{ + mapper := &azureDataDiskMapper{ dataDisk: disk, readOnly: readOnly, - }, nil + } + + blockPath, err := mapper.GetGlobalMapPath(spec) + if err != nil { + return nil, fmt.Errorf("failed to get device path: %v", err) + } + mapper.MetricsProvider = volume.NewMetricsBlock(filepath.Join(blockPath, string(podUID))) + + return mapper, nil } func (plugin *azureDataDiskPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) { 
@@ -121,6 +129,7 @@ func (plugin *azureDataDiskPlugin) newUnmapperInternal(volName string, podUID ty type azureDataDiskUnmapper struct { *dataDisk + volume.MetricsNil } var _ volume.BlockVolumeUnmapper = &azureDataDiskUnmapper{} @@ -149,3 +158,9 @@ func (disk *dataDisk) GetPodDeviceMapPath() (string, string) { name := azureDataDiskPluginName return disk.plugin.host.GetPodVolumeDeviceDir(disk.podUID, utilstrings.EscapeQualifiedName(name)), disk.volumeName } + +// SupportsMetrics returns true for azureDataDiskMapper as it initializes the +// MetricsProvider. +func (addm *azureDataDiskMapper) SupportsMetrics() bool { + return true +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_block.go b/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_block.go index b20680afcb93..ae3ab169b8e1 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_block.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_block.go @@ -101,7 +101,7 @@ func (plugin *cinderPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podU return nil, err } - return &cinderVolumeMapper{ + mapper := &cinderVolumeMapper{ cinderVolume: &cinderVolume{ podUID: podUID, volName: spec.Name(), @@ -111,7 +111,16 @@ func (plugin *cinderPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podU mounter: mounter, plugin: plugin, }, - readOnly: readOnly}, nil + readOnly: readOnly, + } + + blockPath, err := mapper.GetGlobalMapPath(spec) + if err != nil { + return nil, fmt.Errorf("failed to get device path: %v", err) + } + mapper.MetricsProvider = volume.NewMetricsBlock(filepath.Join(blockPath, string(podUID))) + + return mapper, nil } func (plugin *cinderPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) { @@ -131,6 +140,7 @@ func (plugin *cinderPlugin) newUnmapperInternal(volName string, podUID types.UID type cinderPluginUnmapper struct { *cinderVolume + volume.MetricsNil } var _ volume.BlockVolumeUnmapper = &cinderPluginUnmapper{} @@ 
-159,3 +169,9 @@ func (cd *cinderVolume) GetPodDeviceMapPath() (string, string) { name := cinderVolumePluginName return cd.plugin.host.GetPodVolumeDeviceDir(cd.podUID, utilstrings.EscapeQualifiedName(name)), cd.volName } + +// SupportsMetrics returns true for cinderVolumeMapper as it initializes the +// MetricsProvider. +func (cvm *cinderVolumeMapper) SupportsMetrics() bool { + return true +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go b/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go index 2f1d542237aa..1748afeb1215 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go @@ -94,6 +94,7 @@ type csiBlockMapper struct { readOnly bool spec *volume.Spec podUID types.UID + volume.MetricsProvider } var _ volume.BlockVolumeMapper = &csiBlockMapper{} @@ -113,6 +114,12 @@ func (m *csiBlockMapper) GetStagingPath() string { return filepath.Join(m.plugin.host.GetVolumeDevicePluginDir(CSIPluginName), "staging", m.specName) } +// SupportsMetrics returns true for csiBlockMapper as it initializes the +// MetricsProvider. +func (m *csiBlockMapper) SupportsMetrics() bool { + return true +} + // getPublishDir returns path to a directory, where the volume is published to each pod. 
// Example: plugins/kubernetes.io/csi/volumeDevices/publish/{specName} func (m *csiBlockMapper) getPublishDir() string { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go b/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go index 85c1c1f3db18..e380498da7fc 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go @@ -697,6 +697,13 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opt } klog.V(4).Info(log("created path successfully [%s]", dataDir)) + blockPath, err := mapper.GetGlobalMapPath(spec) + if err != nil { + return nil, errors.New(log("failed to get device path: %v", err)) + } + + mapper.MetricsProvider = NewMetricsCsi(pvSource.VolumeHandle, blockPath+"/"+string(podRef.UID), csiDriverName(pvSource.Driver)) + // persist volume info data for teardown node := string(p.host.GetNodeName()) attachID := getAttachmentName(pvSource.VolumeHandle, pvSource.Driver, node) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go b/vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go index c94c95d47a63..7b6495106a3f 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go @@ -23,13 +23,13 @@ import ( "strings" "time" - "k8s.io/klog/v2" - "k8s.io/mount-utils" - v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" + "k8s.io/mount-utils" ) type fcAttacher struct { @@ -172,12 +172,28 @@ func (detacher *fcDetacher) UnmountDevice(deviceMountPath string) error { if devName == "" { return nil } + unMounter := volumeSpecToUnmounter(detacher.mounter, detacher.host) - err = detacher.manager.DetachDisk(*unMounter, devName) + // The device is unmounted now. 
If UnmountDevice was retried, GetDeviceNameFromMount + // won't find any mount and won't return DetachDisk below. + // Therefore implement our own retry mechanism here. + // E.g. DetachDisk sometimes fails to flush a multipath device with "device is busy" when it was + // just unmounted. + // 2 minutes should be enough within 6 minute force detach timeout. + var detachError error + err = wait.PollImmediate(10*time.Second, 2*time.Minute, func() (bool, error) { + detachError = detacher.manager.DetachDisk(*unMounter, devName) + if detachError != nil { + klog.V(4).Infof("fc: failed to detach disk %s (%s): %v", devName, deviceMountPath, detachError) + return false, nil + } + return true, nil + }) if err != nil { - return fmt.Errorf("fc: failed to detach disk: %s\nError: %v", devName, err) + return fmt.Errorf("fc: failed to detach disk: %s: %v", devName, detachError) } - klog.V(4).Infof("fc: successfully detached disk: %s", devName) + + klog.V(2).Infof("fc: successfully detached disk: %s", devName) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go b/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go index ce441cd9e442..ad0cd5bc3661 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go @@ -26,6 +26,7 @@ import ( "k8s.io/klog/v2" "k8s.io/mount-utils" utilexec "k8s.io/utils/exec" + "k8s.io/utils/io" utilstrings "k8s.io/utils/strings" v1 "k8s.io/api/core/v1" @@ -171,7 +172,7 @@ func (plugin *fcPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID t return nil, fmt.Errorf("fc: no fc disk information found. 
failed to make a new mapper") } - return &fcDiskMapper{ + mapper := &fcDiskMapper{ fcDisk: &fcDisk{ podUID: podUID, volName: spec.Name(), @@ -184,7 +185,15 @@ func (plugin *fcPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID t readOnly: readOnly, mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}, deviceUtil: util.NewDeviceHandler(util.NewIOHandler()), - }, nil + } + + blockPath, err := mapper.GetGlobalMapPath(spec) + if err != nil { + return nil, fmt.Errorf("failed to get device path: %v", err) + } + mapper.MetricsProvider = volume.NewMetricsBlock(filepath.Join(blockPath, string(podUID))) + + return mapper, nil } func (plugin *fcPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { @@ -233,6 +242,11 @@ func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volu var globalPDPath string mounter := plugin.host.GetMounter(plugin.GetPluginName()) paths, err := mounter.GetMountRefs(mountPath) + if io.IsInconsistentReadError(err) { + klog.Errorf("Failed to read mount refs from /proc/mounts for %s: %s", mountPath, err) + klog.Errorf("Kubelet cannot unmount volume at %s, please unmount it manually", mountPath) + return nil, err + } if err != nil { return nil, err } @@ -393,6 +407,7 @@ func (c *fcDiskUnmounter) TearDownAt(dir string) error { // Block Volumes Support type fcDiskMapper struct { *fcDisk + volume.MetricsProvider readOnly bool mounter mount.Interface deviceUtil util.DeviceUtil diff --git a/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd_block.go b/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd_block.go index 3811483c1e1d..59c9c524653d 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd_block.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd_block.go @@ -108,7 +108,7 @@ func (plugin *gcePersistentDiskPlugin) newBlockVolumeMapperInternal(spec *volume partition = strconv.Itoa(int(volumeSource.Partition)) } - return &gcePersistentDiskMapper{ + mapper := 
&gcePersistentDiskMapper{ gcePersistentDisk: &gcePersistentDisk{ volName: spec.Name(), podUID: podUID, @@ -118,7 +118,16 @@ func (plugin *gcePersistentDiskPlugin) newBlockVolumeMapperInternal(spec *volume mounter: mounter, plugin: plugin, }, - readOnly: readOnly}, nil + readOnly: readOnly, + } + + blockPath, err := mapper.GetGlobalMapPath(spec) + if err != nil { + return nil, fmt.Errorf("failed to get device path: %v", err) + } + mapper.MetricsProvider = volume.NewMetricsBlock(filepath.Join(blockPath, string(podUID))) + + return mapper, nil } func (plugin *gcePersistentDiskPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) { @@ -165,3 +174,9 @@ func (pd *gcePersistentDisk) GetPodDeviceMapPath() (string, string) { name := gcePersistentDiskPluginName return pd.plugin.host.GetPodVolumeDeviceDir(pd.podUID, utilstrings.EscapeQualifiedName(name)), pd.volName } + +// SupportsMetrics returns true for gcePersistentDisk as it initializes the +// MetricsProvider. 
+func (pd *gcePersistentDisk) SupportsMetrics() bool { + return true +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go index 8017c2d0d1e9..24d4e2ab88c8 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go @@ -27,6 +27,7 @@ import ( "k8s.io/klog/v2" "k8s.io/mount-utils" utilexec "k8s.io/utils/exec" + "k8s.io/utils/io" "k8s.io/utils/keymutex" utilstrings "k8s.io/utils/strings" @@ -161,12 +162,20 @@ func (plugin *iscsiPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUI if err != nil { return nil, err } - return &iscsiDiskMapper{ + mapper := &iscsiDiskMapper{ iscsiDisk: iscsiDisk, readOnly: readOnly, exec: exec, deviceUtil: ioutil.NewDeviceHandler(ioutil.NewIOHandler()), - }, nil + } + + blockPath, err := mapper.GetGlobalMapPath(spec) + if err != nil { + return nil, fmt.Errorf("failed to get device path: %v", err) + } + mapper.MetricsProvider = volume.NewMetricsBlock(filepath.Join(blockPath, string(podUID))) + + return mapper, nil } func (plugin *iscsiPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { @@ -211,6 +220,11 @@ func (plugin *iscsiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*v var globalPDPath string mounter := plugin.host.GetMounter(plugin.GetPluginName()) paths, err := mounter.GetMountRefs(mountPath) + if io.IsInconsistentReadError(err) { + klog.Errorf("Failed to read mount refs from /proc/mounts for %s: %s", mountPath, err) + klog.Errorf("Kubelet cannot unmount volume at %s, please unmount it and all mounts of the same device manually.", mountPath) + return nil, err + } if err != nil { return nil, err } @@ -385,6 +399,13 @@ type iscsiDiskUnmapper struct { *iscsiDisk exec utilexec.Interface deviceUtil ioutil.DeviceUtil + volume.MetricsNil +} + +// SupportsMetrics returns true for SupportsMetrics as it initializes the +// MetricsProvider. 
+func (idm *iscsiDiskMapper) SupportsMetrics() bool { + return true } var _ volume.BlockVolumeUnmapper = &iscsiDiskUnmapper{} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/local/local.go b/vendor/k8s.io/kubernetes/pkg/volume/local/local.go index 81a2f9424c8d..b2cfa9b28f6b 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/local/local.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/local/local.go @@ -161,7 +161,7 @@ func (plugin *localVolumePlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1 return nil, err } - return &localVolumeMapper{ + mapper := &localVolumeMapper{ localVolume: &localVolume{ podUID: pod.UID, volName: spec.Name(), @@ -169,8 +169,15 @@ func (plugin *localVolumePlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1 plugin: plugin, }, readOnly: readOnly, - }, nil + } + + blockPath, err := mapper.GetGlobalMapPath(spec) + if err != nil { + return nil, fmt.Errorf("failed to get device path: %v", err) + } + mapper.MetricsProvider = volume.NewMetricsBlock(filepath.Join(blockPath, string(pod.UID))) + return mapper, nil } func (plugin *localVolumePlugin) NewBlockVolumeUnmapper(volName string, @@ -626,9 +633,16 @@ func (m *localVolumeMapper) GetStagingPath() string { return "" } +// SupportsMetrics returns true for SupportsMetrics as it initializes the +// MetricsProvider. +func (m *localVolumeMapper) SupportsMetrics() bool { + return true +} + // localVolumeUnmapper implements the BlockVolumeUnmapper interface for local volumes. type localVolumeUnmapper struct { *localVolume + volume.MetricsNil } var _ volume.BlockVolumeUnmapper = &localVolumeUnmapper{} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/metrics_block.go b/vendor/k8s.io/kubernetes/pkg/volume/metrics_block.go new file mode 100644 index 000000000000..e0145ae91af1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/metrics_block.go @@ -0,0 +1,87 @@ +/* +Copyright 2021 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package volume + +import ( + "fmt" + "io" + "os" + "runtime" + + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ MetricsProvider = &metricsBlock{} + +// metricsBlock represents a MetricsProvider that detects the size of the +// BlockMode Volume. +type metricsBlock struct { + // the device node where the volume is attached to. + device string +} + +// NewMetricsStatfs creates a new metricsBlock with the device node of the +// Volume. +func NewMetricsBlock(device string) MetricsProvider { + return &metricsBlock{device} +} + +// See MetricsProvider.GetMetrics +// GetMetrics detects the size of the BlockMode volume for the device node +// where the Volume is attached. +// +// Note that only the capacity of the device can be detected with standard +// tools. Storage systems may have more information that they can provide by +// going through specialized APIs. 
+func (mb *metricsBlock) GetMetrics() (*Metrics, error) { + // TODO: Windows does not yet support VolumeMode=Block + if runtime.GOOS == "windows" { + return nil, NewNotImplementedError("Windows does not support Block volumes") + } + + metrics := &Metrics{Time: metav1.Now()} + if mb.device == "" { + return metrics, NewNoPathDefinedError() + } + + err := mb.getBlockInfo(metrics) + if err != nil { + return metrics, err + } + + return metrics, nil +} + +// getBlockInfo fetches metrics.Capacity by opening the device and seeking to +// the end. +func (mb *metricsBlock) getBlockInfo(metrics *Metrics) error { + dev, err := os.Open(mb.device) + if err != nil { + return fmt.Errorf("unable to open device %q: %w", mb.device, err) + } + defer dev.Close() + + end, err := dev.Seek(0, io.SeekEnd) + if err != nil { + return fmt.Errorf("failed to detect size of %q: %w", mb.device, err) + } + + metrics.Capacity = resource.NewQuantity(end, resource.BinarySI) + + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/metrics_errors.go b/vendor/k8s.io/kubernetes/pkg/volume/metrics_errors.go index a6cbdbf72034..0f7987e0936b 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/metrics_errors.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/metrics_errors.go @@ -35,6 +35,14 @@ func NewNotSupportedError() *MetricsError { } } +// NewNotImplementedError creates a new MetricsError with code NotSupported. +func NewNotImplementedError(reason string) *MetricsError { + return &MetricsError{ + Code: ErrCodeNotSupported, + Msg: fmt.Sprintf("metrics support is not implemented: %s", reason), + } +} + // NewNotSupportedErrorWithDriverName creates a new MetricsError with code NotSupported. // driver name is added to the error message. 
func NewNotSupportedErrorWithDriverName(name string) *MetricsError { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/metrics_nil.go b/vendor/k8s.io/kubernetes/pkg/volume/metrics_nil.go index 5438dc3de353..11b74e079785 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/metrics_nil.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/metrics_nil.go @@ -23,6 +23,11 @@ var _ MetricsProvider = &MetricsNil{} // metrics. type MetricsNil struct{} +// SupportsMetrics returns false for the MetricsNil type. +func (*MetricsNil) SupportsMetrics() bool { + return false +} + // GetMetrics returns an empty Metrics and an error. // See MetricsProvider.GetMetrics func (*MetricsNil) GetMetrics() (*Metrics, error) { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go b/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go index a624d736aa7e..cf9e1049f6c5 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go @@ -524,13 +524,21 @@ func (plugin *rbdPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID return nil, err } - return &rbdDiskMapper{ + mapper := &rbdDiskMapper{ rbd: newRBD(podUID, spec.Name(), img, pool, ro, plugin, manager), mon: mon, id: id, keyring: keyring, secret: secret, - }, nil + } + + blockPath, err := mapper.GetGlobalMapPath(spec) + if err != nil { + return nil, fmt.Errorf("failed to get device path: %v", err) + } + mapper.MetricsProvider = volume.NewMetricsBlock(filepath.Join(blockPath, string(podUID))) + + return mapper, nil } func (plugin *rbdPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) { @@ -930,6 +938,12 @@ func (rbd *rbd) rbdPodDeviceMapPath() (string, string) { return rbd.plugin.host.GetPodVolumeDeviceDir(rbd.podUID, utilstrings.EscapeQualifiedName(name)), rbd.volName } +// SupportsMetrics returns true for rbdDiskMapper as it initializes the +// MetricsProvider. 
+func (rdm *rbdDiskMapper) SupportsMetrics() bool { + return true +} + type rbdDiskUnmapper struct { *rbdDiskMapper } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go b/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go index c6fb49192e86..e6fe7ff9d5bc 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go @@ -373,6 +373,7 @@ func (w *AtomicWriter) newTimestampDir() (string, error) { // writePayloadToDir writes the given payload to the given directory. The // directory must exist. func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir string) error { + isNotWindows := runtime.GOOS != "windows" for userVisiblePath, fileProjection := range payload { content := fileProjection.Data mode := os.FileMode(fileProjection.Mode) @@ -400,9 +401,11 @@ func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir if fileProjection.FsUser == nil { continue } - if err := os.Chown(fullPath, int(*fileProjection.FsUser), -1); err != nil { - klog.Errorf("%s: unable to change file %s with owner %v: %v", w.logContext, fullPath, int(*fileProjection.FsUser), err) - return err + if isNotWindows { + if err := os.Chown(fullPath, int(*fileProjection.FsUser), -1); err != nil { + klog.Errorf("%s: unable to change file %s with owner %v: %v", w.logContext, fullPath, int(*fileProjection.FsUser), err) + return err + } } } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/volume.go b/vendor/k8s.io/kubernetes/pkg/volume/volume.go index 2ec1e5233bab..63246a85a5d2 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/volume.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/volume.go @@ -48,6 +48,14 @@ type BlockVolume interface { // and name of a symbolic link associated to a block device. // ex. 
pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/, {volumeName} GetPodDeviceMapPath() (string, string) + + // SupportsMetrics should return true if the MetricsProvider is + // initialized + SupportsMetrics() bool + + // MetricsProvider embeds methods for exposing metrics (e.g. + // used, available space). + MetricsProvider } // MetricsProvider exposes metrics (e.g. used,available space) related to a diff --git a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_block.go b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_block.go index f12080490693..721c252ebc9a 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_block.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_block.go @@ -97,7 +97,7 @@ func (plugin *vsphereVolumePlugin) newBlockVolumeMapperInternal(spec *volume.Spe return nil, err } volPath := volumeSource.VolumePath - return &vsphereBlockVolumeMapper{ + mapper := &vsphereBlockVolumeMapper{ vsphereVolume: &vsphereVolume{ volName: spec.Name(), podUID: podUID, @@ -107,8 +107,15 @@ func (plugin *vsphereVolumePlugin) newBlockVolumeMapperInternal(spec *volume.Spe plugin: plugin, MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)), }, - }, nil + } + blockPath, err := mapper.GetGlobalMapPath(spec) + if err != nil { + return nil, fmt.Errorf("failed to get device path: %v", err) + } + mapper.MetricsProvider = volume.NewMetricsBlock(filepath.Join(blockPath, string(podUID))) + + return mapper, nil } func (plugin *vsphereVolumePlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) { @@ -137,6 +144,7 @@ var _ volume.BlockVolumeUnmapper = &vsphereBlockVolumeUnmapper{} type vsphereBlockVolumeUnmapper struct { *vsphereVolume + volume.MetricsNil } // GetGlobalMapPath returns global map path and error @@ -152,3 +160,9 @@ func (v *vsphereVolume) GetGlobalMapPath(spec *volume.Spec) 
(string, error) { func (v *vsphereVolume) GetPodDeviceMapPath() (string, string) { return v.plugin.host.GetPodVolumeDeviceDir(v.podUID, utilstrings.EscapeQualifiedName(vsphereVolumePluginName)), v.volName } + +// SupportsMetrics returns true for vsphereBlockVolumeMapper as it initializes the +// MetricsProvider. +func (vbvm *vsphereBlockVolumeMapper) SupportsMetrics() bool { + return true +} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission.go index 6a003f75e0a6..07173e841da1 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission.go @@ -576,6 +576,19 @@ func TokenVolumeSource() *api.ProjectedVolumeSource { }, }, }, + { + ConfigMap: &api.ConfigMapProjection{ + LocalObjectReference: api.LocalObjectReference{ + Name: "openshift-service-ca.crt", + }, + Items: []api.KeyToPath{ + { + Key: "service-ca.crt", + Path: "service-ca.crt", + }, + }, + }, + }, }, } } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go index 3cd6390a02c1..39a08f3a9b80 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go @@ -408,6 +408,13 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding) eventsRule(), }, }) + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "service-ca-cert-publisher"}, + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("create", "update").Groups(legacyGroup).Resources("configmaps").RuleOrDie(), + eventsRule(), + }, + }) if 
utilfeature.DefaultFeatureGate.Enabled(genericfeatures.StorageVersionAPI) && utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIServerIdentity) { addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/volume_metrics.go b/vendor/k8s.io/kubernetes/test/e2e/storage/volume_metrics.go index c7de25343081..330939eac65c 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/volume_metrics.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/volume_metrics.go @@ -47,6 +47,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { c clientset.Interface ns string pvc *v1.PersistentVolumeClaim + pvcBlock *v1.PersistentVolumeClaim metricsGrabber *e2emetrics.Grabber invalidSc *storagev1.StorageClass defaultScName string @@ -67,9 +68,17 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { ClaimSize: "2Gi", } + fsMode := v1.PersistentVolumeFilesystem pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ ClaimSize: test.ClaimSize, - VolumeMode: &test.VolumeMode, + VolumeMode: &fsMode, + }, ns) + + // selected providers all support PersistentVolumeBlock + blockMode := v1.PersistentVolumeBlock + pvcBlock = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ + ClaimSize: test.ClaimSize, + VolumeMode: &blockMode, }, ns) metricsGrabber, err = e2emetrics.NewMetricsGrabber(c, nil, true, false, true, false, false) @@ -201,7 +210,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { verifyMetricCount(storageOpMetrics, updatedStorageMetrics, "volume_provision", true) }) - ginkgo.It("should create volume metrics with the correct PVC ref", func() { + ginkgo.It("should create volume metrics with the correct FilesystemMode PVC ref", func() { var err error pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) @@ -258,6 +267,71 @@ var _ = 
utils.SIGDescribe("[Serial] Volume metrics", func() { framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod)) }) + ginkgo.It("should create volume metrics with the correct BlockMode PVC ref", func() { + var err error + pvcBlock, err = c.CoreV1().PersistentVolumeClaims(pvcBlock.Namespace).Create(context.TODO(), pvcBlock, metav1.CreateOptions{}) + framework.ExpectNoError(err) + framework.ExpectNotEqual(pvcBlock, nil) + + pod := e2epod.MakePod(ns, nil, nil, false, "") + pod.Spec.Containers[0].VolumeDevices = []v1.VolumeDevice{{ + Name: pvcBlock.Name, + DevicePath: "/mnt/" + pvcBlock.Name, + }} + pod.Spec.Volumes = []v1.Volume{{ + Name: pvcBlock.Name, + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvcBlock.Name, + ReadOnly: false, + }, + }, + }} + pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + framework.ExpectNoError(err) + + err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart) + framework.ExpectNoError(err, "Error starting pod ", pod.Name) + + pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + framework.ExpectNoError(err) + + // Verify volume stat metrics were collected for the referenced PVC + volumeStatKeys := []string{ + // BlockMode PVCs only support capacity (for now) + kubeletmetrics.VolumeStatsCapacityBytesKey, + } + key := volumeStatKeys[0] + kubeletKeyName := fmt.Sprintf("%s_%s", kubeletmetrics.KubeletSubsystem, key) + // Poll kubelet metrics waiting for the volume to be picked up + // by the volume stats collector + var kubeMetrics e2emetrics.KubeletMetrics + waitErr := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) { + framework.Logf("Grabbing Kubelet metrics") + // Grab kubelet metrics from the node the pod was scheduled on + var err error + kubeMetrics, err = metricsGrabber.GrabFromKubelet(pod.Spec.NodeName) + if err != nil { + framework.Logf("Error fetching 
kubelet metrics") + return false, err + } + if !findVolumeStatMetric(kubeletKeyName, pvcBlock.Namespace, pvcBlock.Name, kubeMetrics) { + return false, nil + } + return true, nil + }) + framework.ExpectNoError(waitErr, "Unable to find metric %s for PVC %s/%s", kubeletKeyName, pvcBlock.Namespace, pvcBlock.Name) + + for _, key := range volumeStatKeys { + kubeletKeyName := fmt.Sprintf("%s_%s", kubeletmetrics.KubeletSubsystem, key) + found := findVolumeStatMetric(kubeletKeyName, pvcBlock.Namespace, pvcBlock.Name, kubeMetrics) + framework.ExpectEqual(found, true, "PVC %s, Namespace %s not found for %s", pvcBlock.Name, pvcBlock.Namespace, kubeletKeyName) + } + + framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) + framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod)) + }) + ginkgo.It("should create metrics for total time taken in volume operations in P/V Controller", func() { var err error pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) diff --git a/vendor/k8s.io/mount-utils/go.mod b/vendor/k8s.io/mount-utils/go.mod index 70a160beee1b..2ef6cd2d155d 100644 --- a/vendor/k8s.io/mount-utils/go.mod +++ b/vendor/k8s.io/mount-utils/go.mod @@ -10,7 +10,7 @@ require ( github.com/stretchr/testify v1.6.1 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect k8s.io/klog/v2 v2.8.0 - k8s.io/utils v0.0.0-20201110183641-67b214c5f920 + k8s.io/utils v0.0.0-20210521133846-da695404a2bc ) replace k8s.io/mount-utils => ../mount-utils diff --git a/vendor/k8s.io/mount-utils/go.sum b/vendor/k8s.io/mount-utils/go.sum index 6fadc4d5828c..863d0aec99a8 100644 --- a/vendor/k8s.io/mount-utils/go.sum +++ b/vendor/k8s.io/mount-utils/go.sum @@ -27,5 +27,5 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= k8s.io/klog/v2 v2.8.0/go.mod 
h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210521133846-da695404a2bc h1:dx6VGe+PnOW/kD/2UV4aUSsRfJGd7+lcqgJ6Xg0HwUs= +k8s.io/utils v0.0.0-20210521133846-da695404a2bc/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= diff --git a/vendor/k8s.io/mount-utils/mount_helper_unix.go b/vendor/k8s.io/mount-utils/mount_helper_unix.go index 2c14a27c7192..f3658647eea2 100644 --- a/vendor/k8s.io/mount-utils/mount_helper_unix.go +++ b/vendor/k8s.io/mount-utils/mount_helper_unix.go @@ -32,7 +32,7 @@ const ( // At least number of fields per line in /proc//mountinfo. expectedAtLeastNumFieldsPerMountInfo = 10 // How many times to retry for a consistent read of /proc/mounts. - maxListTries = 3 + maxListTries = 10 ) // IsCorruptedMnt return true if err is about corrupted mount point diff --git a/vendor/k8s.io/utils/io/read.go b/vendor/k8s.io/utils/io/read.go index 16a638d764bf..f0af3c8ec8a3 100644 --- a/vendor/k8s.io/utils/io/read.go +++ b/vendor/k8s.io/utils/io/read.go @@ -30,6 +30,9 @@ var ErrLimitReached = errors.New("the read limit is reached") // ConsistentRead repeatedly reads a file until it gets the same content twice. // This is useful when reading files in /proc that are larger than page size // and kernel may modify them between individual read() syscalls. +// It returns InconsistentReadError when it cannot get a consistent read in +// given nr. of attempts. Caller should retry, kernel is probably under heavy +// mount/unmount load. 
func ConsistentRead(filename string, attempts int) ([]byte, error) { return consistentReadSync(filename, attempts, nil) } @@ -56,7 +59,28 @@ func consistentReadSync(filename string, attempts int, sync func(int)) ([]byte, // Files are different, continue reading oldContent = newContent } - return nil, fmt.Errorf("could not get consistent content of %s after %d attempts", filename, attempts) + return nil, InconsistentReadError{filename, attempts} +} + +// InconsistentReadError is returned from ConsistentRead when it cannot get +// a consistent read in given nr. of attempts. Caller should retry, kernel is +// probably under heavy mount/unmount load. +type InconsistentReadError struct { + filename string + attempts int +} + +func (i InconsistentReadError) Error() string { + return fmt.Sprintf("could not get consistent content of %s after %d attempts", i.filename, i.attempts) +} + +var _ error = InconsistentReadError{} + +func IsInconsistentReadError(err error) bool { + if _, ok := err.(InconsistentReadError); ok { + return true + } + return false } // ReadAtMost reads up to `limit` bytes from `r`, and reports an error diff --git a/vendor/k8s.io/utils/mount/README.md b/vendor/k8s.io/utils/mount/README.md new file mode 100644 index 000000000000..e66b5c27a041 --- /dev/null +++ b/vendor/k8s.io/utils/mount/README.md @@ -0,0 +1,11 @@ +# WARNING ! Please read before using mount functionality +# THIS REPOSITORY is moved : Please use https://github.com/kubernetes/mount-utils for all your work + +This package has been moved to new location. Please use the new repo for bug fixes and enhancements. +All existing dependencies on this repo are being removed. Eventually this repo will be deprecated. +If you are using this repo or planning to use, you must use the new repo mentioned here for this functionality. + +New repo : https://github.com/kubernetes/mount-utils +New go module: k8s.io/mount-utils +For Kubernetes/Kubernetes project the code is available under staging directory. 
+ diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go index 0a55a844ee71..1da6f6664a3d 100644 --- a/vendor/k8s.io/utils/pointer/pointer.go +++ b/vendor/k8s.io/utils/pointer/pointer.go @@ -46,86 +46,182 @@ func AllPtrFieldsNil(obj interface{}) bool { return true } -// Int32Ptr returns a pointer to an int32 -func Int32Ptr(i int32) *int32 { +// Int32 returns a pointer to an int32. +func Int32(i int32) *int32 { return &i } -// Int32PtrDerefOr dereference the int32 ptr and returns it if not nil, -// else returns def. -func Int32PtrDerefOr(ptr *int32, def int32) int32 { +var Int32Ptr = Int32 // for back-compat + +// Int32Deref dereferences the int32 ptr and returns it if not nil, or else +// returns def. +func Int32Deref(ptr *int32, def int32) int32 { if ptr != nil { return *ptr } return def } -// Int64Ptr returns a pointer to an int64 -func Int64Ptr(i int64) *int64 { +var Int32PtrDerefOr = Int32Deref // for back-compat + +// Int32Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +func Int32Equal(a, b *int32) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} + +// Int64 returns a pointer to an int64. +func Int64(i int64) *int64 { return &i } -// Int64PtrDerefOr dereference the int64 ptr and returns it if not nil, -// else returns def. -func Int64PtrDerefOr(ptr *int64, def int64) int64 { +var Int64Ptr = Int64 // for back-compat + +// Int64Deref dereferences the int64 ptr and returns it if not nil, or else +// returns def. +func Int64Deref(ptr *int64, def int64) int64 { if ptr != nil { return *ptr } return def } -// BoolPtr returns a pointer to a bool -func BoolPtr(b bool) *bool { +var Int64PtrDerefOr = Int64Deref // for back-compat + +// Int64Equal returns true if both arguments are nil or both arguments +// dereference to the same value. 
+func Int64Equal(a, b *int64) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} + +// Bool returns a pointer to a bool. +func Bool(b bool) *bool { return &b } -// BoolPtrDerefOr dereference the bool ptr and returns it if not nil, -// else returns def. -func BoolPtrDerefOr(ptr *bool, def bool) bool { +var BoolPtr = Bool // for back-compat + +// BoolDeref dereferences the bool ptr and returns it if not nil, or else +// returns def. +func BoolDeref(ptr *bool, def bool) bool { if ptr != nil { return *ptr } return def } -// StringPtr returns a pointer to the passed string. -func StringPtr(s string) *string { +var BoolPtrDerefOr = BoolDeref // for back-compat + +// BoolEqual returns true if both arguments are nil or both arguments +// dereference to the same value. +func BoolEqual(a, b *bool) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} + +// String returns a pointer to a string. +func String(s string) *string { return &s } -// StringPtrDerefOr dereference the string ptr and returns it if not nil, -// else returns def. -func StringPtrDerefOr(ptr *string, def string) string { +var StringPtr = String // for back-compat + +// StringDeref dereferences the string ptr and returns it if not nil, or else +// returns def. +func StringDeref(ptr *string, def string) string { if ptr != nil { return *ptr } return def } -// Float32Ptr returns a pointer to the passed float32. -func Float32Ptr(i float32) *float32 { +var StringPtrDerefOr = StringDeref // for back-compat + +// StringEqual returns true if both arguments are nil or both arguments +// dereference to the same value. +func StringEqual(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} + +// Float32 returns a pointer to the a float32. 
+func Float32(i float32) *float32 { return &i } -// Float32PtrDerefOr dereference the float32 ptr and returns it if not nil, -// else returns def. -func Float32PtrDerefOr(ptr *float32, def float32) float32 { +var Float32Ptr = Float32 + +// Float32Deref dereferences the float32 ptr and returns it if not nil, or else +// returns def. +func Float32Deref(ptr *float32, def float32) float32 { if ptr != nil { return *ptr } return def } -// Float64Ptr returns a pointer to the passed float64. -func Float64Ptr(i float64) *float64 { +var Float32PtrDerefOr = Float32Deref // for back-compat + +// Float32Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +func Float32Equal(a, b *float32) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} + +// Float64 returns a pointer to the a float64. +func Float64(i float64) *float64 { return &i } -// Float64PtrDerefOr dereference the float64 ptr and returns it if not nil, -// else returns def. -func Float64PtrDerefOr(ptr *float64, def float64) float64 { +var Float64Ptr = Float64 + +// Float64Deref dereferences the float64 ptr and returns it if not nil, or else +// returns def. +func Float64Deref(ptr *float64, def float64) float64 { if ptr != nil { return *ptr } return def } + +var Float64PtrDerefOr = Float64Deref // for back-compat + +// Float64Equal returns true if both arguments are nil or both arguments +// dereference to the same value. 
+func Float64Equal(a, b *float64) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} diff --git a/vendor/modules.txt b/vendor/modules.txt index a51177cf2011..7770bda0484e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -608,7 +608,7 @@ github.com/opencontainers/go-digest # github.com/opencontainers/image-spec v1.0.1 github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 -# github.com/opencontainers/runc v1.0.0-rc95 +# github.com/opencontainers/runc v1.0.0-rc95.0.20210608002938-1f5126fe967e => github.com/openshift/opencontainers-runc v1.0.0-rc95.0.20210608002938-1f5126fe967e github.com/opencontainers/runc/libcontainer github.com/opencontainers/runc/libcontainer/apparmor github.com/opencontainers/runc/libcontainer/capabilities @@ -1405,7 +1405,7 @@ gopkg.in/warnings.v0 gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c gopkg.in/yaml.v3 -# k8s.io/api v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210603185452-2dfc46b23003 +# k8s.io/api v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210624185839-c6914a80ec2e ## explicit k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1453,7 +1453,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210603185452-2dfc46b23003 +# k8s.io/apiextensions-apiserver v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210624185839-c6914a80ec2e ## explicit k8s.io/apiextensions-apiserver/pkg/apihelpers k8s.io/apiextensions-apiserver/pkg/apis/apiextensions @@ -1497,7 +1497,7 @@ k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition 
k8s.io/apiextensions-apiserver/test/integration k8s.io/apiextensions-apiserver/test/integration/fixtures -# k8s.io/apimachinery v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210603185452-2dfc46b23003 +# k8s.io/apimachinery v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210624185839-c6914a80ec2e ## explicit k8s.io/apimachinery/pkg/api/apitesting k8s.io/apimachinery/pkg/api/equality @@ -1562,7 +1562,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210603185452-2dfc46b23003 +# k8s.io/apiserver v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210624185839-c6914a80ec2e ## explicit k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration @@ -1704,12 +1704,12 @@ k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/oidc k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/cli-runtime v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210603185452-2dfc46b23003 +# k8s.io/cli-runtime v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210624185839-c6914a80ec2e ## explicit k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210603185452-2dfc46b23003 +# k8s.io/client-go v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210624185839-c6914a80ec2e ## explicit k8s.io/client-go/applyconfigurations/admissionregistration/v1 
k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -2001,7 +2001,7 @@ k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210603185452-2dfc46b23003 +# k8s.io/cloud-provider v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210624185839-c6914a80ec2e k8s.io/cloud-provider k8s.io/cloud-provider/api k8s.io/cloud-provider/credentialconfig @@ -2010,12 +2010,12 @@ k8s.io/cloud-provider/service/helpers k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/errors k8s.io/cloud-provider/volume/helpers -# k8s.io/cluster-bootstrap v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210603185452-2dfc46b23003 +# k8s.io/cluster-bootstrap v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210624185839-c6914a80ec2e k8s.io/cluster-bootstrap/token/api k8s.io/cluster-bootstrap/token/util k8s.io/cluster-bootstrap/util/secrets k8s.io/cluster-bootstrap/util/tokens -# k8s.io/component-base v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210603185452-2dfc46b23003 +# k8s.io/component-base v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210624185839-c6914a80ec2e ## explicit k8s.io/component-base/cli/flag k8s.io/component-base/cli/globalflag @@ -2038,7 +2038,7 @@ k8s.io/component-base/metrics/testutil k8s.io/component-base/term k8s.io/component-base/version k8s.io/component-base/version/verflag -# k8s.io/component-helpers v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210603185452-2dfc46b23003 +# k8s.io/component-helpers v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210624185839-c6914a80ec2e ## explicit 
k8s.io/component-helpers/apimachinery/lease k8s.io/component-helpers/apps/poddisruptionbudget @@ -2048,16 +2048,16 @@ k8s.io/component-helpers/node/topology k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity k8s.io/component-helpers/storage/volume -# k8s.io/cri-api v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210603185452-2dfc46b23003 +# k8s.io/cri-api v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210624185839-c6914a80ec2e k8s.io/cri-api/pkg/apis k8s.io/cri-api/pkg/apis/runtime/v1alpha2 -# k8s.io/csi-translation-lib v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210603185452-2dfc46b23003 +# k8s.io/csi-translation-lib v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210624185839-c6914a80ec2e k8s.io/csi-translation-lib k8s.io/csi-translation-lib/plugins # k8s.io/klog/v2 v2.8.0 ## explicit k8s.io/klog/v2 -# k8s.io/kube-aggregator v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20210603185452-2dfc46b23003 +# k8s.io/kube-aggregator v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20210624185839-c6914a80ec2e k8s.io/kube-aggregator/pkg/apis/apiregistration k8s.io/kube-aggregator/pkg/apis/apiregistration/install k8s.io/kube-aggregator/pkg/apis/apiregistration/v1 @@ -2101,11 +2101,11 @@ k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson k8s.io/kube-openapi/pkg/validation/validate -# k8s.io/kube-proxy v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210603185452-2dfc46b23003 +# k8s.io/kube-proxy v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210624185839-c6914a80ec2e k8s.io/kube-proxy/config/v1alpha1 -# k8s.io/kube-scheduler v0.0.0 => 
github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210603185452-2dfc46b23003 +# k8s.io/kube-scheduler v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210624185839-c6914a80ec2e k8s.io/kube-scheduler/extender/v1 -# k8s.io/kubectl v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210603185452-2dfc46b23003 +# k8s.io/kubectl v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210624185839-c6914a80ec2e ## explicit k8s.io/kubectl/pkg/apps k8s.io/kubectl/pkg/cmd/util @@ -2130,7 +2130,7 @@ k8s.io/kubectl/pkg/util/storage k8s.io/kubectl/pkg/util/templates k8s.io/kubectl/pkg/util/term k8s.io/kubectl/pkg/validation -# k8s.io/kubelet v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210603185452-2dfc46b23003 +# k8s.io/kubelet v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210624185839-c6914a80ec2e ## explicit k8s.io/kubelet/config/v1alpha1 k8s.io/kubelet/config/v1beta1 @@ -2143,7 +2143,7 @@ k8s.io/kubelet/pkg/apis/pluginregistration/v1 k8s.io/kubelet/pkg/apis/podresources/v1 k8s.io/kubelet/pkg/apis/podresources/v1alpha1 k8s.io/kubelet/pkg/apis/stats/v1alpha1 -# k8s.io/kubernetes v1.21.1 => github.com/openshift/kubernetes v1.21.2-0.20210603185452-2dfc46b23003 +# k8s.io/kubernetes v1.21.1 => github.com/openshift/kubernetes v1.21.2-0.20210624185839-c6914a80ec2e ## explicit k8s.io/kubernetes/cmd/kube-apiserver/app k8s.io/kubernetes/cmd/kube-apiserver/app/options @@ -2172,6 +2172,7 @@ k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/cl k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/config k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/console k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration 
+k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/dns k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/features k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/image k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/network @@ -2610,6 +2611,7 @@ k8s.io/kubernetes/pkg/registry/discovery/endpointslice/storage k8s.io/kubernetes/pkg/registry/discovery/rest k8s.io/kubernetes/pkg/registry/events/rest k8s.io/kubernetes/pkg/registry/extensions/rest +k8s.io/kubernetes/pkg/registry/flowcontrol/ensurer k8s.io/kubernetes/pkg/registry/flowcontrol/flowschema k8s.io/kubernetes/pkg/registry/flowcontrol/flowschema/storage k8s.io/kubernetes/pkg/registry/flowcontrol/prioritylevelconfiguration @@ -2897,7 +2899,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear k8s.io/kubernetes/third_party/forked/gonum/graph/simple k8s.io/kubernetes/third_party/forked/gonum/graph/traverse -# k8s.io/legacy-cloud-providers v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210603185452-2dfc46b23003 +# k8s.io/legacy-cloud-providers v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210624185839-c6914a80ec2e ## explicit k8s.io/legacy-cloud-providers/aws k8s.io/legacy-cloud-providers/azure @@ -2941,7 +2943,7 @@ k8s.io/legacy-cloud-providers/openstack k8s.io/legacy-cloud-providers/vsphere k8s.io/legacy-cloud-providers/vsphere/vclib k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers -# k8s.io/metrics v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210603185452-2dfc46b23003 +# k8s.io/metrics v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210624185839-c6914a80ec2e k8s.io/metrics/pkg/apis/custom_metrics k8s.io/metrics/pkg/apis/custom_metrics/v1beta1 
k8s.io/metrics/pkg/apis/custom_metrics/v1beta2 @@ -2950,12 +2952,12 @@ k8s.io/metrics/pkg/apis/external_metrics/v1beta1 k8s.io/metrics/pkg/client/custom_metrics k8s.io/metrics/pkg/client/custom_metrics/scheme k8s.io/metrics/pkg/client/external_metrics -# k8s.io/mount-utils v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210603185452-2dfc46b23003 +# k8s.io/mount-utils v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210624185839-c6914a80ec2e k8s.io/mount-utils -# k8s.io/sample-apiserver v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210603185452-2dfc46b23003 +# k8s.io/sample-apiserver v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210624185839-c6914a80ec2e k8s.io/sample-apiserver/pkg/apis/wardle k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1 -# k8s.io/utils v0.0.0-20201110183641-67b214c5f920 +# k8s.io/utils v0.0.0-20210521133846-da695404a2bc ## explicit k8s.io/utils/buffer k8s.io/utils/clock @@ -3060,30 +3062,31 @@ sigs.k8s.io/structured-merge-diff/v4/value sigs.k8s.io/yaml # github.com/google/cadvisor => github.com/openshift/google-cadvisor v0.33.2-0.20210610135131-57b941c7657a # github.com/onsi/ginkgo => github.com/openshift/onsi-ginkgo v4.7.0-origin.0+incompatible -# k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/client-go => 
github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/kubernetes => github.com/openshift/kubernetes v1.21.2-0.20210603185452-2dfc46b23003 -# 
k8s.io/legacy-cloud-providers => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20210603185452-2dfc46b23003 -# k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20210603185452-2dfc46b23003 +# github.com/opencontainers/runc => github.com/openshift/opencontainers-runc v1.0.0-rc95.0.20210608002938-1f5126fe967e +# k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/code-generator => 
github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/kubernetes => github.com/openshift/kubernetes v1.21.2-0.20210624185839-c6914a80ec2e +# k8s.io/legacy-cloud-providers => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210624185839-c6914a80ec2e +# 
k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20210624185839-c6914a80ec2e +# k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20210624185839-c6914a80ec2e