diff --git a/go.mod b/go.mod index 6717aa10192a..73edc6031ed1 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( github.com/openshift/apiserver-library-go v0.0.0-20211209162547-8c11dbc46b6e github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3 github.com/openshift/client-go v0.0.0-20211209144617-7385dd6338e3 - github.com/openshift/library-go v0.0.0-20220315122757-21a67f25d837 + github.com/openshift/library-go v0.0.0-20220111125907-7f25b9c7ad22 github.com/pborman/uuid v1.2.0 github.com/pquerna/cachecontrol v0.0.0-20201205024021-ac21108117ac // indirect github.com/prometheus/client_golang v1.11.0 @@ -80,32 +80,32 @@ require ( replace ( github.com/google/cadvisor => github.com/google/cadvisor v0.43.0 github.com/onsi/ginkgo => github.com/openshift/onsi-ginkgo v4.7.0-origin.0+incompatible - k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/kubernetes => github.com/openshift/kubernetes v1.24.0-alpha.0.0.20220325133350-2a2851ce61f8 - k8s.io/legacy-cloud-providers => 
github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/pod-security-admission => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20220325133350-2a2851ce61f8 - k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20220325133350-2a2851ce61f8 + k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20220130045626-42a86de5afdd + k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20220130045626-42a86de5afdd + k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20220130045626-42a86de5afdd + k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20220130045626-42a86de5afdd + k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20220130045626-42a86de5afdd + k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20220130045626-42a86de5afdd + k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20220130045626-42a86de5afdd + k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20220130045626-42a86de5afdd + k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20220130045626-42a86de5afdd + k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20220130045626-42a86de5afdd + k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20220130045626-42a86de5afdd + k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20220130045626-42a86de5afdd + k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20220130045626-42a86de5afdd + k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20220130045626-42a86de5afdd + k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20220130045626-42a86de5afdd + k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20220130045626-42a86de5afdd + k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20220130045626-42a86de5afdd + k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20220130045626-42a86de5afdd + k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20220130045626-42a86de5afdd + k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20220130045626-42a86de5afdd + k8s.io/kubernetes => github.com/openshift/kubernetes v1.22.2-0.20220130045626-42a86de5afdd + k8s.io/legacy-cloud-providers => 
github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20220130045626-42a86de5afdd + k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20220130045626-42a86de5afdd + k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20220130045626-42a86de5afdd + k8s.io/pod-security-admission => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20220130045626-42a86de5afdd + k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20220130045626-42a86de5afdd + k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20220130045626-42a86de5afdd + k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20220130045626-42a86de5afdd ) diff --git a/go.sum b/go.sum index 141e51eef6d9..a02445c5b60f 100644 --- a/go.sum +++ b/go.sum @@ -677,58 +677,58 @@ github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3 h1:65 github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/openshift/client-go v0.0.0-20211209144617-7385dd6338e3 h1:SG1aqwleU6bGD0X4mhkTNupjVnByMYYuW4XbnCPavQU= github.com/openshift/client-go v0.0.0-20211209144617-7385dd6338e3/go.mod h1:cwhyki5lqBmrT0m8Im+9I7PGFaraOzcYPtEz93RcsGY= -github.com/openshift/kubernetes v1.24.0-alpha.0.0.20220325133350-2a2851ce61f8 h1:FJ88JeNDwuqoxDPntl/113qza1UgsviLAmrSyGYoLeQ= -github.com/openshift/kubernetes v1.24.0-alpha.0.0.20220325133350-2a2851ce61f8/go.mod h1:Enkd3wGBZtthcjGYKDRS06yQOfZCEfaov7SFeiazEnY= -github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20220325133350-2a2851ce61f8 h1:IKxT5+PulyAqgs71XqBEVX/HuM2CPkKHebM6YmeThDI= -github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:1LnqeLh1GMPIkKYxudCAHbZAGrxIuQwANtqoYpjsRME= -github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20220325133350-2a2851ce61f8 h1:AvlE2HVyACXymTiPrj/dvc+Hdmooo6NZLedh9+XN5us= -github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:gW566YvfJ4K5Tsf6spaDaZKxhpUCH6v2Iw3ZZOjBttA= -github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20220325133350-2a2851ce61f8 h1:WAbnxzuXfu0UdLPwXUI1qVtoMIxY7b440V9dj39uxmM= -github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:fwrOsHdh6O6ak7ltlXTTCnqmgjEXmUqqzjkWnBJXncg= -github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20220325133350-2a2851ce61f8 h1:J0cJaa3MKbbCfgKM5XTGYCKYcl3ptWNd6IMclI1yeU0= -github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:DOpxgxp7TznJFdHnWoVMtfcAY3bECdjvSQ0yRRcMu+g= -github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20220325133350-2a2851ce61f8 h1:nafacLa/PsSaP8CEQPW0PfN1ZDWTozD1KAmlytVldJc= -github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:JHRa8KgQDXgogFWDhE8mvZUmlecewxrTeW31SRil8Cs= -github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20220325133350-2a2851ce61f8 h1:UDvJYWcyLtkmfwzj/Yes8DnqdGGZEFoaGtaM6gM2exw= -github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:xt+8/diFeviyHB+htvW0xSgvQSDKditw7v2b37YyqhE= 
-github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20220325133350-2a2851ce61f8 h1:bEI31WYFvJnRX/GW/tpfdfQvAVsiVbWlPNzSj1s8liU= -github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:TkMkKlXSDqvc+S7l29IyJhBm+z54By1rGYVXaW2uWo8= -github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20220325133350-2a2851ce61f8 h1:wmmRG1cv4PbrQWOliGZABWJf2iWmAM705yMjIU8QqgI= -github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:aH2+dreOtk3mXPPv3hJumE9ldPh2B6w+kuzuS1vN3IA= -github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:2B7z147SwDpjBJrVJill7yqmAEeP0n3o20y3JdGJvjY= -github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20220325133350-2a2851ce61f8 h1:EIX4GPgtXwvhh5u3B9OTfmsUsl6TsPwyO2B9ymsT3lI= -github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:RN2o27fWKKIJMD2OtFso+DMQwG53FVIBjk4umQdJWF8= -github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20220325133350-2a2851ce61f8 h1:KbOuuhu/ZyUqVaxbI67he8lO8miX7frEAMifyHkEZ/A= -github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:rMNCwGaLyQQvoeNNhUnVMVPNqsXNiSQ9XLbuZ6lwNoA= -github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:lQ7O8g/3qZhZ1Y+o29PWzlItuFOHjg05IuwO2TxJEYQ= -github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20220325133350-2a2851ce61f8 h1:0U3igeXnOJMQkFyDQZk5djoF7SzO3osswPHS+YKHIjc= -github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:Qls61FIA7GOKCWlPHMEWyz4zm6loDfdUyMdAss5SJe4= -github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20220325133350-2a2851ce61f8 h1:CLzAgUCljJ/niJ7e6fuhHKLjRySQ9AvH0Kv9vgp/XHI= -github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:0QDwleGA6GqqGSc8t5mqd6xh22qZx7g/n+OkpcGoivw= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20220325133350-2a2851ce61f8 h1:3kV7kg4Z7Az0Hi06/zG3RY635cz1rwI15eKAcBgiPaM= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:MO0bXdWNVG7AUIkto5NRQk17Q6mXujAhi9ASaVSg1fk= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:0DC8pZTZx9wTeDa0D021XwtBS528LDiPRMYUZbtruVg= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20220325133350-2a2851ce61f8 h1:mtnyGeocBgBdc0dYRnLuekpzAV7AB2n4Sk2d4xBB+YM= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:MK94azsNnhgzx4CycaU9cvRJi504gmHe+pn7P/Pa4Ss= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20220325133350-2a2851ce61f8 h1:hF+QfIBK/Am2LLWpfM4btTgsCXoHBZaWu3jjIa6A3IQ= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:OjoVYhh5XhHtvC1XCyh1a0uW1nyeReXeyQeTjUBtimM= -github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20220325133350-2a2851ce61f8 h1:T1sddI3mwaRPy/1VtCpQ//GFpKMn+Jb1CmU5WDptO20= -github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:T4bIg8Fni/bo5/3Z08pSu46FOC4e3P4FNoDkN5LI3cU= 
-github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20220325133350-2a2851ce61f8 h1:GLfhAgrlSiq16TBxGWAQ48jOQvPIc6XIIxQX9wRt2AQ= -github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:UhzYnFVXk8zlbWYEEDc8NW1Q4u98ArGI/Hqg2XqaBso= -github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20220325133350-2a2851ce61f8 h1:lbDN3iIEgf8+zcW8zobFc+0Lul7KRQ1ORbBRRMQp1ZQ= -github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:ze31ZcENsPXfUiXlAxhPx4CCQ6BYuy1fcXarb0sU2y8= -github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20220325133350-2a2851ce61f8 h1:AyKuzQNfXzIL+ziQRBYc8+ShgVTt1AY+SHqeuL8JhjE= -github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:rWP94CvU4sLrhD9xenELrHPV0mDbpT9q6PltC6BorH0= -github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20220325133350-2a2851ce61f8 h1:JDTB1ntjwo2W9qPAO+S9IhTFXvyJs75uxfEqTLM6vU4= -github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:TIS/S5r7P8BI3d0utDzk89Y5V9BKAwado5sdPj24biM= -github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20220325133350-2a2851ce61f8 h1:CvmwJvKEk4gh/TwpPM0FGubXkNcMQ660MmU9H/rA64g= -github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:zCxo6wb4XKXYY2OUThB4vzsqFabvOfKYo/lvgEkfCYk= -github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20220325133350-2a2851ce61f8 h1:640rO046/gtzW7KwFPliCB5qGoADc9anSTkkji8gkjs= -github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20220325133350-2a2851ce61f8/go.mod h1:SzYUEBny/tNNzkgPDf//V6g3lw15pIlGhCeXNoQkQnA= +github.com/openshift/kubernetes v1.22.2-0.20220130045626-42a86de5afdd h1:pDuiW3NTtwasoL140IUcWV1N7kMhygeGxTSbOAh6UPI= +github.com/openshift/kubernetes v1.22.2-0.20220130045626-42a86de5afdd/go.mod h1:h/kCv9M4suLZy3aiBokgfyWTVNamuMSnFzA/LWULR8g= +github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20220130045626-42a86de5afdd h1:ICUGdECC+2f2o+GnY0R2XmJpy3npIu7ONlVQ7RLy+D4= +github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20220130045626-42a86de5afdd/go.mod h1:VlMmczd3Czr+Lmbk/y2iRYQk3C3YquBVF/qiXr8T+g8= +github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20220130045626-42a86de5afdd h1:0kXUUGayOC8PrmAS4y8/sXy4pU6r3lxjvq74uTz4p70= +github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20220130045626-42a86de5afdd/go.mod h1:cwc9jAQTW2dZ92q1nZeFUVryS3MXxQrIVE1rta0IALI= +github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20220130045626-42a86de5afdd h1:0gKx2e9pRWSubOOUsjPXVITaUvwzqaHN9VXknJNWENI= +github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20220130045626-42a86de5afdd/go.mod h1:fwrOsHdh6O6ak7ltlXTTCnqmgjEXmUqqzjkWnBJXncg= +github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20220130045626-42a86de5afdd h1:LOb4vlDMvisiRZRm9NkdkpxnNNXEi80zIiz9eDcXH6Y= +github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20220130045626-42a86de5afdd/go.mod h1:GpPl+5eOx4wqCMGgDL5VXUJQ0xM3mJRY/8fxPpjF2p0= +github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20220130045626-42a86de5afdd h1:AFYAyLXTlNafIiQsPuuVObUFoZsfLd5zTdD82qjimGM= +github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20220130045626-42a86de5afdd/go.mod 
h1:X8fr5/mAakm1i7ODIacL9Ny+Lft7Yk0rRjS9Ss+4hVA= +github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20220130045626-42a86de5afdd h1:DvOAmK6XXElDcP575Tu6s+HW8mwac2TcJO4MQo1nf0g= +github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20220130045626-42a86de5afdd/go.mod h1:xt+8/diFeviyHB+htvW0xSgvQSDKditw7v2b37YyqhE= +github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20220130045626-42a86de5afdd h1:sGdjq63z0UiWq68YQ7gCjgDiVLTi7i83XE03ZHbvcwY= +github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20220130045626-42a86de5afdd/go.mod h1:fEOscWQneamL/AO4j2xUscTqXd15I5erjKNIONWviq8= +github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20220130045626-42a86de5afdd h1:vEF58u7LhdKQ5ODw0lioD1Uhb4o9DepTE26gcz6PkkM= +github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20220130045626-42a86de5afdd/go.mod h1:aH2+dreOtk3mXPPv3hJumE9ldPh2B6w+kuzuS1vN3IA= +github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20220130045626-42a86de5afdd/go.mod h1:2B7z147SwDpjBJrVJill7yqmAEeP0n3o20y3JdGJvjY= +github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20220130045626-42a86de5afdd h1:2rJhR2uZjP2GhQnk92zA14cUy6fkC+kHcp6VZv9zo+U= +github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20220130045626-42a86de5afdd/go.mod h1:RN2o27fWKKIJMD2OtFso+DMQwG53FVIBjk4umQdJWF8= +github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20220130045626-42a86de5afdd h1:tSFsgMC0p8+1sFG2lP1uFc4nS7INELzFEXTY8dVCHaw= +github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20220130045626-42a86de5afdd/go.mod h1:rMNCwGaLyQQvoeNNhUnVMVPNqsXNiSQ9XLbuZ6lwNoA= +github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20220130045626-42a86de5afdd/go.mod h1:io0Cm9lvn0iwycYsTEaMoEY/AiyWx2vRTIQbm5Ga0xw= +github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20220130045626-42a86de5afdd h1:cjQw1JfID3Ly+3shh3JbB2ITQUsbeT+iG1jd5ciOi6M= +github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20220130045626-42a86de5afdd/go.mod h1:Qls61FIA7GOKCWlPHMEWyz4zm6loDfdUyMdAss5SJe4= +github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20220130045626-42a86de5afdd h1:9Iwlc/y6ZUhxs0QUMNxZ53FHMesW+GgJtO26lP+Y1EU= +github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20220130045626-42a86de5afdd/go.mod h1:0QDwleGA6GqqGSc8t5mqd6xh22qZx7g/n+OkpcGoivw= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20220130045626-42a86de5afdd h1:AotChQB8k2T/LjZQvNuFuoWXJrTa0MmYd2pbNrQPgeU= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20220130045626-42a86de5afdd/go.mod h1:QLbl1/OlmkPgQ+yglLY9QM27tr8xmj1KwTUz0x32goc= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20220130045626-42a86de5afdd/go.mod h1:4ND8ycj04rDamgkeD/OUD7PXR3ht/wynel1AXtPUAfA= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20220130045626-42a86de5afdd h1:jZBluKROdHt4dOJA1Aitd7xnwQEfTN8I7i6ZqSNaJ2Y= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20220130045626-42a86de5afdd/go.mod h1:MK94azsNnhgzx4CycaU9cvRJi504gmHe+pn7P/Pa4Ss= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20220130045626-42a86de5afdd h1:lJICrlKC8SmYM6+dtoRIKFPaH3SUHZMCcxTAqt6o5kM= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20220130045626-42a86de5afdd/go.mod 
h1:OjoVYhh5XhHtvC1XCyh1a0uW1nyeReXeyQeTjUBtimM= +github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20220130045626-42a86de5afdd h1:qVspedwTnPT9GctGLj/j7TZ8RBSk/iyDxjHEHB6dIbw= +github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20220130045626-42a86de5afdd/go.mod h1:vg+Oq6TLSAGBx3M0mQdPiMqbTfEYCVtfUFoQD8UIkmw= +github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20220130045626-42a86de5afdd h1:qkWZxjsIiVEIvWokR+p5Y0OJoRioFqvdNGLyMawXso0= +github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20220130045626-42a86de5afdd/go.mod h1:UhzYnFVXk8zlbWYEEDc8NW1Q4u98ArGI/Hqg2XqaBso= +github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20220130045626-42a86de5afdd h1:9k+jJ+VnNrCWhgFlGZnWUdIUGa92esnpxSsfESqv8Ls= +github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20220130045626-42a86de5afdd/go.mod h1:3Tqbi0TN3ZVEsMUD5DJNKC/fjpFAUwftg0KkZBcHXBg= +github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20220130045626-42a86de5afdd h1:ybGDHKPkRG9wYJFa/5cQKg17svhWinwDSMfAspKeybM= +github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20220130045626-42a86de5afdd/go.mod h1:rWP94CvU4sLrhD9xenELrHPV0mDbpT9q6PltC6BorH0= +github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20220130045626-42a86de5afdd h1:Kb//LQG8AQN4UtjbwOkpjV9wksPnO1V7kHPhxKJVH0g= +github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20220130045626-42a86de5afdd/go.mod h1:TIS/S5r7P8BI3d0utDzk89Y5V9BKAwado5sdPj24biM= +github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20220130045626-42a86de5afdd h1:oMh+EMDsU45RsQU1wLV2WGxNPQfI6u5gNeFP7h5fi6M= +github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20220130045626-42a86de5afdd/go.mod h1:nGjRbo4v5SWP3RYgUVbY6ZneIo3NxidwX5Dn1Hh74Bk= +github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20220130045626-42a86de5afdd h1:OyBOadTwdP1XYKW9uIm4GUUjzwXXuUWDxZUBQWPMTis= +github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20220130045626-42a86de5afdd/go.mod h1:XCqfYC96Tzg9Ficp7heLhX4njFehr/qKOdP6Fr3HWSI= github.com/openshift/library-go v0.0.0-20211209153216-ed9bc958bd8a/go.mod h1:M/Gi/GUUrMdSS07nrYtTiK43J6/VUAyk/+IfN4ZqUY4= -github.com/openshift/library-go v0.0.0-20220315122757-21a67f25d837 h1:y5e1zbo4/r369GdviW2VSPA+yprvUrW/zlK03XSWWdA= -github.com/openshift/library-go v0.0.0-20220315122757-21a67f25d837/go.mod h1:6AmNM4N4nHftckybV/U7bQW+5AvK5TW81ndSI6KEidw= +github.com/openshift/library-go v0.0.0-20220111125907-7f25b9c7ad22 h1:yi4NoYekLpqHqatGMwashmyxui0mI3AcoWMPozuCZfA= +github.com/openshift/library-go v0.0.0-20220111125907-7f25b9c7ad22/go.mod h1:4UQ9snU1vg53fyTpHQw3vLPiAxI8ub5xrc+y8KPQQFs= github.com/openshift/onsi-ginkgo v4.7.0-origin.0+incompatible h1:6XSBotNi58b4MwVV4F9o/jd4BaQd+uJyz+s5TR0/ot8= github.com/openshift/onsi-ginkgo v4.7.0-origin.0+incompatible/go.mod h1:azqkkH4Vpp9A579CC26hicol/wViXag9rOwElif6v9E= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= diff --git a/test/extended/util/annotate/generated/zz_generated.annotations.go b/test/extended/util/annotate/generated/zz_generated.annotations.go index 79f9a66dff94..ee96c28d9a6f 100644 --- a/test/extended/util/annotate/generated/zz_generated.annotations.go +++ b/test/extended/util/annotate/generated/zz_generated.annotations.go @@ -453,8 +453,6 @@ var annotations = map[string]string{ "[Top Level] [sig-apps] Job should run a job to 
completion when tasks succeed": "should run a job to completion when tasks succeed [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-apps] Job should run a job to completion with CPU requests [Serial]": "should run a job to completion with CPU requests [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-apps] ReplicaSet Replace and Patch tests [Conformance]": "Replace and Patch tests [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", "[Top Level] [sig-apps] ReplicaSet Replicaset should have a working scale subresource [Conformance]": "Replicaset should have a working scale subresource [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", @@ -1969,13 +1967,13 @@ var annotations = map[string]string{ "[Top Level] [sig-network] ClusterDns [Feature:Example] should create pod that uses dns": "should create pod that uses dns [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-network] Conntrack should be able to preserve UDP traffic when initial unready endpoints get ready": "should be able to preserve UDP traffic when initial unready endpoints get ready [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-network] Conntrack should be able to preserve UDP traffic when initial unready endpoints get ready": "should be able to preserve UDP traffic when initial unready endpoints get ready [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a ClusterIP service": "should be able to preserve UDP traffic when server pod cycles for a ClusterIP service [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a NodePort service": "should be able to preserve UDP traffic when server pod cycles for a NodePort service [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] Conntrack should drop INVALID conntrack entries [Privileged]": "should drop INVALID conntrack entries [Privileged] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-network] Conntrack should drop INVALID conntrack entries [Privileged]": "should drop INVALID conntrack entries [Privileged] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-network] DNS configMap nameserver Change stubDomain should be able to change stubDomain configuration [Slow][Serial]": "should be able to change stubDomain configuration [Slow][Serial] [Disabled:SpecialConfig] [Suite:k8s]", @@ -2097,7 +2095,7 @@ var annotations = map[string]string{ "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should allow egress access on one named port [Feature:NetworkPolicy]": "should allow egress access on one named port [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:k8s]", - "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should allow egress access to server in CIDR block [Feature:NetworkPolicy]": "should allow egress access to server in CIDR block [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should allow egress access to server in CIDR block [Feature:NetworkPolicy]": "should allow egress access to server in CIDR block [Feature:NetworkPolicy] [Disabled:Broken] 
[Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:k8s]", "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should allow ingress access from namespace on one named port [Feature:NetworkPolicy]": "should allow ingress access from namespace on one named port [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:k8s]", @@ -2107,27 +2105,27 @@ var annotations = map[string]string{ "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should allow ingress access on one named port [Feature:NetworkPolicy]": "should allow ingress access on one named port [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:k8s]", - "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should deny egress from all pods in a namespace [Feature:NetworkPolicy] ": "should deny egress from all pods in a namespace [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should deny egress from all pods in a namespace [Feature:NetworkPolicy] ": "should deny egress from all pods in a namespace [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:k8s]", - "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should deny egress from pods based on PodSelector [Feature:NetworkPolicy] ": "should deny egress from pods based on PodSelector [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should deny egress from pods based on PodSelector [Feature:NetworkPolicy] ": "should deny egress from pods based on PodSelector [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:k8s]", "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should deny ingress access to updated pod [Feature:NetworkPolicy]": "should deny ingress access to updated pod [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should deny ingress from pods on other namespaces [Feature:NetworkPolicy]": "should deny ingress from pods on other namespaces [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", - "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": "should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": "should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector 
[Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:k8s]", - "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy]": "should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy]": "should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:k8s]", "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should enforce ingress policy allowing any port traffic to a server on a specific protocol [Feature:NetworkPolicy] [Feature:UDP]": "should enforce ingress policy allowing any port traffic to a server on a specific protocol [Feature:NetworkPolicy] [Feature:UDP] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", - "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy]": "should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy]": "should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:k8s]", "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy]": "should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]": "should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", - "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy]": "should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy]": "should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector 
[Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:k8s]", "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should enforce policy based on Multiple PodSelectors and NamespaceSelectors [Feature:NetworkPolicy]": "should enforce policy based on Multiple PodSelectors and NamespaceSelectors [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", @@ -2159,7 +2157,7 @@ var annotations = map[string]string{ "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should enforce updated policy [Feature:NetworkPolicy]": "should enforce updated policy [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", - "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy]": "should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy]": "should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:k8s]", "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should not allow access by TCP when a policy specifies only UDP [Feature:NetworkPolicy]": "should not allow access by TCP when a policy specifies only UDP [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", @@ -2167,17 +2165,17 @@ var annotations = map[string]string{ "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should properly isolate pods that are selected by a policy allowing SCTP, even if the plugin doesn't support SCTP [Feature:NetworkPolicy]": "should properly isolate pods that are selected by a policy allowing SCTP, even if the plugin doesn't support SCTP [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", - "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should stop enforcing policies after they are deleted [Feature:NetworkPolicy]": "should stop enforcing policies after they are deleted [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should stop enforcing policies after they are deleted [Feature:NetworkPolicy]": "should stop enforcing policies after they are deleted [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:k8s]", - "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should support a 'default-deny-all' policy [Feature:NetworkPolicy]": "should support a 'default-deny-all' policy [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should support a 'default-deny-all' policy [Feature:NetworkPolicy]": "should support a 'default-deny-all' policy [Feature:NetworkPolicy] [Disabled:Broken] 
[Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:k8s]", "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]": "should support a 'default-deny-ingress' policy [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should support allow-all policy [Feature:NetworkPolicy]": "should support allow-all policy [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", - "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should support denying of egress traffic on the client side (even if the server explicitly allows this traffic) [Feature:NetworkPolicy]": "should support denying of egress traffic on the client side (even if the server explicitly allows this traffic) [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should support denying of egress traffic on the client side (even if the server explicitly allows this traffic) [Feature:NetworkPolicy]": "should support denying of egress traffic on the client side (even if the server explicitly allows this traffic) [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:k8s]", - "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should work with Ingress, Egress specified together [Feature:NetworkPolicy]": "should work with Ingress, Egress specified together [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] Netpol NetworkPolicy between server and client should work with Ingress, Egress specified together [Feature:NetworkPolicy]": "should work with Ingress, Egress specified together [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:k8s]", "[Top Level] [sig-network] Netpol [Feature:SCTPConnectivity][LinuxOnly][Disruptive] NetworkPolicy between server and client using SCTP should enforce policy based on Ports [Feature:NetworkPolicy]": "should enforce policy based on Ports [Feature:NetworkPolicy] [Disabled:Broken] [Serial] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", @@ -2201,7 +2199,7 @@ var annotations = map[string]string{ "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should allow egress access on one named port [Feature:NetworkPolicy]": "should allow egress access on one named port [Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should allow egress access to server in CIDR block [Feature:NetworkPolicy]": "should allow egress access to server in CIDR block [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should allow egress access to server in CIDR block [Feature:NetworkPolicy]": "should allow egress access to server in CIDR block 
[Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should allow ingress access from namespace on one named port [Feature:NetworkPolicy]": "should allow ingress access from namespace on one named port [Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -2213,17 +2211,17 @@ var annotations = map[string]string{ "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should deny ingress access to updated pod [Feature:NetworkPolicy]": "should deny ingress access to updated pod [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": "should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": "should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy]": "should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy]": "should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy]": "should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy]": "should enforce multiple egress policies with egress allow-all policy taking precedence 
[Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy]": "should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]": "should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy]": "should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy]": "should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce policy based on NamespaceSelector with MatchExpressions[Feature:NetworkPolicy]": "should enforce policy based on NamespaceSelector with MatchExpressions[Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -2243,19 +2241,19 @@ var annotations = map[string]string{ "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce updated policy [Feature:NetworkPolicy]": "should enforce updated policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy]": "should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy]": "should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top 
Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should not allow access by TCP when a policy specifies only SCTP [Feature:NetworkPolicy]": "should not allow access by TCP when a policy specifies only SCTP [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should stop enforcing policies after they are deleted [Feature:NetworkPolicy]": "should stop enforcing policies after they are deleted [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should stop enforcing policies after they are deleted [Feature:NetworkPolicy]": "should stop enforcing policies after they are deleted [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should support a 'default-deny-all' policy [Feature:NetworkPolicy]": "should support a 'default-deny-all' policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should support a 'default-deny-all' policy [Feature:NetworkPolicy]": "should support a 'default-deny-all' policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]": "should support a 'default-deny-ingress' policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should support allow-all policy [Feature:NetworkPolicy]": "should support allow-all policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should work with Ingress,Egress specified together [Feature:NetworkPolicy]": "should work with Ingress,Egress specified together [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should work with Ingress,Egress specified together [Feature:NetworkPolicy]": "should work with Ingress,Egress specified together [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] Networking Granular Checks: Pods should function for intra-pod communication: http [NodeConformance] [Conformance]": "should function for intra-pod communication: http [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", @@ -2681,8 +2679,6 @@ var annotations = map[string]string{ "[Top Level] 
[sig-node] Pods Extended Delete Grace Period should be submitted and removed": "should be submitted and removed [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-node] Pods Extended Pod Container Status should never report container start when an init container fails": "should never report container start when an init container fails [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-node] Pods Extended Pod Container Status should never report success for a pending container": "should never report success for a pending container [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-node] Pods Extended Pod Container lifecycle should not create extra sandbox if all containers are done": "should not create extra sandbox if all containers are done [Suite:openshift/conformance/parallel] [Suite:k8s]", diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/httprequest/httprequest.go b/vendor/github.com/openshift/library-go/pkg/apiserver/httprequest/httprequest.go new file mode 100644 index 000000000000..470ed3419a2c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/httprequest/httprequest.go @@ -0,0 +1,129 @@ +package httprequest + +import ( + "net" + "net/http" + "strings" + + "github.com/munnerz/goautoneg" +) + +// PrefersHTML returns true if the request was made by something that looks like a browser, or can receive HTML +func PrefersHTML(req *http.Request) bool { + accepts := goautoneg.ParseAccept(req.Header.Get("Accept")) + acceptsHTML := false + acceptsJSON := false + for _, accept := range accepts { + if accept.Type == "text" && accept.SubType == "html" { + acceptsHTML = true + } else if accept.Type == "application" && accept.SubType == "json" { + acceptsJSON = true + } + } + + // If HTML is accepted, return true + if acceptsHTML { + return true + } + + // If JSON was specifically requested, return false + // This gives browsers a way to make requests and add an "Accept" header to request JSON + if acceptsJSON { + return false + } + + // In Intranet/Compatibility mode, IE sends an Accept header that does not contain "text/html". + if strings.HasPrefix(req.UserAgent(), "Mozilla") { + return true + } + + return false +} + +// SchemeHost returns the scheme and host used to make this request. +// Suitable for use to compute scheme/host in returned 302 redirect Location. +// Note the returned host is not normalized, and may or may not contain a port. 
+// Returned values are based on the following information: +// +// Host: +// * X-Forwarded-Host/X-Forwarded-Port headers +// * Host field on the request (parsed from Host header) +// * Host in the request's URL (parsed from Request-Line) +// +// Scheme: +// * X-Forwarded-Proto header +// * Existence of TLS information on the request implies https +// * Scheme in the request's URL (parsed from Request-Line) +// * Port (if included in calculated Host value, 443 implies https) +// * Otherwise, defaults to "http" +func SchemeHost(req *http.Request) (string /*scheme*/, string /*host*/) { + forwarded := func(attr string) string { + // Get the X-Forwarded- value + value := req.Header.Get("X-Forwarded-" + attr) + // Take the first comma-separated value, if multiple exist + value = strings.SplitN(value, ",", 2)[0] + // Trim whitespace + return strings.TrimSpace(value) + } + + hasExplicitHost := func(h string) bool { + _, _, err := net.SplitHostPort(h) + return err == nil + } + + forwardedHost := forwarded("Host") + host := "" + hostHadExplicitPort := false + switch { + case len(forwardedHost) > 0: + host = forwardedHost + hostHadExplicitPort = hasExplicitHost(host) + + // If both X-Forwarded-Host and X-Forwarded-Port are sent, use the explicit port info + if forwardedPort := forwarded("Port"); len(forwardedPort) > 0 { + if h, _, err := net.SplitHostPort(forwardedHost); err == nil { + host = net.JoinHostPort(h, forwardedPort) + } else { + host = net.JoinHostPort(forwardedHost, forwardedPort) + } + } + + case len(req.Host) > 0: + host = req.Host + hostHadExplicitPort = hasExplicitHost(host) + + case len(req.URL.Host) > 0: + host = req.URL.Host + hostHadExplicitPort = hasExplicitHost(host) + } + + port := "" + if _, p, err := net.SplitHostPort(host); err == nil { + port = p + } + + forwardedProto := forwarded("Proto") + scheme := "" + switch { + case len(forwardedProto) > 0: + scheme = forwardedProto + case req.TLS != nil: + scheme = "https" + case len(req.URL.Scheme) > 0: + scheme = req.URL.Scheme + case port == "443": + scheme = "https" + default: + scheme = "http" + } + + if !hostHadExplicitPort { + if (scheme == "https" && port == "443") || (scheme == "http" && port == "80") { + if hostWithoutPort, _, err := net.SplitHostPort(host); err == nil { + host = hostWithoutPort + } + } + } + + return scheme, host +} diff --git a/vendor/github.com/openshift/library-go/pkg/client/openshiftrestmapper/hardcoded_restmapper.go b/vendor/github.com/openshift/library-go/pkg/client/openshiftrestmapper/hardcoded_restmapper.go deleted file mode 100644 index a440a04a1b2d..000000000000 --- a/vendor/github.com/openshift/library-go/pkg/client/openshiftrestmapper/hardcoded_restmapper.go +++ /dev/null @@ -1,229 +0,0 @@ -package openshiftrestmapper - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// defaultRESTMappings contains enough RESTMappings to have enough of the kube-controller-manager succeed when running -// against a kube-apiserver that cannot reach aggregated APIs to do a full mapping. This happens when the OwnerReferencesPermissionEnforcement -// admission plugin runs to confirm permissions. Don't add things just because you don't want to fail. These are here so that -// we can start enough back up to get the rest of the system working correctly. 
-var defaultRESTMappings = []meta.RESTMapping{ - { - GroupVersionKind: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ReplicationController"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "replicationcontrollers"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Secret"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ServiceAccount"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "serviceaccounts"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ControllerRevision"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "controllerrevisions"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "DaemonSet"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "daemonsets"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "StatefulSet"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "statefulsets"}, - }, - // This is created so that cluster-bootstrap can always map securitycontextconstraints since the CRD doesn't have - // discovery. Discovery is delegated to the openshift-apiserver which doesn't not exist early in the bootstrapping - // phase. This leads to SCC related failures that we don't need to have. - { - GroupVersionKind: schema.GroupVersionKind{Group: "security.openshift.io", Version: "v1", Kind: "SecurityContextConstraints"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "security.openshift.io", Version: "v1", Resource: "securitycontextconstraints"}, - }, - // This is created so that cluster-bootstrap can always map customresourcedefinitions, RBAC, machine resources so that CRDs and - // permissions are always created quickly. We observed discovery not including these on AWS OVN installations and - // the lack of CRDs and permissions blocked additional aspects of cluster bootstrapping. 
- { - GroupVersionKind: schema.GroupVersionKind{Group: "apiextensions.k8s.io", Version: "v1", Kind: "CustomResourceDefinition"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "apiextensions.k8s.io", Version: "v1", Resource: "customresourcedefinitions"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRole"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRoleBinding"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterrolebindings"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "Role"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "roles"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "RoleBinding"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "rolebindings"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "machine.openshift.io", Version: "v1beta1", Kind: "Machine"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "machine.openshift.io", Version: "v1beta1", Resource: "machines"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "machine.openshift.io", Version: "v1beta1", Kind: "MachineSet"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "machine.openshift.io", Version: "v1beta1", Resource: "machinesets"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "machineconfiguration.openshift.io", Version: "v1", Kind: "MachineConfig"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "machineconfiguration.openshift.io", Version: "v1", Resource: "machineconfigs"}, - }, - // This is here so cluster-bootstrap can always create the config instances that are used to drive our operators to avoid the - // excessive bootstrap wait that prevents installer from completing on AWS OVN - { - GroupVersionKind: schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "DNS"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "dnses"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Infrastructure"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "infrastructures"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Network"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "networks"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Ingress"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "ingresses"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", 
Kind: "Proxy"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "proxies"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Scheduler"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "schedulers"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "ClusterVersion"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "clusterversions"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "CloudCredential"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "cloudcredentials"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "monitoring.coreos.com", Version: "v1", Kind: "ServiceMonitor"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "servicemonitors"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "batch", Version: "v1", Kind: "Job"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "batch", Version: "v1", Resource: "jobs"}, - }, -} - -func NewOpenShiftHardcodedRESTMapper(delegate meta.RESTMapper) meta.RESTMapper { - ret := HardCodedFirstRESTMapper{ - Mapping: map[schema.GroupVersionKind]meta.RESTMapping{}, - RESTMapper: delegate, - } - for i := range defaultRESTMappings { - curr := defaultRESTMappings[i] - ret.Mapping[curr.GroupVersionKind] = curr - } - return ret -} - -// HardCodedFirstRESTMapper is a RESTMapper that will look for hardcoded mappings first, then delegate. -// This is done in service to `OwnerReferencesPermissionEnforcement` and for cluster-bootstrap. -type HardCodedFirstRESTMapper struct { - Mapping map[schema.GroupVersionKind]meta.RESTMapping - meta.RESTMapper -} - -var _ meta.RESTMapper = HardCodedFirstRESTMapper{} - -func (m HardCodedFirstRESTMapper) String() string { - return fmt.Sprintf("HardCodedRESTMapper{\n\t%v\n%v\n}", m.Mapping, m.RESTMapper) -} - -// RESTMapping is the only function called today. The first hit openshiftrestmapper ought to make this work right. OwnerReferencesPermissionEnforcement -// only ever calls with one version. -func (m HardCodedFirstRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { - // not exactly one version, delegate - if len(versions) != 1 { - return m.RESTMapper.RESTMapping(gk, versions...) - } - gvk := gk.WithVersion(versions[0]) - - single, ok := m.Mapping[gvk] - // not handled, delegate - if !ok { - return m.RESTMapper.RESTMapping(gk, versions...) - } - - return &single, nil -} - -// RESTMapping is the only function called today. The firsthit openshiftrestmapper ought to make this work right. OwnerReferencesPermissionEnforcement -// only ever calls with one version. -func (m HardCodedFirstRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { - // not exactly one version, delegate - if len(versions) != 1 { - return m.RESTMapper.RESTMappings(gk, versions...) - } - gvk := gk.WithVersion(versions[0]) - - single, ok := m.Mapping[gvk] - // not handled, delegate - if !ok { - return m.RESTMapper.RESTMappings(gk, versions...) 
- } - - return []*meta.RESTMapping{&single}, nil -} diff --git a/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go b/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go index edbd10fb1d3b..5cec68257b30 100644 --- a/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go +++ b/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go @@ -22,15 +22,8 @@ import ( configv1 "github.com/openshift/api/config/v1" ) -// ToLeaderElectionWithConfigmapLease returns a "configmapsleases" based leader -// election config that you just need to fill in the Callback for. -// It is compatible with a "configmaps" based leader election and -// paves the way toward using "leases" based leader election. -// See https://github.com/kubernetes/kubernetes/issues/107454 for -// details on how to migrate to "leases" leader election. -// Don't forget the callbacks! -// TODO: In the next version we should switch to using "leases" -func ToLeaderElectionWithConfigmapLease(clientConfig *rest.Config, config configv1.LeaderElection, component, identity string) (leaderelection.LeaderElectionConfig, error) { +// ToConfigMapLeaderElection returns a leader election config that you just need to fill in the Callback for. Don't forget the callbacks! +func ToConfigMapLeaderElection(clientConfig *rest.Config, config configv1.LeaderElection, component, identity string) (leaderelection.LeaderElectionConfig, error) { kubeClient, err := kubernetes.NewForConfig(clientConfig) if err != nil { return leaderelection.LeaderElectionConfig{}, err @@ -57,7 +50,7 @@ func ToLeaderElectionWithConfigmapLease(clientConfig *rest.Config, config config eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) eventRecorder := eventBroadcaster.NewRecorder(clientgoscheme.Scheme, corev1.EventSource{Component: component}) rl, err := resourcelock.New( - resourcelock.ConfigMapsLeasesResourceLock, + resourcelock.ConfigMapsResourceLock, config.Namespace, config.Name, kubeClient.CoreV1(), diff --git a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/builder.go b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/builder.go index 820892a17c53..f98b86f3d455 100644 --- a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/builder.go +++ b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/builder.go @@ -329,7 +329,7 @@ func (b *ControllerBuilder) Run(ctx context.Context, config *unstructured.Unstru leaderConfig := rest.CopyConfig(protoConfig) leaderConfig.Timeout = b.leaderElection.RenewDeadline.Duration - leaderElection, err := leaderelectionconverter.ToLeaderElectionWithConfigmapLease(leaderConfig, *b.leaderElection, b.componentName, b.instanceIdentity) + leaderElection, err := leaderelectionconverter.ToConfigMapLeaderElection(leaderConfig, *b.leaderElection, b.componentName, b.instanceIdentity) if err != nil { return err } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/admissionregistration.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/admissionregistration.go index fafa39c4048c..67b6a615d084 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/admissionregistration.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/admissionregistration.go @@ -7,60 +7,50 @@ import ( 
"github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" - "k8s.io/apimachinery/pkg/api/equality" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" admissionregistrationclientv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" "k8s.io/klog/v2" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ApplyMutatingWebhookConfigurationImproved ensures the form of the specified +// ApplyMutatingWebhookConfiguration ensures the form of the specified // mutatingwebhookconfiguration is present in the API. If it does not exist, // it will be created. If it does exist, the metadata of the required // mutatingwebhookconfiguration will be merged with the existing mutatingwebhookconfiguration // and an update performed if the mutatingwebhookconfiguration spec and metadata differ from // the previously required spec and metadata based on generation change. -func ApplyMutatingWebhookConfigurationImproved(ctx context.Context, client admissionregistrationclientv1.MutatingWebhookConfigurationsGetter, recorder events.Recorder, - requiredOriginal *admissionregistrationv1.MutatingWebhookConfiguration, cache ResourceCache) (*admissionregistrationv1.MutatingWebhookConfiguration, bool, error) { +func ApplyMutatingWebhookConfiguration(ctx context.Context, client admissionregistrationclientv1.MutatingWebhookConfigurationsGetter, recorder events.Recorder, + requiredOriginal *admissionregistrationv1.MutatingWebhookConfiguration) (*admissionregistrationv1.MutatingWebhookConfiguration, bool, error) { if requiredOriginal == nil { return nil, false, fmt.Errorf("Unexpected nil instead of an object") } + required := requiredOriginal.DeepCopy() - existing, err := client.MutatingWebhookConfigurations().Get(ctx, requiredOriginal.GetName(), metav1.GetOptions{}) + existing, err := client.MutatingWebhookConfigurations().Get(ctx, required.GetName(), metav1.GetOptions{}) if apierrors.IsNotFound(err) { - required := requiredOriginal.DeepCopy() actual, err := client.MutatingWebhookConfigurations().Create( ctx, resourcemerge.WithCleanLabelsAndAnnotations(required).(*admissionregistrationv1.MutatingWebhookConfiguration), metav1.CreateOptions{}) reportCreateEvent(recorder, required, err) if err != nil { return nil, false, err } - // need to store the original so that the early comparison of hashes is done based on the original, not a mutated copy - cache.UpdateCachedResourceMetadata(requiredOriginal, actual) return actual, true, nil } else if err != nil { return nil, false, err } - if cache.SafeToSkipApply(requiredOriginal, existing) { - return existing, false, nil - } - - required := requiredOriginal.DeepCopy() modified := resourcemerge.BoolPtr(false) existingCopy := existing.DeepCopy() resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) - copyMutatingWebhookCABundle(existing, required) - webhooksEquivalent := equality.Semantic.DeepEqual(existingCopy.Webhooks, required.Webhooks) - if webhooksEquivalent && !*modified { - // need to store the original so that the early comparison of hashes is done based on the original, not a mutated copy - cache.UpdateCachedResourceMetadata(requiredOriginal, existingCopy) + if !*modified { return existingCopy, false, nil } // at this point we know that we're going to perform a write. 
We're just trying to get the object correct toWrite := existingCopy // shallow copy so the code reads easier + copyMutatingWebhookCABundle(existing, required) toWrite.Webhooks = required.Webhooks klog.V(4).Infof("MutatingWebhookConfiguration %q changes: %v", required.GetNamespace()+"/"+required.GetName(), JSONPatchNoError(existing, toWrite)) @@ -70,9 +60,7 @@ func ApplyMutatingWebhookConfigurationImproved(ctx context.Context, client admis if err != nil { return nil, false, err } - // need to store the original so that the early comparison of hashes is done based on the original, not a mutated copy - cache.UpdateCachedResourceMetadata(requiredOriginal, actual) - return actual, true, nil + return actual, *modified || actual.GetGeneration() > existingCopy.GetGeneration(), nil } // copyMutatingWebhookCABundle populates webhooks[].clientConfig.caBundle fields from existing resource if it was set before @@ -90,52 +78,42 @@ func copyMutatingWebhookCABundle(from, to *admissionregistrationv1.MutatingWebho } } -// ApplyValidatingWebhookConfigurationImproved ensures the form of the specified +// ApplyValidatingWebhookConfiguration ensures the form of the specified // validatingwebhookconfiguration is present in the API. If it does not exist, // it will be created. If it does exist, the metadata of the required // validatingwebhookconfiguration will be merged with the existing validatingwebhookconfiguration // and an update performed if the validatingwebhookconfiguration spec and metadata differ from // the previously required spec and metadata based on generation change. -func ApplyValidatingWebhookConfigurationImproved(ctx context.Context, client admissionregistrationclientv1.ValidatingWebhookConfigurationsGetter, recorder events.Recorder, - requiredOriginal *admissionregistrationv1.ValidatingWebhookConfiguration, cache ResourceCache) (*admissionregistrationv1.ValidatingWebhookConfiguration, bool, error) { +func ApplyValidatingWebhookConfiguration(ctx context.Context, client admissionregistrationclientv1.ValidatingWebhookConfigurationsGetter, recorder events.Recorder, + requiredOriginal *admissionregistrationv1.ValidatingWebhookConfiguration) (*admissionregistrationv1.ValidatingWebhookConfiguration, bool, error) { if requiredOriginal == nil { return nil, false, fmt.Errorf("Unexpected nil instead of an object") } + required := requiredOriginal.DeepCopy() - existing, err := client.ValidatingWebhookConfigurations().Get(ctx, requiredOriginal.GetName(), metav1.GetOptions{}) + existing, err := client.ValidatingWebhookConfigurations().Get(ctx, required.GetName(), metav1.GetOptions{}) if apierrors.IsNotFound(err) { - required := requiredOriginal.DeepCopy() actual, err := client.ValidatingWebhookConfigurations().Create( ctx, resourcemerge.WithCleanLabelsAndAnnotations(required).(*admissionregistrationv1.ValidatingWebhookConfiguration), metav1.CreateOptions{}) reportCreateEvent(recorder, required, err) if err != nil { return nil, false, err } - // need to store the original so that the early comparison of hashes is done based on the original, not a mutated copy - cache.UpdateCachedResourceMetadata(requiredOriginal, actual) return actual, true, nil } else if err != nil { return nil, false, err } - if cache.SafeToSkipApply(requiredOriginal, existing) { - return existing, false, nil - } - - required := requiredOriginal.DeepCopy() modified := resourcemerge.BoolPtr(false) existingCopy := existing.DeepCopy() resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) - 
copyValidatingWebhookCABundle(existing, required) - webhooksEquivalent := equality.Semantic.DeepEqual(existingCopy.Webhooks, required.Webhooks) - if webhooksEquivalent && !*modified { - // need to store the original so that the early comparison of hashes is done based on the original, not a mutated copy - cache.UpdateCachedResourceMetadata(requiredOriginal, existingCopy) + if !*modified { return existingCopy, false, nil } // at this point we know that we're going to perform a write. We're just trying to get the object correct toWrite := existingCopy // shallow copy so the code reads easier + copyValidatingWebhookCABundle(existing, required) toWrite.Webhooks = required.Webhooks klog.V(4).Infof("ValidatingWebhookConfiguration %q changes: %v", required.GetNamespace()+"/"+required.GetName(), JSONPatchNoError(existing, toWrite)) @@ -145,9 +123,7 @@ func ApplyValidatingWebhookConfigurationImproved(ctx context.Context, client adm if err != nil { return nil, false, err } - // need to store the original so that the early comparison of hashes is done based on the original, not a mutated copy - cache.UpdateCachedResourceMetadata(requiredOriginal, actual) - return actual, true, nil + return actual, *modified || actual.GetGeneration() > existingCopy.GetGeneration(), nil } // copyValidatingWebhookCABundle populates webhooks[].clientConfig.caBundle fields from existing resource if it was set before diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go index 6f654250e4c3..c0a9fc8f4b20 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go @@ -206,13 +206,13 @@ func ApplyDirectly(ctx context.Context, clients *ClientHolder, recorder events.R if clients.kubeClient == nil { result.Error = fmt.Errorf("missing kubeClient") } else { - result.Result, result.Changed, result.Error = ApplyValidatingWebhookConfigurationImproved(ctx, clients.kubeClient.AdmissionregistrationV1(), recorder, t, cache) + result.Result, result.Changed, result.Error = ApplyValidatingWebhookConfiguration(ctx, clients.kubeClient.AdmissionregistrationV1(), recorder, t) } case *admissionregistrationv1.MutatingWebhookConfiguration: if clients.kubeClient == nil { result.Error = fmt.Errorf("missing kubeClient") } else { - result.Result, result.Changed, result.Error = ApplyMutatingWebhookConfigurationImproved(ctx, clients.kubeClient.AdmissionregistrationV1(), recorder, t, cache) + result.Result, result.Changed, result.Error = ApplyMutatingWebhookConfiguration(ctx, clients.kubeClient.AdmissionregistrationV1(), recorder, t) } case *storagev1.CSIDriver: if clients.kubeClient == nil { diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go index 95e4f27a45cb..243b021e4bf1 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go @@ -2,13 +2,14 @@ package resourceapply import ( "context" - "fmt" storagev1 "k8s.io/api/storage/v1" + storagev1beta1 "k8s.io/api/storage/v1beta1" "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" storageclientv1 
"k8s.io/client-go/kubernetes/typed/storage/v1" + storageclientv1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" "k8s.io/klog/v2" "github.com/openshift/library-go/pkg/operator/events" @@ -57,18 +58,39 @@ func ApplyStorageClass(ctx context.Context, client storageclientv1.StorageClasse return actual, true, err } -// ApplyCSIDriver merges objectmeta, does not worry about anything else -func ApplyCSIDriver(ctx context.Context, client storageclientv1.CSIDriversGetter, recorder events.Recorder, requiredOriginal *storagev1.CSIDriver) (*storagev1.CSIDriver, bool, error) { - - required := requiredOriginal.DeepCopy() - if required.Annotations == nil { - required.Annotations = map[string]string{} +// ApplyCSIDriverV1Beta1 merges objectmeta, does not worry about anything else +func ApplyCSIDriverV1Beta1(ctx context.Context, client storageclientv1beta1.CSIDriversGetter, recorder events.Recorder, required *storagev1beta1.CSIDriver) (*storagev1beta1.CSIDriver, bool, error) { + existing, err := client.CSIDrivers().Get(ctx, required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + requiredCopy := required.DeepCopy() + actual, err := client.CSIDrivers().Create( + ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*storagev1beta1.CSIDriver), metav1.CreateOptions{}) + reportCreateEvent(recorder, required, err) + return actual, true, err } - err := SetSpecHashAnnotation(&required.ObjectMeta, required.Spec) if err != nil { return nil, false, err } + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + if !*modified { + return existingCopy, false, nil + } + + if klog.V(4).Enabled() { + klog.Infof("CSIDriver %q changes: %v", required.Name, JSONPatchNoError(existing, existingCopy)) + } + + actual, err := client.CSIDrivers().Update(ctx, existingCopy, metav1.UpdateOptions{}) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} + +// ApplyCSIDriver merges objectmeta, does not worry about anything else +func ApplyCSIDriver(ctx context.Context, client storageclientv1.CSIDriversGetter, recorder events.Recorder, required *storagev1.CSIDriver) (*storagev1.CSIDriver, bool, error) { existing, err := client.CSIDrivers().Get(ctx, required.Name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { requiredCopy := required.DeepCopy() @@ -81,46 +103,21 @@ func ApplyCSIDriver(ctx context.Context, client storageclientv1.CSIDriversGetter return nil, false, err } - metadataModified := resourcemerge.BoolPtr(false) + modified := resourcemerge.BoolPtr(false) existingCopy := existing.DeepCopy() - resourcemerge.EnsureObjectMeta(metadataModified, &existingCopy.ObjectMeta, required.ObjectMeta) - requiredSpecHash := required.Annotations[specHashAnnotation] - existingSpecHash := existing.Annotations[specHashAnnotation] - sameSpec := requiredSpecHash == existingSpecHash - if sameSpec && !*metadataModified { - return existing, false, nil + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + if !*modified { + return existingCopy, false, nil } if klog.V(4).Enabled() { klog.Infof("CSIDriver %q changes: %v", required.Name, JSONPatchNoError(existing, existingCopy)) } - if sameSpec { - // Update metadata by a simple Update call - actual, err := client.CSIDrivers().Update(ctx, existingCopy, metav1.UpdateOptions{}) - reportUpdateEvent(recorder, required, err) - return actual, true, err - } - - existingCopy.Spec = required.Spec - 
existingCopy.ObjectMeta.ResourceVersion = "" - // Spec is read-only after creation. Delete and re-create the object - err = client.CSIDrivers().Delete(ctx, existingCopy.Name, metav1.DeleteOptions{}) - reportDeleteEvent(recorder, existingCopy, err, "Deleting CSIDriver to re-create it with updated parameters") - if err != nil && !apierrors.IsNotFound(err) { - return existing, false, err - } - actual, err := client.CSIDrivers().Create(ctx, existingCopy, metav1.CreateOptions{}) - if err != nil && apierrors.IsAlreadyExists(err) { - // Delete() few lines above did not really delete the object, - // the API server is probably waiting for a finalizer removal or so. - // Report an error, but something else than "Already exists", because - // that would be very confusing - Apply failed because the object - // already exists??? - err = fmt.Errorf("failed to re-create CSIDriver object %s, waiting for the original object to be deleted", existingCopy.Name) - } - reportCreateEvent(recorder, existingCopy, err) + // TODO: Spec is read-only, so this will fail if user changes it. Should we simply ignore it? + actual, err := client.CSIDrivers().Update(ctx, existingCopy, metav1.UpdateOptions{}) + reportUpdateEvent(recorder, required, err) return actual, true, err } diff --git a/vendor/github.com/openshift/library-go/test/library/metrics/query.go b/vendor/github.com/openshift/library-go/test/library/metrics/query.go index 8a993215f81b..de9f34625152 100644 --- a/vendor/github.com/openshift/library-go/test/library/metrics/query.go +++ b/vendor/github.com/openshift/library-go/test/library/metrics/query.go @@ -10,13 +10,15 @@ import ( "strings" "time" - routeclient "github.com/openshift/client-go/route/clientset/versioned" - prometheusapi "github.com/prometheus/client_golang/api" - prometheusv1 "github.com/prometheus/client_golang/api/prometheus/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/transport" + + prometheusapi "github.com/prometheus/client_golang/api" + prometheusv1 "github.com/prometheus/client_golang/api/prometheus/v1" + + routeclient "github.com/openshift/client-go/route/clientset/versioned" ) // NewPrometheusClient returns Prometheus API or error @@ -25,7 +27,7 @@ import ( // `ALERTS{alertname="PodDisruptionBudgetAtLimit",alertstate="pending",namespace="pdbnamespace",poddisruptionbudget="pdbname",prometheus="openshift-monitoring/k8s",service="kube-state-metrics",severity="warning"}==1` // Example query: // `scheduler_scheduling_duration_seconds_sum` -func NewPrometheusClient(ctx context.Context, kclient kubernetes.Interface, rc routeclient.Interface) (prometheusv1.API, error) { +func NewPrometheusClient(ctx context.Context, kclient *kubernetes.Clientset, rc *routeclient.Clientset) (prometheusv1.API, error) { _, err := kclient.CoreV1().Services("openshift-monitoring").Get(ctx, "prometheus-k8s", metav1.GetOptions{}) if err != nil { return nil, err @@ -56,7 +58,7 @@ func NewPrometheusClient(ctx context.Context, kclient kubernetes.Interface, rc r return createClient(ctx, kclient, host, bearerToken) } -func createClient(ctx context.Context, kclient kubernetes.Interface, host, bearerToken string) (prometheusv1.API, error) { +func createClient(ctx context.Context, kclient *kubernetes.Clientset, host, bearerToken string) (prometheusv1.API, error) { // retrieve router CA routerCAConfigMap, err := kclient.CoreV1().ConfigMaps("openshift-config-managed").Get(ctx, "default-ingress-cert", metav1.GetOptions{}) if err != nil { diff --git 
a/vendor/k8s.io/cloud-provider/go.mod b/vendor/k8s.io/cloud-provider/go.mod index 4f8240f1443f..67f0c04aa7ca 100644 --- a/vendor/k8s.io/cloud-provider/go.mod +++ b/vendor/k8s.io/cloud-provider/go.mod @@ -26,7 +26,6 @@ replace ( github.com/imdario/mergo => github.com/imdario/mergo v0.3.5 github.com/mattn/go-colorable => github.com/mattn/go-colorable v0.0.9 github.com/onsi/ginkgo => github.com/openshift/ginkgo v4.7.0-origin.0+incompatible - github.com/openshift/library-go => github.com/openshift/library-go v0.0.0-20211209153216-ed9bc958bd8a k8s.io/api => ../api k8s.io/apiextensions-apiserver => ../apiextensions-apiserver k8s.io/apimachinery => ../apimachinery diff --git a/vendor/k8s.io/cri-api/pkg/errors/doc.go b/vendor/k8s.io/cri-api/pkg/errors/doc.go deleted file mode 100644 index f3413ee98046..000000000000 --- a/vendor/k8s.io/cri-api/pkg/errors/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package errors provides helper functions for use by the kubelet -// to deal with CRI errors. -package errors // import "k8s.io/cri-api/pkg/errors" diff --git a/vendor/k8s.io/cri-api/pkg/errors/errors.go b/vendor/k8s.io/cri-api/pkg/errors/errors.go deleted file mode 100644 index 41d7b92466d8..000000000000 --- a/vendor/k8s.io/cri-api/pkg/errors/errors.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package errors - -import ( - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// IsNotFound returns a boolean indicating whether the error -// is grpc not found error. -// See https://github.com/grpc/grpc/blob/master/doc/statuscodes.md -// for a list of grpc status codes. 
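Reviewer note (not part of the diff): with `k8s.io/cri-api/pkg/errors` removed from vendor, code that still needs to tolerate a gRPC NOT_FOUND from the container runtime has to inspect the status code directly. A minimal, self-contained sketch of that check, mirroring the deleted helper, is below; the error construction in `main` is only for demonstration.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isNotFound reports whether err is a gRPC error carrying code NOT_FOUND,
// which is what the removed k8s.io/cri-api/pkg/errors.IsNotFound checked.
func isNotFound(err error) bool {
	s, ok := status.FromError(err)
	return ok && s.Code() == codes.NotFound
}

func main() {
	err := status.Error(codes.NotFound, "container abc123 not found")
	fmt.Println(isNotFound(err))                        // true
	fmt.Println(isNotFound(fmt.Errorf("plain error")))  // false: not a gRPC status error
}
```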
-func IsNotFound(err error) bool { - s, ok := status.FromError(err) - if !ok { - return ok - } - if s.Code() == codes.NotFound { - return true - } - - return false -} diff --git a/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go b/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go index 0f103c047ea5..37540875513d 100644 --- a/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go +++ b/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go @@ -88,6 +88,9 @@ var ( // https://bugzilla.redhat.com/show_bug.cgi?id=1854379 `\[sig-storage\].*\[Driver: nfs\] \[Testpattern: Dynamic PV \(default fs\)\].*subPath should be able to unmount after the subpath directory is deleted`, + // https://bugzilla.redhat.com/show_bug.cgi?id=1945329 + `should drop INVALID conntrack entries`, + // https://bugzilla.redhat.com/show_bug.cgi?id=1986306 `\[sig-cli\] Kubectl client kubectl wait should ignore not found error with --for=delete`, @@ -107,6 +110,9 @@ var ( `Netpol \[LinuxOnly\] NetworkPolicy between server and client using UDP should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector`, `Topology Hints should distribute endpoints evenly`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=2034958 + `\[sig-network\] Conntrack should be able to preserve UDP traffic when initial unready endpoints get ready`, }, // tests that may work, but we don't support them "[Disabled:Unsupported]": { @@ -207,7 +213,12 @@ var ( // These are skipped explicitly by openshift-hack/test-kubernetes-e2e.sh, // but will also be skipped by openshift-tests in jobs that use openshift-sdn. "[Skipped:Network/OpenShiftSDN]": { + `NetworkPolicy.*IPBlock`, // feature is not supported by openshift-sdn + `NetworkPolicy.*[Ee]gress`, // feature is not supported by openshift-sdn `NetworkPolicy.*named port`, // feature is not supported by openshift-sdn + + `NetworkPolicy between server and client should support a 'default-deny-all' policy`, // uses egress feature + `NetworkPolicy between server and client should stop enforcing policies after they are deleted`, // uses egress feature }, } diff --git a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission.go index b10969a75f67..a399bc141f07 100644 --- a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission.go +++ b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission.go @@ -214,8 +214,9 @@ func (a *managementCPUsOverride) Admit(ctx context.Context, attr admission.Attri return admission.NewForbidden(attr, fmt.Errorf("%s the cluster does not have any nodes", PluginName)) } - // probably the workload feature disabled, because some cluster nodes do not have workload resource + // probably the workload feature disabled, because some of cluster nodes do not have workload resource if err := isManagementResourceAvailableForAllNodes(nodes, workloadType); err != nil { + pod.Annotations[workloadAdmissionWarning] = err.Error() return nil } diff --git a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver/patch.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver/patch.go index a72b33876913..fb66eeceb345 100644 --- a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver/patch.go +++ 
b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver/patch.go @@ -91,8 +91,8 @@ func OpenShiftKubeAPIServerConfigPatch(genericConfig *genericapiserver.Config, k return nil }) genericConfig.BuildHandlerChainFunc, err = BuildHandlerChain( + enablement.OpenshiftConfig().ConsolePublicURL, enablement.OpenshiftConfig().AuthConfig.OAuthMetadataFile, - kubeInformers.Core().V1().ConfigMaps(), deprecatedAPIRequestController, ) if err != nil { diff --git a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver/patch_handlerchain.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver/patch_handlerchain.go index 0bf0586b0d0f..85bdeb932c31 100644 --- a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver/patch_handlerchain.go +++ b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver/patch_handlerchain.go @@ -6,20 +6,15 @@ import ( authenticationv1 "k8s.io/api/authentication/v1" genericapiserver "k8s.io/apiserver/pkg/server" - coreinformers "k8s.io/client-go/informers/core/v1" patchfilters "k8s.io/kubernetes/openshift-kube-apiserver/filters" "k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest" authorizationv1 "github.com/openshift/api/authorization/v1" -) - -const ( - openShiftConfigManagedNamespaceName = "openshift-config-managed" - consolePublicConfigMapName = "console-public" + "github.com/openshift/library-go/pkg/apiserver/httprequest" ) // TODO switch back to taking a kubeapiserver config. For now make it obviously safe for 3.11 -func BuildHandlerChain(oauthMetadataFile string, cmInformer coreinformers.ConfigMapInformer, deprecatedAPIRequestController deprecatedapirequest.APIRequestLogger) (func(apiHandler http.Handler, kc *genericapiserver.Config) http.Handler, error) { +func BuildHandlerChain(consolePublicURL string, oauthMetadataFile string, deprecatedAPIRequestController deprecatedapirequest.APIRequestLogger) (func(apiHandler http.Handler, kc *genericapiserver.Config) http.Handler, error) { // load the oauthmetadata when we can return an error oAuthMetadata := []byte{} if len(oauthMetadataFile) > 0 { @@ -44,7 +39,7 @@ func BuildHandlerChain(oauthMetadataFile string, cmInformer coreinformers.Config handler = translateLegacyScopeImpersonation(handler) // redirects from / and /console to consolePublicURL if you're using a browser - handler = withConsoleRedirect(handler, cmInformer) + handler = withConsoleRedirect(handler, consolePublicURL) return handler }, @@ -74,28 +69,19 @@ func withOAuthInfo(handler http.Handler, oAuthMetadata []byte) http.Handler { // If we know the location of the asset server, redirect to it when / is requested // and the Accept header supports text/html -func withConsoleRedirect(handler http.Handler, cmInformer coreinformers.ConfigMapInformer) http.Handler { - cmLister := cmInformer.Lister() - informer := cmInformer.Informer() - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - if !strings.HasPrefix(req.URL.Path, "/console") { - // Dispatch to the next handler - handler.ServeHTTP(w, req) - return - } +func withConsoleRedirect(handler http.Handler, consolePublicURL string) http.Handler { + if len(consolePublicURL) == 0 { + return handler + } - consoleUrl := "" - if informer.HasSynced() { - consolePublicConfig, err := cmLister.ConfigMaps(openShiftConfigManagedNamespaceName).Get(consolePublicConfigMapName) - if err == nil { - consoleUrl = consolePublicConfig.Data["consoleURL"] - } - } - if len(consoleUrl) > 0 { - http.Redirect(w, 
req, consoleUrl, http.StatusFound) + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if strings.HasPrefix(req.URL.Path, "/console") || + (req.URL.Path == "/" && httprequest.PrefersHTML(req)) { + http.Redirect(w, req, consolePublicURL, http.StatusFound) return } - http.Error(w, "redirection failed: console URL not found", http.StatusInternalServerError) + // Dispatch to the next handler + handler.ServeHTTP(w, req) }) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/patch_restmapper.go b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/patch_restmapper.go index f3980a82cca3..7f465c519539 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/patch_restmapper.go +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/patch_restmapper.go @@ -1,11 +1,108 @@ package admission import ( - "k8s.io/apimachinery/pkg/api/meta" + "fmt" - "github.com/openshift/library-go/pkg/client/openshiftrestmapper" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" ) +// defaultRESTMappings contains enough RESTMappings to have enough of the kube-controller-manager succeed when running +// against a kube-apiserver that cannot reach aggregated APIs to do a full mapping. This happens when the OwnerReferencesPermissionEnforcement +// admission plugin runs to confirm permissions. Don't add things just because you don't want to fail. These are here so that +// we can start enough back up to get the rest of the system working correctly. +var defaultRESTMappings = []meta.RESTMapping{ + { + GroupVersionKind: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}, + Scope: meta.RESTScopeNamespace, + Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}, + Scope: meta.RESTScopeNamespace, + Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ReplicationController"}, + Scope: meta.RESTScopeNamespace, + Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "replicationcontrollers"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Secret"}, + Scope: meta.RESTScopeNamespace, + Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ServiceAccount"}, + Scope: meta.RESTScopeNamespace, + Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "serviceaccounts"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ControllerRevision"}, + Scope: meta.RESTScopeNamespace, + Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "controllerrevisions"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "DaemonSet"}, + Scope: meta.RESTScopeNamespace, + Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "daemonsets"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}, + Scope: meta.RESTScopeNamespace, + Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"}, + Scope: meta.RESTScopeNamespace, + 
Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"}, + }, + { + GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "StatefulSet"}, + Scope: meta.RESTScopeNamespace, + Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "statefulsets"}, + }, +} + func NewAdmissionRESTMapper(delegate meta.RESTMapper) meta.RESTMapper { - return openshiftrestmapper.NewOpenShiftHardcodedRESTMapper(delegate) + ret := HardCodedFirstRESTMapper{ + Mapping: map[schema.GroupVersionKind]meta.RESTMapping{}, + RESTMapper: delegate, + } + for i := range defaultRESTMappings { + curr := defaultRESTMappings[i] + ret.Mapping[curr.GroupVersionKind] = curr + } + return ret +} + +// HardCodedFirstRESTMapper is a RESTMapper that will look for hardcoded mappings first, then delegate. +// This is done in service to `OwnerReferencesPermissionEnforcement` +type HardCodedFirstRESTMapper struct { + Mapping map[schema.GroupVersionKind]meta.RESTMapping + meta.RESTMapper +} + +var _ meta.RESTMapper = HardCodedFirstRESTMapper{} + +func (m HardCodedFirstRESTMapper) String() string { + return fmt.Sprintf("HardCodedRESTMapper{\n\t%v\n%v\n}", m.Mapping, m.RESTMapper) +} + +// RESTMapping is the only function called today. The firsthit restmapper ought to make this work right. OwnerReferencesPermissionEnforcement +// only ever calls with one version. +func (m HardCodedFirstRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + // not exactly one version, delegate + if len(versions) != 1 { + return m.RESTMapper.RESTMappings(gk, versions...) + } + gvk := gk.WithVersion(versions[0]) + + single, ok := m.Mapping[gvk] + // not handled, delegate + if !ok { + return m.RESTMapper.RESTMappings(gk, versions...) + } + + return []*meta.RESTMapping{&single}, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go index 3ad57ab1bb7b..655ffe9f2b35 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go @@ -1529,26 +1529,15 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) { } // syncPod is the transaction script for the sync of a single pod (setting up) -// a pod. This method is reentrant and expected to converge a pod towards the -// desired state of the spec. The reverse (teardown) is handled in -// syncTerminatingPod and syncTerminatedPod. If syncPod exits without error, -// then the pod runtime state is in sync with the desired configuration state -// (pod is running). If syncPod exits with a transient error, the next -// invocation of syncPod is expected to make progress towards reaching the -// runtime state. syncPod exits with isTerminal when the pod was detected to -// have reached a terminal lifecycle phase due to container exits (for -// RestartNever or RestartOnFailure) and the next method invoked will by -// syncTerminatingPod. +// a pod. The reverse (teardown) is handled in syncTerminatingPod and +// syncTerminatedPod. If syncPod exits without error, then the pod runtime +// state is in sync with the desired configuration state (pod is running). +// If syncPod exits with a transient error, the next invocation of syncPod +// is expected to make progress towards reaching the runtime state. 
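Aside on the hardcoded-first mapper restored in `patch_restmapper.go` above: entries in the static table win, and only kinds outside the table fall through to the delegate, which is what keeps `OwnerReferencesPermissionEnforcement` working when discovery of aggregated APIs is unavailable. The sketch below is an illustrative stand-in under that assumption, not the vendored type; `firstHitMapper` and its table are made up, and the empty delegate plays the role of a discovery-less RESTMapper.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// firstHitMapper is a trimmed-down stand-in for the hardcoded-first pattern:
// consult a static table first, otherwise delegate.
type firstHitMapper struct {
	mapping map[schema.GroupVersionKind]meta.RESTMapping
	meta.RESTMapper
}

func (m firstHitMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) {
	if len(versions) == 1 {
		if hit, ok := m.mapping[gk.WithVersion(versions[0])]; ok {
			return &hit, nil
		}
	}
	return m.RESTMapper.RESTMapping(gk, versions...)
}

func main() {
	// The delegate knows nothing, as if discovery were unreachable.
	delegate := meta.NewDefaultRESTMapper(nil)

	gvk := schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}
	mapper := firstHitMapper{
		mapping: map[schema.GroupVersionKind]meta.RESTMapping{
			gvk: {
				GroupVersionKind: gvk,
				Scope:            meta.RESTScopeNamespace,
				Resource:         schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"},
			},
		},
		RESTMapper: delegate,
	}

	// Hardcoded entry resolves without any discovery.
	m, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Resource) // apps/v1, Resource=deployments

	// Anything outside the table still goes to the (empty) delegate and fails.
	_, err = mapper.RESTMapping(schema.GroupKind{Group: "batch", Kind: "CronJob"}, "v1")
	fmt.Println(err != nil) // true
}
```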
// // Arguments: // -// updateType - whether this is a create (first time) or an update, should -// only be used for metrics since this method must be reentrant -// pod - the pod that is being set up -// mirrorPod - the mirror pod known to the kubelet for this pod, if any -// podStatus - the most recent pod status observed for this pod which can -// be used to determine the set of actions that should be taken during -// this loop of syncPod +// o - the SyncPodOptions for this invocation // // The workflow is: // * If the pod is being created, record pod worker start latency @@ -1556,9 +1545,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) { // * If the pod is being seen as running for the first time, record pod // start latency // * Update the status of the pod in the status manager -// * Stop the pod's containers if it should not be running due to soft -// admission -// * Ensure any background tracking for a runnable pod is started +// * Kill the pod if it should not be running due to soft admission // * Create a mirror pod if the pod is a static pod, and does not // already have a mirror pod // * Create the data directories for the pod if they do not exist @@ -1572,12 +1559,10 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) { // // This operation writes all events that are dispatched in order to provide // the most accurate information possible about an error situation to aid debugging. -// Callers should not write an event if this operation returns an error. -func (kl *Kubelet) syncPod(ctx context.Context, updateType kubetypes.SyncPodType, pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (isTerminal bool, err error) { +// Callers should not throw an event if this operation returns an error. +func (kl *Kubelet) syncPod(ctx context.Context, updateType kubetypes.SyncPodType, pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) error { klog.V(4).InfoS("syncPod enter", "pod", klog.KObj(pod), "podUID", pod.UID) - defer func() { - klog.V(4).InfoS("syncPod exit", "pod", klog.KObj(pod), "podUID", pod.UID, "isTerminal", isTerminal) - }() + defer klog.V(4).InfoS("syncPod exit", "pod", klog.KObj(pod), "podUID", pod.UID) // Latency measurements for the main workflow are relative to the // first time the pod was seen by the API server. @@ -1609,17 +1594,11 @@ func (kl *Kubelet) syncPod(ctx context.Context, updateType kubetypes.SyncPodType for _, ipInfo := range apiPodStatus.PodIPs { podStatus.IPs = append(podStatus.IPs, ipInfo.IP) } + if len(podStatus.IPs) == 0 && len(apiPodStatus.PodIP) > 0 { podStatus.IPs = []string{apiPodStatus.PodIP} } - // If the pod is terminal, we don't need to continue to setup the pod - if apiPodStatus.Phase == v1.PodSucceeded || apiPodStatus.Phase == v1.PodFailed { - kl.statusManager.SetPodStatus(pod, apiPodStatus) - isTerminal = true - return isTerminal, nil - } - // If the pod should not be running, we request the pod's containers be stopped. This is not the same // as termination (we want to stop the pod, but potentially restart it later if soft admission allows // it later). Set the status and phase appropriately @@ -1668,13 +1647,13 @@ func (kl *Kubelet) syncPod(ctx context.Context, updateType kubetypes.SyncPodType // Return an error to signal that the sync loop should back off. 
syncErr = fmt.Errorf("pod cannot be run: %s", runnable.Message) } - return false, syncErr + return syncErr } // If the network plugin is not ready, only start the pod if it uses the host network if err := kl.runtimeState.networkErrors(); err != nil && !kubecontainer.IsHostNetworkPod(pod) { kl.recorder.Eventf(pod, v1.EventTypeWarning, events.NetworkNotReady, "%s: %v", NetworkNotReadyErrorMsg, err) - return false, fmt.Errorf("%s: %v", NetworkNotReadyErrorMsg, err) + return fmt.Errorf("%s: %v", NetworkNotReadyErrorMsg, err) } // Create Cgroups for the pod and apply resource parameters @@ -1721,7 +1700,7 @@ func (kl *Kubelet) syncPod(ctx context.Context, updateType kubetypes.SyncPodType } if err := pcm.EnsureExists(pod); err != nil { kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToCreatePodContainer, "unable to ensure pod container exists: %v", err) - return false, fmt.Errorf("failed to ensure that the pod: %v cgroups exist and are correctly applied: %v", pod.UID, err) + return fmt.Errorf("failed to ensure that the pod: %v cgroups exist and are correctly applied: %v", pod.UID, err) } } } @@ -1762,7 +1741,7 @@ func (kl *Kubelet) syncPod(ctx context.Context, updateType kubetypes.SyncPodType if err := kl.makePodDataDirs(pod); err != nil { kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToMakePodDataDirectories, "error making pod data directories: %v", err) klog.ErrorS(err, "Unable to make pod data directories for pod", "pod", klog.KObj(pod)) - return false, err + return err } // Volume manager will not mount volumes for terminating pods @@ -1772,7 +1751,7 @@ func (kl *Kubelet) syncPod(ctx context.Context, updateType kubetypes.SyncPodType if err := kl.volumeManager.WaitForAttachAndMount(pod); err != nil { kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedMountVolume, "Unable to attach or mount volumes: %v", err) klog.ErrorS(err, "Unable to attach or mount volumes for pod; skipping pod", "pod", klog.KObj(pod)) - return false, err + return err } } @@ -1786,15 +1765,16 @@ func (kl *Kubelet) syncPod(ctx context.Context, updateType kubetypes.SyncPodType // Do not return error if the only failures were pods in backoff for _, r := range result.SyncResults { if r.Error != kubecontainer.ErrCrashLoopBackOff && r.Error != images.ErrImagePullBackOff { - // local to container runtime, so we get better errors. - return false, err + // Do not record an event here, as we keep all event logging for sync pod failures + // local to container runtime so we get better errors + return err } } - return false, nil + return nil } - return false, nil + return nil } // syncTerminatingPod is expected to terminate all running containers in a pod. Once this method diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go index bc1e5ea514bc..ba0ab34b5c7f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go @@ -918,12 +918,6 @@ func countRunningContainerStatus(status v1.PodStatus) int { return runningContainers } -// PodCouldHaveRunningContainers returns true if the pod with the given UID could still have running -// containers. This returns false if the pod has not yet been started or the pod is unknown. 
-func (kl *Kubelet) PodCouldHaveRunningContainers(pod *v1.Pod) bool { - return kl.podWorkers.CouldHaveRunningContainers(pod.UID) -} - // PodResourcesAreReclaimed returns true if all required node-level resources that a pod was consuming have // been reclaimed by the kubelet. Reclaiming resources is a prerequisite to deleting a pod from the API server. func (kl *Kubelet) PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bool { @@ -1439,7 +1433,7 @@ func getPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase { } // generateAPIPodStatus creates the final API pod status for a pod, given the -// internal pod status. This method should only be called from within sync*Pod methods. +// internal pod status. func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus { klog.V(3).InfoS("Generating pod status", "pod", klog.KObj(pod)) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go index 3434b51a4427..bf5fcc733ba5 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go @@ -34,8 +34,6 @@ import ( "sync" "time" - crierror "k8s.io/cri-api/pkg/errors" - grpcstatus "google.golang.org/grpc/status" "github.com/armon/circbuf" @@ -504,17 +502,10 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n return nil, err } - statuses := []*kubecontainer.Status{} + statuses := make([]*kubecontainer.Status, len(containers)) // TODO: optimization: set maximum number of containers per container name to examine. - for _, c := range containers { + for i, c := range containers { status, err := m.runtimeService.ContainerStatus(c.Id) - // Between List (ListContainers) and check (ContainerStatus) another thread might remove a container, and that is normal. - // The previous call (ListContainers) never fails due to a pod container not existing. - // Therefore, this method should not either, but instead act as if the previous call failed, - // which means the error should be ignored. - if crierror.IsNotFound(err) { - continue - } if err != nil { // Merely log this here; GetPodStatus will actually report the error out. 
klog.V(4).InfoS("ContainerStatus return error", "containerID", c.Id, "err", err) @@ -547,7 +538,7 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n cStatus.Message += tMessage } } - statuses = append(statuses, cStatus) + statuses[i] = cStatus } sort.Sort(containerStatusByCreated(statuses)) @@ -730,16 +721,15 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubec "containerName", containerName, "containerID", containerID.String(), "gracePeriod", gracePeriod) err := m.runtimeService.StopContainer(containerID.ID, gracePeriod) - if err != nil && !crierror.IsNotFound(err) { + if err != nil { klog.ErrorS(err, "Container termination failed with gracePeriod", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", containerName, "containerID", containerID.String(), "gracePeriod", gracePeriod) - return err + } else { + klog.V(3).InfoS("Container exited normally", "pod", klog.KObj(pod), "podUID", pod.UID, + "containerName", containerName, "containerID", containerID.String()) } - klog.V(3).InfoS("Container exited normally", "pod", klog.KObj(pod), "podUID", pod.UID, - "containerName", containerName, "containerID", containerID.String()) - - return nil + return err } // killContainersWithSyncResult kills all pod's containers with sync results. diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go index 4f1c21150506..7f4f82aacb66 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go @@ -25,7 +25,6 @@ import ( "time" cadvisorapi "github.com/google/cadvisor/info/v1" - crierror "k8s.io/cri-api/pkg/errors" "k8s.io/klog/v2" v1 "k8s.io/api/core/v1" @@ -1008,7 +1007,7 @@ func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *v1.Pod, runningPo result.AddSyncResult(killSandboxResult) // Stop all sandboxes belongs to same pod for _, podSandbox := range runningPod.Sandboxes { - if err := m.runtimeService.StopPodSandbox(podSandbox.ID.ID); err != nil && !crierror.IsNotFound(err) { + if err := m.runtimeService.StopPodSandbox(podSandbox.ID.ID); err != nil { killSandboxResult.Fail(kubecontainer.ErrKillPodSandbox, err.Error()) klog.ErrorS(nil, "Failed to stop sandbox", "podSandboxID", podSandbox.ID) } @@ -1050,22 +1049,15 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp klog.V(4).InfoS("getSandboxIDByPodUID got sandbox IDs for pod", "podSandboxID", podSandboxIDs, "pod", klog.KObj(pod)) - sandboxStatuses := []*runtimeapi.PodSandboxStatus{} + sandboxStatuses := make([]*runtimeapi.PodSandboxStatus, len(podSandboxIDs)) podIPs := []string{} for idx, podSandboxID := range podSandboxIDs { podSandboxStatus, err := m.runtimeService.PodSandboxStatus(podSandboxID) - // Between List (getSandboxIDByPodUID) and check (PodSandboxStatus) another thread might remove a container, and that is normal. - // The previous call (getSandboxIDByPodUID) never fails due to a pod sandbox not existing. - // Therefore, this method should not either, but instead act as if the previous call failed, - // which means the error should be ignored. 
- if crierror.IsNotFound(err) { - continue - } if err != nil { klog.ErrorS(err, "PodSandboxStatus of sandbox for pod", "podSandboxID", podSandboxID, "pod", klog.KObj(pod)) return nil, err } - sandboxStatuses = append(sandboxStatuses, podSandboxStatus) + sandboxStatuses[idx] = podSandboxStatus // Only get pod IP from latest sandbox if idx == 0 && podSandboxStatus.State == runtimeapi.PodSandboxState_SANDBOX_READY { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go b/vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go index 5632745e0603..e5678d3c1daf 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go @@ -218,7 +218,7 @@ type PodWorkers interface { } // the function to invoke to perform a sync (reconcile the kubelet state to the desired shape of the pod) -type syncPodFnType func(ctx context.Context, updateType kubetypes.SyncPodType, pod *v1.Pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, error) +type syncPodFnType func(ctx context.Context, updateType kubetypes.SyncPodType, pod *v1.Pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) error // the function to invoke to terminate a pod (ensure no running processes are present) type syncTerminatingPodFnType func(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, runningPod *kubecontainer.Pod, gracePeriod *int64, podStatusFn func(*v1.PodStatus)) error @@ -392,11 +392,6 @@ type podWorkers struct { syncTerminatingPodFn syncTerminatingPodFnType syncTerminatedPodFn syncTerminatedPodFnType - // workerChannelFn is exposed for testing to allow unit tests to impose delays - // in channel communication. The function is invoked once each time a new worker - // goroutine starts. - workerChannelFn func(uid types.UID, in chan podWork) (out <-chan podWork) - // The EventRecorder to use recorder record.EventRecorder @@ -704,8 +699,9 @@ func (p *podWorkers) UpdatePod(options UpdatePodOptions) { } // start the pod worker goroutine if it doesn't exist - podUpdates, exists := p.podUpdates[uid] - if !exists { + var podUpdates chan podWork + var exists bool + if podUpdates, exists = p.podUpdates[uid]; !exists { // We need to have a buffer here, because checkForUpdates() method that // puts an update into channel is called from the same goroutine where // the channel is consumed. However, it is guaranteed that in such case @@ -719,21 +715,13 @@ func (p *podWorkers) UpdatePod(options UpdatePodOptions) { append(p.waitingToStartStaticPodsByFullname[status.fullname], uid) } - // allow testing of delays in the pod update channel - var outCh <-chan podWork - if p.workerChannelFn != nil { - outCh = p.workerChannelFn(uid, podUpdates) - } else { - outCh = podUpdates - } - // Creating a new pod worker either means this is a new pod, or that the // kubelet just restarted. In either case the kubelet is willing to believe // the status of the pod for the first pod worker sync. See corresponding // comment in syncPod. go func() { defer runtime.HandleCrash() - p.managePodLoop(outCh) + p.managePodLoop(podUpdates) }() } @@ -797,31 +785,28 @@ func calculateEffectiveGracePeriod(status *podSyncStatus, pod *v1.Pod, options * } // allowPodStart tries to start the pod and returns true if allowed, otherwise -// it requeues the pod and returns false. If the pod will never be able to start -// because data is missing, or the pod was terminated before start, canEverStart -// is false. 
-func (p *podWorkers) allowPodStart(pod *v1.Pod) (canStart bool, canEverStart bool) { +// it requeues the pod and returns false. +func (p *podWorkers) allowPodStart(pod *v1.Pod) bool { if !kubetypes.IsStaticPod(pod) { - // TODO: Do we want to allow non-static pods with the same full name? + // TBD: Do we want to allow non-static pods with the same full name? // Note that it may disable the force deletion of pods. - return true, true + return true } p.podLock.Lock() defer p.podLock.Unlock() status, ok := p.podSyncStatuses[pod.UID] if !ok { - klog.ErrorS(nil, "Pod sync status does not exist, the worker should not be running", "pod", klog.KObj(pod), "podUID", pod.UID) - return false, false - } - if status.IsTerminationRequested() { - return false, false + klog.ErrorS(nil, "Failed to get a valid podSyncStatuses", "pod", klog.KObj(pod), "podUID", pod.UID) + p.workQueue.Enqueue(pod.UID, wait.Jitter(p.backOffPeriod, workerBackOffPeriodJitterFactor)) + status.working = false + return false } if !p.allowStaticPodStart(status.fullname, pod.UID) { p.workQueue.Enqueue(pod.UID, wait.Jitter(p.backOffPeriod, workerBackOffPeriodJitterFactor)) status.working = false - return false, true + return false } - return true, true + return true } // allowStaticPodStart tries to start the static pod and returns true if @@ -834,12 +819,9 @@ func (p *podWorkers) allowStaticPodStart(fullname string, uid types.UID) bool { } waitingPods := p.waitingToStartStaticPodsByFullname[fullname] - // TODO: This is O(N) with respect to the number of updates to static pods - // with overlapping full names, and ideally would be O(1). for i, waitingUID := range waitingPods { // has pod already terminated or been deleted? - status, ok := p.podSyncStatuses[waitingUID] - if !ok || status.IsTerminationRequested() || status.IsTerminated() { + if _, ok := p.podSyncStatuses[waitingUID]; !ok { continue } // another pod is next in line @@ -865,20 +847,8 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan podWork) { var podStarted bool for update := range podUpdates { pod := update.Options.Pod - - // Decide whether to start the pod. If the pod was terminated prior to the pod being allowed - // to start, we have to clean it up and then exit the pod worker loop. 
if !podStarted { - canStart, canEverStart := p.allowPodStart(pod) - if !canEverStart { - p.completeUnstartedTerminated(pod) - if start := update.Options.StartTime; !start.IsZero() { - metrics.PodWorkerDuration.WithLabelValues("terminated").Observe(metrics.SinceInSeconds(start)) - } - klog.V(4).InfoS("Processing pod event done", "pod", klog.KObj(pod), "podUID", pod.UID, "updateType", update.WorkType) - return - } - if !canStart { + if !p.allowPodStart(pod) { klog.V(4).InfoS("Pod cannot start yet", "pod", klog.KObj(pod), "podUID", pod.UID) continue } @@ -886,7 +856,6 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan podWork) { } klog.V(4).InfoS("Processing pod event", "pod", klog.KObj(pod), "podUID", pod.UID, "updateType", update.WorkType) - var isTerminal bool err := func() error { // The worker is responsible for ensuring the sync method sees the appropriate // status updates on resyncs (the result of the last sync), transitions to @@ -933,14 +902,13 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan podWork) { err = p.syncTerminatingPodFn(ctx, pod, status, update.Options.RunningPod, gracePeriod, podStatusFn) default: - isTerminal, err = p.syncPodFn(ctx, update.Options.UpdateType, pod, update.Options.MirrorPod, status) + err = p.syncPodFn(ctx, update.Options.UpdateType, pod, update.Options.MirrorPod, status) } lastSyncTime = time.Now() return err }() - var phaseTransition bool switch { case err == context.Canceled: // when the context is cancelled we expect an update to already be queued @@ -971,17 +939,10 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan podWork) { } // otherwise we move to the terminating phase p.completeTerminating(pod) - phaseTransition = true - - case isTerminal: - // if syncPod indicated we are now terminal, set the appropriate pod status to move to terminating - klog.V(4).InfoS("Pod is terminal", "pod", klog.KObj(pod), "podUID", pod.UID, "updateType", update.WorkType) - p.completeSync(pod) - phaseTransition = true } - // queue a retry if necessary, then put the next event in the channel if any - p.completeWork(pod, phaseTransition, err) + // queue a retry for errors if necessary, then put the next event in the channel if any + p.completeWork(pod, err) if start := update.Options.StartTime; !start.IsZero() { metrics.PodWorkerDuration.WithLabelValues(update.Options.UpdateType.String()).Observe(metrics.SinceInSeconds(start)) } @@ -1012,33 +973,6 @@ func (p *podWorkers) acknowledgeTerminating(pod *v1.Pod) PodStatusFunc { return nil } -// completeSync is invoked when syncPod completes successfully and indicates the pod is now terminal and should -// be terminated. This happens when the natural pod lifecycle completes - any pod which is not RestartAlways -// exits. Unnatural completions, such as evictions, API driven deletion or phase transition, are handled by -// UpdatePod. 
-func (p *podWorkers) completeSync(pod *v1.Pod) { - p.podLock.Lock() - defer p.podLock.Unlock() - - klog.V(4).InfoS("Pod indicated lifecycle completed naturally and should now terminate", "pod", klog.KObj(pod), "podUID", pod.UID) - - if status, ok := p.podSyncStatuses[pod.UID]; ok { - if status.terminatingAt.IsZero() { - status.terminatingAt = time.Now() - } else { - klog.V(4).InfoS("Pod worker attempted to set terminatingAt twice, likely programmer error", "pod", klog.KObj(pod), "podUID", pod.UID) - } - status.startedTerminating = true - } - - p.lastUndeliveredWorkUpdate[pod.UID] = podWork{ - WorkType: TerminatingPodWork, - Options: UpdatePodOptions{ - Pod: pod, - }, - } -} - // completeTerminating is invoked when syncTerminatingPod completes successfully, which means // no container is running, no container will be started in the future, and we are ready for // cleanup. This updates the termination state which prevents future syncs and will ensure @@ -1093,7 +1027,12 @@ func (p *podWorkers) completeTerminatingRuntimePod(pod *v1.Pod) { } } - p.cleanupPodUpdates(pod.UID) + ch, ok := p.podUpdates[pod.UID] + if ok { + close(ch) + } + delete(p.podUpdates, pod.UID) + delete(p.lastUndeliveredWorkUpdate, pod.UID) } // completeTerminated is invoked after syncTerminatedPod completes successfully and means we @@ -1104,7 +1043,12 @@ func (p *podWorkers) completeTerminated(pod *v1.Pod) { klog.V(4).InfoS("Pod is complete and the worker can now stop", "pod", klog.KObj(pod), "podUID", pod.UID) - p.cleanupPodUpdates(pod.UID) + ch, ok := p.podUpdates[pod.UID] + if ok { + close(ch) + } + delete(p.podUpdates, pod.UID) + delete(p.lastUndeliveredWorkUpdate, pod.UID) if status, ok := p.podSyncStatuses[pod.UID]; ok { if status.terminatingAt.IsZero() { @@ -1122,40 +1066,11 @@ func (p *podWorkers) completeTerminated(pod *v1.Pod) { } } -// completeUnstartedTerminated is invoked if a pod that has never been started receives a termination -// signal before it can be started. -func (p *podWorkers) completeUnstartedTerminated(pod *v1.Pod) { - p.podLock.Lock() - defer p.podLock.Unlock() - - klog.V(4).InfoS("Pod never started and the worker can now stop", "pod", klog.KObj(pod), "podUID", pod.UID) - - p.cleanupPodUpdates(pod.UID) - - if status, ok := p.podSyncStatuses[pod.UID]; ok { - if status.terminatingAt.IsZero() { - klog.V(4).InfoS("Pod worker is complete but did not have terminatingAt set, likely programmer error", "pod", klog.KObj(pod), "podUID", pod.UID) - } - if !status.terminatedAt.IsZero() { - klog.V(4).InfoS("Pod worker is complete and had terminatedAt set, likely programmer error", "pod", klog.KObj(pod), "podUID", pod.UID) - } - status.finished = true - status.working = false - status.terminatedAt = time.Now() - - if p.startedStaticPodsByFullname[status.fullname] == pod.UID { - delete(p.startedStaticPodsByFullname, status.fullname) - } - } -} - // completeWork requeues on error or the next sync interval and then immediately executes any pending // work. -func (p *podWorkers) completeWork(pod *v1.Pod, phaseTransition bool, syncErr error) { +func (p *podWorkers) completeWork(pod *v1.Pod, syncErr error) { // Requeue the last update if the last sync returned error. switch { - case phaseTransition: - p.workQueue.Enqueue(pod.UID, 0) case syncErr == nil: // No error; requeue at the regular resync interval. 
p.workQueue.Enqueue(pod.UID, wait.Jitter(p.resyncInterval, workerResyncIntervalJitterFactor)) @@ -1235,10 +1150,10 @@ func (p *podWorkers) SyncKnownPods(desiredPods []*v1.Pod) map[types.UID]PodWorke return workers } -// removeTerminatedWorker cleans up and removes the worker status for a worker -// that has reached a terminal state of "finished" - has successfully exited -// syncTerminatedPod. This "forgets" a pod by UID and allows another pod to be -// recreated with the same UID. +// removeTerminatedWorker cleans up and removes the worker status for a worker that +// has reached a terminal state of "finished" - has successfully exited +// syncTerminatedPod. This "forgets" a pod by UID and allows another pod to be recreated +// with the same UID. func (p *podWorkers) removeTerminatedWorker(uid types.UID) { status, ok := p.podSyncStatuses[uid] if !ok { @@ -1247,6 +1162,11 @@ func (p *podWorkers) removeTerminatedWorker(uid types.UID) { return } + if startedUID, started := p.startedStaticPodsByFullname[status.fullname]; started && startedUID != uid { + klog.V(4).InfoS("Pod cannot start yet but is no longer known to the kubelet, finish it", "podUID", uid) + status.finished = true + } + if !status.finished { klog.V(4).InfoS("Pod worker has been requested for removal but is still not fully terminated", "podUID", uid) return @@ -1258,7 +1178,8 @@ func (p *podWorkers) removeTerminatedWorker(uid types.UID) { klog.V(4).InfoS("Pod has been terminated and is no longer known to the kubelet, remove all history", "podUID", uid) } delete(p.podSyncStatuses, uid) - p.cleanupPodUpdates(uid) + delete(p.podUpdates, uid) + delete(p.lastUndeliveredWorkUpdate, uid) if p.startedStaticPodsByFullname[status.fullname] == uid { delete(p.startedStaticPodsByFullname, status.fullname) @@ -1309,15 +1230,3 @@ func killPodNow(podWorkers PodWorkers, recorder record.EventRecorder) eviction.K } } } - -// cleanupPodUpdates closes the podUpdates channel and removes it from -// podUpdates map so that the corresponding pod worker can stop. It also -// removes any undelivered work. This method must be called holding the -// pod lock. -func (p *podWorkers) cleanupPodUpdates(uid types.UID) { - if ch, ok := p.podUpdates[uid]; ok { - close(ch) - } - delete(p.podUpdates, uid) - delete(p.lastUndeliveredWorkUpdate, uid) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/runonce.go b/vendor/k8s.io/kubernetes/pkg/kubelet/runonce.go index 00f3022af5a8..19b8a4f6a7b1 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/runonce.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/runonce.go @@ -112,10 +112,9 @@ func (kl *Kubelet) runOnce(pods []*v1.Pod, retryDelay time.Duration) (results [] // runPod runs a single pod and wait until all containers are running. 
func (kl *Kubelet) runPod(pod *v1.Pod, retryDelay time.Duration) error { - var isTerminal bool delay := retryDelay retry := 0 - for !isTerminal { + for { status, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace) if err != nil { return fmt.Errorf("unable to get status for pod %q: %v", format.Pod(pod), err) @@ -132,7 +131,7 @@ func (kl *Kubelet) runPod(pod *v1.Pod, retryDelay time.Duration) error { klog.ErrorS(err, "Failed creating a mirror pod", "pod", klog.KObj(pod)) } mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod) - if isTerminal, err = kl.syncPod(context.Background(), kubetypes.SyncPodUpdate, pod, mirrorPod, status); err != nil { + if err = kl.syncPod(context.Background(), kubetypes.SyncPodUpdate, pod, mirrorPod, status); err != nil { return fmt.Errorf("error syncing pod %q: %v", format.Pod(pod), err) } if retry >= runOnceMaxRetries { @@ -144,7 +143,6 @@ func (kl *Kubelet) runPod(pod *v1.Pod, retryDelay time.Duration) error { retry++ delay *= runOnceRetryDelayBackoff } - return nil } // isPodRunning returns true if all containers of a manifest are running. diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go index 8e853b3e01fb..051f0edda908 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go @@ -83,10 +83,8 @@ type PodStatusProvider interface { // PodDeletionSafetyProvider provides guarantees that a pod can be safely deleted. type PodDeletionSafetyProvider interface { - // PodResourcesAreReclaimed returns true if the pod can safely be deleted. + // A function which returns true if the pod can safely be deleted PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bool - // PodCouldHaveRunningContainers returns true if the pod could have running containers. - PodCouldHaveRunningContainers(pod *v1.Pod) bool } // Manager is the Source of truth for kubelet pod status, and should be kept up-to-date with @@ -326,14 +324,6 @@ func findContainerStatus(status *v1.PodStatus, containerID string) (containerSta } -// TerminatePod ensures that the status of containers is properly defaulted at the end of the pod -// lifecycle. As the Kubelet must reconcile with the container runtime to observe container status -// there is always the possibility we are unable to retrieve one or more container statuses due to -// garbage collection, admin action, or loss of temporary data on a restart. This method ensures -// that any absent container status is treated as a failure so that we do not incorrectly describe -// the pod as successful. If we have not yet initialized the pod in the presence of init containers, -// the init container failure status is sufficient to describe the pod as failing, and we do not need -// to override waiting containers (unless there is evidence the pod previously started those containers). 
func (m *manager) TerminatePod(pod *v1.Pod) { m.podStatusesLock.Lock() defer m.podStatusesLock.Unlock() @@ -345,26 +335,19 @@ func (m *manager) TerminatePod(pod *v1.Pod) { oldStatus = &cachedStatus.status } status := *oldStatus.DeepCopy() - - // once a pod has initialized, any missing status is treated as a failure - if hasPodInitialized(pod) { - for i := range status.ContainerStatuses { - if status.ContainerStatuses[i].State.Terminated != nil { - continue - } - status.ContainerStatuses[i].State = v1.ContainerState{ - Terminated: &v1.ContainerStateTerminated{ - Reason: "ContainerStatusUnknown", - Message: "The container could not be located when the pod was terminated", - ExitCode: 137, - }, - } + for i := range status.ContainerStatuses { + if status.ContainerStatuses[i].State.Terminated != nil { + continue + } + status.ContainerStatuses[i].State = v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + Reason: "ContainerStatusUnknown", + Message: "The container could not be located when the pod was terminated", + ExitCode: 137, + }, } } - - // all but the final suffix of init containers which have no evidence of a container start are - // marked as failed containers - for i := range initializedContainers(status.InitContainerStatuses) { + for i := range status.InitContainerStatuses { if status.InitContainerStatuses[i].State.Terminated != nil { continue } @@ -381,49 +364,6 @@ func (m *manager) TerminatePod(pod *v1.Pod) { m.updateStatusInternal(pod, status, true) } -// hasPodInitialized returns true if the pod has no evidence of ever starting a regular container, which -// implies those containers should not be transitioned to terminated status. -func hasPodInitialized(pod *v1.Pod) bool { - // a pod without init containers is always initialized - if len(pod.Spec.InitContainers) == 0 { - return true - } - // if any container has ever moved out of waiting state, the pod has initialized - for _, status := range pod.Status.ContainerStatuses { - if status.LastTerminationState.Terminated != nil || status.State.Waiting == nil { - return true - } - } - // if the last init container has ever completed with a zero exit code, the pod is initialized - if l := len(pod.Status.InitContainerStatuses); l > 0 { - container := pod.Status.InitContainerStatuses[l-1] - if state := container.LastTerminationState; state.Terminated != nil && state.Terminated.ExitCode == 0 { - return true - } - if state := container.State; state.Terminated != nil && state.Terminated.ExitCode == 0 { - return true - } - } - // otherwise the pod has no record of being initialized - return false -} - -// initializedContainers returns all status except for suffix of containers that are in Waiting -// state, which is the set of containers that have attempted to start at least once. If all containers -// are Watiing, the first container is always returned. -func initializedContainers(containers []v1.ContainerStatus) []v1.ContainerStatus { - for i := len(containers) - 1; i >= 0; i-- { - if containers[i].State.Waiting == nil || containers[i].LastTerminationState.Terminated != nil { - return containers[0 : i+1] - } - } - // always return at least one container - if len(containers) > 0 { - return containers[0:1] - } - return nil -} - // checkContainerStateTransition ensures that no container is trying to transition // from a terminated to non-terminated state, which is illegal and indicates a // logical error in the kubelet. 
@@ -679,9 +619,8 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { return } - mergedStatus := mergePodStatus(pod.Status, status.status, m.podDeletionSafety.PodCouldHaveRunningContainers(pod)) - - newPod, patchBytes, unchanged, err := statusutil.PatchPodStatus(m.kubeClient, pod.Namespace, pod.Name, pod.UID, pod.Status, mergedStatus) + oldStatus := pod.Status.DeepCopy() + newPod, patchBytes, unchanged, err := statusutil.PatchPodStatus(m.kubeClient, pod.Namespace, pod.Name, pod.UID, *oldStatus, mergePodStatus(*oldStatus, status.status)) klog.V(3).InfoS("Patch status for pod", "pod", klog.KObj(pod), "patch", string(patchBytes)) if err != nil { @@ -691,7 +630,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { if unchanged { klog.V(3).InfoS("Status for pod is up-to-date", "pod", klog.KObj(pod), "statusVersion", status.version) } else { - klog.V(3).InfoS("Status for pod updated successfully", "pod", klog.KObj(pod), "statusVersion", status.version, "status", mergedStatus) + klog.V(3).InfoS("Status for pod updated successfully", "pod", klog.KObj(pod), "statusVersion", status.version, "status", status.status) pod = newPod } @@ -832,49 +771,25 @@ func normalizeStatus(pod *v1.Pod, status *v1.PodStatus) *v1.PodStatus { return status } -// mergePodStatus merges oldPodStatus and newPodStatus to preserve where pod conditions -// not owned by kubelet and to ensure terminal phase transition only happens after all -// running containers have terminated. This method does not modify the old status. -func mergePodStatus(oldPodStatus, newPodStatus v1.PodStatus, couldHaveRunningContainers bool) v1.PodStatus { - podConditions := make([]v1.PodCondition, 0, len(oldPodStatus.Conditions)+len(newPodStatus.Conditions)) - +// mergePodStatus merges oldPodStatus and newPodStatus where pod conditions +// not owned by kubelet is preserved from oldPodStatus +func mergePodStatus(oldPodStatus, newPodStatus v1.PodStatus) v1.PodStatus { + podConditions := []v1.PodCondition{} for _, c := range oldPodStatus.Conditions { if !kubetypes.PodConditionByKubelet(c.Type) { podConditions = append(podConditions, c) } } + for _, c := range newPodStatus.Conditions { if kubetypes.PodConditionByKubelet(c.Type) { podConditions = append(podConditions, c) } } newPodStatus.Conditions = podConditions - - // Delay transitioning a pod to a terminal status unless the pod is actually terminal. - // The Kubelet should never transition a pod to terminal status that could have running - // containers and thus actively be leveraging exclusive resources. Note that resources - // like volumes are reconciled by a subsystem in the Kubelet and will converge if a new - // pod reuses an exclusive resource (unmount -> free -> mount), which means we do not - // need wait for those resources to be detached by the Kubelet. In general, resources - // the Kubelet exclusively owns must be released prior to a pod being reported terminal, - // while resources that have participanting components above the API use the pod's - // transition to a terminal phase (or full deletion) to release those resources. - if !isPhaseTerminal(oldPodStatus.Phase) && isPhaseTerminal(newPodStatus.Phase) { - if couldHaveRunningContainers { - newPodStatus.Phase = oldPodStatus.Phase - newPodStatus.Reason = oldPodStatus.Reason - newPodStatus.Message = oldPodStatus.Message - } - } - return newPodStatus } -// isPhaseTerminal returns true if the pod's phase is terminal. 
-func isPhaseTerminal(phase v1.PodPhase) bool { - return phase == v1.PodFailed || phase == v1.PodSucceeded -} - // NeedToReconcilePodReadiness returns if the pod "Ready" condition need to be reconcile func NeedToReconcilePodReadiness(pod *v1.Pod) bool { if len(pod.Spec.ReadinessGates) == 0 { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/storage.go index 08f143bd1f22..363cdc8f1ea6 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/storage.go @@ -356,11 +356,6 @@ func (r *REST) beginUpdate(ctx context.Context, obj, oldObj runtime.Object, opti newSvc := obj.(*api.Service) oldSvc := oldObj.(*api.Service) - // Make sure the existing object has all fields we expect to be defaulted. - // This might not be true if the saved object predates these fields (the - // Decorator hook is not called on 'old' in the update path. - r.defaultOnReadService(oldSvc) - // Fix up allocated values that the client may have not specified (for // idempotence). patchAllocatedValues(After{newSvc}, Before{oldSvc}) diff --git a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/aggregator.go b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/aggregator.go index 3fceec41f4d9..0c5be7417d00 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/aggregator.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/aggregator.go @@ -46,7 +46,6 @@ import ( e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" samplev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1" "k8s.io/utils/pointer" @@ -71,7 +70,6 @@ var _ = SIGDescribe("Aggregator", func() { }) f := framework.NewDefaultFramework("aggregator") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline // We want namespace initialization BeforeEach inserted by // NewDefaultFramework to happen before this, so we put this BeforeEach diff --git a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/apply.go b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/apply.go index 5e51780cac42..471f13043719 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/apply.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/apply.go @@ -35,7 +35,6 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" @@ -45,7 +44,6 @@ import ( var _ = SIGDescribe("ServerSideApply", func() { f := framework.NewDefaultFramework("apply") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline var client clientset.Interface var ns string diff --git a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/crd_conversion_webhook.go b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/crd_conversion_webhook.go index 9c8ebefafd6d..4898c8375a6a 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/crd_conversion_webhook.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/crd_conversion_webhook.go @@ -38,7 +38,6 @@ import ( e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" "k8s.io/kubernetes/test/utils/crd" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "k8s.io/utils/pointer" apiextensionsv1 
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -118,7 +117,6 @@ var alternativeAPIVersions = []apiextensionsv1.CustomResourceDefinitionVersion{ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]", func() { var certCtx *certContext f := framework.NewDefaultFramework("crd-webhook") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline servicePort := int32(9443) containerPort := int32(9444) diff --git a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/garbage_collector.go b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/garbage_collector.go index 733044710aab..c608878a92b3 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/garbage_collector.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/garbage_collector.go @@ -43,7 +43,6 @@ import ( e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" imageutils "k8s.io/kubernetes/test/utils/image" @@ -302,7 +301,6 @@ func getUniqLabel(labelkey, labelvalue string) map[string]string { var _ = SIGDescribe("Garbage collector", func() { f := framework.NewDefaultFramework("gc") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline /* Release: v1.9 diff --git a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/generated_clientset.go b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/generated_clientset.go index d728f9e8c695..640b0061d9aa 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/generated_clientset.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/generated_clientset.go @@ -31,7 +31,6 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" imageutils "k8s.io/kubernetes/test/utils/image" @@ -101,7 +100,6 @@ func observerUpdate(w watch.Interface, expectedUpdate func(runtime.Object) bool) var _ = SIGDescribe("Generated clientset", func() { f := framework.NewDefaultFramework("clientset") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.It("should create pods, set the deletionTimestamp and deletionGracePeriodSeconds of the pod", func() { podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) ginkgo.By("constructing the pod") diff --git a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/namespace.go b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/namespace.go index f14f490f4210..bf32faeb3314 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/namespace.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/namespace.go @@ -33,7 +33,6 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" "k8s.io/apimachinery/pkg/types" @@ -227,7 +226,6 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { var _ = SIGDescribe("Namespaces [Serial]", func() { f := framework.NewDefaultFramework("namespaces") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline /* Release: v1.11 diff --git a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/resource_quota.go b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/resource_quota.go index 3f7bfd78723f..c561e962c719 100644 --- 
a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/resource_quota.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/resource_quota.go @@ -36,7 +36,6 @@ import ( "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/utils/crd" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) @@ -52,7 +51,6 @@ var extendedResourceName = "example.com/dongle" var _ = SIGDescribe("ResourceQuota", func() { f := framework.NewDefaultFramework("resourcequota") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline /* Release: v1.16 @@ -917,7 +915,6 @@ var _ = SIGDescribe("ResourceQuota", func() { var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { f := framework.NewDefaultFramework("scope-selectors") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.It("should verify ResourceQuota with best effort scope using scope-selectors.", func() { ginkgo.By("Creating a ResourceQuota with best effort scope") resourceQuotaBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector("quota-besteffort", v1.ResourceQuotaScopeBestEffort)) @@ -1098,7 +1095,6 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { f := framework.NewDefaultFramework("resourcequota-priorityclass") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class.", func() { @@ -1424,7 +1420,6 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { var _ = SIGDescribe("ResourceQuota", func() { f := framework.NewDefaultFramework("cross-namespace-pod-affinity") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.It("should verify ResourceQuota with cross namespace pod affinity scope using scope-selectors.", func() { ginkgo.By("Creating a ResourceQuota with cross namespace pod affinity scope") quota, err := createResourceQuota( diff --git a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/table_conversion.go b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/table_conversion.go index 4f092b5f20e7..959ea522a6da 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/table_conversion.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/table_conversion.go @@ -31,7 +31,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/client-go/util/workqueue" - admissionapi "k8s.io/pod-security-admission/api" utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/cli-runtime/pkg/printers" @@ -44,7 +43,6 @@ var serverPrintVersion = utilversion.MustParseSemantic("v1.10.0") var _ = SIGDescribe("Servers with support for Table transformation", func() { f := framework.NewDefaultFramework("tables") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.BeforeEach(func() { e2eskipper.SkipUnlessServerVersionGTE(serverPrintVersion, f.ClientSet.Discovery()) diff --git a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go index 83ede28d7178..360993e3889b 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go @@ -47,7 +47,6 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" 
"k8s.io/kubernetes/test/utils/crd" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" @@ -79,7 +78,6 @@ const ( var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { var certCtx *certContext f := framework.NewDefaultFramework("webhook") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline servicePort := int32(8443) containerPort := int32(8444) @@ -1157,8 +1155,6 @@ func testWebhook(f *framework.Framework) { Labels: map[string]string{ skipNamespaceLabelKey: skipNamespaceLabelValue, f.UniqueName: "true", - // TODO(https://github.com/kubernetes/kubernetes/issues/108298): route namespace creation via framework.Framework.CreateNamespace in 1.24 - admissionapi.EnforceLevelLabel: string(admissionapi.LevelRestricted), }, }}) framework.ExpectNoError(err, "creating namespace %q", skippedNamespaceName) @@ -2373,12 +2369,8 @@ func newMutateConfigMapWebhookFixture(f *framework.Framework, certCtx *certConte func createWebhookConfigurationReadyNamespace(f *framework.Framework) { ns, err := f.ClientSet.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Name: f.Namespace.Name + "-markers", - Labels: map[string]string{ - f.UniqueName + "-markers": "true", - // TODO(https://github.com/kubernetes/kubernetes/issues/108298): route namespace creation via framework.Framework.CreateNamespace in 1.24 - admissionapi.EnforceLevelLabel: string(admissionapi.LevelRestricted), - }, + Name: f.Namespace.Name + "-markers", + Labels: map[string]string{f.UniqueName + "-markers": "true"}, }, }, metav1.CreateOptions{}) framework.ExpectNoError(err, "creating namespace for webhook configuration ready markers") diff --git a/vendor/k8s.io/kubernetes/test/e2e/apps/cronjob.go b/vendor/k8s.io/kubernetes/test/e2e/apps/cronjob.go index 8431eefab0cc..23eacbe22340 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apps/cronjob.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apps/cronjob.go @@ -43,7 +43,6 @@ import ( e2ejob "k8s.io/kubernetes/test/e2e/framework/job" e2eresource "k8s.io/kubernetes/test/e2e/framework/resource" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" ) const ( @@ -53,7 +52,6 @@ const ( var _ = SIGDescribe("CronJob", func() { f := framework.NewDefaultFramework("cronjob") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline sleepCommand := []string{"sleep", "300"} diff --git a/vendor/k8s.io/kubernetes/test/e2e/apps/daemon_set.go b/vendor/k8s.io/kubernetes/test/e2e/apps/daemon_set.go index 56d523de3236..646235e8c61d 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apps/daemon_set.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apps/daemon_set.go @@ -55,7 +55,6 @@ import ( e2edaemonset "k8s.io/kubernetes/test/e2e/framework/daemonset" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2eresource "k8s.io/kubernetes/test/e2e/framework/resource" - admissionapi "k8s.io/pod-security-admission/api" ) const ( @@ -134,7 +133,6 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { }) f = framework.NewDefaultFramework("daemonsets") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline image := WebserverImage dsName := "daemon-set" diff --git a/vendor/k8s.io/kubernetes/test/e2e/apps/deployment.go b/vendor/k8s.io/kubernetes/test/e2e/apps/deployment.go index c22dfb3199ac..95e7aea7db65 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apps/deployment.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apps/deployment.go @@ -61,7 +61,6 @@ 
import ( e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" testutil "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" utilpointer "k8s.io/utils/pointer" ) @@ -86,7 +85,6 @@ var _ = SIGDescribe("Deployment", func() { }) f := framework.NewDefaultFramework("deployment") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.BeforeEach(func() { c = f.ClientSet diff --git a/vendor/k8s.io/kubernetes/test/e2e/apps/disruption.go b/vendor/k8s.io/kubernetes/test/e2e/apps/disruption.go index e456bb926f55..c10d0588da5f 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apps/disruption.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apps/disruption.go @@ -46,7 +46,6 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" ) // schedulingTimeout is longer specifically because sometimes we need to wait @@ -63,7 +62,6 @@ var defaultLabels = map[string]string{"foo": "bar"} var _ = SIGDescribe("DisruptionController", func() { f := framework.NewDefaultFramework("disruption") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var ns string var cs kubernetes.Interface var dc dynamic.Interface diff --git a/vendor/k8s.io/kubernetes/test/e2e/apps/job.go b/vendor/k8s.io/kubernetes/test/e2e/apps/job.go index 756f606caaa1..469d0060ce93 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apps/job.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apps/job.go @@ -25,7 +25,6 @@ import ( batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" @@ -36,8 +35,6 @@ import ( e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eresource "k8s.io/kubernetes/test/e2e/framework/resource" - "k8s.io/kubernetes/test/e2e/scheduling" - admissionapi "k8s.io/pod-security-admission/api" "k8s.io/utils/pointer" "github.com/onsi/ginkgo" @@ -46,13 +43,8 @@ import ( var _ = SIGDescribe("Job", func() { f := framework.NewDefaultFramework("job") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged parallelism := int32(2) completions := int32(4) - - largeParallelism := int32(90) - largeCompletions := int32(90) - backoffLimit := int32(6) // default value // Simplest case: N pods succeed @@ -369,52 +361,6 @@ var _ = SIGDescribe("Job", func() { framework.ExpectEqual(pod.Status.Phase, v1.PodFailed) } }) - - ginkgo.It("should run a job to completion with CPU requests [Serial]", func() { - ginkgo.By("Creating a job that with CPU requests") - - testNodeName := scheduling.GetNodeThatCanRunPod(f) - targetNode, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), testNodeName, metav1.GetOptions{}) - framework.ExpectNoError(err, "unable to get node object for node %v", testNodeName) - - cpu, ok := targetNode.Status.Allocatable[v1.ResourceCPU] - if !ok { - framework.Failf("Unable to get node's %q cpu", targetNode.Name) - } - - cpuRequest := fmt.Sprint(int64(0.2 * float64(cpu.Value()))) - - backoff := 0 - ginkgo.By("Creating a job") - job := e2ejob.NewTestJob("succeed", "all-succeed", v1.RestartPolicyNever, largeParallelism, largeCompletions, nil, int32(backoff)) - for i := range job.Spec.Template.Spec.Containers { - 
job.Spec.Template.Spec.Containers[i].Resources = v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse(cpuRequest), - }, - } - job.Spec.Template.Spec.NodeSelector = map[string]string{"kubernetes.io/hostname": testNodeName} - } - - framework.Logf("Creating job %q with a node hostname selector %q wth cpu request %q", job.Name, testNodeName, cpuRequest) - job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) - framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) - - ginkgo.By("Ensuring job reaches completions") - err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, largeCompletions) - framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name) - - ginkgo.By("Ensuring pods for job exist") - pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name) - framework.ExpectNoError(err, "failed to get pod list for job in namespace: %s", f.Namespace.Name) - successes := int32(0) - for _, pod := range pods.Items { - if pod.Status.Phase == v1.PodSucceeded { - successes++ - } - } - framework.ExpectEqual(successes, largeCompletions, "expected %d successful job pods, but got %d", largeCompletions, successes) - }) }) // waitForJobFailure uses c to wait for up to timeout for the Job named jobName in namespace ns to fail. diff --git a/vendor/k8s.io/kubernetes/test/e2e/apps/rc.go b/vendor/k8s.io/kubernetes/test/e2e/apps/rc.go index dfa63be39bed..289e6232872c 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apps/rc.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apps/rc.go @@ -41,14 +41,12 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) var _ = SIGDescribe("ReplicationController", func() { f := framework.NewDefaultFramework("replication-controller") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline var ns string var dc dynamic.Interface diff --git a/vendor/k8s.io/kubernetes/test/e2e/apps/replica_set.go b/vendor/k8s.io/kubernetes/test/e2e/apps/replica_set.go index 61c87fc04cf0..f312914150b1 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apps/replica_set.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apps/replica_set.go @@ -45,7 +45,6 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" imageutils "k8s.io/kubernetes/test/utils/image" @@ -101,7 +100,6 @@ func newPodQuota(name, number string) *v1.ResourceQuota { var _ = SIGDescribe("ReplicaSet", func() { f := framework.NewDefaultFramework("replicaset") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline /* Release: v1.9 diff --git a/vendor/k8s.io/kubernetes/test/e2e/apps/statefulset.go b/vendor/k8s.io/kubernetes/test/e2e/apps/statefulset.go index 6a02483da83d..6a347e171b17 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apps/statefulset.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apps/statefulset.go @@ -51,7 +51,6 @@ import ( e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" ) const ( @@ -89,7 +88,6 @@ var httpProbe = &v1.Probe{ // 
GCE Api requirements: nodes and master need storage r/w permissions. var _ = SIGDescribe("StatefulSet", func() { f := framework.NewDefaultFramework("statefulset") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var ns string var c clientset.Interface diff --git a/vendor/k8s.io/kubernetes/test/e2e/apps/ttl_after_finished.go b/vendor/k8s.io/kubernetes/test/e2e/apps/ttl_after_finished.go index 4e5b0e2248d8..f4aabe3106cc 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apps/ttl_after_finished.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apps/ttl_after_finished.go @@ -29,7 +29,6 @@ import ( "k8s.io/kubernetes/pkg/util/slice" "k8s.io/kubernetes/test/e2e/framework" e2ejob "k8s.io/kubernetes/test/e2e/framework/job" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) @@ -43,7 +42,6 @@ const ( var _ = SIGDescribe("TTLAfterFinished", func() { f := framework.NewDefaultFramework("ttlafterfinished") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.It("job should be deleted once it finishes after TTL seconds", func() { testFinishedJob(f) diff --git a/vendor/k8s.io/kubernetes/test/e2e/auth/metadata_concealment.go b/vendor/k8s.io/kubernetes/test/e2e/auth/metadata_concealment.go index adc08201370c..564b08decf0f 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/auth/metadata_concealment.go +++ b/vendor/k8s.io/kubernetes/test/e2e/auth/metadata_concealment.go @@ -23,7 +23,6 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2ejob "k8s.io/kubernetes/test/e2e/framework/job" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" imageutil "k8s.io/kubernetes/test/utils/image" @@ -31,7 +30,6 @@ import ( var _ = SIGDescribe("Metadata Concealment", func() { f := framework.NewDefaultFramework("metadata-concealment") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.It("should run a check-metadata-concealment job to completion", func() { e2eskipper.SkipUnlessProviderIs("gce") diff --git a/vendor/k8s.io/kubernetes/test/e2e/auth/node_authn.go b/vendor/k8s.io/kubernetes/test/e2e/auth/node_authn.go index e9ffde1fd155..d6ba011775e5 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/auth/node_authn.go +++ b/vendor/k8s.io/kubernetes/test/e2e/auth/node_authn.go @@ -26,7 +26,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/cluster/ports" "k8s.io/kubernetes/test/e2e/framework" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -37,7 +36,6 @@ import ( var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() { f := framework.NewDefaultFramework("node-authn") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline var ns string var nodeIPs []string ginkgo.BeforeEach(func() { diff --git a/vendor/k8s.io/kubernetes/test/e2e/auth/node_authz.go b/vendor/k8s.io/kubernetes/test/e2e/auth/node_authz.go index 58dc308d6624..77b472adaa33 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/auth/node_authz.go +++ b/vendor/k8s.io/kubernetes/test/e2e/auth/node_authz.go @@ -29,7 +29,6 @@ import ( restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) @@ -42,7 +41,6 @@ const ( var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { f := framework.NewDefaultFramework("node-authz") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline // client 
that will impersonate a node var c clientset.Interface var ns string diff --git a/vendor/k8s.io/kubernetes/test/e2e/auth/service_accounts.go b/vendor/k8s.io/kubernetes/test/e2e/auth/service_accounts.go index 897b6b6b2afc..2dbfb78dea49 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/auth/service_accounts.go +++ b/vendor/k8s.io/kubernetes/test/e2e/auth/service_accounts.go @@ -41,7 +41,6 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" utilptr "k8s.io/utils/pointer" "github.com/onsi/ginkgo" @@ -49,7 +48,6 @@ import ( var _ = SIGDescribe("ServiceAccounts", func() { f := framework.NewDefaultFramework("svcaccounts") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.It("should ensure a single API token exists", func() { // wait for the service account to reference a single secret diff --git a/vendor/k8s.io/kubernetes/test/e2e/autoscaling/horizontal_pod_autoscaling.go b/vendor/k8s.io/kubernetes/test/e2e/autoscaling/horizontal_pod_autoscaling.go index d518cabb1b2f..fd1bdde27458 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/autoscaling/horizontal_pod_autoscaling.go +++ b/vendor/k8s.io/kubernetes/test/e2e/autoscaling/horizontal_pod_autoscaling.go @@ -17,7 +17,6 @@ limitations under the License. package autoscaling import ( - "k8s.io/pod-security-admission/api" "time" "k8s.io/apimachinery/pkg/runtime/schema" @@ -31,7 +30,6 @@ import ( // var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: CPU)", func() { f := framework.NewDefaultFramework("horizontal-pod-autoscaling") - f.NamespacePodSecurityEnforceLevel = api.LevelBaseline titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5" titleDown := "Should scale from 5 pods to 3 pods and from 3 to 1" diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/network/networking.go b/vendor/k8s.io/kubernetes/test/e2e/common/network/networking.go index 7534cc2872ec..79f9c0f23551 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/network/networking.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/network/networking.go @@ -22,12 +22,10 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/test/e2e/framework" e2enetwork "k8s.io/kubernetes/test/e2e/framework/network" - admissionapi "k8s.io/pod-security-admission/api" ) var _ = SIGDescribe("Networking", func() { f := framework.NewDefaultFramework("pod-network-test") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.Describe("Granular Checks: Pods", func() { diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/node/configmap.go b/vendor/k8s.io/kubernetes/test/e2e/common/node/configmap.go index 8e2514f4ff7f..b0ee3cbdcf75 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/node/configmap.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/node/configmap.go @@ -27,14 +27,12 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) var _ = SIGDescribe("ConfigMap", func() { f := framework.NewDefaultFramework("configmap") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline /* Release: v1.9 diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/node/container_probe.go b/vendor/k8s.io/kubernetes/test/e2e/common/node/container_probe.go index e7e7e182387b..14ec8956c6dc 100644 --- 
a/vendor/k8s.io/kubernetes/test/e2e/common/node/container_probe.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/node/container_probe.go @@ -37,7 +37,6 @@ import ( e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -51,7 +50,6 @@ const ( var _ = SIGDescribe("Probing container", func() { f := framework.NewDefaultFramework("container-probe") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline var podClient *framework.PodClient probe := webserverProbeBuilder{} diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/node/docker_containers.go b/vendor/k8s.io/kubernetes/test/e2e/common/node/docker_containers.go index b630579ae6f5..9bd8ba685cad 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/node/docker_containers.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/node/docker_containers.go @@ -23,12 +23,10 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" - admissionapi "k8s.io/pod-security-admission/api" ) var _ = SIGDescribe("Docker Containers", func() { f := framework.NewDefaultFramework("containers") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline /* Release: v1.9 diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/node/downwardapi.go b/vendor/k8s.io/kubernetes/test/e2e/common/node/downwardapi.go index 7d0801e4ad3a..eea5004222d9 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/node/downwardapi.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/node/downwardapi.go @@ -28,14 +28,12 @@ import ( e2enetwork "k8s.io/kubernetes/test/e2e/framework/network" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) var _ = SIGDescribe("Downward API", func() { f := framework.NewDefaultFramework("downward-api") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged /* Release: v1.9 @@ -289,7 +287,6 @@ var _ = SIGDescribe("Downward API", func() { var _ = SIGDescribe("Downward API [Serial] [Disruptive] [NodeFeature:DownwardAPIHugePages]", func() { f := framework.NewDefaultFramework("downward-api") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.Context("Downward API tests for hugepages", func() { ginkgo.BeforeEach(func() { diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/node/ephemeral_containers.go b/vendor/k8s.io/kubernetes/test/e2e/common/node/ephemeral_containers.go index 295d816a3791..601577757c9f 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/node/ephemeral_containers.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/node/ephemeral_containers.go @@ -27,7 +27,6 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -35,7 +34,6 @@ import ( var _ = SIGDescribe("Ephemeral Containers [NodeFeature:EphemeralContainers]", func() { f := framework.NewDefaultFramework("ephemeral-containers-test") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline var podClient *framework.PodClient ginkgo.BeforeEach(func() { podClient = f.PodClient() diff --git 
a/vendor/k8s.io/kubernetes/test/e2e/common/node/expansion.go b/vendor/k8s.io/kubernetes/test/e2e/common/node/expansion.go index afdf240a126e..05ed90e6c186 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/node/expansion.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/node/expansion.go @@ -23,7 +23,6 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) @@ -33,7 +32,6 @@ import ( // https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/expansion.md var _ = SIGDescribe("Variable Expansion", func() { f := framework.NewDefaultFramework("var-expansion") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline /* Release: v1.9 diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/node/init_container.go b/vendor/k8s.io/kubernetes/test/e2e/common/node/init_container.go index 498f21b7f355..7164cbffd7c5 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/node/init_container.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/node/init_container.go @@ -40,7 +40,6 @@ import ( "k8s.io/kubernetes/pkg/client/conditions" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" ) func recordEvents(events []watch.Event, f func(watch.Event) (bool, error)) func(watch.Event) (bool, error) { @@ -159,7 +158,6 @@ func initContainersInvariants(pod *v1.Pod) error { var _ = SIGDescribe("InitContainer [NodeConformance]", func() { f := framework.NewDefaultFramework("init-container") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline var podClient *framework.PodClient ginkgo.BeforeEach(func() { podClient = f.PodClient() diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/node/kubelet.go b/vendor/k8s.io/kubernetes/test/e2e/common/node/kubelet.go index c3a16ea67c01..8e5d722ec6e0 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/node/kubelet.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/node/kubelet.go @@ -27,7 +27,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -35,7 +34,6 @@ import ( var _ = SIGDescribe("Kubelet", func() { f := framework.NewDefaultFramework("kubelet-test") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline var podClient *framework.PodClient ginkgo.BeforeEach(func() { podClient = f.PodClient() diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/node/kubelet_etc_hosts.go b/vendor/k8s.io/kubernetes/test/e2e/common/node/kubelet_etc_hosts.go index 4b6002852b8f..1e0c860d1739 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/node/kubelet_etc_hosts.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/node/kubelet_etc_hosts.go @@ -26,7 +26,6 @@ import ( "k8s.io/klog/v2" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" - admissionapi "k8s.io/pod-security-admission/api" ) const ( @@ -46,7 +45,6 @@ type KubeletManagedHostConfig struct { var _ = SIGDescribe("KubeletManagedEtcHosts", func() { f := framework.NewDefaultFramework("e2e-kubelet-etc-hosts") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged config := &KubeletManagedHostConfig{ f: f, } diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/node/lifecycle_hook.go 
b/vendor/k8s.io/kubernetes/test/e2e/common/node/lifecycle_hook.go index 5704b22fe25d..587fb85d4121 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/node/lifecycle_hook.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/node/lifecycle_hook.go @@ -28,7 +28,6 @@ import ( e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -36,7 +35,6 @@ import ( var _ = SIGDescribe("Container Lifecycle Hook", func() { f := framework.NewDefaultFramework("container-lifecycle-hook") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline var podClient *framework.PodClient const ( podCheckInterval = 1 * time.Second diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/node/pods.go b/vendor/k8s.io/kubernetes/test/e2e/common/node/pods.go index 3d2b97935abc..ff97f9ebb062 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/node/pods.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/node/pods.go @@ -51,7 +51,6 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2ewebsocket "k8s.io/kubernetes/test/e2e/framework/websocket" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -184,7 +183,6 @@ func expectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interf var _ = SIGDescribe("Pods", func() { f := framework.NewDefaultFramework("pods") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline var podClient *framework.PodClient var dc dynamic.Interface diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/node/privileged.go b/vendor/k8s.io/kubernetes/test/e2e/common/node/privileged.go index d9b8c8ee8f1a..6f63b2022619 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/node/privileged.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/node/privileged.go @@ -24,7 +24,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" ) // PrivilegedPodTestConfig is configuration struct for privileged pod test @@ -40,10 +39,8 @@ type PrivilegedPodTestConfig struct { } var _ = SIGDescribe("PrivilegedPod [NodeConformance]", func() { - f := framework.NewDefaultFramework("e2e-privileged-pod") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged config := &PrivilegedPodTestConfig{ - f: f, + f: framework.NewDefaultFramework("e2e-privileged-pod"), privilegedPod: "privileged-pod", privilegedContainer: "privileged-container", notPrivilegedContainer: "not-privileged-container", diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/node/runtime.go b/vendor/k8s.io/kubernetes/test/e2e/common/node/runtime.go index e0671382cb79..4f3a6e8e810f 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/node/runtime.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/node/runtime.go @@ -29,7 +29,6 @@ import ( "k8s.io/kubernetes/pkg/kubelet/images" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -38,7 +37,6 @@ import ( var _ = SIGDescribe("Container Runtime", func() { f := framework.NewDefaultFramework("container-runtime") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.Describe("blackbox test", 
func() { ginkgo.Context("when starting a container that exits", func() { diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/node/runtimeclass.go b/vendor/k8s.io/kubernetes/test/e2e/common/node/runtimeclass.go index ca6174d83381..083f210a5223 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/node/runtimeclass.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/node/runtimeclass.go @@ -36,14 +36,12 @@ import ( e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) var _ = SIGDescribe("RuntimeClass", func() { f := framework.NewDefaultFramework("runtimeclass") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.It("should reject a Pod requesting a non-existent RuntimeClass [NodeFeature:RuntimeHandler]", func() { rcName := f.Namespace.Name + "-nonexistent" diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/node/secrets.go b/vendor/k8s.io/kubernetes/test/e2e/common/node/secrets.go index 87f403b01882..bcf60799b518 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/node/secrets.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/node/secrets.go @@ -30,12 +30,10 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" ) var _ = SIGDescribe("Secrets", func() { f := framework.NewDefaultFramework("secrets") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline /* Release: v1.9 diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/node/security_context.go b/vendor/k8s.io/kubernetes/test/e2e/common/node/security_context.go index 1f5255179515..a87378fcfa9f 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/node/security_context.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/node/security_context.go @@ -29,7 +29,6 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "k8s.io/utils/pointer" "github.com/onsi/ginkgo" @@ -43,7 +42,6 @@ var ( var _ = SIGDescribe("Security Context", func() { f := framework.NewDefaultFramework("security-context-test") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var podClient *framework.PodClient ginkgo.BeforeEach(func() { podClient = f.PodClient() diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/node/sysctl.go b/vendor/k8s.io/kubernetes/test/e2e/common/node/sysctl.go index 97cd2e27a19e..dc86a38a1cd6 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/node/sysctl.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/node/sysctl.go @@ -26,7 +26,6 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -40,7 +39,6 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() { }) f := framework.NewDefaultFramework("sysctl") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var podClient *framework.PodClient testPod := func() *v1.Pod { diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/storage/configmap_volume.go b/vendor/k8s.io/kubernetes/test/e2e/common/storage/configmap_volume.go index 
8e4d278ad934..999987551f2f 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/storage/configmap_volume.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/storage/configmap_volume.go @@ -31,12 +31,10 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" ) var _ = SIGDescribe("ConfigMap", func() { f := framework.NewDefaultFramework("configmap") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline /* Release: v1.9 diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/storage/downwardapi_volume.go b/vendor/k8s.io/kubernetes/test/e2e/common/storage/downwardapi_volume.go index 82cbb5e67169..bb442fcf613c 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/storage/downwardapi_volume.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/storage/downwardapi_volume.go @@ -28,7 +28,6 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -38,7 +37,6 @@ var _ = SIGDescribe("Downward API volume", func() { // How long to wait for a log pod to be displayed const podLogTimeout = 3 * time.Minute f := framework.NewDefaultFramework("downward-api") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline var podClient *framework.PodClient ginkgo.BeforeEach(func() { podClient = f.PodClient() diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/storage/empty_dir.go b/vendor/k8s.io/kubernetes/test/e2e/common/storage/empty_dir.go index e0cdcea2406d..06f7b1b3a177 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/storage/empty_dir.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/storage/empty_dir.go @@ -31,7 +31,6 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" ) const ( @@ -44,7 +43,6 @@ var ( var _ = SIGDescribe("EmptyDir volumes", func() { f := framework.NewDefaultFramework("emptydir") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.Context("when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup]", func() { diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/storage/host_path.go b/vendor/k8s.io/kubernetes/test/e2e/common/storage/host_path.go index f56ff5f0485c..11e2f194e3eb 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/storage/host_path.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/storage/host_path.go @@ -25,7 +25,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) @@ -34,7 +33,6 @@ import ( //This will require some smart. 
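Most of the hunks above and below make the same two-line change: the suite keeps its framework.NewDefaultFramework(...) call but no longer imports k8s.io/pod-security-admission/api or pins an enforcement level for its test namespace. For context, the PodSecurity admission plugin reads per-namespace labels; the sketch below (not part of this diff, the podSecurityLabels helper is illustrative) shows the label keys and levels involved, using constants from the package these imports referred to.

```go
package main

import (
	"fmt"

	admissionapi "k8s.io/pod-security-admission/api"
)

// podSecurityLabels builds the namespace labels evaluated by the PodSecurity
// admission plugin for a given level ("privileged", "baseline", "restricted").
// Enforce rejects violating pods; audit and warn are shown only for
// illustration and are not set by the e2e framework code in this diff.
func podSecurityLabels(level admissionapi.Level) map[string]string {
	return map[string]string{
		admissionapi.EnforceLevelLabel: string(level), // pod-security.kubernetes.io/enforce
		admissionapi.AuditLevelLabel:   string(level), // pod-security.kubernetes.io/audit
		admissionapi.WarnLevelLabel:    string(level), // pod-security.kubernetes.io/warn
	}
}

func main() {
	fmt.Println(podSecurityLabels(admissionapi.LevelBaseline))
}
```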
var _ = SIGDescribe("HostPath", func() { f := framework.NewDefaultFramework("hostpath") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.BeforeEach(func() { // TODO permission denied cleanup failures diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/storage/projected_combined.go b/vendor/k8s.io/kubernetes/test/e2e/common/storage/projected_combined.go index 4f88b6830096..6d4969382fe1 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/storage/projected_combined.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/storage/projected_combined.go @@ -25,14 +25,12 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) var _ = SIGDescribe("Projected combined", func() { f := framework.NewDefaultFramework("projected") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline // Test multiple projections /* diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/storage/projected_configmap.go b/vendor/k8s.io/kubernetes/test/e2e/common/storage/projected_configmap.go index 4c006f844257..37f51279da7a 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/storage/projected_configmap.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/storage/projected_configmap.go @@ -28,7 +28,6 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -36,7 +35,6 @@ import ( var _ = SIGDescribe("Projected configMap", func() { f := framework.NewDefaultFramework("projected") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline /* Release: v1.9 diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/storage/projected_downwardapi.go b/vendor/k8s.io/kubernetes/test/e2e/common/storage/projected_downwardapi.go index 3f3c2afd1c10..3e89d9902770 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/storage/projected_downwardapi.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/storage/projected_downwardapi.go @@ -27,7 +27,6 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -35,7 +34,6 @@ import ( var _ = SIGDescribe("Projected downwardAPI", func() { f := framework.NewDefaultFramework("projected") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline // How long to wait for a log pod to be displayed const podLogTimeout = 2 * time.Minute diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/storage/projected_secret.go b/vendor/k8s.io/kubernetes/test/e2e/common/storage/projected_secret.go index 43557b6fd1d7..af23f516b7e8 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/storage/projected_secret.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/storage/projected_secret.go @@ -27,7 +27,6 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -35,7 +34,6 @@ import ( var _ = SIGDescribe("Projected secret", func() { f := framework.NewDefaultFramework("projected") - f.NamespacePodSecurityEnforceLevel = 
admissionapi.LevelBaseline /* Release: v1.9 diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/storage/secrets_volume.go b/vendor/k8s.io/kubernetes/test/e2e/common/storage/secrets_volume.go index bdd44c0bf242..d138f54b18e0 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/storage/secrets_volume.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/storage/secrets_volume.go @@ -28,7 +28,6 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -36,7 +35,6 @@ import ( var _ = SIGDescribe("Secrets", func() { f := framework.NewDefaultFramework("secrets") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline /* Release: v1.9 diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/storage/volumes.go b/vendor/k8s.io/kubernetes/test/e2e/common/storage/volumes.go index 9b151fcfb9b1..8b06118d73f4 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/storage/volumes.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/storage/volumes.go @@ -52,7 +52,6 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) @@ -60,7 +59,6 @@ import ( // TODO(#99468): Check if these tests are still needed. var _ = SIGDescribe("Volumes", func() { f := framework.NewDefaultFramework("volume") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged // note that namespace deletion is handled by delete-namespace flag // filled in BeforeEach diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go b/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go index 3d94657f94dc..3fc95bb212bb 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go @@ -47,7 +47,6 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/restmapper" scaleclient "k8s.io/client-go/scale" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -79,12 +78,11 @@ type Framework struct { ScalesGetter scaleclient.ScalesGetter - SkipNamespaceCreation bool // Whether to skip creating a namespace - Namespace *v1.Namespace // Every test has at least one namespace unless creation is skipped - namespacesToDelete []*v1.Namespace // Some tests have more than one. - NamespaceDeletionTimeout time.Duration - SkipPrivilegedPSPBinding bool // Whether to skip creating a binding to the privileged PSP in the test namespace - NamespacePodSecurityEnforceLevel admissionapi.Level // The pod security enforcement level for namespaces to be applied. + SkipNamespaceCreation bool // Whether to skip creating a namespace + Namespace *v1.Namespace // Every test has at least one namespace unless creation is skipped + namespacesToDelete []*v1.Namespace // Some tests have more than one. 
+ NamespaceDeletionTimeout time.Duration + SkipPrivilegedPSPBinding bool // Whether to skip creating a binding to the privileged PSP in the test namespace gatherer *ContainerResourceGatherer // Constraints that passed to a check which is executed after data is gathered to @@ -520,23 +518,6 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) ( if createTestingNS == nil { createTestingNS = CreateTestingNS } - - if labels == nil { - labels = make(map[string]string) - } else { - labelsCopy := make(map[string]string) - for k, v := range labels { - labelsCopy[k] = v - } - labels = labelsCopy - } - - enforceLevel := admissionapi.LevelRestricted - if f.NamespacePodSecurityEnforceLevel != "" { - enforceLevel = f.NamespacePodSecurityEnforceLevel - } - labels[admissionapi.EnforceLevelLabel] = string(enforceLevel) - ns, err := createTestingNS(baseName, f.ClientSet, labels) // check ns instead of err to see if it's nil as we may // fail to create serviceAccount in it. diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/pod/utils.go b/vendor/k8s.io/kubernetes/test/e2e/framework/pod/utils.go index af1a59034878..fb034e4b316e 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/pod/utils.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/pod/utils.go @@ -21,7 +21,6 @@ import ( v1 "k8s.io/api/core/v1" imageutils "k8s.io/kubernetes/test/utils/image" - "k8s.io/utils/pointer" ) // NodeOSDistroIs returns true if the distro is the same as `--node-os-distro` @@ -114,19 +113,3 @@ func GetLinuxLabel() *v1.SELinuxOptions { return &v1.SELinuxOptions{ Level: "s0:c0,c1"} } - -// GetRestrictedPodSecurityContext returns a minimal restricted pod security context. -func GetRestrictedPodSecurityContext() *v1.PodSecurityContext { - return &v1.PodSecurityContext{ - RunAsNonRoot: pointer.BoolPtr(true), - SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault}, - } -} - -// GetRestrictedContainerSecurityContext returns a minimal restricted container security context. -func GetRestrictedContainerSecurityContext() *v1.SecurityContext { - return &v1.SecurityContext{ - AllowPrivilegeEscalation: pointer.BoolPtr(false), - Capabilities: &v1.Capabilities{Drop: []v1.Capability{"ALL"}}, - } -} diff --git a/vendor/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go b/vendor/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go index efd74f2ad617..a7d15c710b4c 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go +++ b/vendor/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go @@ -75,7 +75,6 @@ import ( testutils "k8s.io/kubernetes/test/utils" "k8s.io/kubernetes/test/utils/crd" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" uexec "k8s.io/utils/exec" "k8s.io/utils/pointer" @@ -227,7 +226,6 @@ func runKubectlRetryOrDie(ns string, args ...string) string { var _ = SIGDescribe("Kubectl client", func() { defer ginkgo.GinkgoRecover() f := framework.NewDefaultFramework("kubectl") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline // Reusable cluster state function. This won't be adversely affected by lazy initialization of framework. 
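The framework.go hunk above removes both the NamespacePodSecurityEnforceLevel field and the block in CreateNamespace that copied the caller's labels and stamped the pod-security enforce label onto every test namespace, defaulting to restricted. A minimal standalone sketch of that removed logic; the helper name withEnforceLevel is chosen here for illustration and does not appear in the diff.

```go
package main

import (
	"fmt"

	admissionapi "k8s.io/pod-security-admission/api"
)

// withEnforceLevel returns a copy of labels with the pod-security enforce
// label added, falling back to "restricted" when the suite did not choose a
// level. This mirrors the block deleted from Framework.CreateNamespace.
func withEnforceLevel(labels map[string]string, level admissionapi.Level) map[string]string {
	out := make(map[string]string, len(labels)+1)
	for k, v := range labels { // copy so the caller's map is never mutated
		out[k] = v
	}
	if level == "" {
		level = admissionapi.LevelRestricted
	}
	out[admissionapi.EnforceLevelLabel] = string(level)
	return out
}

func main() {
	fmt.Println(withEnforceLevel(map[string]string{"e2e-run": "example"}, admissionapi.LevelPrivileged))
}
```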
clusterState := func() *framework.ClusterVerification { diff --git a/vendor/k8s.io/kubernetes/test/e2e/kubectl/portforward.go b/vendor/k8s.io/kubernetes/test/e2e/kubectl/portforward.go index 108501b1b437..486ba97a2663 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/kubectl/portforward.go +++ b/vendor/k8s.io/kubernetes/test/e2e/kubectl/portforward.go @@ -42,7 +42,6 @@ import ( e2ewebsocket "k8s.io/kubernetes/test/e2e/framework/websocket" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -449,7 +448,6 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) { var _ = SIGDescribe("Kubectl Port forwarding", func() { f := framework.NewDefaultFramework("port-forwarding") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.Describe("With a server listening on 0.0.0.0", func() { ginkgo.Describe("that expects a client request", func() { diff --git a/vendor/k8s.io/kubernetes/test/e2e/network/conntrack.go b/vendor/k8s.io/kubernetes/test/e2e/network/conntrack.go index 2abaffcc23a6..ee45c50a5e63 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/network/conntrack.go +++ b/vendor/k8s.io/kubernetes/test/e2e/network/conntrack.go @@ -35,7 +35,6 @@ import ( e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/network/common" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" ) const ( @@ -68,7 +67,6 @@ const ( var _ = common.SIGDescribe("Conntrack", func() { fr := framework.NewDefaultFramework("conntrack") - fr.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged type nodeInfo struct { name string diff --git a/vendor/k8s.io/kubernetes/test/e2e/network/dns.go b/vendor/k8s.io/kubernetes/test/e2e/network/dns.go index c66c5e9cb34b..1dc3bb75950b 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/network/dns.go +++ b/vendor/k8s.io/kubernetes/test/e2e/network/dns.go @@ -30,7 +30,6 @@ import ( e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/network/common" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) @@ -40,7 +39,6 @@ const dnsTestServiceName = "dns-test-service" var _ = common.SIGDescribe("DNS", func() { f := framework.NewDefaultFramework("dns") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline /* Release: v1.9 diff --git a/vendor/k8s.io/kubernetes/test/e2e/network/endpointslice.go b/vendor/k8s.io/kubernetes/test/e2e/network/endpointslice.go index 6af20d273189..1caf3a95d953 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/network/endpointslice.go +++ b/vendor/k8s.io/kubernetes/test/e2e/network/endpointslice.go @@ -36,14 +36,12 @@ import ( "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/network/common" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) var _ = common.SIGDescribe("EndpointSlice", func() { f := framework.NewDefaultFramework("endpointslice") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline var cs clientset.Interface var podClient *framework.PodClient diff --git a/vendor/k8s.io/kubernetes/test/e2e/network/hostport.go b/vendor/k8s.io/kubernetes/test/e2e/network/hostport.go index dec0d3599494..11fd3f7a6725 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/network/hostport.go +++ 
b/vendor/k8s.io/kubernetes/test/e2e/network/hostport.go @@ -33,13 +33,11 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/network/common" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" ) var _ = common.SIGDescribe("HostPort", func() { f := framework.NewDefaultFramework("hostport") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var ( cs clientset.Interface diff --git a/vendor/k8s.io/kubernetes/test/e2e/network/ingress.go b/vendor/k8s.io/kubernetes/test/e2e/network/ingress.go index 0293a0b74655..281df81626ab 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/network/ingress.go +++ b/vendor/k8s.io/kubernetes/test/e2e/network/ingress.go @@ -42,7 +42,6 @@ import ( e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/network/common" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) @@ -59,7 +58,6 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { conformanceTests []e2eingress.ConformanceTests ) f := framework.NewDefaultFramework("ingress") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.BeforeEach(func() { jig = e2eingress.NewIngressTestJig(f.ClientSet) diff --git a/vendor/k8s.io/kubernetes/test/e2e/network/kube_proxy.go b/vendor/k8s.io/kubernetes/test/e2e/network/kube_proxy.go index 3364cff00332..82357de0732c 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/network/kube_proxy.go +++ b/vendor/k8s.io/kubernetes/test/e2e/network/kube_proxy.go @@ -34,7 +34,6 @@ import ( e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/network/common" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" netutils "k8s.io/utils/net" "github.com/onsi/ginkgo" @@ -49,7 +48,6 @@ var _ = common.SIGDescribe("KubeProxy", func() { ) fr := framework.NewDefaultFramework("kube-proxy") - fr.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.It("should set TCP CLOSE_WAIT timeout [Privileged]", func() { nodes, err := e2enode.GetBoundedReadySchedulableNodes(fr.ClientSet, 2) diff --git a/vendor/k8s.io/kubernetes/test/e2e/network/loadbalancer.go b/vendor/k8s.io/kubernetes/test/e2e/network/loadbalancer.go index 7f3cad739b90..7fca34602502 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/network/loadbalancer.go +++ b/vendor/k8s.io/kubernetes/test/e2e/network/loadbalancer.go @@ -44,7 +44,6 @@ import ( e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/network/common" gcecloud "k8s.io/legacy-cloud-providers/gce" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -52,7 +51,6 @@ import ( var _ = common.SIGDescribe("LoadBalancers", func() { f := framework.NewDefaultFramework("loadbalancers") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var cs clientset.Interface serviceLBNames := []string{} @@ -852,7 +850,6 @@ var _ = common.SIGDescribe("LoadBalancers", func() { var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { f := framework.NewDefaultFramework("esipp") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline var loadBalancerCreateTimeout time.Duration var cs clientset.Interface diff --git a/vendor/k8s.io/kubernetes/test/e2e/network/netpol/kubemanager.go b/vendor/k8s.io/kubernetes/test/e2e/network/netpol/kubemanager.go index 00e72e196a03..ddb0ee3e3454 
100644 --- a/vendor/k8s.io/kubernetes/test/e2e/network/netpol/kubemanager.go +++ b/vendor/k8s.io/kubernetes/test/e2e/network/netpol/kubemanager.go @@ -31,7 +31,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" - admissionapi "k8s.io/pod-security-admission/api" ) // kubeManager provides a convenience interface to kube functionality that we leverage for polling NetworkPolicy connections. @@ -165,7 +164,6 @@ func (k *kubeManager) executeRemoteCommand(namespace string, pod string, contain // createNamespace is a convenience function for namespace setup. func (k *kubeManager) createNamespace(ns *v1.Namespace) (*v1.Namespace, error) { - enforcePodSecurityBaseline(ns) createdNamespace, err := k.clientSet.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("unable to update namespace %s: %w", ns.Name, err) @@ -254,7 +252,6 @@ func (k *kubeManager) setNamespaceLabels(ns string, labels map[string]string) er return err } selectedNameSpace.ObjectMeta.Labels = labels - enforcePodSecurityBaseline(selectedNameSpace) _, err = k.clientSet.CoreV1().Namespaces().Update(context.TODO(), selectedNameSpace, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("unable to update namespace %s: %w", ns, err) @@ -272,11 +269,3 @@ func (k *kubeManager) deleteNamespaces(namespaces []string) error { } return nil } - -func enforcePodSecurityBaseline(ns *v1.Namespace) { - if len(ns.ObjectMeta.Labels) == 0 { - ns.ObjectMeta.Labels = make(map[string]string) - } - // TODO(https://github.com/kubernetes/kubernetes/issues/108298): route namespace creation via framework.Framework.CreateNamespace - ns.ObjectMeta.Labels[admissionapi.EnforceLevelLabel] = string(admissionapi.LevelBaseline) -} diff --git a/vendor/k8s.io/kubernetes/test/e2e/network/netpol/model.go b/vendor/k8s.io/kubernetes/test/e2e/network/netpol/model.go index f209ea54899f..51cf16f225a7 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/network/netpol/model.go +++ b/vendor/k8s.io/kubernetes/test/e2e/network/netpol/model.go @@ -171,9 +171,7 @@ func (ns *Namespace) Spec() *v1.Namespace { // LabelSelector returns the default labels that should be placed on a namespace // in order for it to be uniquely selectable by label selectors func (ns *Namespace) LabelSelector() map[string]string { - return map[string]string{ - "ns": ns.Name, - } + return map[string]string{"ns": ns.Name} } // Pod is the abstract representation of what matters to network policy tests for diff --git a/vendor/k8s.io/kubernetes/test/e2e/network/netpol/network_legacy.go b/vendor/k8s.io/kubernetes/test/e2e/network/netpol/network_legacy.go index d3c6d5b68aed..e150aed795ac 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/network/netpol/network_legacy.go +++ b/vendor/k8s.io/kubernetes/test/e2e/network/netpol/network_legacy.go @@ -43,7 +43,6 @@ import ( e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/network/common" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" netutils "k8s.io/utils/net" ) @@ -66,7 +65,6 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { var podServer *v1.Pod var podServerLabelSelector string f := framework.NewDefaultFramework("network-policy") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.BeforeEach(func() { // Windows does not support network policies. 
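The kubemanager.go hunk above drops enforcePodSecurityBaseline, which the NetworkPolicy suite needed because it creates and relabels namespaces directly through client-go rather than through Framework.CreateNamespace. A sketch of that pattern, assuming a ready clientset; the createBaselineNamespace wrapper is illustrative and not part of the diff.

```go
package netpolsketch

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	admissionapi "k8s.io/pod-security-admission/api"
)

// createBaselineNamespace labels the namespace for "baseline" enforcement
// before creating it, mirroring the helper removed above for namespaces
// that bypass the e2e framework's namespace factory.
func createBaselineNamespace(ctx context.Context, cs clientset.Interface, ns *v1.Namespace) (*v1.Namespace, error) {
	if ns.ObjectMeta.Labels == nil {
		ns.ObjectMeta.Labels = map[string]string{}
	}
	ns.ObjectMeta.Labels[admissionapi.EnforceLevelLabel] = string(admissionapi.LevelBaseline)
	return cs.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
}
```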
diff --git a/vendor/k8s.io/kubernetes/test/e2e/network/networking.go b/vendor/k8s.io/kubernetes/test/e2e/network/networking.go index 180a7f6b232b..b5c0728a5aea 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/network/networking.go +++ b/vendor/k8s.io/kubernetes/test/e2e/network/networking.go @@ -33,7 +33,6 @@ import ( e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" "k8s.io/kubernetes/test/e2e/network/common" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) @@ -80,7 +79,6 @@ func checkConnectivityToHost(f *framework.Framework, nodeName, podName, host str var _ = common.SIGDescribe("Networking", func() { var svcname = "nettest" f := framework.NewDefaultFramework(svcname) - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.It("should provide Internet connection for containers [Feature:Networking-IPv4]", func() { ginkgo.By("Running container which tries to connect to 8.8.8.8") diff --git a/vendor/k8s.io/kubernetes/test/e2e/network/networking_perf.go b/vendor/k8s.io/kubernetes/test/e2e/network/networking_perf.go index 7db62b152e79..136888bd83ba 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/network/networking_perf.go +++ b/vendor/k8s.io/kubernetes/test/e2e/network/networking_perf.go @@ -35,7 +35,6 @@ import ( e2enode "k8s.io/kubernetes/test/e2e/framework/node" "k8s.io/kubernetes/test/e2e/network/common" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" ) const ( @@ -138,7 +137,6 @@ func iperf2ClientDaemonSet(client clientset.Interface, namespace string) (*appsv var _ = common.SIGDescribe("Networking IPerf2 [Feature:Networking-Performance]", func() { // this test runs iperf2: one pod as a server, and a daemonset of clients f := framework.NewDefaultFramework("network-perf") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.It(fmt.Sprintf("should run iperf2"), func() { readySchedulableNodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) diff --git a/vendor/k8s.io/kubernetes/test/e2e/network/proxy.go b/vendor/k8s.io/kubernetes/test/e2e/network/proxy.go index b80197ada5f6..540967a31df2 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/network/proxy.go +++ b/vendor/k8s.io/kubernetes/test/e2e/network/proxy.go @@ -43,7 +43,6 @@ import ( "k8s.io/kubernetes/test/e2e/network/common" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -77,7 +76,6 @@ var _ = common.SIGDescribe("Proxy", func() { ClientQPS: -1.0, } f := framework.NewFramework("proxy", options, nil) - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline prefix := "/api/" + version /* diff --git a/vendor/k8s.io/kubernetes/test/e2e/network/service.go b/vendor/k8s.io/kubernetes/test/e2e/network/service.go index c99b2867e5bf..440edc2c59d5 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/network/service.go +++ b/vendor/k8s.io/kubernetes/test/e2e/network/service.go @@ -43,7 +43,6 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" watch "k8s.io/apimachinery/pkg/watch" - admissionapi "k8s.io/pod-security-admission/api" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" @@ -743,7 +742,6 @@ func getEndpointNodesWithInternalIP(jig *e2eservice.TestJig) (map[string]string, var _ = common.SIGDescribe("Services", func() { f := framework.NewDefaultFramework("services") - 
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var cs clientset.Interface serviceLBNames := []string{} @@ -3249,7 +3247,6 @@ func restartComponent(cs clientset.Interface, cName, ns string, matchLabels map[ var _ = common.SIGDescribe("SCTP [LinuxOnly]", func() { f := framework.NewDefaultFramework("sctp") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var cs clientset.Interface diff --git a/vendor/k8s.io/kubernetes/test/e2e/network/service_latency.go b/vendor/k8s.io/kubernetes/test/e2e/network/service_latency.go index 9fd68f5edc7b..f08394e10e41 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/network/service_latency.go +++ b/vendor/k8s.io/kubernetes/test/e2e/network/service_latency.go @@ -36,7 +36,6 @@ import ( "k8s.io/kubernetes/test/e2e/network/common" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) @@ -49,7 +48,6 @@ func (d durations) Swap(i, j int) { d[i], d[j] = d[j], d[i] } var _ = common.SIGDescribe("Service endpoints latency", func() { f := framework.NewDefaultFramework("svc-latency") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline /* Release: v1.9 diff --git a/vendor/k8s.io/kubernetes/test/e2e/node/apparmor.go b/vendor/k8s.io/kubernetes/test/e2e/node/apparmor.go index 0d01879e71f4..6239050fa6d4 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/node/apparmor.go +++ b/vendor/k8s.io/kubernetes/test/e2e/node/apparmor.go @@ -21,14 +21,12 @@ import ( e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2esecurity "k8s.io/kubernetes/test/e2e/framework/security" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) var _ = SIGDescribe("AppArmor", func() { f := framework.NewDefaultFramework("apparmor") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.Context("load AppArmor profiles", func() { ginkgo.BeforeEach(func() { diff --git a/vendor/k8s.io/kubernetes/test/e2e/node/crictl.go b/vendor/k8s.io/kubernetes/test/e2e/node/crictl.go index 059420c6b7d6..a841032e8246 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/node/crictl.go +++ b/vendor/k8s.io/kubernetes/test/e2e/node/crictl.go @@ -23,14 +23,12 @@ import ( e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/storage/utils" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) var _ = SIGDescribe("crictl", func() { f := framework.NewDefaultFramework("crictl") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.BeforeEach(func() { // `crictl` is not available on all cloud providers. 
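Aside from the import and label cleanups, the largest rewrite further down in this diff is in test/e2e/node/pods.go, where the scenario-based helper is inlined back into the "should never report success for a pending container" test: each worker creates a pod, watches it from the creation ResourceVersion, deletes it after a random delay, verifies the observed status transitions, and records per-node start latencies in a Prometheus summary. The self-contained sketch below shows just that measurement-and-report pattern; names and sample durations are illustrative.

```go
package main

import (
	"bytes"
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/expfmt"
)

func main() {
	// A per-node summary with the same quantile objectives used by the test.
	r := prometheus.NewRegistry()
	h := prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Name:       "start_latency",
		Objectives: map[float64]float64{0.5: 0.05, 0.75: 0.025, 0.9: 0.01, 0.99: 0.001},
	}, []string{"node"})
	r.MustRegister(h)

	// Observe a few fabricated pod start latencies, one observation per pod.
	for i, d := range []time.Duration{120 * time.Millisecond, 300 * time.Millisecond, 2 * time.Second} {
		h.WithLabelValues(fmt.Sprintf("node-%d", i%2)).Observe(d.Seconds())
	}

	// Gather and render in the text exposition format, as the test does when
	// logging its "Summary of latencies".
	families, _ := r.Gather()
	var buf bytes.Buffer
	for _, mf := range families {
		expfmt.MetricFamilyToText(&buf, mf)
	}
	fmt.Print(buf.String())
}
```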
diff --git a/vendor/k8s.io/kubernetes/test/e2e/node/events.go b/vendor/k8s.io/kubernetes/test/e2e/node/events.go index aa1b2f39e2a9..86aa54d064bd 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/node/events.go +++ b/vendor/k8s.io/kubernetes/test/e2e/node/events.go @@ -29,14 +29,12 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) var _ = SIGDescribe("Events", func() { f := framework.NewDefaultFramework("events") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.It("should be sent by kubelets and the scheduler about pods scheduling and running ", func() { diff --git a/vendor/k8s.io/kubernetes/test/e2e/node/examples.go b/vendor/k8s.io/kubernetes/test/e2e/node/examples.go index 1c2fcd6ecc2b..ae948a399a26 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/node/examples.go +++ b/vendor/k8s.io/kubernetes/test/e2e/node/examples.go @@ -34,7 +34,6 @@ import ( e2eauth "k8s.io/kubernetes/test/e2e/framework/auth" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) @@ -45,7 +44,6 @@ const ( var _ = SIGDescribe("[Feature:Example]", func() { f := framework.NewDefaultFramework("examples") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline var c clientset.Interface var ns string diff --git a/vendor/k8s.io/kubernetes/test/e2e/node/kubelet.go b/vendor/k8s.io/kubernetes/test/e2e/node/kubelet.go index 0dd1959adc21..999bbfe64124 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/node/kubelet.go +++ b/vendor/k8s.io/kubernetes/test/e2e/node/kubelet.go @@ -39,7 +39,6 @@ import ( e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) @@ -271,7 +270,6 @@ var _ = SIGDescribe("kubelet", func() { ns string ) f := framework.NewDefaultFramework("kubelet") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.BeforeEach(func() { c = f.ClientSet diff --git a/vendor/k8s.io/kubernetes/test/e2e/node/mount_propagation.go b/vendor/k8s.io/kubernetes/test/e2e/node/mount_propagation.go index a4b2e1271648..b1fa8fc4e37e 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/node/mount_propagation.go +++ b/vendor/k8s.io/kubernetes/test/e2e/node/mount_propagation.go @@ -27,7 +27,6 @@ import ( e2enode "k8s.io/kubernetes/test/e2e/framework/node" "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) @@ -80,7 +79,6 @@ func preparePod(name string, node *v1.Node, propagation *v1.MountPropagationMode var _ = SIGDescribe("Mount propagation", func() { f := framework.NewDefaultFramework("mount-propagation") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.It("should propagate mounts within defined scopes", func() { // This test runs two pods: master and slave with respective mount diff --git a/vendor/k8s.io/kubernetes/test/e2e/node/pods.go b/vendor/k8s.io/kubernetes/test/e2e/node/pods.go index 9fdfa935eb30..a4cf9f3bf5ea 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/node/pods.go +++ b/vendor/k8s.io/kubernetes/test/e2e/node/pods.go @@ -37,13 +37,11 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" 
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" - v1core "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/test/e2e/framework" e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" "github.com/prometheus/client_golang/prometheus" @@ -52,7 +50,6 @@ import ( var _ = SIGDescribe("Pods Extended", func() { f := framework.NewDefaultFramework("pods") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.Describe("Delete Grace Period", func() { var podClient *framework.PodClient @@ -207,19 +204,274 @@ var _ = SIGDescribe("Pods Extended", func() { ginkgo.It("should never report success for a pending container", func() { ginkgo.By("creating pods that should always exit 1 and terminating the pod after a random delay") - createAndTestPodRepeatedly( - 3, 15, - podFastDeleteScenario{client: podClient.PodInterface, delayMs: 2000}, - podClient.PodInterface, - ) - }) - ginkgo.It("should never report container start when an init container fails", func() { - ginkgo.By("creating pods with an init container that always exit 1 and terminating the pod after a random delay") - createAndTestPodRepeatedly( - 3, 15, - podFastDeleteScenario{client: podClient.PodInterface, delayMs: 2000, initContainer: true}, - podClient.PodInterface, + + var reBug88766 = regexp.MustCompile(`rootfs_linux.*kubernetes\.io~(secret|projected).*no such file or directory`) + + var ( + lock sync.Mutex + errs []error + + wg sync.WaitGroup ) + + r := prometheus.NewRegistry() + h := prometheus.NewSummaryVec(prometheus.SummaryOpts{ + Name: "start_latency", + Objectives: map[float64]float64{ + 0.5: 0.05, + 0.75: 0.025, + 0.9: 0.01, + 0.99: 0.001, + }, + }, []string{"node"}) + r.MustRegister(h) + + const delay = 2000 + const workers = 3 + const pods = 15 + var min, max time.Duration + for i := 0; i < workers; i++ { + wg.Add(1) + go func(i int) { + defer ginkgo.GinkgoRecover() + defer wg.Done() + for retries := 0; retries < pods; retries++ { + name := fmt.Sprintf("pod-submit-status-%d-%d", i, retries) + value := strconv.Itoa(time.Now().Nanosecond()) + one := int64(1) + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + "name": "foo", + "time": value, + }, + }, + Spec: v1.PodSpec{ + RestartPolicy: v1.RestartPolicyNever, + TerminationGracePeriodSeconds: &one, + Containers: []v1.Container{ + { + Name: "busybox", + Image: imageutils.GetE2EImage(imageutils.BusyBox), + Command: []string{ + "/bin/false", + }, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("5m"), + v1.ResourceMemory: resource.MustParse("10Mi"), + }, + }, + }, + }, + }, + } + + // create the pod, capture the change events, then delete the pod + start := time.Now() + created := podClient.Create(pod) + ch := make(chan []watch.Event) + waitForWatch := make(chan struct{}) + go func() { + defer ginkgo.GinkgoRecover() + defer close(ch) + w, err := podClient.Watch(context.TODO(), metav1.ListOptions{ + ResourceVersion: created.ResourceVersion, + FieldSelector: fmt.Sprintf("metadata.name=%s", pod.Name), + }) + if err != nil { + framework.Logf("Unable to watch pod %s: %v", pod.Name, err) + return + } + defer w.Stop() + close(waitForWatch) + events := []watch.Event{ + {Type: watch.Added, Object: created}, + } + for event := range 
w.ResultChan() { + events = append(events, event) + if event.Type == watch.Error { + framework.Logf("watch error seen for %s: %#v", pod.Name, event.Object) + } + if event.Type == watch.Deleted { + framework.Logf("watch delete seen for %s", pod.Name) + break + } + } + ch <- events + }() + + select { + case <-ch: // in case the goroutine above exits before establishing the watch + case <-waitForWatch: // when the watch is established + } + t := time.Duration(rand.Intn(delay)) * time.Millisecond + time.Sleep(t) + err := podClient.Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) + framework.ExpectNoError(err, "failed to delete pod") + + var ( + events []watch.Event + ok bool + ) + select { + case events, ok = <-ch: + if !ok { + continue + } + if len(events) < 2 { + framework.Fail("only got a single event") + } + case <-time.After(5 * time.Minute): + framework.Failf("timed out waiting for watch events for %s", pod.Name) + } + + end := time.Now() + + // check the returned events for consistency + var duration, completeDuration time.Duration + var hasContainers, hasTerminated, hasTerminalPhase, hasRunningContainers bool + verifyFn := func(event watch.Event) error { + var ok bool + pod, ok = event.Object.(*v1.Pod) + if !ok { + framework.Logf("Unexpected event object: %s %#v", event.Type, event.Object) + return nil + } + + if len(pod.Status.InitContainerStatuses) != 0 { + return fmt.Errorf("pod %s on node %s had incorrect init containers: %#v", pod.Name, pod.Spec.NodeName, pod.Status.InitContainerStatuses) + } + if len(pod.Status.ContainerStatuses) == 0 { + if hasContainers { + return fmt.Errorf("pod %s on node %s had incorrect containers: %#v", pod.Name, pod.Spec.NodeName, pod.Status.ContainerStatuses) + } + return nil + } + hasContainers = true + if len(pod.Status.ContainerStatuses) != 1 { + return fmt.Errorf("pod %s on node %s had incorrect containers: %#v", pod.Name, pod.Spec.NodeName, pod.Status.ContainerStatuses) + } + status := pod.Status.ContainerStatuses[0] + t := status.State.Terminated + if hasTerminated { + if status.State.Waiting != nil || status.State.Running != nil { + return fmt.Errorf("pod %s on node %s was terminated and then changed state: %#v", pod.Name, pod.Spec.NodeName, status) + } + if t == nil { + return fmt.Errorf("pod %s on node %s was terminated and then had termination cleared: %#v", pod.Name, pod.Spec.NodeName, status) + } + } + var hasNoStartTime bool + hasRunningContainers = status.State.Waiting == nil && status.State.Terminated == nil + if t != nil { + if !t.FinishedAt.Time.IsZero() { + if t.StartedAt.IsZero() { + hasNoStartTime = true + } else { + duration = t.FinishedAt.Sub(t.StartedAt.Time) + } + completeDuration = t.FinishedAt.Sub(pod.CreationTimestamp.Time) + } + + defer func() { hasTerminated = true }() + switch { + case t.ExitCode == 1: + // expected + case t.ExitCode == 137 && (t.Reason == "ContainerStatusUnknown" || t.Reason == "Error"): + // expected, pod was force-killed after grace period + case t.ExitCode == 128 && (t.Reason == "StartError" || t.Reason == "ContainerCannotRun") && reBug88766.MatchString(t.Message): + // pod volume teardown races with container start in CRI, which reports a failure + framework.Logf("pod %s on node %s failed with the symptoms of https://github.com/kubernetes/kubernetes/issues/88766", pod.Name, pod.Spec.NodeName) + default: + data, _ := json.MarshalIndent(pod.Status, "", " ") + framework.Logf("pod %s on node %s had incorrect final status:\n%s", pod.Name, pod.Spec.NodeName, string(data)) + return fmt.Errorf("pod %s 
on node %s container unexpected exit code %d: start=%s end=%s reason=%s message=%s", pod.Name, pod.Spec.NodeName, t.ExitCode, t.StartedAt, t.FinishedAt, t.Reason, t.Message) + } + switch { + case duration > time.Hour: + // problem with status reporting + return fmt.Errorf("pod %s container %s on node %s had very long duration %s: start=%s end=%s", pod.Name, status.Name, pod.Spec.NodeName, duration, t.StartedAt, t.FinishedAt) + case hasNoStartTime: + // should never happen + return fmt.Errorf("pod %s container %s on node %s had finish time but not start time: end=%s", pod.Name, status.Name, pod.Spec.NodeName, t.FinishedAt) + } + } + if pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded { + hasTerminalPhase = true + } else { + if hasTerminalPhase { + return fmt.Errorf("pod %s on node %s was in a terminal phase and then reverted: %#v", pod.Name, pod.Spec.NodeName, pod.Status) + } + } + return nil + } + + var eventErr error + for _, event := range events[1:] { + if err := verifyFn(event); err != nil { + eventErr = err + break + } + } + func() { + lock.Lock() + defer lock.Unlock() + + if eventErr != nil { + errs = append(errs, eventErr) + return + } + + if !hasTerminalPhase { + var names []string + for _, status := range pod.Status.ContainerStatuses { + if status.State.Running != nil { + names = append(names, status.Name) + } + } + switch { + case len(names) > 0: + errs = append(errs, fmt.Errorf("pod %s on node %s did not reach a terminal phase before being deleted but had running containers: phase=%s, running-containers=%s", pod.Name, pod.Spec.NodeName, pod.Status.Phase, strings.Join(names, ","))) + case pod.Status.Phase != v1.PodPending: + errs = append(errs, fmt.Errorf("pod %s on node %s was not Pending but has no running containers: phase=%s", pod.Name, pod.Spec.NodeName, pod.Status.Phase)) + } + } + if hasRunningContainers { + data, _ := json.MarshalIndent(pod.Status.ContainerStatuses, "", " ") + errs = append(errs, fmt.Errorf("pod %s on node %s had running or unknown container status before being deleted:\n%s", pod.Name, pod.Spec.NodeName, string(data))) + } + }() + + if duration < min { + min = duration + } + if duration > max || max == 0 { + max = duration + } + h.WithLabelValues(pod.Spec.NodeName).Observe(end.Sub(start).Seconds()) + framework.Logf("Pod %s on node %s timings total=%s t=%s run=%s execute=%s", pod.Name, pod.Spec.NodeName, end.Sub(start), t, completeDuration, duration) + } + + }(i) + } + + wg.Wait() + + if len(errs) > 0 { + var messages []string + for _, err := range errs { + messages = append(messages, err.Error()) + } + framework.Failf("%d errors:\n%v", len(errs), strings.Join(messages, "\n")) + } + values, _ := r.Gather() + var buf bytes.Buffer + for _, m := range values { + expfmt.MetricFamilyToText(&buf, m) + } + framework.Logf("Summary of latencies:\n%s", buf.String()) }) }) @@ -300,422 +552,3 @@ var _ = SIGDescribe("Pods Extended", func() { }) }) }) - -func createAndTestPodRepeatedly(workers, iterations int, scenario podScenario, podClient v1core.PodInterface) { - var ( - lock sync.Mutex - errs []error - - wg sync.WaitGroup - ) - - r := prometheus.NewRegistry() - h := prometheus.NewSummaryVec(prometheus.SummaryOpts{ - Name: "latency", - Objectives: map[float64]float64{ - 0.5: 0.05, - 0.75: 0.025, - 0.9: 0.01, - 0.99: 0.001, - }, - }, []string{"node"}) - r.MustRegister(h) - - for i := 0; i < workers; i++ { - wg.Add(1) - go func(i int) { - defer ginkgo.GinkgoRecover() - defer wg.Done() - for retries := 0; retries < iterations; retries++ { - pod := 
scenario.Pod(i, retries) - - // create the pod, capture the change events, then delete the pod - start := time.Now() - created, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}) - framework.ExpectNoError(err, "failed to create pod") - - ch := make(chan []watch.Event) - waitForWatch := make(chan struct{}) - go func() { - defer ginkgo.GinkgoRecover() - defer close(ch) - w, err := podClient.Watch(context.TODO(), metav1.ListOptions{ - ResourceVersion: created.ResourceVersion, - FieldSelector: fmt.Sprintf("metadata.name=%s", pod.Name), - }) - if err != nil { - framework.Logf("Unable to watch pod %s: %v", pod.Name, err) - return - } - defer w.Stop() - close(waitForWatch) - events := []watch.Event{ - {Type: watch.Added, Object: created}, - } - for event := range w.ResultChan() { - events = append(events, event) - if event.Type == watch.Error { - framework.Logf("watch error seen for %s: %#v", pod.Name, event.Object) - } - if scenario.IsLastEvent(event) { - framework.Logf("watch last event seen for %s", pod.Name) - break - } - } - ch <- events - }() - - select { - case <-ch: // in case the goroutine above exits before establishing the watch - case <-waitForWatch: // when the watch is established - } - - verifier, scenario, err := scenario.Action(pod) - framework.ExpectNoError(err, "failed to take action") - - var ( - events []watch.Event - ok bool - ) - select { - case events, ok = <-ch: - if !ok { - continue - } - if len(events) < 2 { - framework.Fail("only got a single event") - } - case <-time.After(5 * time.Minute): - framework.Failf("timed out waiting for watch events for %s", pod.Name) - } - - end := time.Now() - - var eventErr error - for _, event := range events[1:] { - if err := verifier.Verify(event); err != nil { - eventErr = err - break - } - } - - total := end.Sub(start) - - var lastPod *v1.Pod = pod - func() { - lock.Lock() - defer lock.Unlock() - - if eventErr != nil { - errs = append(errs, eventErr) - return - } - pod, verifyErrs := verifier.VerifyFinal(scenario, total) - if pod != nil { - lastPod = pod - } - errs = append(errs, verifyErrs...) 
- }() - - h.WithLabelValues(lastPod.Spec.NodeName).Observe(total.Seconds()) - } - }(i) - } - - wg.Wait() - - if len(errs) > 0 { - var messages []string - for _, err := range errs { - messages = append(messages, err.Error()) - } - framework.Failf("%d errors:\n%v", len(errs), strings.Join(messages, "\n")) - } - values, _ := r.Gather() - var buf bytes.Buffer - for _, m := range values { - expfmt.MetricFamilyToText(&buf, m) - } - framework.Logf("Summary of latencies:\n%s", buf.String()) -} - -type podScenario interface { - Pod(worker, attempt int) *v1.Pod - Action(*v1.Pod) (podScenarioVerifier, string, error) - IsLastEvent(event watch.Event) bool -} - -type podScenarioVerifier interface { - Verify(event watch.Event) error - VerifyFinal(scenario string, duration time.Duration) (*v1.Pod, []error) -} - -type podFastDeleteScenario struct { - client v1core.PodInterface - delayMs int - - initContainer bool -} - -func (s podFastDeleteScenario) Verifier(pod *v1.Pod) podScenarioVerifier { - return &podStartVerifier{} -} - -func (s podFastDeleteScenario) IsLastEvent(event watch.Event) bool { - if event.Type == watch.Deleted { - return true - } - return false -} - -func (s podFastDeleteScenario) Action(pod *v1.Pod) (podScenarioVerifier, string, error) { - t := time.Duration(rand.Intn(s.delayMs)) * time.Millisecond - scenario := fmt.Sprintf("t=%s", t) - time.Sleep(t) - return &podStartVerifier{pod: pod}, scenario, s.client.Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) -} - -func (s podFastDeleteScenario) Pod(worker, attempt int) *v1.Pod { - name := fmt.Sprintf("pod-terminate-status-%d-%d", worker, attempt) - value := strconv.Itoa(time.Now().Nanosecond()) - one := int64(1) - if s.initContainer { - return &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{ - "name": "foo", - "time": value, - }, - }, - Spec: v1.PodSpec{ - RestartPolicy: v1.RestartPolicyNever, - TerminationGracePeriodSeconds: &one, - InitContainers: []v1.Container{ - { - Name: "fail", - Image: imageutils.GetE2EImage(imageutils.BusyBox), - Command: []string{ - "/bin/false", - }, - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("5m"), - v1.ResourceMemory: resource.MustParse("10Mi"), - }, - }, - }, - }, - Containers: []v1.Container{ - { - Name: "blocked", - Image: imageutils.GetE2EImage(imageutils.BusyBox), - Command: []string{ - "/bin/true", - }, - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("5m"), - v1.ResourceMemory: resource.MustParse("10Mi"), - }, - }, - }, - }, - }, - } - } - return &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{ - "name": "foo", - "time": value, - }, - }, - Spec: v1.PodSpec{ - RestartPolicy: v1.RestartPolicyNever, - TerminationGracePeriodSeconds: &one, - Containers: []v1.Container{ - { - Name: "fail", - Image: imageutils.GetE2EImage(imageutils.BusyBox), - Command: []string{ - "/bin/false", - }, - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("5m"), - v1.ResourceMemory: resource.MustParse("10Mi"), - }, - }, - }, - }, - }, - } -} - -// podStartVerifier checks events for a given pod and looks for unexpected -// transitions. It assumes one container running to completion. 
-type podStartVerifier struct { - pod *v1.Pod - hasInitContainers bool - hasContainers bool - hasTerminated bool - hasRunningContainers bool - hasTerminalPhase bool - duration time.Duration - completeDuration time.Duration -} - -var reBug88766 = regexp.MustCompile(`rootfs_linux.*kubernetes\.io~(secret|projected).*no such file or directory`) - -// Verify takes successive watch events for a given pod and returns an error if the status is unexpected. -// This verifier works for any pod which has 0 init containers and 1 regular container. -func (v *podStartVerifier) Verify(event watch.Event) error { - var ok bool - pod, ok := event.Object.(*v1.Pod) - if !ok { - framework.Logf("Unexpected event object: %s %#v", event.Type, event.Object) - return nil - } - v.pod = pod - - if len(pod.Spec.InitContainers) > 0 { - if len(pod.Status.InitContainerStatuses) == 0 { - if v.hasInitContainers { - return fmt.Errorf("pod %s on node %s had incorrect init containers: %#v", pod.Name, pod.Spec.NodeName, pod.Status.InitContainerStatuses) - } - return nil - } - v.hasInitContainers = true - if len(pod.Status.InitContainerStatuses) != 1 { - return fmt.Errorf("pod %s on node %s had incorrect init containers: %#v", pod.Name, pod.Spec.NodeName, pod.Status.InitContainerStatuses) - } - - } else { - if len(pod.Status.InitContainerStatuses) != 0 { - return fmt.Errorf("pod %s on node %s had incorrect init containers: %#v", pod.Name, pod.Spec.NodeName, pod.Status.InitContainerStatuses) - } - } - - if len(pod.Status.ContainerStatuses) == 0 { - if v.hasContainers { - return fmt.Errorf("pod %s on node %s had incorrect containers: %#v", pod.Name, pod.Spec.NodeName, pod.Status.ContainerStatuses) - } - return nil - } - v.hasContainers = true - if len(pod.Status.ContainerStatuses) != 1 { - return fmt.Errorf("pod %s on node %s had incorrect containers: %#v", pod.Name, pod.Spec.NodeName, pod.Status.ContainerStatuses) - } - - if status := findContainerStatusInPod(pod, "blocked"); status != nil { - if (status.Started != nil && *status.Started == true) || status.LastTerminationState.Terminated != nil || status.State.Waiting == nil { - return fmt.Errorf("pod %s on node %s should not have started the blocked container: %#v", pod.Name, pod.Spec.NodeName, status) - } - } - - status := findContainerStatusInPod(pod, "fail") - if status == nil { - return fmt.Errorf("pod %s on node %s had incorrect containers: %#v", pod.Name, pod.Spec.NodeName, pod.Status) - } - - t := status.State.Terminated - if v.hasTerminated { - if status.State.Waiting != nil || status.State.Running != nil { - return fmt.Errorf("pod %s on node %s was terminated and then changed state: %#v", pod.Name, pod.Spec.NodeName, status) - } - if t == nil { - return fmt.Errorf("pod %s on node %s was terminated and then had termination cleared: %#v", pod.Name, pod.Spec.NodeName, status) - } - } - var hasNoStartTime bool - v.hasRunningContainers = status.State.Waiting == nil && status.State.Terminated == nil - if t != nil { - if !t.FinishedAt.Time.IsZero() { - if t.StartedAt.IsZero() { - hasNoStartTime = true - } else { - v.duration = t.FinishedAt.Sub(t.StartedAt.Time) - } - v.completeDuration = t.FinishedAt.Sub(pod.CreationTimestamp.Time) - } - - defer func() { v.hasTerminated = true }() - switch { - case t.ExitCode == 1: - // expected - case t.ExitCode == 137 && (t.Reason == "ContainerStatusUnknown" || t.Reason == "Error"): - // expected, pod was force-killed after grace period - case t.ExitCode == 128 && (t.Reason == "StartError" || t.Reason == "ContainerCannotRun") && 
reBug88766.MatchString(t.Message): - // pod volume teardown races with container start in CRI, which reports a failure - framework.Logf("pod %s on node %s failed with the symptoms of https://github.com/kubernetes/kubernetes/issues/88766", pod.Name, pod.Spec.NodeName) - default: - data, _ := json.MarshalIndent(pod.Status, "", " ") - framework.Logf("pod %s on node %s had incorrect final status:\n%s", pod.Name, pod.Spec.NodeName, string(data)) - return fmt.Errorf("pod %s on node %s container unexpected exit code %d: start=%s end=%s reason=%s message=%s", pod.Name, pod.Spec.NodeName, t.ExitCode, t.StartedAt, t.FinishedAt, t.Reason, t.Message) - } - switch { - case v.duration > time.Hour: - // problem with status reporting - return fmt.Errorf("pod %s container %s on node %s had very long duration %s: start=%s end=%s", pod.Name, status.Name, pod.Spec.NodeName, v.duration, t.StartedAt, t.FinishedAt) - case hasNoStartTime: - // should never happen - return fmt.Errorf("pod %s container %s on node %s had finish time but not start time: end=%s", pod.Name, status.Name, pod.Spec.NodeName, t.FinishedAt) - } - } - if pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded { - v.hasTerminalPhase = true - } else { - if v.hasTerminalPhase { - return fmt.Errorf("pod %s on node %s was in a terminal phase and then reverted: %#v", pod.Name, pod.Spec.NodeName, pod.Status) - } - } - return nil -} - -func (v *podStartVerifier) VerifyFinal(scenario string, total time.Duration) (*v1.Pod, []error) { - var errs []error - pod := v.pod - if !v.hasTerminalPhase { - var names []string - for _, status := range pod.Status.ContainerStatuses { - if status.State.Running != nil { - names = append(names, status.Name) - } - } - switch { - case len(names) > 0: - errs = append(errs, fmt.Errorf("pod %s on node %s did not reach a terminal phase before being deleted but had running containers: phase=%s, running-containers=%s", pod.Name, pod.Spec.NodeName, pod.Status.Phase, strings.Join(names, ","))) - case pod.Status.Phase != v1.PodPending: - errs = append(errs, fmt.Errorf("pod %s on node %s was not Pending but has no running containers: phase=%s", pod.Name, pod.Spec.NodeName, pod.Status.Phase)) - } - } - if v.hasRunningContainers { - data, _ := json.MarshalIndent(pod.Status.ContainerStatuses, "", " ") - errs = append(errs, fmt.Errorf("pod %s on node %s had running or unknown container status before being deleted:\n%s", pod.Name, pod.Spec.NodeName, string(data))) - } - - framework.Logf("Pod %s on node %s %s total=%s run=%s execute=%s", pod.Name, pod.Spec.NodeName, scenario, total, v.completeDuration, v.duration) - return pod, errs -} - -// findContainerStatusInPod finds a container status by its name in the provided pod -func findContainerStatusInPod(pod *v1.Pod, containerName string) *v1.ContainerStatus { - for _, container := range pod.Status.InitContainerStatuses { - if container.Name == containerName { - return &container - } - } - for _, container := range pod.Status.ContainerStatuses { - if container.Name == containerName { - return &container - } - } - for _, container := range pod.Status.EphemeralContainerStatuses { - if container.Name == containerName { - return &container - } - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/test/e2e/node/pre_stop.go b/vendor/k8s.io/kubernetes/test/e2e/node/pre_stop.go index 518e2a99e6f3..14960d928691 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/node/pre_stop.go +++ b/vendor/k8s.io/kubernetes/test/e2e/node/pre_stop.go @@ -33,7 +33,6 @@ import ( e2ekubelet 
"k8s.io/kubernetes/test/e2e/framework/kubelet" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) @@ -154,7 +153,6 @@ func testPreStop(c clientset.Interface, ns string) { var _ = SIGDescribe("PreStop", func() { f := framework.NewDefaultFramework("prestop") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline var podClient *framework.PodClient ginkgo.BeforeEach(func() { podClient = f.PodClient() diff --git a/vendor/k8s.io/kubernetes/test/e2e/node/runtimeclass.go b/vendor/k8s.io/kubernetes/test/e2e/node/runtimeclass.go index d6bda503c8f3..9d10cb4c3b45 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/node/runtimeclass.go +++ b/vendor/k8s.io/kubernetes/test/e2e/node/runtimeclass.go @@ -19,7 +19,6 @@ package node import ( "context" "fmt" - "k8s.io/pod-security-admission/api" v1 "k8s.io/api/core/v1" nodev1 "k8s.io/api/node/v1" @@ -39,7 +38,6 @@ import ( var _ = SIGDescribe("RuntimeClass", func() { f := framework.NewDefaultFramework("runtimeclass") - f.NamespacePodSecurityEnforceLevel = api.LevelBaseline ginkgo.It("should reject a Pod requesting a RuntimeClass with conflicting node selector", func() { labelFooName := "foo-" + string(uuid.NewUUID()) diff --git a/vendor/k8s.io/kubernetes/test/e2e/node/security_context.go b/vendor/k8s.io/kubernetes/test/e2e/node/security_context.go index 171a4194f27a..35075495c655 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/node/security_context.go +++ b/vendor/k8s.io/kubernetes/test/e2e/node/security_context.go @@ -33,7 +33,6 @@ import ( e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -66,7 +65,6 @@ func scTestPod(hostIPC bool, hostPID bool) *v1.Pod { var _ = SIGDescribe("Security Context", func() { f := framework.NewDefaultFramework("security-context") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.It("should support pod.Spec.SecurityContext.SupplementalGroups [LinuxOnly]", func() { pod := scTestPod(false, false) diff --git a/vendor/k8s.io/kubernetes/test/e2e/node/taints.go b/vendor/k8s.io/kubernetes/test/e2e/node/taints.go index db605349183a..dd06d36b07e5 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/node/taints.go +++ b/vendor/k8s.io/kubernetes/test/e2e/node/taints.go @@ -32,7 +32,6 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" // ensure libs have a chance to initialize @@ -161,7 +160,6 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { var cs clientset.Interface var ns string f := framework.NewDefaultFramework("taint-single-pod") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.BeforeEach(func() { cs = f.ClientSet @@ -343,7 +341,6 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { var cs clientset.Interface var ns string f := framework.NewDefaultFramework("taint-multiple-pods") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.BeforeEach(func() { cs = f.ClientSet diff --git a/vendor/k8s.io/kubernetes/test/e2e/scheduling/limit_range.go b/vendor/k8s.io/kubernetes/test/e2e/scheduling/limit_range.go index 
7bcb20db76b9..667f0c4417b4 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/scheduling/limit_range.go +++ b/vendor/k8s.io/kubernetes/test/e2e/scheduling/limit_range.go @@ -36,7 +36,6 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2eservice "k8s.io/kubernetes/test/e2e/framework/service" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) @@ -47,7 +46,6 @@ const ( var _ = SIGDescribe("LimitRange", func() { f := framework.NewDefaultFramework("limitrange") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline /* Release: v1.18 diff --git a/vendor/k8s.io/kubernetes/test/e2e/scheduling/predicates.go b/vendor/k8s.io/kubernetes/test/e2e/scheduling/predicates.go index d0daacfdeb12..17ff4927dc50 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/scheduling/predicates.go +++ b/vendor/k8s.io/kubernetes/test/e2e/scheduling/predicates.go @@ -38,7 +38,6 @@ import ( e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" @@ -78,7 +77,6 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { var RCName string var ns string f := framework.NewDefaultFramework("sched-pred") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.AfterEach(func() { rc, err := cs.CoreV1().ReplicationControllers(ns).Get(context.TODO(), RCName, metav1.GetOptions{}) diff --git a/vendor/k8s.io/kubernetes/test/e2e/scheduling/preemption.go b/vendor/k8s.io/kubernetes/test/e2e/scheduling/preemption.go index 1f1a0c01ba0c..7cef5a4a6223 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/scheduling/preemption.go +++ b/vendor/k8s.io/kubernetes/test/e2e/scheduling/preemption.go @@ -44,7 +44,6 @@ import ( e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -65,7 +64,6 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { var nodeList *v1.NodeList var ns string f := framework.NewDefaultFramework("sched-preemption") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline lowPriority, mediumPriority, highPriority := int32(1), int32(100), int32(1000) lowPriorityClassName := f.BaseName + "-low-priority" @@ -458,7 +456,6 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { var node *v1.Node var ns, nodeHostNameLabel string f := framework.NewDefaultFramework("sched-preemption-path") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline priorityPairs := make([]priorityPair, 0) diff --git a/vendor/k8s.io/kubernetes/test/e2e/scheduling/priorities.go b/vendor/k8s.io/kubernetes/test/e2e/scheduling/priorities.go index 840219064146..a5767bfe47ae 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/scheduling/priorities.go +++ b/vendor/k8s.io/kubernetes/test/e2e/scheduling/priorities.go @@ -43,7 +43,6 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" testutils "k8s.io/kubernetes/test/utils" - admissionapi "k8s.io/pod-security-admission/api" ) // Resource is a collection of compute resource. 
@@ -91,7 +90,6 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { var systemPodsNo int var ns string f := framework.NewDefaultFramework("sched-priority") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.AfterEach(func() { }) diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/csi_mock_volume.go b/vendor/k8s.io/kubernetes/test/e2e/storage/csi_mock_volume.go index 55a45c5b0eb1..5e7834f0dc4a 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/csi_mock_volume.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/csi_mock_volume.go @@ -58,7 +58,6 @@ import ( "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" utilptr "k8s.io/utils/pointer" "github.com/onsi/ginkgo" @@ -132,7 +131,6 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { var m mockDriverSetup f := framework.NewDefaultFramework("csi-mock-volumes") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged init := func(tp testParameters) { m = mockDriverSetup{ diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/empty_dir_wrapper.go b/vendor/k8s.io/kubernetes/test/e2e/storage/empty_dir_wrapper.go index a5db26756b39..ed79810cc3d9 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/empty_dir_wrapper.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/empty_dir_wrapper.go @@ -31,7 +31,6 @@ import ( e2erc "k8s.io/kubernetes/test/e2e/framework/rc" "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) @@ -57,7 +56,6 @@ const ( var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { f := framework.NewDefaultFramework("emptydir-wrapper") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline /* Release: v1.13 diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/ephemeral_volume.go b/vendor/k8s.io/kubernetes/test/e2e/storage/ephemeral_volume.go index 0b7251f3eba6..c72b826c4520 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/ephemeral_volume.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/ephemeral_volume.go @@ -30,7 +30,6 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) @@ -46,7 +45,6 @@ var _ = utils.SIGDescribe("Ephemeralstorage", func() { ) f := framework.NewDefaultFramework("pv") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.BeforeEach(func() { c = f.ClientSet diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/flexvolume.go b/vendor/k8s.io/kubernetes/test/e2e/storage/flexvolume.go index 10972707f383..45e0181327bd 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/flexvolume.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/flexvolume.go @@ -35,7 +35,6 @@ import ( e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/utils" - admissionapi "k8s.io/pod-security-admission/api" ) const ( @@ -158,7 +157,6 @@ func getHostFromHostPort(hostPort string) string { var _ = utils.SIGDescribe("Flexvolumes", func() { f := framework.NewDefaultFramework("flexvolume") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline // note that namespace deletion is handled by delete-namespace flag diff --git 
a/vendor/k8s.io/kubernetes/test/e2e/storage/host_path_type.go b/vendor/k8s.io/kubernetes/test/e2e/storage/host_path_type.go index 390bb951a88e..140ee4324802 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/host_path_type.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/host_path_type.go @@ -31,14 +31,12 @@ import ( e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo" ) var _ = utils.SIGDescribe("HostPathType Directory [Slow]", func() { f := framework.NewDefaultFramework("host-path-type-directory") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var ( ns string @@ -105,7 +103,6 @@ var _ = utils.SIGDescribe("HostPathType Directory [Slow]", func() { var _ = utils.SIGDescribe("HostPathType File [Slow]", func() { f := framework.NewDefaultFramework("host-path-type-file") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var ( ns string @@ -174,7 +171,6 @@ var _ = utils.SIGDescribe("HostPathType File [Slow]", func() { var _ = utils.SIGDescribe("HostPathType Socket [Slow]", func() { f := framework.NewDefaultFramework("host-path-type-socket") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var ( ns string @@ -240,7 +236,6 @@ var _ = utils.SIGDescribe("HostPathType Socket [Slow]", func() { var _ = utils.SIGDescribe("HostPathType Character Device [Slow]", func() { f := framework.NewDefaultFramework("host-path-type-char-dev") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var ( ns string @@ -310,7 +305,6 @@ var _ = utils.SIGDescribe("HostPathType Character Device [Slow]", func() { var _ = utils.SIGDescribe("HostPathType Block Device [Slow]", func() { f := framework.NewDefaultFramework("host-path-type-block-dev") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var ( ns string diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/local_volume_resize.go b/vendor/k8s.io/kubernetes/test/e2e/storage/local_volume_resize.go index c159ac2ec532..8ac3e81cae63 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/local_volume_resize.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/local_volume_resize.go @@ -36,12 +36,10 @@ import ( e2enode "k8s.io/kubernetes/test/e2e/framework/node" "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" - admissionapi "k8s.io/pod-security-admission/api" ) var _ = utils.SIGDescribe("PersistentVolumes-expansion ", func() { f := framework.NewDefaultFramework("persistent-local-volumes-expansion") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.Context("loopback local block volume", func() { var ( config *localTestConfig diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/persistent_volumes-local.go b/vendor/k8s.io/kubernetes/test/e2e/storage/persistent_volumes-local.go index 65e28b254aca..6c2d94f06610 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/persistent_volumes-local.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/persistent_volumes-local.go @@ -49,7 +49,6 @@ import ( e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" ) type localTestConfig struct { @@ -150,7 +149,6 @@ var ( var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { f := 
framework.NewDefaultFramework("persistent-local-volumes-test") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var ( config *localTestConfig diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/persistent_volumes.go b/vendor/k8s.io/kubernetes/test/e2e/storage/persistent_volumes.go index df6a787c8e8c..329e967ab0db 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/persistent_volumes.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/persistent_volumes.go @@ -36,7 +36,6 @@ import ( e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" ) // Validate PV/PVC, create and verify writer pod, delete the PVC, and validate the PV's @@ -108,7 +107,6 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { pvc *v1.PersistentVolumeClaim err error ) - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.BeforeEach(func() { c = f.ClientSet diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/pvc_protection.go b/vendor/k8s.io/kubernetes/test/e2e/storage/pvc_protection.go index a7e6f1758c7c..d9294be6ee20 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/pvc_protection.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/pvc_protection.go @@ -35,7 +35,6 @@ import ( e2epv "k8s.io/kubernetes/test/e2e/framework/pv" "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" - admissionapi "k8s.io/pod-security-admission/api" ) const ( @@ -70,7 +69,6 @@ var _ = utils.SIGDescribe("PVC Protection", func() { ) f := framework.NewDefaultFramework("pvc-protection") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.BeforeEach(func() { client = f.ClientSet nameSpace = f.Namespace.Name diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/subpath.go b/vendor/k8s.io/kubernetes/test/e2e/storage/subpath.go index 38c58a3d8c81..836bdf2ad239 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/subpath.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/subpath.go @@ -26,12 +26,10 @@ import ( "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" - admissionapi "k8s.io/pod-security-admission/api" ) var _ = utils.SIGDescribe("Subpath", func() { f := framework.NewDefaultFramework("subpath") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline ginkgo.Context("Atomic writer volumes", func() { var err error diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/disruptive.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/disruptive.go index c8a7318b6b1f..5f167cbe2efa 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/disruptive.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/disruptive.go @@ -29,7 +29,6 @@ import ( storageframework "k8s.io/kubernetes/test/e2e/storage/framework" "k8s.io/kubernetes/test/e2e/storage/utils" storageutils "k8s.io/kubernetes/test/e2e/storage/utils" - admissionapi "k8s.io/pod-security-admission/api" ) type disruptiveTestSuite struct { @@ -90,7 +89,6 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. 
f := framework.NewFrameworkWithCustomTimeouts("disruptive", storageframework.GetDriverTimeouts(driver)) - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged init := func() { l = local{} diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/ephemeral.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/ephemeral.go index 2dafdc15811e..7f5b552d068d 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/ephemeral.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/ephemeral.go @@ -36,7 +36,6 @@ import ( e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" storageframework "k8s.io/kubernetes/test/e2e/storage/framework" storageutils "k8s.io/kubernetes/test/e2e/storage/utils" - admissionapi "k8s.io/pod-security-admission/api" ) type ephemeralTestSuite struct { @@ -118,7 +117,6 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. f := framework.NewFrameworkWithCustomTimeouts("ephemeral", storageframework.GetDriverTimeouts(driver)) - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged init := func() { if pattern.VolType == storageframework.CSIInlineVolume { diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/fsgroupchangepolicy.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/fsgroupchangepolicy.go index 542956e95f72..f323830facc9 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/fsgroupchangepolicy.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/fsgroupchangepolicy.go @@ -29,7 +29,6 @@ import ( e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" storageframework "k8s.io/kubernetes/test/e2e/storage/framework" storageutils "k8s.io/kubernetes/test/e2e/storage/utils" - admissionapi "k8s.io/pod-security-admission/api" utilpointer "k8s.io/utils/pointer" ) @@ -106,7 +105,6 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver storageframework.TestD // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. f := framework.NewFrameworkWithCustomTimeouts("fsgroupchangepolicy", storageframework.GetDriverTimeouts(driver)) - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged init := func() { e2eskipper.SkipIfNodeOSDistroIs("windows") diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/multivolume.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/multivolume.go index c6db22459dc8..ac06e5c61f8a 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/multivolume.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/multivolume.go @@ -35,7 +35,6 @@ import ( "k8s.io/kubernetes/test/e2e/storage/utils" storageutils "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" ) type multiVolumeTestSuite struct { @@ -105,7 +104,6 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. 
f := framework.NewFrameworkWithCustomTimeouts("multivolume", storageframework.GetDriverTimeouts(driver)) - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged init := func() { l = local{} diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/provisioning.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/provisioning.go index 0f3537718eff..eab48a2092c8 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/provisioning.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/provisioning.go @@ -40,7 +40,6 @@ import ( e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" storageframework "k8s.io/kubernetes/test/e2e/storage/framework" storageutils "k8s.io/kubernetes/test/e2e/storage/utils" - admissionapi "k8s.io/pod-security-admission/api" ) // StorageClassTest represents parameters to be used by provisioning tests. @@ -130,7 +129,6 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. f := framework.NewFrameworkWithCustomTimeouts("provisioning", storageframework.GetDriverTimeouts(driver)) - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged init := func() { l = local{} diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/snapshottable.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/snapshottable.go index 66cb3595559b..0519b8d7af6d 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/snapshottable.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/snapshottable.go @@ -40,7 +40,6 @@ import ( storageframework "k8s.io/kubernetes/test/e2e/storage/framework" "k8s.io/kubernetes/test/e2e/storage/utils" storageutils "k8s.io/kubernetes/test/e2e/storage/utils" - admissionapi "k8s.io/pod-security-admission/api" ) // data file name @@ -107,7 +106,6 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. f := framework.NewDefaultFramework("snapshotting") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.Describe("volume snapshot controller", func() { var ( diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/snapshottable_stress.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/snapshottable_stress.go index b388a3cab35b..392e51e9c57f 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/snapshottable_stress.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/snapshottable_stress.go @@ -35,7 +35,6 @@ import ( e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" storageframework "k8s.io/kubernetes/test/e2e/storage/framework" storageutils "k8s.io/kubernetes/test/e2e/storage/utils" - admissionapi "k8s.io/pod-security-admission/api" ) type snapshottableStressTestSuite struct { @@ -122,7 +121,6 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. 
f := framework.NewDefaultFramework("snapshottable-stress") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged init := func() { driverInfo = driver.GetDriverInfo() diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/subpath.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/subpath.go index 075aaf7b69dc..4bc48d1898a5 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/subpath.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/subpath.go @@ -43,7 +43,6 @@ import ( "k8s.io/kubernetes/test/e2e/storage/utils" storageutils "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" ) var ( @@ -117,7 +116,6 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. f := framework.NewFrameworkWithCustomTimeouts("provisioning", storageframework.GetDriverTimeouts(driver)) - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged init := func() { l = local{} diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/topology.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/topology.go index 6ac3173bb4c1..1d24ffdb6a0b 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/topology.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/topology.go @@ -36,7 +36,6 @@ import ( e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" storageframework "k8s.io/kubernetes/test/e2e/storage/framework" storageutils "k8s.io/kubernetes/test/e2e/storage/utils" - admissionapi "k8s.io/pod-security-admission/api" ) type topologyTestSuite struct { @@ -105,7 +104,6 @@ func (t *topologyTestSuite) DefineTests(driver storageframework.TestDriver, patt // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. f := framework.NewFrameworkWithCustomTimeouts("topology", storageframework.GetDriverTimeouts(driver)) - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged init := func() topologyTest { dDriver, _ = driver.(storageframework.DynamicPVTestDriver) diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_expand.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_expand.go index 6a562550191b..704f4bd2aa30 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_expand.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_expand.go @@ -36,7 +36,6 @@ import ( e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" storageframework "k8s.io/kubernetes/test/e2e/storage/framework" storageutils "k8s.io/kubernetes/test/e2e/storage/utils" - admissionapi "k8s.io/pod-security-admission/api" ) const ( @@ -116,7 +115,6 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. 
f := framework.NewFrameworkWithCustomTimeouts("volume-expand", storageframework.GetDriverTimeouts(driver)) - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged init := func() { l = local{} diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_io.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_io.go index 25bb2e44e29f..fb84162e0d4e 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_io.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_io.go @@ -42,7 +42,6 @@ import ( e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" storageframework "k8s.io/kubernetes/test/e2e/storage/framework" storageutils "k8s.io/kubernetes/test/e2e/storage/utils" - admissionapi "k8s.io/pod-security-admission/api" ) // MD5 hashes of the test file corresponding to each file size. @@ -113,7 +112,6 @@ func (t *volumeIOTestSuite) DefineTests(driver storageframework.TestDriver, patt // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. f := framework.NewFrameworkWithCustomTimeouts("volumeio", storageframework.GetDriverTimeouts(driver)) - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged init := func() { l = local{} diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumelimits.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumelimits.go index 4c7ebfe03691..dd0acbd0264f 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumelimits.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumelimits.go @@ -41,7 +41,6 @@ import ( e2epv "k8s.io/kubernetes/test/e2e/framework/pv" storageframework "k8s.io/kubernetes/test/e2e/storage/framework" storageutils "k8s.io/kubernetes/test/e2e/storage/utils" - admissionapi "k8s.io/pod-security-admission/api" ) type volumeLimitsTestSuite struct { @@ -114,7 +113,6 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. f := framework.NewFrameworkWithCustomTimeouts("volumelimits", storageframework.GetDriverTimeouts(driver)) - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged // This checks that CSIMaxVolumeLimitChecker works as expected. // A randomly chosen node should be able to handle as many CSI volumes as diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumemode.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumemode.go index 42abf99e2c67..50cb5f4ecd84 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumemode.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumemode.go @@ -41,7 +41,6 @@ import ( e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" storageframework "k8s.io/kubernetes/test/e2e/storage/framework" storageutils "k8s.io/kubernetes/test/e2e/storage/utils" - admissionapi "k8s.io/pod-security-admission/api" ) const ( @@ -108,7 +107,6 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. 
f := framework.NewFrameworkWithCustomTimeouts("volumemode", storageframework.GetDriverTimeouts(driver)) - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged init := func() { l = local{} diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumes.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumes.go index 7e3d71d08ea7..cef61af2c3e4 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumes.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumes.go @@ -37,7 +37,6 @@ import ( storageframework "k8s.io/kubernetes/test/e2e/storage/framework" storageutils "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" ) type volumesTestSuite struct { @@ -130,7 +129,6 @@ func (t *volumesTestSuite) DefineTests(driver storageframework.TestDriver, patte // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. f := framework.NewFrameworkWithCustomTimeouts("volume", storageframework.GetDriverTimeouts(driver)) - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged init := func() { l = local{} diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/volume_provisioning.go b/vendor/k8s.io/kubernetes/test/e2e/storage/volume_provisioning.go index 18772c0a31e2..1b300d203c26 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/volume_provisioning.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/volume_provisioning.go @@ -52,7 +52,6 @@ import ( "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" ) const ( @@ -136,7 +135,6 @@ func checkGCEPD(volume *v1.PersistentVolume, volumeType string) error { var _ = utils.SIGDescribe("Dynamic Provisioning", func() { f := framework.NewDefaultFramework("volume-provisioning") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged // filled in BeforeEach var c clientset.Interface diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/volumes.go b/vendor/k8s.io/kubernetes/test/e2e/storage/volumes.go index 0949b426f57f..cee369576085 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/volumes.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/volumes.go @@ -28,13 +28,11 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/utils" - admissionapi "k8s.io/pod-security-admission/api" ) // These tests need privileged containers, which are disabled by default. 
var _ = utils.SIGDescribe("Volumes", func() { f := framework.NewDefaultFramework("volume") - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline // note that namespace deletion is handled by delete-namespace flag // filled inside BeforeEach diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure.go index 6790cfb82cde..be61907bb611 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure.go @@ -749,6 +749,10 @@ func (az *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) { UpdateFunc: func(prev, obj interface{}) { prevNode := prev.(*v1.Node) newNode := obj.(*v1.Node) + if newNode.Labels[v1.LabelTopologyZone] == + prevNode.Labels[v1.LabelTopologyZone] { + return + } az.updateNodeCaches(prevNode, newNode) }, DeleteFunc: func(obj interface{}) { @@ -793,7 +797,7 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) { } } - // Remove from nodeZones cache if using deprecated LabelFailureDomainBetaZone + //Remove from nodeZones cache if using depreciated LabelFailureDomainBetaZone prevZoneFailureDomain, ok := prevNode.ObjectMeta.Labels[v1.LabelFailureDomainBetaZone] if ok && az.isAvailabilityZone(prevZoneFailureDomain) { az.nodeZones[prevZone].Delete(prevNode.ObjectMeta.Name) @@ -808,17 +812,16 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) { delete(az.nodeResourceGroups, prevNode.ObjectMeta.Name) } + // Remove from unmanagedNodes cache. managed, ok := prevNode.ObjectMeta.Labels[managedByAzureLabel] - isNodeManagedByCloudProvider := !ok || managed != "false" - - // Remove unmanagedNodes cache - if !isNodeManagedByCloudProvider { + if ok && managed == "false" { az.unmanagedNodes.Delete(prevNode.ObjectMeta.Name) + az.excludeLoadBalancerNodes.Delete(prevNode.ObjectMeta.Name) } - if newNode == nil { - // the node is being deleted from the cluster, exclude it from load balancers - az.excludeLoadBalancerNodes.Insert(prevNode.ObjectMeta.Name) + // Remove from excludeLoadBalancerNodes cache. + if _, hasExcludeBalancerLabel := prevNode.ObjectMeta.Labels[v1.LabelNodeExcludeBalancers]; hasExcludeBalancerLabel { + az.excludeLoadBalancerNodes.Delete(prevNode.ObjectMeta.Name) } } @@ -841,26 +844,16 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) { az.nodeResourceGroups[newNode.ObjectMeta.Name] = strings.ToLower(newRG) } - _, hasExcludeBalancerLabel := newNode.ObjectMeta.Labels[v1.LabelNodeExcludeBalancers] + // Add to unmanagedNodes cache. managed, ok := newNode.ObjectMeta.Labels[managedByAzureLabel] - isNodeManagedByCloudProvider := !ok || managed != "false" - - // update unmanagedNodes cache - if !isNodeManagedByCloudProvider { + if ok && managed == "false" { az.unmanagedNodes.Insert(newNode.ObjectMeta.Name) - } - // update excludeLoadBalancerNodes cache - switch { - case !isNodeManagedByCloudProvider: - az.excludeLoadBalancerNodes.Insert(newNode.ObjectMeta.Name) - case hasExcludeBalancerLabel: az.excludeLoadBalancerNodes.Insert(newNode.ObjectMeta.Name) - case !isNodeReady(newNode): + } + + // Add to excludeLoadBalancerNodes cache. + if _, hasExcludeBalancerLabel := newNode.ObjectMeta.Labels[v1.LabelNodeExcludeBalancers]; hasExcludeBalancerLabel { az.excludeLoadBalancerNodes.Insert(newNode.ObjectMeta.Name) - default: - // Nodes not falling into the three cases above are valid backends and - // are removed from excludeLoadBalancerNodes cache. 
- az.excludeLoadBalancerNodes.Delete(newNode.ObjectMeta.Name) } } } @@ -986,14 +979,3 @@ func (az *Cloud) ShouldNodeExcludedFromLoadBalancer(nodeName string) (bool, erro return az.excludeLoadBalancerNodes.Has(nodeName), nil } - -// This, along with the few lines that call this function in updateNodeCaches, should be -// replaced by https://github.com/kubernetes-sigs/cloud-provider-azure/pull/1195 once that merges. -func isNodeReady(node *v1.Node) bool { - for _, cond := range node.Status.Conditions { - if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue { - return true - } - } - return false -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 571f47439fc7..2a740d22fcf2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -830,17 +830,17 @@ github.com/openshift/client-go/user/informers/externalversions/internalinterface github.com/openshift/client-go/user/informers/externalversions/user github.com/openshift/client-go/user/informers/externalversions/user/v1 github.com/openshift/client-go/user/listers/user/v1 -# github.com/openshift/library-go v0.0.0-20220315122757-21a67f25d837 +# github.com/openshift/library-go v0.0.0-20220111125907-7f25b9c7ad22 ## explicit github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout github.com/openshift/library-go/pkg/apiserver/apiserverconfig +github.com/openshift/library-go/pkg/apiserver/httprequest github.com/openshift/library-go/pkg/apps/appsserialization github.com/openshift/library-go/pkg/apps/appsutil github.com/openshift/library-go/pkg/authorization/hardcodedauthorizer github.com/openshift/library-go/pkg/authorization/scopemetadata github.com/openshift/library-go/pkg/build/naming -github.com/openshift/library-go/pkg/client/openshiftrestmapper github.com/openshift/library-go/pkg/config/client github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers github.com/openshift/library-go/pkg/config/clusterstatus @@ -1508,7 +1508,7 @@ gopkg.in/warnings.v0 gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b gopkg.in/yaml.v3 -# k8s.io/api v0.23.0 => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20220325133350-2a2851ce61f8 +# k8s.io/api v0.23.0 => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20220130045626-42a86de5afdd ## explicit k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1558,7 +1558,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.23.0 => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20220325133350-2a2851ce61f8 +# k8s.io/apiextensions-apiserver v0.23.0 => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20220130045626-42a86de5afdd ## explicit k8s.io/apiextensions-apiserver/pkg/apihelpers k8s.io/apiextensions-apiserver/pkg/apis/apiextensions @@ -1605,7 +1605,7 @@ k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition k8s.io/apiextensions-apiserver/test/integration k8s.io/apiextensions-apiserver/test/integration/fixtures k8s.io/apiextensions-apiserver/third_party/forked/celopenapi/model -# k8s.io/apimachinery v0.23.0 => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20220325133350-2a2851ce61f8 +# k8s.io/apimachinery v0.23.0 => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20220130045626-42a86de5afdd ## explicit 
k8s.io/apimachinery/pkg/api/apitesting k8s.io/apimachinery/pkg/api/equality @@ -1669,7 +1669,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.23.0 => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20220325133350-2a2851ce61f8 +# k8s.io/apiserver v0.23.0 => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20220130045626-42a86de5afdd ## explicit k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration @@ -1815,12 +1815,12 @@ k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/oidc k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/cli-runtime v0.23.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20220325133350-2a2851ce61f8 +# k8s.io/cli-runtime v0.23.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20220130045626-42a86de5afdd ## explicit k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.23.0 => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20220325133350-2a2851ce61f8 +# k8s.io/client-go v0.23.0 => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20220130045626-42a86de5afdd ## explicit k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -2124,7 +2124,7 @@ k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20220325133350-2a2851ce61f8 +# k8s.io/cloud-provider v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20220130045626-42a86de5afdd k8s.io/cloud-provider k8s.io/cloud-provider/api k8s.io/cloud-provider/credentialconfig @@ -2133,12 +2133,12 @@ k8s.io/cloud-provider/service/helpers k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/errors k8s.io/cloud-provider/volume/helpers -# k8s.io/cluster-bootstrap v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20220325133350-2a2851ce61f8 +# k8s.io/cluster-bootstrap v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20220130045626-42a86de5afdd k8s.io/cluster-bootstrap/token/api k8s.io/cluster-bootstrap/token/util k8s.io/cluster-bootstrap/util/secrets k8s.io/cluster-bootstrap/util/tokens -# k8s.io/component-base v0.23.0 => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20220325133350-2a2851ce61f8 +# k8s.io/component-base v0.23.0 => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20220130045626-42a86de5afdd ## explicit k8s.io/component-base/cli/flag k8s.io/component-base/cli/globalflag @@ -2166,7 +2166,7 @@ k8s.io/component-base/term k8s.io/component-base/traces k8s.io/component-base/version k8s.io/component-base/version/verflag -# k8s.io/component-helpers v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20220325133350-2a2851ce61f8 +# k8s.io/component-helpers v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20220130045626-42a86de5afdd ## explicit 
k8s.io/component-helpers/apimachinery/lease
k8s.io/component-helpers/apps/poddisruptionbudget
@@ -2179,18 +2179,17 @@ k8s.io/component-helpers/scheduling/corev1
k8s.io/component-helpers/scheduling/corev1/nodeaffinity
k8s.io/component-helpers/storage/ephemeral
k8s.io/component-helpers/storage/volume
-# k8s.io/cri-api v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20220325133350-2a2851ce61f8
+# k8s.io/cri-api v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20220130045626-42a86de5afdd
k8s.io/cri-api/pkg/apis
k8s.io/cri-api/pkg/apis/runtime/v1
k8s.io/cri-api/pkg/apis/runtime/v1alpha2
-k8s.io/cri-api/pkg/errors
-# k8s.io/csi-translation-lib v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20220325133350-2a2851ce61f8
+# k8s.io/csi-translation-lib v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20220130045626-42a86de5afdd
k8s.io/csi-translation-lib
k8s.io/csi-translation-lib/plugins
# k8s.io/klog/v2 v2.30.0
## explicit
k8s.io/klog/v2
-# k8s.io/kube-aggregator v0.23.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20220325133350-2a2851ce61f8
+# k8s.io/kube-aggregator v0.23.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20220130045626-42a86de5afdd
## explicit
k8s.io/kube-aggregator/pkg/apis/apiregistration
k8s.io/kube-aggregator/pkg/apis/apiregistration/install
@@ -2241,13 +2240,13 @@ k8s.io/kube-openapi/pkg/validation/spec
k8s.io/kube-openapi/pkg/validation/strfmt
k8s.io/kube-openapi/pkg/validation/strfmt/bson
k8s.io/kube-openapi/pkg/validation/validate
-# k8s.io/kube-proxy v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20220325133350-2a2851ce61f8
+# k8s.io/kube-proxy v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20220130045626-42a86de5afdd
k8s.io/kube-proxy/config/v1alpha1
-# k8s.io/kube-scheduler v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20220325133350-2a2851ce61f8
+# k8s.io/kube-scheduler v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20220130045626-42a86de5afdd
k8s.io/kube-scheduler/config/v1beta2
k8s.io/kube-scheduler/config/v1beta3
k8s.io/kube-scheduler/extender/v1
-# k8s.io/kubectl v0.23.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20220325133350-2a2851ce61f8
+# k8s.io/kubectl v0.23.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20220130045626-42a86de5afdd
## explicit
k8s.io/kubectl/pkg/apps
k8s.io/kubectl/pkg/cmd/util
@@ -2273,7 +2272,7 @@ k8s.io/kubectl/pkg/util/storage
k8s.io/kubectl/pkg/util/templates
k8s.io/kubectl/pkg/util/term
k8s.io/kubectl/pkg/validation
-# k8s.io/kubelet v0.23.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20220325133350-2a2851ce61f8
+# k8s.io/kubelet v0.23.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20220130045626-42a86de5afdd
## explicit
k8s.io/kubelet/config/v1alpha1
k8s.io/kubelet/config/v1beta1
@@ -2286,7 +2285,7 @@ k8s.io/kubelet/pkg/apis/pluginregistration/v1
k8s.io/kubelet/pkg/apis/podresources/v1
k8s.io/kubelet/pkg/apis/podresources/v1alpha1
k8s.io/kubelet/pkg/apis/stats/v1alpha1
-# k8s.io/kubernetes v1.23.0 => github.com/openshift/kubernetes v1.24.0-alpha.0.0.20220325133350-2a2851ce61f8
+# k8s.io/kubernetes v1.23.0 => github.com/openshift/kubernetes v1.22.2-0.20220130045626-42a86de5afdd
## explicit
k8s.io/kubernetes/cmd/kube-apiserver/app
k8s.io/kubernetes/cmd/kube-apiserver/app/options
@@ -3045,7 +3044,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph
k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
k8s.io/kubernetes/third_party/forked/gonum/graph/simple
k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
-# k8s.io/legacy-cloud-providers v0.23.0 => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20220325133350-2a2851ce61f8
+# k8s.io/legacy-cloud-providers v0.23.0 => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20220130045626-42a86de5afdd
## explicit
k8s.io/legacy-cloud-providers/aws
k8s.io/legacy-cloud-providers/azure
@@ -3089,7 +3088,7 @@ k8s.io/legacy-cloud-providers/openstack
k8s.io/legacy-cloud-providers/vsphere
k8s.io/legacy-cloud-providers/vsphere/vclib
k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
-# k8s.io/metrics v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20220325133350-2a2851ce61f8
+# k8s.io/metrics v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20220130045626-42a86de5afdd
k8s.io/metrics/pkg/apis/custom_metrics
k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
k8s.io/metrics/pkg/apis/custom_metrics/v1beta2
@@ -3098,9 +3097,9 @@ k8s.io/metrics/pkg/apis/external_metrics/v1beta1
k8s.io/metrics/pkg/client/custom_metrics
k8s.io/metrics/pkg/client/custom_metrics/scheme
k8s.io/metrics/pkg/client/external_metrics
-# k8s.io/mount-utils v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20220325133350-2a2851ce61f8
+# k8s.io/mount-utils v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20220130045626-42a86de5afdd
k8s.io/mount-utils
-# k8s.io/pod-security-admission v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20220325133350-2a2851ce61f8
+# k8s.io/pod-security-admission v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20220130045626-42a86de5afdd
k8s.io/pod-security-admission/admission
k8s.io/pod-security-admission/admission/api
k8s.io/pod-security-admission/admission/api/load
@@ -3111,7 +3110,7 @@ k8s.io/pod-security-admission/admission/api/validation
k8s.io/pod-security-admission/api
k8s.io/pod-security-admission/metrics
k8s.io/pod-security-admission/policy
-# k8s.io/sample-apiserver v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20220325133350-2a2851ce61f8
+# k8s.io/sample-apiserver v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20220130045626-42a86de5afdd
k8s.io/sample-apiserver/pkg/apis/wardle
k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1
# k8s.io/utils v0.0.0-20211208161948-7d6a63dca704
@@ -3237,31 +3236,31 @@ sigs.k8s.io/structured-merge-diff/v4/value
sigs.k8s.io/yaml
# github.com/google/cadvisor => github.com/google/cadvisor v0.43.0
# github.com/onsi/ginkgo => github.com/openshift/onsi-ginkgo v4.7.0-origin.0+incompatible
-# k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/kubernetes => github.com/openshift/kubernetes v1.24.0-alpha.0.0.20220325133350-2a2851ce61f8
-# k8s.io/legacy-cloud-providers => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/pod-security-admission => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20220325133350-2a2851ce61f8
-# k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20220325133350-2a2851ce61f8
+# k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/kubernetes => github.com/openshift/kubernetes v1.22.2-0.20220130045626-42a86de5afdd
+# k8s.io/legacy-cloud-providers => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/pod-security-admission => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20220130045626-42a86de5afdd
+# k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20220130045626-42a86de5afdd