diff --git a/go.mod b/go.mod index 989bae8fdfd3..8486bd065623 100644 --- a/go.mod +++ b/go.mod @@ -438,7 +438,7 @@ replace ( k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.19.0-rc.2 k8s.io/kubectl => k8s.io/kubectl v0.19.0-rc.2 k8s.io/kubelet => k8s.io/kubelet v0.19.0-rc.2 - k8s.io/kubernetes => github.com/openshift/kubernetes v1.20.0-alpha.0.0.20201002182502-db1fc96e2de2 + k8s.io/kubernetes => github.com/openshift/kubernetes v1.20.0-alpha.0.0.20201016161205-0c1205cfae5f k8s.io/legacy-cloud-providers => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20201002182502-db1fc96e2de2 k8s.io/metrics => k8s.io/metrics v0.19.0-rc.2 k8s.io/repo-infra => k8s.io/repo-infra v0.0.1-alpha.1 diff --git a/go.sum b/go.sum index acaa41e82c6a..e15be81c2552 100644 --- a/go.sum +++ b/go.sum @@ -436,8 +436,8 @@ github.com/openshift/client-go v0.0.0-20200827190008-3062137373b5 h1:E6WhVL5p3rf github.com/openshift/client-go v0.0.0-20200827190008-3062137373b5/go.mod h1:5rGmrkQ8DJEUXA+AR3rEjfH+HFyg4/apY9iCQFgvPfE= github.com/openshift/golang-glog v0.0.0-20190322123450-3c92600d7533 h1:A5VovyRu3JFIPmC20HHrsOOny0PIdHuzDdNMULru48k= github.com/openshift/golang-glog v0.0.0-20190322123450-3c92600d7533/go.mod h1:3sa6LKKRDnR1xy4Kn8htvPwqIOVwXh8fIU3LRY22q3U= -github.com/openshift/kubernetes v1.20.0-alpha.0.0.20201002182502-db1fc96e2de2 h1:x/2vWJIoM+0SwZ0RCbq3ku/wcUCqVE7nZSmETCD+5MI= -github.com/openshift/kubernetes v1.20.0-alpha.0.0.20201002182502-db1fc96e2de2/go.mod h1:thZ+fVBomGpLqnFpgVP7zLoR1cZr5ceY0Ccrrw5DUPc= +github.com/openshift/kubernetes v1.20.0-alpha.0.0.20201016161205-0c1205cfae5f h1:jGEt6NH6La0mJ/pk1juZJP8ie7s52g3YQDuDUCQtqqo= +github.com/openshift/kubernetes v1.20.0-alpha.0.0.20201016161205-0c1205cfae5f/go.mod h1:thZ+fVBomGpLqnFpgVP7zLoR1cZr5ceY0Ccrrw5DUPc= github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20201002182502-db1fc96e2de2 h1:vlFNN8ayuvc1PmQhxAEKAQNYR4/kTOOSj1v4fjSD0vE= 
github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20201002182502-db1fc96e2de2/go.mod h1:wR/2WpnpEiQEhbGJyD7pRwxVsSEW4H3QKgvH4LBr97I= github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20201002182502-db1fc96e2de2 h1:+0/vXRKjOcS28ZdtTsEnuY/AFbwxMIAa8ym0d9dLiek= diff --git a/test/extended/util/annotate/generated/zz_generated.annotations.go b/test/extended/util/annotate/generated/zz_generated.annotations.go index b35d208f4646..c16b23f38b40 100644 --- a/test/extended/util/annotate/generated/zz_generated.annotations.go +++ b/test/extended/util/annotate/generated/zz_generated.annotations.go @@ -1865,7 +1865,7 @@ var annotations = map[string]string{ "[Top Level] [sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a ClusterIP service": "should be able to preserve UDP traffic when server pod cycles for a ClusterIP service [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a NodePort service": "should be able to preserve UDP traffic when server pod cycles for a NodePort service [Skipped:Network/OVNKubernetes] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a NodePort service": "should be able to preserve UDP traffic when server pod cycles for a NodePort service [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] DNS configMap federations [Feature:Federation] should be able to change federation configuration [Slow][Serial]": "should be able to change federation configuration [Slow][Serial] [Disabled:SpecialConfig] [Suite:k8s]", @@ -1973,31 +1973,31 @@ var annotations = map[string]string{ "[Top Level] [sig-network] Network should set TCP CLOSE_WAIT timeout [Privileged]": "should set TCP CLOSE_WAIT timeout [Privileged] [Disabled:Broken] [Suite:k8s]", - "[Top Level] 
[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should allow egress access on one named port [Feature:NetworkPolicy]": "should allow egress access on one named port [Feature:NetworkPolicy] [Disabled:Unimplemented] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should allow egress access on one named port [Feature:NetworkPolicy]": "should allow egress access on one named port [Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should allow egress access to server in CIDR block [Feature:NetworkPolicy]": "should allow egress access to server in CIDR block [Feature:NetworkPolicy] [Disabled:Unimplemented] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should allow egress access to server in CIDR block [Feature:NetworkPolicy]": "should allow egress access to server in CIDR block [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should allow ingress access from namespace on one named port [Feature:NetworkPolicy]": "should allow ingress access from namespace on one named port [Feature:NetworkPolicy] [Disabled:Unimplemented] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should allow ingress access from namespace on one named port [Feature:NetworkPolicy]": "should allow ingress access from namespace on one named port 
[Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should allow ingress access from updated namespace [Feature:NetworkPolicy]": "should allow ingress access from updated namespace [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should allow ingress access from updated pod [Feature:NetworkPolicy]": "should allow ingress access from updated pod [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should allow ingress access on one named port [Feature:NetworkPolicy]": "should allow ingress access on one named port [Feature:NetworkPolicy] [Disabled:Broken] [Disabled:Unimplemented] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should allow ingress access on one named port [Feature:NetworkPolicy]": "should allow ingress access on one named port [Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should deny ingress access to updated pod [Feature:NetworkPolicy]": "should deny ingress access to updated pod [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server 
and client should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": "should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Disabled:Unimplemented] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": "should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy]": "should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy] [Disabled:Unimplemented] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy]": "should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy]": "should enforce multiple egress policies with egress allow-all policy taking 
precedence [Feature:NetworkPolicy] [Disabled:Unimplemented] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy]": "should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy]": "should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]": "should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy]": "should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy] [Disabled:Unimplemented] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce policies to check ingress and egress policies can be controlled 
independently based on PodSelector [Feature:NetworkPolicy]": "should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce policy based on NamespaceSelector with MatchExpressions[Feature:NetworkPolicy]": "should enforce policy based on NamespaceSelector with MatchExpressions[Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -2017,19 +2017,19 @@ var annotations = map[string]string{ "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce updated policy [Feature:NetworkPolicy]": "should enforce updated policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy]": "should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy] [Disabled:Unimplemented] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy]": "should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should not allow 
access by TCP when a policy specifies only SCTP [Feature:NetworkPolicy] [Feature:SCTP]": "should not allow access by TCP when a policy specifies only SCTP [Feature:NetworkPolicy] [Feature:SCTP] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should stop enforcing policies after they are deleted [Feature:NetworkPolicy]": "should stop enforcing policies after they are deleted [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should support a 'default-deny-all' policy [Feature:NetworkPolicy]": "should support a 'default-deny-all' policy [Feature:NetworkPolicy] [Disabled:Unimplemented] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should support a 'default-deny-all' policy [Feature:NetworkPolicy]": "should support a 'default-deny-all' policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]": "should support a 'default-deny-ingress' policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should support allow-all policy [Feature:NetworkPolicy]": "should support allow-all policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] NetworkPolicy 
[LinuxOnly] NetworkPolicy between server and client should work with Ingress,Egress specified together [Feature:NetworkPolicy]": "should work with Ingress,Egress specified together [Feature:NetworkPolicy] [Disabled:Unimplemented] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should work with Ingress,Egress specified together [Feature:NetworkPolicy]": "should work with Ingress,Egress specified together [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] Networking Granular Checks: Pods should function for intra-pod communication: http [NodeConformance] [Conformance]": "should function for intra-pod communication: http [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", @@ -2159,7 +2159,7 @@ var annotations = map[string]string{ "[Top Level] [sig-network] Services should only allow access from service loadbalancer source ranges [Slow]": "should only allow access from service loadbalancer source ranges [Slow] [Suite:k8s]", - "[Top Level] [sig-network] Services should preserve source pod IP for traffic thru service cluster IP [LinuxOnly]": "should preserve source pod IP for traffic thru service cluster IP [LinuxOnly] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-network] Services should preserve source pod IP for traffic thru service cluster IP [LinuxOnly]": "should preserve source pod IP for traffic thru service cluster IP [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] Services should prevent NodePort collisions": "should prevent NodePort collisions [Suite:openshift/conformance/parallel] [Suite:k8s]", diff --git a/test/extended/util/annotate/rules.go b/test/extended/util/annotate/rules.go 
index 44a27597cc28..6c2e71ef5ac8 100644 --- a/test/extended/util/annotate/rules.go +++ b/test/extended/util/annotate/rules.go @@ -1,16 +1,17 @@ package main -// NOTE: Only annotation rules targeting tests implemented in origin -// should be added to this file. -// // Rules defined here are additive to the rules already defined for // kube e2e tests in openshift/kubernetes. The kube rules are // vendored via the following file: // // vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go // -// Changes to rules for kube e2e tests should be proposed to -// openshift/kubernetes and vendored back into origin. +// Rules that are needed to pass the upstream e2e test suite in a +// "default OCP CI" configuration (eg, AWS or GCP, openshift-sdn) must +// be added to openshift/kubernetes to allow CI to pass there, and +// then vendored back into origin. Rules that only apply to +// "non-default" configurations (other clouds, other network +// providers) should be added here. var ( testMaps = map[string][]string{ @@ -50,10 +51,13 @@ var ( `\[sig-network\] Networking Granular Checks: Services should function for pod-Service`, }, "[Skipped:gce]": {}, + + // tests that don't pass under openshift-sdn NetworkPolicy mode are specified + // in the rules file in openshift/kubernetes, not here. 
+ // tests that don't pass under openshift-sdn multitenant mode "[Skipped:Network/OpenShiftSDN/Multitenant]": { `\[Feature:NetworkPolicy\]`, // not compatible with multitenant mode - `\[sig-network\] Services should preserve source pod IP for traffic thru service cluster IP`, // known bug, not planned to be fixed }, // tests that don't pass under OVN Kubernetes "[Skipped:Network/OVNKubernetes]": { @@ -66,8 +70,9 @@ var ( `\[sig-network\] Services should have session affinity work for service with type clusterIP`, `\[sig-network\] Services should have session affinity timeout work for NodePort service`, `\[sig-network\] Services should have session affinity timeout work for service with type clusterIP`, - // https://github.com/kubernetes/kubernetes/pull/93597: upstream is flaky - `\[sig-network\] Conntrack should be able to preserve UDP traffic when server pod cycles for a NodePort service`, + + // ovn-kubernetes does not support named ports + `NetworkPolicy.*named port`, }, "[Skipped:ibmcloud]": { // skip Gluster tests (not supported on ROKS worker nodes) diff --git a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go index caa93db294e6..2b9bda9e5ad0 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go @@ -831,7 +831,7 @@ type eventRegistrySink struct { var _ genericapiserver.EventSink = eventRegistrySink{} func (s eventRegistrySink) Create(v1event *corev1.Event) (*corev1.Event, error) { - ctx := request.WithNamespace(request.NewContext(), v1event.Namespace) + ctx := request.WithNamespace(request.WithRequestInfo(request.NewContext(), &request.RequestInfo{APIVersion: "v1"}), v1event.Namespace) var event core.Event if err := v1.Convert_v1_Event_To_core_Event(v1event, &event, nil); err != nil { diff --git a/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go 
b/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go index c5487ad59a36..d50e7690a853 100644 --- a/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go +++ b/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go @@ -30,15 +30,7 @@ var ( `Ubernetes`, // Can't set zone labels today `kube-ui`, // Not installed by default `Kubernetes Dashboard`, // Not installed by default (also probably slow image pull) - - `NetworkPolicy.*egress`, // not supported - `NetworkPolicy.*named port`, // not yet implemented - `enforce egress policy`, // not support - `should proxy to cadvisor`, // we don't expose cAdvisor port directly for security reasons - - `NetworkPolicy.*IPBlock`, // not supported - `NetworkPolicy.*Egress`, // not supported - `NetworkPolicy.*default-deny-all`, // not supported + `should proxy to cadvisor`, // we don't expose cAdvisor port directly for security reasons }, // tests that rely on special configuration that we do not yet support "[Disabled:SpecialConfig]": { @@ -72,7 +64,6 @@ var ( `should check kube-proxy urls`, // previously this test was skipped b/c we reported -1 as the number of nodes, now we report proper number and test fails `SSH`, // TRIAGE `should implement service.kubernetes.io/service-proxy-name`, // this is an optional test that requires SSH. sig-network - `should allow ingress access on one named port`, // https://bugzilla.redhat.com/show_bug.cgi?id=1711602 `recreate nodes and ensure they function upon restart`, // https://bugzilla.redhat.com/show_bug.cgi?id=1756428 `\[Driver: iscsi\]`, // https://bugzilla.redhat.com/show_bug.cgi?id=1711627 @@ -183,6 +174,16 @@ var ( `\[Feature:GKELocalSSD\]`, `\[Feature:GKENodePool\]`, }, + // Tests that don't pass under openshift-sdn. + // These are skipped explicitly by openshift-hack/test-kubernetes-e2e.sh, + // but will also be skipped by openshift-tests in jobs that use openshift-sdn. 
+ "[Skipped:Network/OpenShiftSDN]": { + `NetworkPolicy.*IPBlock`, // feature is not supported by openshift-sdn + `NetworkPolicy.*[Ee]gress`, // feature is not supported by openshift-sdn + `NetworkPolicy.*named port`, // feature is not supported by openshift-sdn + + `NetworkPolicy between server and client should support a 'default-deny-all' policy`, // uses egress feature + }, } // labelExcludes temporarily block tests out of a specific suite diff --git a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver/patch.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver/patch.go index bd5d050fdd8d..fe481ffac9d9 100644 --- a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver/patch.go +++ b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver/patch.go @@ -80,7 +80,8 @@ func OpenShiftKubeAPIServerConfigPatch(genericConfig *genericapiserver.Config, k // END HANDLER CHAIN openshiftAPIServiceReachabilityCheck := newOpenshiftAPIServiceReachabilityCheck() - genericConfig.ReadyzChecks = append(genericConfig.ReadyzChecks, openshiftAPIServiceReachabilityCheck) + oauthAPIServiceReachabilityCheck := newOAuthPIServiceReachabilityCheck() + genericConfig.ReadyzChecks = append(genericConfig.ReadyzChecks, openshiftAPIServiceReachabilityCheck, oauthAPIServiceReachabilityCheck) genericConfig.AddPostStartHookOrDie("openshift.io-startkubeinformers", func(context genericapiserver.PostStartHookContext) error { go openshiftInformers.Start(context.StopCh) @@ -90,6 +91,10 @@ func OpenShiftKubeAPIServerConfigPatch(genericConfig *genericapiserver.Config, k go openshiftAPIServiceReachabilityCheck.checkForConnection(context) return nil }) + genericConfig.AddPostStartHookOrDie("openshift.io-oauth-apiserver-reachable", func(context genericapiserver.PostStartHookContext) error { + go oauthAPIServiceReachabilityCheck.checkForConnection(context) + return nil + }) enablement.AppendPostStartHooksOrDie(genericConfig) 
return nil diff --git a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver/sdn_readyz_wait.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver/sdn_readyz_wait.go index 3d8a86cf714c..59f5353d2952 100644 --- a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver/sdn_readyz_wait.go +++ b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver/sdn_readyz_wait.go @@ -18,20 +18,40 @@ import ( "k8s.io/klog/v2" ) -func newOpenshiftAPIServiceReachabilityCheck() *openshiftAPIServiceAvailabilityCheck { - return &openshiftAPIServiceAvailabilityCheck{done: make(chan struct{})} +func newOpenshiftAPIServiceReachabilityCheck() *aggregatedAPIServiceAvailabilityCheck { + return newAggregatedAPIServiceReachabilityCheck("openshift-apiserver", "api") } -type openshiftAPIServiceAvailabilityCheck struct { +func newOAuthPIServiceReachabilityCheck() *aggregatedAPIServiceAvailabilityCheck { + return newAggregatedAPIServiceReachabilityCheck("openshift-oauth-apiserver", "api") +} + +// if the API service is not found, then this check returns quickly. 
+// if the endpoint is not accessible within 60 seconds, we report ready no matter what +// otherwise, wait for up to 60 seconds to be able to reach the apiserver +func newAggregatedAPIServiceReachabilityCheck(namespace, service string) *aggregatedAPIServiceAvailabilityCheck { + return &aggregatedAPIServiceAvailabilityCheck{ + done: make(chan struct{}), + namespace: namespace, + serviceName: service, + } +} + +type aggregatedAPIServiceAvailabilityCheck struct { // done indicates that this check is complete (success or failure) and the check should return true done chan struct{} + + // namespace is the namespace hosting the service for the aggregated api + namespace string + // serviceName is used to get a list of endpoints to directly dial + serviceName string } -func (c *openshiftAPIServiceAvailabilityCheck) Name() string { - return "openshift-apiservices-available" +func (c *aggregatedAPIServiceAvailabilityCheck) Name() string { + return fmt.Sprintf("%s-%s-available", c.serviceName, c.namespace) } -func (c *openshiftAPIServiceAvailabilityCheck) Check(req *http.Request) error { +func (c *aggregatedAPIServiceAvailabilityCheck) Check(req *http.Request) error { select { case <-c.done: return nil @@ -40,11 +60,11 @@ func (c *openshiftAPIServiceAvailabilityCheck) Check(req *http.Request) error { } } -func (c *openshiftAPIServiceAvailabilityCheck) checkForConnection(context genericapiserver.PostStartHookContext) { +func (c *aggregatedAPIServiceAvailabilityCheck) checkForConnection(context genericapiserver.PostStartHookContext) { defer utilruntime.HandleCrash() - reachedOpenshiftAPIServer := make(chan struct{}) - noOpenshiftAPIServer := make(chan struct{}) + reachedAggregatedAPIServer := make(chan struct{}) + noAggregatedAPIServer := make(chan struct{}) waitUntilCh := make(chan struct{}) defer func() { close(waitUntilCh) // this stops the endpoint check @@ -58,8 +78,8 @@ func (c *openshiftAPIServiceAvailabilityCheck) checkForConnection(context generi panic(err) } - // 
Start a thread which repeatedly tries to connect to any openshift-apiserver endpoint. - // 1. if the openshift-apiserver endpoint doesn't exist, logs a warning and reports ready + // Start a thread which repeatedly tries to connect to any aggregated apiserver endpoint. + // 1. if the aggregated apiserver endpoint doesn't exist, logs a warning and reports ready // 2. if a connection cannot be made, after 60 seconds logs an error and reports ready -- this avoids a rebootstrapping cycle // 3. as soon as a connection can be made, logs a time to be ready and reports ready. go func() { @@ -76,11 +96,11 @@ func (c *openshiftAPIServiceAvailabilityCheck) checkForConnection(context generi wait.PollImmediateUntil(1*time.Second, func() (bool, error) { ctx := gocontext.TODO() - openshiftEndpoints, err := kubeClient.CoreV1().Endpoints("openshift-apiserver").Get(ctx, "api", metav1.GetOptions{}) + openshiftEndpoints, err := kubeClient.CoreV1().Endpoints(c.namespace).Get(ctx, c.serviceName, metav1.GetOptions{}) if apierrors.IsNotFound(err) { - // if we have no openshift apiserver endpoint, we have no reason to wait - klog.Warning("api.openshift-apiserver.svc endpoints were not found") - close(noOpenshiftAPIServer) + // if we have no aggregated apiserver endpoint, we have no reason to wait + klog.Warningf("%s.%s.svc endpoints were not found", c.serviceName, c.namespace) + close(noAggregatedAPIServer) return true, nil } if err != nil { @@ -94,7 +114,7 @@ func (c *openshiftAPIServiceAvailabilityCheck) checkForConnection(context generi if err == nil { // any http response is fine. 
it means that we made contact response, dumpErr := httputil.DumpResponse(resp, true) klog.V(4).Infof("reached to connect to %q: %v\n%v", url, dumpErr, string(response)) - close(reachedOpenshiftAPIServer) + close(reachedAggregatedAPIServer) resp.Body.Close() return true, nil } @@ -109,18 +129,18 @@ func (c *openshiftAPIServiceAvailabilityCheck) checkForConnection(context generi select { case <-time.After(60 * time.Second): // if we timeout, always return ok so that we can start from a case where all kube-apiservers are down and the SDN isn't coming up - utilruntime.HandleError(fmt.Errorf("openshift.io-openshift-apiserver-reachable never reached openshift apiservice")) + utilruntime.HandleError(fmt.Errorf("%s never reached apiserver", c.Name())) return case <-context.StopCh: - utilruntime.HandleError(fmt.Errorf("openshift.io-openshift-apiserver-reachable interrupted")) + utilruntime.HandleError(fmt.Errorf("%s interrupted", c.Name())) return - case <-noOpenshiftAPIServer: - utilruntime.HandleError(fmt.Errorf("openshift.io-openshift-apiserver-reachable did not find an openshift-apiserver endpoint")) + case <-noAggregatedAPIServer: + utilruntime.HandleError(fmt.Errorf("%s did not find an %s endpoint", c.Name(), c.namespace)) return - case <-reachedOpenshiftAPIServer: + case <-reachedAggregatedAPIServer: end := time.Now() - klog.Infof("reached openshift apiserver via SDN after %v milliseconds", end.Sub(start).Milliseconds()) + klog.Infof("reached %s via SDN after %v milliseconds", c.namespace, end.Sub(start).Milliseconds()) return } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go index 386f82d15919..40b709eb2412 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go @@ -1605,7 +1605,7 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontaine // convertToAPIContainerStatuses converts the given internal 
container // statuses into API container statuses. func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecontainer.PodStatus, previousStatus []v1.ContainerStatus, containers []v1.Container, hasInitContainers, isInitContainer bool) []v1.ContainerStatus { - convertContainerStatus := func(cs *kubecontainer.Status) *v1.ContainerStatus { + convertContainerStatus := func(cs *kubecontainer.Status, oldStatus *v1.ContainerStatus) *v1.ContainerStatus { cid := cs.ID.String() status := &v1.ContainerStatus{ Name: cs.Name, @@ -1614,17 +1614,17 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon ImageID: cs.ImageID, ContainerID: cid, } - switch cs.State { - case kubecontainer.ContainerStateRunning: + switch { + case cs.State == kubecontainer.ContainerStateRunning: status.State.Running = &v1.ContainerStateRunning{StartedAt: metav1.NewTime(cs.StartedAt)} - case kubecontainer.ContainerStateCreated: + case cs.State == kubecontainer.ContainerStateCreated: // Treat containers in the "created" state as if they are exited. // The pod workers are supposed start all containers it creates in // one sync (syncPod) iteration. There should not be any normal // "created" containers when the pod worker generates the status at // the beginning of a sync iteration. 
fallthrough - case kubecontainer.ContainerStateExited: + case cs.State == kubecontainer.ContainerStateExited: status.State.Terminated = &v1.ContainerStateTerminated{ ExitCode: int32(cs.ExitCode), Reason: cs.Reason, @@ -1633,7 +1633,31 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon FinishedAt: metav1.NewTime(cs.FinishedAt), ContainerID: cid, } + + case cs.State == kubecontainer.ContainerStateUnknown && + oldStatus != nil && // we have an old status + oldStatus.State.Running != nil: // our previous status was running + // if this happens, then we know that this container was previously running and isn't anymore (assuming the CRI isn't failing to return running containers). + // you can imagine this happening in cases where a container failed and the kubelet didn't ask about it in time to see the result. + // in this case, the container should not go into waiting state immediately because that can make cases like runonce pods actually run + // twice. "container never ran" is different than "container ran and failed". This is handled differently in the kubelet + // and it is handled differently in higher order logic like crashloop detection and handling + status.State.Terminated = &v1.ContainerStateTerminated{ + Reason: "ContainerStatusUnknown", + Message: "The container could not be located when the pod was terminated", + ExitCode: 137, // this code indicates an error + } + // the restart count normally comes from the CRI (see near the top of this method), but since this is being added explicitly + // for the case where the CRI did not return a status, we need to manually increment the restart count to be accurate. + status.RestartCount = oldStatus.RestartCount + 1 + default: + // this collapses any unknown state to container waiting. If any container is waiting, then the pod status moves to pending even if it is running.
+ // if I'm reading this correctly, then any failure to read status on any container results in the entire pod going pending even if the containers + // are actually running. + // see https://github.com/kubernetes/kubernetes/blob/5d1b3e26af73dde33ecb6a3e69fb5876ceab192f/pkg/kubelet/kuberuntime/kuberuntime_container.go#L497 to + // https://github.com/kubernetes/kubernetes/blob/8976e3620f8963e72084971d9d4decbd026bf49f/pkg/kubelet/kuberuntime/helpers.go#L58-L71 + // and interpreted here https://github.com/kubernetes/kubernetes/blob/b27e78f590a0d43e4a23ca3b2bf1739ca4c6e109/pkg/kubelet/kubelet_pods.go#L1434-L1439 status.State.Waiting = &v1.ContainerStateWaiting{} } return status @@ -1673,6 +1697,70 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon statuses[container.Name] = status } + for _, container := range containers { + found := false + for _, cStatus := range podStatus.ContainerStatuses { + if container.Name == cStatus.Name { + found = true + break + } + } + if found { + continue + } + // if no container is found, then assuming it should be waiting seems plausible, but the status code requires + // that a previous termination be present. If we're offline long enough (or something removed the container?), then + // the previous termination may not be present. This next code block ensures that if the container was previously running + // then when that container status disappears, we can infer that it terminated even if we don't know the status code. + // By setting the lasttermination state we are able to leave the container status waiting and present more accurate + // data via the API. 
+ + oldStatus, ok := oldStatuses[container.Name] + if !ok { + continue + } + if oldStatus.State.Terminated != nil { + // if the old container status was terminated, the lasttermination status is correct + continue + } + if oldStatus.State.Running == nil { + // if the old container status isn't running, then waiting is an appropriate status and we have nothing to do + continue + } + + if pod.DeletionTimestamp == nil { + continue + } + + // and if the pod itself is being deleted, then the CRI may have removed the container already and for whatever reason the kubelet missed the exit code + // (this seems not awesome). We know at this point that we will not be restarting the container. + status := statuses[container.Name] + // if the status we're about to write indicates the default, the Waiting status will force this pod back into Pending. + // That isn't true, we know the pod is going away. + isDefaultWaitingStatus := status.State.Waiting != nil && status.State.Waiting.Reason == "ContainerCreating" + if hasInitContainers { + isDefaultWaitingStatus = status.State.Waiting != nil && status.State.Waiting.Reason == "PodInitializing" + } + if !isDefaultWaitingStatus { + // the status was written, don't override + continue + } + if status.LastTerminationState.Terminated != nil { + // if we already have a termination state, nothing to do + continue + } + + // setting this value ensures that we show as stopped here, not as waiting: + // https://github.com/kubernetes/kubernetes/blob/90c9f7b3e198e82a756a68ffeac978a00d606e55/pkg/kubelet/kubelet_pods.go#L1440-L1445 + // This prevents the pod from becoming pending + status.LastTerminationState.Terminated = &v1.ContainerStateTerminated{ + Reason: "ContainerStatusUnknown", + Message: "The container could not be located when the pod was deleted. The container used to be Running", + ExitCode: 137, + } + statuses[container.Name] = status + } + // Make the latest container status comes first.
sort.Sort(sort.Reverse(kubecontainer.SortContainerStatusesByCreationTime(podStatus.ContainerStatuses))) // Set container statuses according to the statuses seen in pod status @@ -1686,7 +1774,11 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon if containerSeen[cName] >= 2 { continue } - status := convertContainerStatus(cStatus) + var oldStatusPtr *v1.ContainerStatus + if oldStatus, ok := oldStatuses[cName]; ok { + oldStatusPtr = &oldStatus + } + status := convertContainerStatus(cStatus, oldStatusPtr) if containerSeen[cName] == 0 { statuses[cName] = status } else { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go b/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go index 4c001cb704b1..a0f5922437a6 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go @@ -594,9 +594,9 @@ func (util *rbdUtil) CreateImage(p *rbdVolumeProvisioner) (r *v1.RBDPersistentVo volSz := fmt.Sprintf("%d", sz) mon := util.kernelRBDMonitorsOpt(p.Mon) if p.rbdMounter.imageFormat == rbdImageFormat2 { - klog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, p.rbdMounter.imageFeatures, mon, p.rbdMounter.Pool, p.rbdMounter.adminID, p.rbdMounter.adminSecret) + klog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s id %s key ", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, p.rbdMounter.imageFeatures, mon, p.rbdMounter.Pool, p.rbdMounter.adminID) } else { - klog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, mon, p.rbdMounter.Pool, p.rbdMounter.adminID, p.rbdMounter.adminSecret) + klog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s id %s key ", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, mon, p.rbdMounter.Pool, p.rbdMounter.adminID) } 
args := []string{"create", p.rbdMounter.Image, "--size", volSz, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminID, "-m", mon, "--key=" + p.rbdMounter.adminSecret, "--image-format", p.rbdMounter.imageFormat} if p.rbdMounter.imageFormat == rbdImageFormat2 { @@ -632,7 +632,7 @@ func (util *rbdUtil) DeleteImage(p *rbdVolumeDeleter) error { } // rbd rm. mon := util.kernelRBDMonitorsOpt(p.rbdMounter.Mon) - klog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, mon, p.rbdMounter.Pool, p.rbdMounter.adminID, p.rbdMounter.adminSecret) + klog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key ", p.rbdMounter.Image, mon, p.rbdMounter.Pool, p.rbdMounter.adminID) output, err = p.exec.Command("rbd", "rm", p.rbdMounter.Image, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminID, "-m", mon, "--key="+p.rbdMounter.adminSecret).CombinedOutput() if err == nil { @@ -668,7 +668,7 @@ func (util *rbdUtil) ExpandImage(rbdExpander *rbdVolumeExpander, oldSize resourc // rbd resize. 
mon := util.kernelRBDMonitorsOpt(rbdExpander.rbdMounter.Mon) - klog.V(4).Infof("rbd: resize %s using mon %s, pool %s id %s key %s", rbdExpander.rbdMounter.Image, mon, rbdExpander.rbdMounter.Pool, rbdExpander.rbdMounter.adminID, rbdExpander.rbdMounter.adminSecret) + klog.V(4).Infof("rbd: resize %s using mon %s, pool %s id %s key ", rbdExpander.rbdMounter.Image, mon, rbdExpander.rbdMounter.Pool, rbdExpander.rbdMounter.adminID) output, err = rbdExpander.exec.Command("rbd", "resize", rbdExpander.rbdMounter.Image, "--size", newVolSz, "--pool", rbdExpander.rbdMounter.Pool, "--id", rbdExpander.rbdMounter.adminID, "-m", mon, "--key="+rbdExpander.rbdMounter.adminSecret).CombinedOutput() if err == nil { @@ -710,7 +710,7 @@ func (util *rbdUtil) rbdInfo(b *rbdMounter) (int, error) { // # image does not exist (exit=2) // rbd: error opening image 1234: (2) No such file or directory // - klog.V(4).Infof("rbd: info %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret) + klog.V(4).Infof("rbd: info %s using mon %s, pool %s id %s key ", b.Image, mon, b.Pool, id) output, err = b.exec.Command("rbd", "info", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret, "-k=/dev/null", "--format=json").CombinedOutput() @@ -773,7 +773,7 @@ func (util *rbdUtil) rbdStatus(b *rbdMounter) (bool, string, error) { // # image does not exist (exit=2) // rbd: error opening image kubernetes-dynamic-pvc-: (2) No such file or directory // - klog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret) + klog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key ", b.Image, mon, b.Pool, id) cmd, err = b.exec.Command("rbd", "status", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret).CombinedOutput() output = string(cmd) diff --git a/vendor/k8s.io/kubernetes/test/e2e/network/network_policy.go b/vendor/k8s.io/kubernetes/test/e2e/network/network_policy.go index 29a9895ad53d..cde79c4ffae2 100644 --- 
a/vendor/k8s.io/kubernetes/test/e2e/network/network_policy.go +++ b/vendor/k8s.io/kubernetes/test/e2e/network/network_policy.go @@ -690,7 +690,6 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { ginkgo.It("should allow egress access on one named port [Feature:NetworkPolicy]", func() { clientPodName := "client-a" - protocolUDP := v1.ProtocolUDP policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "allow-client-a-via-named-port-egress-rule", @@ -708,11 +707,6 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { { Port: &intstr.IntOrString{Type: intstr.String, StrVal: "serve-80"}, }, - // Allow DNS look-ups - { - Protocol: &protocolUDP, - Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53}, - }, }, }}, }, @@ -956,7 +950,6 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { ginkgo.It("should work with Ingress,Egress specified together [Feature:NetworkPolicy]", func() { const allowedPort = 80 const notAllowedPort = 81 - protocolUDP := v1.ProtocolUDP nsBName := f.BaseName + "-b" nsB, err := f.CreateNamespace(nsBName, map[string]string{ @@ -992,15 +985,6 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }}, }}, Egress: []networkingv1.NetworkPolicyEgressRule{ - { - Ports: []networkingv1.NetworkPolicyPort{ - // Allow DNS look-ups - { - Protocol: &protocolUDP, - Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53}, - }, - }, - }, { To: []networkingv1.NetworkPolicyPeer{ { @@ -1071,7 +1055,6 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { framework.ExpectNoError(err, "Error occurred while waiting for pod status in namespace: Ready.") ginkgo.By("Creating a network policy for the server which allows traffic only to a server in different namespace.") - protocolUDP := v1.ProtocolUDP policyAllowToServerInNSB := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Namespace: nsA.Name, @@ -1087,15 +1070,6 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { PolicyTypes: 
[]networkingv1.PolicyType{networkingv1.PolicyTypeEgress}, // Allow traffic only to server-a in namespace-b Egress: []networkingv1.NetworkPolicyEgressRule{ - { - Ports: []networkingv1.NetworkPolicyPort{ - // Allow DNS look-ups - { - Protocol: &protocolUDP, - Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53}, - }, - }, - }, { To: []networkingv1.NetworkPolicyPeer{ { @@ -1207,8 +1181,6 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { framework.ExpectNoError(err, "Error occurred while waiting for pod type: Ready.") }) - protocolUDP := v1.ProtocolUDP - ginkgo.By("Creating client-a which should be able to contact the server before applying policy.", func() { testCanConnect(f, f.Namespace, "client-a", serviceB, 80) }) @@ -1229,15 +1201,6 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress}, // Allow traffic only to "server-a" Egress: []networkingv1.NetworkPolicyEgressRule{ - { - Ports: []networkingv1.NetworkPolicyPort{ - // Allow DNS look-ups - { - Protocol: &protocolUDP, - Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53}, - }, - }, - }, { To: []networkingv1.NetworkPolicyPeer{ { @@ -1366,8 +1329,6 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { var serviceB *v1.Service var podServerB *v1.Pod - protocolUDP := v1.ProtocolUDP - // Getting podServer's status to get podServer's IP, to create the CIDR podServerStatus, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), podServer.Name, metav1.GetOptions{}) if err != nil { @@ -1407,15 +1368,6 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress}, // Allow traffic to only one CIDR block. 
Egress: []networkingv1.NetworkPolicyEgressRule{ - { - Ports: []networkingv1.NetworkPolicyPort{ - // Allow DNS look-ups - { - Protocol: &protocolUDP, - Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53}, - }, - }, - }, { To: []networkingv1.NetworkPolicyPeer{ { @@ -1442,8 +1394,6 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }) ginkgo.It("should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy]", func() { - protocolUDP := v1.ProtocolUDP - // Getting podServer's status to get podServer's IP, to create the CIDR with except clause podServerStatus, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), podServer.Name, metav1.GetOptions{}) if err != nil { @@ -1474,15 +1424,6 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress}, // Allow traffic to only one CIDR block except subnet which includes Server. Egress: []networkingv1.NetworkPolicyEgressRule{ - { - Ports: []networkingv1.NetworkPolicyPort{ - // Allow DNS look-ups - { - Protocol: &protocolUDP, - Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53}, - }, - }, - }, { To: []networkingv1.NetworkPolicyPeer{ { @@ -1507,8 +1448,6 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }) ginkgo.It("should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy]", func() { - protocolUDP := v1.ProtocolUDP - // Getting podServer's status to get podServer's IP, to create the CIDR with except clause podServerStatus, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), podServer.Name, metav1.GetOptions{}) if err != nil { @@ -1536,15 +1475,6 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress}, // Allow traffic to only one CIDR block except subnet which includes Server. 
Egress: []networkingv1.NetworkPolicyEgressRule{ - { - Ports: []networkingv1.NetworkPolicyPort{ - // Allow DNS look-ups - { - Protocol: &protocolUDP, - Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53}, - }, - }, - }, { To: []networkingv1.NetworkPolicyPeer{ { @@ -1582,15 +1512,6 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress}, // Allow traffic to only one CIDR block which includes Server. Egress: []networkingv1.NetworkPolicyEgressRule{ - { - Ports: []networkingv1.NetworkPolicyPort{ - // Allow DNS look-ups - { - Protocol: &protocolUDP, - Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53}, - }, - }, - }, { To: []networkingv1.NetworkPolicyPeer{ { @@ -1635,8 +1556,6 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { var podA, podB *v1.Pod var err error - protocolUDP := v1.ProtocolUDP - // Before applying policy, communication should be successful between pod-a and pod-b podA, serviceA = createServerPodAndService(f, f.Namespace, "pod-a", []int{80}) ginkgo.By("Waiting for pod-a to be ready", func() { @@ -1673,15 +1592,6 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress}, // Allow traffic to server on pod-b Egress: []networkingv1.NetworkPolicyEgressRule{ - { - Ports: []networkingv1.NetworkPolicyPort{ - // Allow DNS look-ups - { - Protocol: &protocolUDP, - Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53}, - }, - }, - }, { To: []networkingv1.NetworkPolicyPeer{ { @@ -2009,8 +1919,8 @@ func createNetworkClientPodWithRestartPolicy(f *framework.Framework, namespace * Args: []string{ "/bin/sh", "-c", - fmt.Sprintf("for i in $(seq 1 5); do nc -vz -w 8 %s.%s %d && exit 0 || sleep 1; done; exit 1", - targetService.Name, targetService.Namespace, targetPort), + fmt.Sprintf("for i in $(seq 1 5); do nc -vz -w 8 %s %d && exit 0 || sleep 1; done; exit 1", + targetService.Spec.ClusterIP, targetPort), 
}, }, }, diff --git a/vendor/k8s.io/kubernetes/test/e2e/node/pods.go b/vendor/k8s.io/kubernetes/test/e2e/node/pods.go index f863f7c4bf11..b15b1dc123ba 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/node/pods.go +++ b/vendor/k8s.io/kubernetes/test/e2e/node/pods.go @@ -274,6 +274,7 @@ var _ = SIGDescribe("Pods Extended", func() { start := time.Now() created := podClient.Create(pod) ch := make(chan []watch.Event) + waitForWatch := make(chan struct{}) go func() { defer ginkgo.GinkgoRecover() defer close(ch) @@ -286,6 +287,7 @@ var _ = SIGDescribe("Pods Extended", func() { return } defer w.Stop() + close(waitForWatch) events := []watch.Event{ {Type: watch.Added, Object: created}, } @@ -302,6 +304,10 @@ var _ = SIGDescribe("Pods Extended", func() { ch <- events }() + select { + case <-ch: // in case the goroutine above exits before establishing the watch + case <-waitForWatch: // when the watch is established + } t := time.Duration(rand.Intn(delay)) * time.Millisecond time.Sleep(t) err := podClient.Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) diff --git a/vendor/modules.txt b/vendor/modules.txt index 2140b458b2c0..7c1a01762f9c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -2000,7 +2000,7 @@ k8s.io/kubectl/pkg/validation k8s.io/kubelet/config/v1beta1 k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1 k8s.io/kubelet/pkg/apis/pluginregistration/v1 -# k8s.io/kubernetes v1.19.0 => github.com/openshift/kubernetes v1.20.0-alpha.0.0.20201002182502-db1fc96e2de2 +# k8s.io/kubernetes v1.19.0 => github.com/openshift/kubernetes v1.20.0-alpha.0.0.20201016161205-0c1205cfae5f ## explicit k8s.io/kubernetes/cmd/kube-apiserver/app k8s.io/kubernetes/cmd/kube-apiserver/app/options @@ -3221,7 +3221,7 @@ sigs.k8s.io/yaml # k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.19.0-rc.2 # k8s.io/kubectl => k8s.io/kubectl v0.19.0-rc.2 # k8s.io/kubelet => k8s.io/kubelet v0.19.0-rc.2 -# k8s.io/kubernetes => github.com/openshift/kubernetes 
v1.20.0-alpha.0.0.20201002182502-db1fc96e2de2 +# k8s.io/kubernetes => github.com/openshift/kubernetes v1.20.0-alpha.0.0.20201016161205-0c1205cfae5f # k8s.io/legacy-cloud-providers => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20201002182502-db1fc96e2de2 # k8s.io/metrics => k8s.io/metrics v0.19.0-rc.2 # k8s.io/repo-infra => k8s.io/repo-infra v0.0.1-alpha.1