diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index d9d5d40eec..e2a0067ee6 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -4,10 +4,6 @@ on: merge_group: pull_request: branches: [ master ] - # Only run jobs if at least one non-doc file is changed - paths-ignore: - - '**/*.md' - - 'mkdocs.yml' schedule: - cron: '0 */12 * * *' workflow_dispatch: diff --git a/contrib/kind-common b/contrib/kind-common index 2a564dece0..bbb7cda7e1 100644 --- a/contrib/kind-common +++ b/contrib/kind-common @@ -388,7 +388,30 @@ install_kubevirt() { local kubevirt_stable_release_url=$(get_kubevirt_release_url "stable") kubectl -n kubevirt patch kubevirt kubevirt --type=json --patch '[{"op":"add","path":"/spec/configuration/network","value":{}},{"op":"add","path":"/spec/configuration/network/binding","value":{"l2bridge":{"domainAttachmentType":"managedTap","migration":{}}}}]' + + if [ ! -d "./bin" ] + then + mkdir -p ./bin + if_error_exit "Failed to create bin dir!" + fi + + if [[ "$OSTYPE" == "linux-gnu" ]]; then + OS_TYPE="linux" + elif [[ "$OSTYPE" == "darwin"* ]]; then + OS_TYPE="darwin" + fi + + pushd ./bin + if [ ! -f ./virtctl ]; then + kubevirt_stable_release_url=$(get_kubevirt_release_url "stable") + cli_name="virtctl-${kubevirt_stable_release_url##*/}-${OS_TYPE}-${ARCH}" + curl -LO "${kubevirt_stable_release_url}/${cli_name}" + mv ${cli_name} virtctl + if_error_exit "Failed to download virtctl!" 
+ fi + popd + chmod +x ./bin/virtctl } install_cert_manager() { diff --git a/dist/images/Dockerfile.fedora b/dist/images/Dockerfile.fedora index 4ca51e888f..49b8da6872 100644 --- a/dist/images/Dockerfile.fedora +++ b/dist/images/Dockerfile.fedora @@ -14,7 +14,7 @@ ARG OVN_FROM=koji ############################################# # Stage to get OVN and OVS RPMs from source # ############################################# -FROM quay.io/fedora/fedora:42 AS ovnbuilder +FROM quay.io/fedora/fedora:41 AS ovnbuilder USER root @@ -78,8 +78,8 @@ RUN git log -n 1 ######################################## # Stage to download OVN RPMs from koji # ######################################## -FROM quay.io/fedora/fedora:42 AS kojidownloader -ARG ovnver=ovn-24.09.2-71.fc42 +FROM quay.io/fedora/fedora:41 AS kojidownloader +ARG ovnver=ovn-24.09.2-71.fc41 USER root @@ -99,14 +99,14 @@ RUN if [ "$TARGETPLATFORM" = "linux/amd64" ] || [ -z "$TARGETPLATFORM"] ; then k ###################################### # Stage to copy OVN RPMs from source # ###################################### -FROM quay.io/fedora/fedora:42 AS source +FROM quay.io/fedora/fedora:41 AS source COPY --from=ovnbuilder /root/ovn/rpm/rpmbuild/RPMS/x86_64/*.rpm / COPY --from=ovnbuilder /root/ovs/rpm/rpmbuild/RPMS/x86_64/*.rpm / #################################### # Stage to copy OVN RPMs from koji # #################################### -FROM quay.io/fedora/fedora:42 AS koji +FROM quay.io/fedora/fedora:41 AS koji COPY --from=kojidownloader /*.rpm / diff --git a/dist/images/ovnkube.sh b/dist/images/ovnkube.sh index e016ce4a47..3931d4e180 100755 --- a/dist/images/ovnkube.sh +++ b/dist/images/ovnkube.sh @@ -2097,12 +2097,6 @@ ovnkube-controller-with-node() { ovn_stateless_netpol_enable_flag="--enable-stateless-netpol" fi - ovn_disable_requestedchassis_flag= - if [[ ${ovn_disable_requestedchassis} == "true" ]]; then - ovn_disable_requestedchassis_flag="--disable-requestedchassis" - fi - echo 
"ovn_disable_requestedchassis_flag=${ovn_disable_requestedchassis_flag}" - echo "=============== ovnkube-controller-with-node --init-ovnkube-controller-with-node==========" /usr/bin/ovnkube --init-ovnkube-controller ${K8S_NODE} --init-node ${K8S_NODE} \ ${anp_enabled_flag} \ @@ -2156,7 +2150,6 @@ ovnkube-controller-with-node() { ${ssl_opts} \ ${network_qos_enabled_flag} \ ${ovn_enable_dnsnameresolver_flag} \ - ${ovn_disable_requestedchassis_flag} \ --cluster-subnets ${net_cidr} --k8s-service-cidr=${svc_cidr} \ --export-ovs-metrics \ --gateway-mode=${ovn_gateway_mode} ${ovn_gateway_opts} \ diff --git a/dist/templates/ovn-setup.yaml.j2 b/dist/templates/ovn-setup.yaml.j2 index 981a362859..8112e06670 100644 --- a/dist/templates/ovn-setup.yaml.j2 +++ b/dist/templates/ovn-setup.yaml.j2 @@ -89,9 +89,7 @@ spec: networkSelectors: - networkSelectionType: DefaultNetwork nodeSelector: {} - frrConfigurationSelector: - matchLabels: - name: receive-all + frrConfigurationSelector: {} advertisements: - "PodNetwork" {%- endif %} diff --git a/go-controller/.golangci.yml b/go-controller/.golangci.yml index 91be64adc3..d381676a37 100644 --- a/go-controller/.golangci.yml +++ b/go-controller/.golangci.yml @@ -60,10 +60,6 @@ linters-settings: # Other frequently used deps - pkg: github.com/ovn-kubernetes/libovsdb/ovsdb alias: "" - - pkg: github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/util - alias: nodeutil - - pkg: github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types - alias: nodetypes revive: rules: diff --git a/go-controller/go.mod b/go-controller/go.mod index 72e89c3b7a..f40f5001e2 100644 --- a/go-controller/go.mod +++ b/go-controller/go.mod @@ -23,7 +23,7 @@ require ( github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.0 github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47 - github.com/k8snetworkplumbingwg/ipamclaims v0.5.0-alpha + github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha github.com/k8snetworkplumbingwg/multi-networkpolicy 
v1.0.1 github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.6.0 github.com/k8snetworkplumbingwg/sriovnet v1.2.1-0.20230427090635-4929697df2dc @@ -57,9 +57,9 @@ require ( gopkg.in/fsnotify/fsnotify.v1 v1.4.7 gopkg.in/gcfg.v1 v1.2.3 gopkg.in/natefinch/lumberjack.v2 v2.2.1 - k8s.io/api v0.32.5 - k8s.io/apimachinery v0.32.5 - k8s.io/client-go v0.32.5 + k8s.io/api v0.32.3 + k8s.io/apimachinery v0.32.3 + k8s.io/client-go v0.32.3 k8s.io/component-helpers v0.32.3 k8s.io/klog/v2 v2.130.1 k8s.io/kubernetes v1.32.6 @@ -125,7 +125,7 @@ require ( github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect go.opencensus.io v0.24.0 // indirect golang.org/x/crypto v0.36.0 // indirect - golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/term v0.30.0 // indirect golang.org/x/text v0.23.0 // indirect golang.org/x/tools v0.26.0 // indirect diff --git a/go-controller/go.sum b/go-controller/go.sum index 2af1883f7e..50d5e1270d 100644 --- a/go-controller/go.sum +++ b/go-controller/go.sum @@ -494,8 +494,8 @@ github.com/juju/version v0.0.0-20161031051906-1f41e27e54f2/go.mod h1:kE8gK5X0CIm github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47 h1:iSncnlC+rtlNOIpPa3fbqQMhpTscGJIlkiWaPl1VcS4= github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47/go.mod h1:SPaDIyUmwN03Bgn0u/mhoiE4o/+koeKh11VUsdsUX0U= -github.com/k8snetworkplumbingwg/ipamclaims v0.5.0-alpha h1:b3iHeks/KTzhG2dNanaUZcFEJwJbYBZY16jxCaVv9i8= -github.com/k8snetworkplumbingwg/ipamclaims v0.5.0-alpha/go.mod h1:MGaMX1tJ7MlHDee4/xmqp3guQh+eDiuCLAauqD9K11Q= +github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha h1:ss+EP77GlQmh90hGKpnAG4Q3VVxRlB7GoncemaPtO4g= +github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha/go.mod h1:qlR+sKxQ2OGfwhFCuXSd7rJ/GgC38vQBeHKQ7f2YnpI= github.com/k8snetworkplumbingwg/multi-networkpolicy 
v1.0.1 h1:Egj1hEVYNXWFlKpgzAXxe/2o8VNiVcAJLrKzlinILQo= github.com/k8snetworkplumbingwg/multi-networkpolicy v1.0.1/go.mod h1:kEJ4WM849yNmXekuSXLRwb+LaZ9usC06O8JgoAIq+f4= github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.6.0 h1:BT3ghAY0q7lWib9rz+tVXDFkm27dJV6SLCn7TunZwo4= @@ -945,8 +945,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= -golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1317,8 +1317,8 @@ k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.22.7/go.mod h1:7hejA1BgBEiSsWljUyRkIjj+AISXO16IwsaDgFjJsQE= k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= -k8s.io/api v0.32.5 h1:uqjjsYo1kTJr5NIcoIaP9F+TgXgADH7nKQx91FDAhtk= -k8s.io/api v0.32.5/go.mod h1:bXXFU3fGCZ/eFMZvfHZC69PeGbXEL4zzjuPVzOxHF64= +k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= +k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= 
k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY= k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= @@ -1326,8 +1326,8 @@ k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRp k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apimachinery v0.22.7/go.mod h1:ZvVLP5iLhwVFg2Yx9Gh5W0um0DUauExbRhe+2Z8I1EU= k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/apimachinery v0.32.5 h1:6We3aJ6crC0ap8EhsEXcgX3LpI6SEjubpiOMXLROwPM= -k8s.io/apimachinery v0.32.5/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= +k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= @@ -1335,8 +1335,8 @@ k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/client-go v0.22.7/go.mod h1:pGU/tWSzzvsYT7M3npHhoZ3Jh9qJTTIvFvDtWuW31dw= -k8s.io/client-go v0.32.5 h1:huFmQMzgWu0z4kbWsuZci+Gt4Fo72I4CcrvhToZ/Qp0= -k8s.io/client-go v0.32.5/go.mod h1:Qchw6f9WIVrur7DKojAHpRgGLcANT0RLIvF39Jz58xA= +k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= +k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= k8s.io/code-generator v0.22.7/go.mod h1:iOZwYADSgFPNGWfqHFfg1V0TNJnl1t0WyZluQp4baqU= k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= 
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= diff --git a/go-controller/pkg/clustermanager/routeadvertisements/controller.go b/go-controller/pkg/clustermanager/routeadvertisements/controller.go index 18fb3dbaae..11f7eb79ab 100644 --- a/go-controller/pkg/clustermanager/routeadvertisements/controller.go +++ b/go-controller/pkg/clustermanager/routeadvertisements/controller.go @@ -951,18 +951,10 @@ func (c *Controller) updateRAStatus(ra *ratypes.RouteAdvertisements, hadUpdates return nil } - var updateStatus bool condition := meta.FindStatusCondition(ra.Status.Conditions, "Accepted") - switch { - case condition == nil: - fallthrough - case condition.ObservedGeneration != ra.Generation: - fallthrough - case (err == nil) != (condition.Status == metav1.ConditionTrue): - fallthrough - case hadUpdates: - updateStatus = true - } + updateStatus := hadUpdates || condition == nil || condition.ObservedGeneration != ra.Generation + updateStatus = updateStatus || err != nil + if !updateStatus { return nil } diff --git a/go-controller/pkg/clustermanager/routeadvertisements/controller_test.go b/go-controller/pkg/clustermanager/routeadvertisements/controller_test.go index 305418425c..03e9391888 100644 --- a/go-controller/pkg/clustermanager/routeadvertisements/controller_test.go +++ b/go-controller/pkg/clustermanager/routeadvertisements/controller_test.go @@ -47,7 +47,6 @@ type testRA struct { SelectsDefault bool AdvertisePods bool AdvertiseEgressIPs bool - Status *metav1.ConditionStatus } func (tra testRA) RouteAdvertisements() *ratypes.RouteAdvertisements { @@ -93,9 +92,6 @@ func (tra testRA) RouteAdvertisements() *ratypes.RouteAdvertisements { MatchLabels: tra.FRRConfigurationSelector, } } - if tra.Status != nil { - ra.Status.Conditions = []metav1.Condition{{Type: "Accepted", Status: *tra.Status}} - } return ra } @@ -780,38 +776,6 @@ func TestController_reconcile(t *testing.T) { }, expectNADAnnotations: map[string]map[string]string{"default": 
{types.OvnRouteAdvertisementsKey: "[\"ra\"]"}, "red": {types.OvnRouteAdvertisementsKey: "[\"ra\"]"}}, }, - { - name: "reconciles RouteAdvertisements status even when no other updates are required", - ra: &testRA{Name: "ra", AdvertisePods: true, AdvertiseEgressIPs: true, SelectsDefault: true, Status: ptr.To(metav1.ConditionFalse)}, - frrConfigs: []*testFRRConfig{ - { - Name: "frrConfig", - Namespace: frrNamespace, - Routers: []*testRouter{ - {ASN: 1, Prefixes: []string{"1.1.1.0/24"}, Neighbors: []*testNeighbor{ - {ASN: 1, Address: "1.0.0.100"}, - }}, - }, - }, - { - Labels: map[string]string{types.OvnRouteAdvertisementsKey: "ra"}, - Annotations: map[string]string{types.OvnRouteAdvertisementsKey: "ra/frrConfig/node"}, - NodeSelector: map[string]string{"kubernetes.io/hostname": "node"}, - Routers: []*testRouter{ - {ASN: 1, Prefixes: []string{"1.0.1.1/32", "1.1.0.0/24"}, Neighbors: []*testNeighbor{ - {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.0.1.1/32", "1.1.0.0/24"}, Receive: []string{"1.1.0.0/16/24"}}, - }}, - }, - }, - }, - nads: []*testNAD{ - {Name: "default", Namespace: "ovn-kubernetes", Network: "default", Annotations: map[string]string{types.OvnRouteAdvertisementsKey: "[\"ra\"]"}}, - }, - nodes: []*testNode{{Name: "node", SubnetsAnnotation: "{\"default\":\"1.1.0.0/24\"}"}}, - eips: []*testEIP{{Name: "eip", EIPs: map[string]string{"node": "1.0.1.1"}}}, - reconcile: "ra", - expectAcceptedStatus: metav1.ConditionTrue, - }, { name: "fails to reconcile a secondary network", ra: &testRA{Name: "ra", AdvertisePods: true, NetworkSelector: map[string]string{"selected": "true"}}, @@ -1041,6 +1005,11 @@ func TestController_reconcile(t *testing.T) { c := NewController(nm.Interface(), wf, fakeClientset) + // prime the default network NAD + if defaultNAD == nil { + defaultNAD, err = c.getOrCreateDefaultNetworkNAD() + g.Expect(err).ToNot(gomega.HaveOccurred()) + } // prime the default network NAD namespace namespace := &corev1.Namespace{ ObjectMeta: 
metav1.ObjectMeta{ @@ -1049,15 +1018,11 @@ func TestController_reconcile(t *testing.T) { } _, err = fakeClientset.KubeClient.CoreV1().Namespaces().Create(context.Background(), namespace, metav1.CreateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - // prime the default network NAD - if defaultNAD == nil { - defaultNAD, err = c.getOrCreateDefaultNetworkNAD() - g.Expect(err).ToNot(gomega.HaveOccurred()) - // update it with the annotation that network manager would set - defaultNAD.Annotations = map[string]string{types.OvnNetworkNameAnnotation: types.DefaultNetworkName} - _, err = fakeClientset.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(defaultNAD.Namespace).Update(context.Background(), defaultNAD, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - } + + // update it with the annotation that network manager would set + defaultNAD.Annotations = map[string]string{types.OvnNetworkNameAnnotation: types.DefaultNetworkName} + _, err = fakeClientset.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(defaultNAD.Namespace).Update(context.Background(), defaultNAD, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) err = wf.Start() g.Expect(err).ToNot(gomega.HaveOccurred()) @@ -1074,13 +1039,7 @@ func TestController_reconcile(t *testing.T) { ) err = nm.Start() - // some test cases start with a bad RA status, avoid asserting - // initial sync in this case as it will fail - if tt.ra == nil || tt.ra.Status == nil || *tt.ra.Status == metav1.ConditionTrue { - g.Expect(err).ToNot(gomega.HaveOccurred()) - } else { - g.Expect(err).To(gomega.HaveOccurred()) - } + g.Expect(err).ToNot(gomega.HaveOccurred()) // we just need the inital sync nm.Stop() diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/controller.go b/go-controller/pkg/clustermanager/userdefinednetwork/controller.go index 67292bd2ed..e8c1d74a03 100644 --- a/go-controller/pkg/clustermanager/userdefinednetwork/controller.go +++ 
b/go-controller/pkg/clustermanager/userdefinednetwork/controller.go @@ -37,7 +37,6 @@ import ( userdefinednetworkscheme "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/scheme" userdefinednetworkinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/userdefinednetwork/v1" userdefinednetworklister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/listers/userdefinednetwork/v1" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -390,14 +389,6 @@ func (c *Controller) syncUserDefinedNetwork(udn *userdefinednetworkv1.UserDefine return nil, nil } - var role, topology string - if udn.Spec.Layer2 != nil { - role = string(udn.Spec.Layer2.Role) - } else if udn.Spec.Layer3 != nil { - role = string(udn.Spec.Layer3.Role) - } - topology = string(udn.Spec.Topology) - if !udn.DeletionTimestamp.IsZero() { // udn is being deleted if controllerutil.ContainsFinalizer(udn, template.FinalizerUserDefinedNetwork) { if err := c.deleteNAD(udn, udn.Namespace); err != nil { @@ -410,7 +401,6 @@ func (c *Controller) syncUserDefinedNetwork(udn *userdefinednetworkv1.UserDefine return nil, fmt.Errorf("failed to remove finalizer to UserDefinedNetwork: %w", err) } klog.Infof("Finalizer removed from UserDefinedNetworks [%s/%s]", udn.Namespace, udn.Name) - metrics.DecrementUDNCount(role, topology) } return nil, nil @@ -422,7 +412,6 @@ func (c *Controller) syncUserDefinedNetwork(udn *userdefinednetworkv1.UserDefine return nil, fmt.Errorf("failed to add finalizer to UserDefinedNetwork: %w", err) } klog.Infof("Added Finalizer to UserDefinedNetwork [%s/%s]", udn.Namespace, udn.Name) - metrics.IncrementUDNCount(role, topology) } return c.updateNAD(udn, udn.Namespace) @@ -550,16 +539,6 @@ func (c *Controller) syncClusterUDN(cudn *userdefinednetworkv1.ClusterUserDefine cudnName := 
cudn.Name affectedNamespaces := c.namespaceTracker[cudnName] - var role, topology string - if cudn.Spec.Network.Layer2 != nil { - role = string(cudn.Spec.Network.Layer2.Role) - } else if cudn.Spec.Network.Layer3 != nil { - role = string(cudn.Spec.Network.Layer3.Role) - } else if cudn.Spec.Network.Localnet != nil { - role = string(cudn.Spec.Network.Localnet.Role) - } - topology = string(cudn.Spec.Network.Topology) - if !cudn.DeletionTimestamp.IsZero() { if controllerutil.ContainsFinalizer(cudn, template.FinalizerUserDefinedNetwork) { var errs []error @@ -585,7 +564,6 @@ func (c *Controller) syncClusterUDN(cudn *userdefinednetworkv1.ClusterUserDefine } klog.Infof("Finalizer removed from ClusterUserDefinedNetwork %q", cudn.Name) delete(c.namespaceTracker, cudnName) - metrics.DecrementCUDNCount(role, topology) } return nil, nil @@ -603,7 +581,6 @@ func (c *Controller) syncClusterUDN(cudn *userdefinednetworkv1.ClusterUserDefine return nil, fmt.Errorf("failed to add finalizer to ClusterUserDefinedNetwork %q: %w", cudnName, err) } klog.Infof("Added Finalizer to ClusterUserDefinedNetwork %q", cudnName) - metrics.IncrementCUDNCount(role, topology) } selectedNamespaces, err := c.getSelectedNamespaces(cudn.Spec.NamespaceSelector) diff --git a/go-controller/pkg/libovsdb/ops/portbinding.go b/go-controller/pkg/libovsdb/ops/portbinding.go new file mode 100644 index 0000000000..0267a794c0 --- /dev/null +++ b/go-controller/pkg/libovsdb/ops/portbinding.go @@ -0,0 +1,53 @@ +package ops + +import ( + "fmt" + + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" +) + +// UpdatePortBindingSetChassis sets the chassis column of the 'portBinding' row so that the OVN thinks that +// the port binding 'portBinding' is bound on the chassis. Ideally its ovn-controller which claims/binds +// a port binding. But for a remote chassis, we have to bind it as we created the remote chassis +// record for the remote zone nodes. 
+// TODO (numans) remove this function once OVN supports binding a port binding for a remote +// chassis. +func UpdatePortBindingSetChassis(sbClient libovsdbclient.Client, portBinding *sbdb.PortBinding, chassis *sbdb.Chassis) error { + ch, err := GetChassis(sbClient, chassis) + if err != nil { + return fmt.Errorf("failed to get chassis id %s(%s), error: %v", chassis.Name, chassis.Hostname, err) + } + portBinding.Chassis = &ch.UUID + + opModel := operationModel{ + Model: portBinding, + OnModelUpdates: []interface{}{&portBinding.Chassis}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(sbClient) + _, err = m.CreateOrUpdate(opModel) + return err +} + +// GetPortBinding looks up a portBinding in SBDB +func GetPortBinding(sbClient libovsdbclient.Client, portBinding *sbdb.PortBinding) (*sbdb.PortBinding, error) { + found := []*sbdb.PortBinding{} + opModel := operationModel{ + Model: portBinding, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(sbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + + return found[0], nil +} diff --git a/go-controller/pkg/metrics/cluster_manager.go b/go-controller/pkg/metrics/cluster_manager.go index 711d4dc026..f97a338b89 100644 --- a/go-controller/pkg/metrics/cluster_manager.go +++ b/go-controller/pkg/metrics/cluster_manager.go @@ -91,28 +91,6 @@ var metricEgressIPRebalanceCount = prometheus.NewCounter(prometheus.CounterOpts{ /** EgressIP metrics recorded from cluster-manager ends**/ -var metricUDNCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: types.MetricOvnkubeNamespace, - Subsystem: types.MetricOvnkubeSubsystemClusterManager, - Name: "user_defined_networks", - Help: "The total number of UserDefinedNetworks in the cluster"}, - []string{ - "role", - "topology", - }, -) - -var metricCUDNCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: types.MetricOvnkubeNamespace, - Subsystem: 
types.MetricOvnkubeSubsystemClusterManager, - Name: "cluster_user_defined_networks", - Help: "The total number of ClusterUserDefinedNetworks in the cluster"}, - []string{ - "role", - "topology", - }, -) - // RegisterClusterManagerBase registers ovnkube cluster manager base metrics with the Prometheus registry. // This function should only be called once. func RegisterClusterManagerBase() { @@ -152,8 +130,6 @@ func RegisterClusterManagerFunctional() { prometheus.MustRegister(metricEgressIPRebalanceCount) prometheus.MustRegister(metricEgressIPCount) } - prometheus.MustRegister(metricUDNCount) - prometheus.MustRegister(metricCUDNCount) if err := prometheus.Register(MetricResourceRetryFailuresCount); err != nil { if _, ok := err.(prometheus.AlreadyRegisteredError); !ok { panic(err) @@ -189,23 +165,3 @@ func RecordEgressIPRebalance(count int) { func RecordEgressIPCount(count float64) { metricEgressIPCount.Set(count) } - -// IncrementUDNCount increments the number of UserDefinedNetworks of the given type -func IncrementUDNCount(role, topology string) { - metricUDNCount.WithLabelValues(role, topology).Inc() -} - -// DecrementUDNCount decrements the number of UserDefinedNetworks of the given type -func DecrementUDNCount(role, topology string) { - metricUDNCount.WithLabelValues(role, topology).Dec() -} - -// IncrementCUDNCount increments the number of ClusterUserDefinedNetworks of the given type -func IncrementCUDNCount(role, topology string) { - metricCUDNCount.WithLabelValues(role, topology).Inc() -} - -// DecrementCUDNCount decrements the number of ClusterUserDefinedNetworks of the given type -func DecrementCUDNCount(role, topology string) { - metricCUDNCount.WithLabelValues(role, topology).Dec() -} diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go deleted file mode 100644 index 4031ff3cc8..0000000000 --- a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go +++ /dev/null @@ -1,560 +0,0 @@ -package 
bridgeconfig - -import ( - "fmt" - "net" - "strings" - "sync" - "sync/atomic" - - corev1 "k8s.io/api/core/v1" - "k8s.io/klog/v2" - - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/generator/udn" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/egressip" - nodetypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types" - nodeutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/util" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" -) - -// BridgeUDNConfiguration holds the patchport and ctMark -// information for a given network -type BridgeUDNConfiguration struct { - PatchPort string - OfPortPatch string - MasqCTMark string - PktMark string - V4MasqIPs *udn.MasqueradeIPs - V6MasqIPs *udn.MasqueradeIPs - Subnets []config.CIDRNetworkEntry - NodeSubnets []*net.IPNet - Advertised atomic.Bool -} - -func (netConfig *BridgeUDNConfiguration) ShallowCopy() *BridgeUDNConfiguration { - copy := &BridgeUDNConfiguration{ - PatchPort: netConfig.PatchPort, - OfPortPatch: netConfig.OfPortPatch, - MasqCTMark: netConfig.MasqCTMark, - PktMark: netConfig.PktMark, - V4MasqIPs: netConfig.V4MasqIPs, - V6MasqIPs: netConfig.V6MasqIPs, - Subnets: netConfig.Subnets, - NodeSubnets: netConfig.NodeSubnets, - } - copy.Advertised.Store(netConfig.Advertised.Load()) - return copy -} - -func (netConfig *BridgeUDNConfiguration) IsDefaultNetwork() bool { - return netConfig.MasqCTMark == nodetypes.CtMarkOVN -} - -func (netConfig *BridgeUDNConfiguration) setOfPatchPort() error { - ofportPatch, stderr, err := util.GetOVSOfPort("get", "Interface", netConfig.PatchPort, "ofport") - if err != nil { - return fmt.Errorf("failed while waiting on patch port %q to be created by ovn-controller and "+ - "while getting ofport. 
stderr: %v, error: %v", netConfig.PatchPort, stderr, err) - } - netConfig.OfPortPatch = ofportPatch - return nil -} - -type BridgeConfiguration struct { - mutex sync.Mutex - - // variables that are only set on creation and never changed - // don't require mutex lock to read - nodeName string - bridgeName string - uplinkName string - gwIface string - gwIfaceRep string - interfaceID string - - // variables that can be updated (read/write access should be done with mutex held) - ofPortHost string - ips []*net.IPNet - macAddress net.HardwareAddr - ofPortPhys string - netConfig map[string]*BridgeUDNConfiguration - eipMarkIPs *egressip.MarkIPsCache -} - -func NewBridgeConfiguration(intfName, nodeName, - physicalNetworkName string, - nodeSubnets, gwIPs []*net.IPNet, - advertised bool) (*BridgeConfiguration, error) { - var intfRep string - var err error - isGWAcclInterface := false - gwIntf := intfName - - defaultNetConfig := &BridgeUDNConfiguration{ - MasqCTMark: nodetypes.CtMarkOVN, - Subnets: config.Default.ClusterSubnets, - NodeSubnets: nodeSubnets, - } - res := BridgeConfiguration{ - nodeName: nodeName, - netConfig: map[string]*BridgeUDNConfiguration{ - types.DefaultNetworkName: defaultNetConfig, - }, - eipMarkIPs: egressip.NewMarkIPsCache(), - } - res.netConfig[types.DefaultNetworkName].Advertised.Store(advertised) - - if config.Gateway.GatewayAcceleratedInterface != "" { - // Try to get representor for the specified gateway device. - // If function succeeds, then it is either a valid switchdev VF or SF, and we can use this accelerated device - // for node IP, Host Ofport for Openflow etc. 
- // If failed - error for improper configuration option - intfRep, err = getRepresentor(config.Gateway.GatewayAcceleratedInterface) - if err != nil { - return nil, fmt.Errorf("gateway accelerated interface %s is not valid: %w", config.Gateway.GatewayAcceleratedInterface, err) - } - gwIntf = config.Gateway.GatewayAcceleratedInterface - isGWAcclInterface = true - klog.Infof("For gateway accelerated interface %s representor: %s", config.Gateway.GatewayAcceleratedInterface, intfRep) - } else { - intfRep, err = getRepresentor(gwIntf) - if err == nil { - isGWAcclInterface = true - } - } - - if isGWAcclInterface { - bridgeName, _, err := util.RunOVSVsctl("port-to-br", intfRep) - if err != nil { - return nil, fmt.Errorf("failed to find bridge that has port %s: %w", intfRep, err) - } - link, err := util.GetNetLinkOps().LinkByName(gwIntf) - if err != nil { - return nil, fmt.Errorf("failed to get netdevice link for %s: %w", gwIntf, err) - } - uplinkName, err := util.GetNicName(bridgeName) - if err != nil { - return nil, fmt.Errorf("failed to find nic name for bridge %s: %w", bridgeName, err) - } - res.bridgeName = bridgeName - res.uplinkName = uplinkName - res.gwIfaceRep = intfRep - res.gwIface = gwIntf - res.macAddress = link.Attrs().HardwareAddr - } else if bridgeName, _, err := util.RunOVSVsctl("port-to-br", intfName); err == nil { - // This is an OVS bridge's internal port - uplinkName, err := util.GetNicName(bridgeName) - if err != nil { - return nil, fmt.Errorf("failed to find nic name for bridge %s: %w", bridgeName, err) - } - res.bridgeName = bridgeName - res.gwIface = bridgeName - res.uplinkName = uplinkName - gwIntf = bridgeName - } else if _, _, err := util.RunOVSVsctl("br-exists", intfName); err != nil { - // This is not a OVS bridge. We need to create a OVS bridge - // and add cluster.GatewayIntf as a port of that bridge. 
- bridgeName, err := util.NicToBridge(intfName) - if err != nil { - return nil, fmt.Errorf("nicToBridge failed for %s: %w", intfName, err) - } - res.bridgeName = bridgeName - res.gwIface = bridgeName - res.uplinkName = intfName - gwIntf = bridgeName - } else { - // gateway interface is an OVS bridge - uplinkName, err := getIntfName(intfName) - if err != nil { - if config.Gateway.Mode == config.GatewayModeLocal && config.Gateway.AllowNoUplink { - klog.Infof("Could not find uplink for %s, setup gateway bridge with no uplink port, egress IP and egress GW will not work", intfName) - } else { - return nil, fmt.Errorf("failed to find intfName for %s: %w", intfName, err) - } - } else { - res.uplinkName = uplinkName - } - res.bridgeName = intfName - res.gwIface = intfName - } - // Now, we get IP addresses for the bridge - if len(gwIPs) > 0 { - // use gwIPs if provided - res.ips = gwIPs - } else { - // get IP addresses from OVS bridge. If IP does not exist, - // error out. - res.ips, err = nodeutil.GetNetworkInterfaceIPAddresses(gwIntf) - if err != nil { - return nil, fmt.Errorf("failed to get interface details for %s: %w", gwIntf, err) - } - } - - if !isGWAcclInterface { // We do not have an accelerated device for Gateway interface - res.macAddress, err = util.GetOVSPortMACAddress(gwIntf) - if err != nil { - return nil, fmt.Errorf("failed to get MAC address for ovs port %s: %w", gwIntf, err) - } - } - - res.interfaceID, err = bridgedGatewayNodeSetup(nodeName, res.bridgeName, physicalNetworkName) - if err != nil { - return nil, fmt.Errorf("failed to set up shared interface gateway: %v", err) - } - - // the name of the patch port created by ovn-controller is of the form - // patch--to-br-int - defaultNetConfig.PatchPort = (&util.DefaultNetInfo{}).GetNetworkScopedPatchPortName(res.bridgeName, nodeName) - - // for DPU we use the host MAC address for the Gateway configuration - if config.OvnKubeNode.Mode == types.NodeModeDPU { - hostRep, err := 
util.GetDPUHostInterface(res.bridgeName) - if err != nil { - return nil, err - } - res.macAddress, err = util.GetSriovnetOps().GetRepresentorPeerMacAddress(hostRep) - if err != nil { - return nil, err - } - } - - // If gwIface is set, then accelerated GW interface is present and we use it. Else use external bridge instead. - if res.gwIface == "" { - res.gwIface = res.bridgeName - } - - return &res, nil -} - -func (b *BridgeConfiguration) GetGatewayIface() string { - return b.gwIface -} - -// UpdateInterfaceIPAddresses sets and returns the bridge's current ips -func (b *BridgeConfiguration) UpdateInterfaceIPAddresses(node *corev1.Node) ([]*net.IPNet, error) { - b.mutex.Lock() - defer b.mutex.Unlock() - ifAddrs, err := nodeutil.GetNetworkInterfaceIPAddresses(b.GetGatewayIface()) - if err != nil { - return nil, err - } - - // For DPU, here we need to use the DPU host's IP address which is the tenant cluster's - // host internal IP address instead of the DPU's external bridge IP address. - if config.OvnKubeNode.Mode == types.NodeModeDPU { - nodeIfAddr, err := util.GetNodePrimaryDPUHostAddrAnnotation(node) - if err != nil { - return nil, err - } - // For DPU mode, we only support IPv4 for now. - nodeAddrStr := nodeIfAddr.IPv4 - - nodeAddr, _, err := net.ParseCIDR(nodeAddrStr) - if err != nil { - return nil, fmt.Errorf("failed to parse node IP address. 
%v", nodeAddrStr) - } - ifAddrs, err = nodeutil.GetDPUHostPrimaryIPAddresses(nodeAddr, ifAddrs) - if err != nil { - return nil, err - } - } - - b.ips = ifAddrs - return ifAddrs, nil -} - -// GetPortConfigurations returns a slice of Network port configurations along with the -// uplinkName and physical port's ofport value -func (b *BridgeConfiguration) GetPortConfigurations() ([]*BridgeUDNConfiguration, string, string) { - b.mutex.Lock() - defer b.mutex.Unlock() - var netConfigs []*BridgeUDNConfiguration - for _, netConfig := range b.netConfig { - netConfigs = append(netConfigs, netConfig.ShallowCopy()) - } - return netConfigs, b.uplinkName, b.ofPortPhys -} - -// AddNetworkConfig adds the patchport and ctMark value for the provided netInfo into the bridge configuration cache -func (b *BridgeConfiguration) AddNetworkConfig( - nInfo util.NetInfo, - nodeSubnets []*net.IPNet, - masqCTMark, pktMark uint, - v6MasqIPs, v4MasqIPs *udn.MasqueradeIPs) error { - b.mutex.Lock() - defer b.mutex.Unlock() - - netName := nInfo.GetNetworkName() - patchPort := nInfo.GetNetworkScopedPatchPortName(b.bridgeName, b.nodeName) - - _, found := b.netConfig[netName] - if !found { - netConfig := &BridgeUDNConfiguration{ - PatchPort: patchPort, - MasqCTMark: fmt.Sprintf("0x%x", masqCTMark), - PktMark: fmt.Sprintf("0x%x", pktMark), - V4MasqIPs: v4MasqIPs, - V6MasqIPs: v6MasqIPs, - Subnets: nInfo.Subnets(), - NodeSubnets: nodeSubnets, - } - netConfig.Advertised.Store(util.IsPodNetworkAdvertisedAtNode(nInfo, b.nodeName)) - - b.netConfig[netName] = netConfig - } else { - klog.Warningf("Trying to update bridge config for network %s which already"+ - "exists in cache...networks are not mutable...ignoring update", nInfo.GetNetworkName()) - } - return nil -} - -// DelNetworkConfig deletes the provided netInfo from the bridge configuration cache -func (b *BridgeConfiguration) DelNetworkConfig(nInfo util.NetInfo) { - b.mutex.Lock() - defer b.mutex.Unlock() - - delete(b.netConfig, nInfo.GetNetworkName()) 
-} - -func (b *BridgeConfiguration) GetNetworkConfig(networkName string) *BridgeUDNConfiguration { - b.mutex.Lock() - defer b.mutex.Unlock() - return b.netConfig[networkName] -} - -// GetActiveNetworkBridgeConfigCopy returns a shallow copy of the network configuration corresponding to the -// provided netInfo. -// -// NOTE: if the network configuration can't be found or if the network is not patched by OVN -// yet this returns nil. -func (b *BridgeConfiguration) GetActiveNetworkBridgeConfigCopy(networkName string) *BridgeUDNConfiguration { - b.mutex.Lock() - defer b.mutex.Unlock() - - if netConfig, found := b.netConfig[networkName]; found && netConfig.OfPortPatch != "" { - return netConfig.ShallowCopy() - } - return nil -} - -// must be called with mutex held -func (b *BridgeConfiguration) patchedNetConfigs() []*BridgeUDNConfiguration { - result := make([]*BridgeUDNConfiguration, 0, len(b.netConfig)) - for _, netConfig := range b.netConfig { - if netConfig.OfPortPatch == "" { - continue - } - result = append(result, netConfig) - } - return result -} - -// IsGatewayReady checks if patch ports of every netConfig are present. 
-// used by gateway on newGateway readyFunc -func (b *BridgeConfiguration) IsGatewayReady() bool { - b.mutex.Lock() - defer b.mutex.Unlock() - for _, netConfig := range b.netConfig { - ready := gatewayReady(netConfig.PatchPort) - if !ready { - return false - } - } - return true -} - -func (b *BridgeConfiguration) SetOfPorts() error { - b.mutex.Lock() - defer b.mutex.Unlock() - // Get ofport of patchPort - for _, netConfig := range b.netConfig { - if err := netConfig.setOfPatchPort(); err != nil { - return fmt.Errorf("error setting bridge openflow ports for network with patchport %v: err: %v", netConfig.PatchPort, err) - } - } - - if b.uplinkName != "" { - // Get ofport of physical interface - ofportPhys, stderr, err := util.GetOVSOfPort("get", "interface", b.uplinkName, "ofport") - if err != nil { - return fmt.Errorf("failed to get ofport of %s, stderr: %q, error: %v", - b.uplinkName, stderr, err) - } - b.ofPortPhys = ofportPhys - } - - // Get ofport representing the host. That is, host representor port in case of DPUs, ovsLocalPort otherwise. 
- if config.OvnKubeNode.Mode == types.NodeModeDPU { - var stderr string - hostRep, err := util.GetDPUHostInterface(b.bridgeName) - if err != nil { - return err - } - - b.ofPortHost, stderr, err = util.RunOVSVsctl("get", "interface", hostRep, "ofport") - if err != nil { - return fmt.Errorf("failed to get ofport of host interface %s, stderr: %q, error: %v", - hostRep, stderr, err) - } - } else { - var err error - if b.gwIfaceRep != "" { - b.ofPortHost, _, err = util.RunOVSVsctl("get", "interface", b.gwIfaceRep, "ofport") - if err != nil { - return fmt.Errorf("failed to get ofport of bypass rep %s, error: %v", b.gwIfaceRep, err) - } - } else { - b.ofPortHost = nodetypes.OvsLocalPort - } - } - - return nil -} - -func (b *BridgeConfiguration) GetIPs() []*net.IPNet { - b.mutex.Lock() - defer b.mutex.Unlock() - return b.ips -} - -func (b *BridgeConfiguration) GetBridgeName() string { - return b.bridgeName -} - -func (b *BridgeConfiguration) GetUplinkName() string { - return b.uplinkName -} - -func (b *BridgeConfiguration) GetMAC() net.HardwareAddr { - b.mutex.Lock() - defer b.mutex.Unlock() - return b.macAddress -} - -func (b *BridgeConfiguration) SetMAC(macAddr net.HardwareAddr) { - b.mutex.Lock() - defer b.mutex.Unlock() - b.macAddress = macAddr -} - -func (b *BridgeConfiguration) SetNetworkOfPatchPort(netName string) error { - b.mutex.Lock() - defer b.mutex.Unlock() - - netConfig, found := b.netConfig[netName] - if !found { - return fmt.Errorf("failed to find network %s configuration on bridge %s", netName, b.bridgeName) - } - return netConfig.setOfPatchPort() -} - -func (b *BridgeConfiguration) GetInterfaceID() string { - return b.interfaceID -} - -func (b *BridgeConfiguration) GetOfPortHost() string { - b.mutex.Lock() - defer b.mutex.Unlock() - return b.ofPortHost -} - -func (b *BridgeConfiguration) GetEIPMarkIPs() *egressip.MarkIPsCache { - b.mutex.Lock() - defer b.mutex.Unlock() - return b.eipMarkIPs -} - -func (b *BridgeConfiguration) SetEIPMarkIPs(eipMarkIPs 
*egressip.MarkIPsCache) { - b.mutex.Lock() - defer b.mutex.Unlock() - b.eipMarkIPs = eipMarkIPs -} - -func gatewayReady(patchPort string) bool { - // Get ofport of patchPort - ofport, _, err := util.GetOVSOfPort("--if-exists", "get", "interface", patchPort, "ofport") - if err != nil || len(ofport) == 0 { - return false - } - klog.Info("Gateway is ready") - return true -} - -func getIntfName(gatewayIntf string) (string, error) { - // The given (or autodetected) interface is an OVS bridge and this could be - // created by us using util.NicToBridge() or it was pre-created by the user. - - // Is intfName a port of gatewayIntf? - intfName, err := util.GetNicName(gatewayIntf) - if err != nil { - return "", err - } - _, stderr, err := util.RunOVSVsctl("get", "interface", intfName, "ofport") - if err != nil { - return "", fmt.Errorf("failed to get ofport of %s, stderr: %q, error: %v", - intfName, stderr, err) - } - return intfName, nil -} - -// bridgedGatewayNodeSetup enables forwarding on bridge interface, sets up the physical network name mappings for the bridge, -// and returns an ifaceID created from the bridge name and the node name -func bridgedGatewayNodeSetup(nodeName, bridgeName, physicalNetworkName string) (string, error) { - // IPv6 forwarding is enabled globally - if config.IPv4Mode { - // we use forward slash as path separator to allow dotted bridgeName e.g. foo.200 - stdout, stderr, err := util.RunSysctl("-w", fmt.Sprintf("net/ipv4/conf/%s/forwarding=1", bridgeName)) - // systctl output enforces dot as path separator - if err != nil || stdout != fmt.Sprintf("net.ipv4.conf.%s.forwarding = 1", strings.ReplaceAll(bridgeName, ".", "/")) { - return "", fmt.Errorf("could not set the correct forwarding value for interface %s: stdout: %v, stderr: %v, err: %v", - bridgeName, stdout, stderr, err) - } - } - - // ovn-bridge-mappings maps a physical network name to a local ovs bridge - // that provides connectivity to that network. 
It is in the form of physnet1:br1,physnet2:br2. - // Note that there may be multiple ovs bridge mappings, be sure not to override - // the mappings for the other physical network - stdout, stderr, err := util.RunOVSVsctl("--if-exists", "get", "Open_vSwitch", ".", - "external_ids:ovn-bridge-mappings") - if err != nil { - return "", fmt.Errorf("failed to get ovn-bridge-mappings stderr:%s (%v)", stderr, err) - } - // skip the existing mapping setting for the specified physicalNetworkName - mapString := "" - bridgeMappings := strings.Split(stdout, ",") - for _, bridgeMapping := range bridgeMappings { - m := strings.Split(bridgeMapping, ":") - if network := m[0]; network != physicalNetworkName { - if len(mapString) != 0 { - mapString += "," - } - mapString += bridgeMapping - } - } - if len(mapString) != 0 { - mapString += "," - } - mapString += physicalNetworkName + ":" + bridgeName - - _, stderr, err = util.RunOVSVsctl("set", "Open_vSwitch", ".", - fmt.Sprintf("external_ids:ovn-bridge-mappings=%s", mapString)) - if err != nil { - return "", fmt.Errorf("failed to set ovn-bridge-mappings for ovs bridge %s"+ - ", stderr:%s (%v)", bridgeName, stderr, err) - } - - ifaceID := bridgeName + "_" + nodeName - return ifaceID, nil -} - -func getRepresentor(intfName string) (string, error) { - deviceID, err := util.GetDeviceIDFromNetdevice(intfName) - if err != nil { - return "", err - } - - return util.GetFunctionRepresentorName(deviceID) -} diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go deleted file mode 100644 index 8395baf06d..0000000000 --- a/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go +++ /dev/null @@ -1,139 +0,0 @@ -package bridgeconfig - -import ( - "fmt" - "net" - "strings" - - net2 "k8s.io/utils/net" - - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" - 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -func TestDefaultBridgeConfig() *BridgeConfiguration { - defaultNetConfig := &BridgeUDNConfiguration{ - OfPortPatch: "patch-breth0_ov", - } - return &BridgeConfiguration{ - netConfig: map[string]*BridgeUDNConfiguration{ - types.DefaultNetworkName: defaultNetConfig, - }, - } -} - -func TestBridgeConfig(brName string) *BridgeConfiguration { - return &BridgeConfiguration{ - bridgeName: brName, - gwIface: brName, - } -} - -func (b *BridgeConfiguration) GetNetConfigLen() int { - b.mutex.Lock() - defer b.mutex.Unlock() - return len(b.netConfig) -} - -func CheckUDNSvcIsolationOVSFlows(flows []string, netConfig *BridgeUDNConfiguration, netName string, svcCIDR *net.IPNet, expectedNFlows int) { - By(fmt.Sprintf("Checking UDN %s service isolation flows for %s; expected %d flows", - netName, svcCIDR.String(), expectedNFlows)) - - var mgmtMasqIP string - var protoPrefix string - if net2.IsIPv4CIDR(svcCIDR) { - mgmtMasqIP = netConfig.V4MasqIPs.ManagementPort.IP.String() - protoPrefix = "ip" - } else { - mgmtMasqIP = netConfig.V6MasqIPs.ManagementPort.IP.String() - protoPrefix = "ip6" - } - - var nFlows int - for _, flow := range flows { - if strings.Contains(flow, fmt.Sprintf("priority=200, table=2, %s, %s_src=%s, actions=drop", - protoPrefix, protoPrefix, mgmtMasqIP)) { - nFlows++ - } - } - - Expect(nFlows).To(Equal(expectedNFlows)) -} - -func CheckAdvertisedUDNSvcIsolationOVSFlows(flows []string, netConfig *BridgeUDNConfiguration, netName string, svcCIDR *net.IPNet, expectedNFlows int) { - By(fmt.Sprintf("Checking advertised UDN %s service isolation flows for %s; expected %d flows", - netName, svcCIDR.String(), expectedNFlows)) - - var matchingIPFamilySubnet *net.IPNet - var protoPrefix string - var udnAdvertisedSubnets []*net.IPNet - var err error - for _, clusterEntry := range netConfig.Subnets { - udnAdvertisedSubnets = append(udnAdvertisedSubnets, 
clusterEntry.CIDR) - } - if net2.IsIPv4CIDR(svcCIDR) { - matchingIPFamilySubnet, err = util.MatchFirstIPNetFamily(false, udnAdvertisedSubnets) - Expect(err).ToNot(HaveOccurred()) - protoPrefix = "ip" - } else { - matchingIPFamilySubnet, err = util.MatchFirstIPNetFamily(true, udnAdvertisedSubnets) - Expect(err).ToNot(HaveOccurred()) - protoPrefix = "ip6" - } - - var nFlows int - for _, flow := range flows { - if strings.Contains(flow, fmt.Sprintf("priority=200, table=2, %s, %s_src=%s, actions=drop", - protoPrefix, protoPrefix, matchingIPFamilySubnet)) { - nFlows++ - } - if strings.Contains(flow, fmt.Sprintf("priority=550, in_port=LOCAL, %s, %s_src=%s, %s_dst=%s, actions=ct(commit,zone=64001,table=2)", - protoPrefix, protoPrefix, matchingIPFamilySubnet, protoPrefix, svcCIDR)) { - nFlows++ - } - } - - Expect(nFlows).To(Equal(expectedNFlows)) -} - -func CheckDefaultSvcIsolationOVSFlows(flows []string, defaultConfig *BridgeUDNConfiguration, ofPortHost, bridgeMAC string, svcCIDR *net.IPNet) { - By(fmt.Sprintf("Checking default service isolation flows for %s", svcCIDR.String())) - - var masqIP string - var masqSubnet string - var protoPrefix string - if net2.IsIPv4CIDR(svcCIDR) { - protoPrefix = "ip" - masqIP = config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String() - masqSubnet = config.Gateway.V4MasqueradeSubnet - } else { - protoPrefix = "ip6" - masqIP = config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String() - masqSubnet = config.Gateway.V6MasqueradeSubnet - } - - var nTable0DefaultFlows int - var nTable0UDNMasqFlows int - var nTable2Flows int - for _, flow := range flows { - if strings.Contains(flow, fmt.Sprintf("priority=500, in_port=%s, %s, %s_dst=%s, actions=ct(commit,zone=%d,nat(src=%s),table=2)", - ofPortHost, protoPrefix, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone, - masqIP)) { - nTable0DefaultFlows++ - } else if strings.Contains(flow, fmt.Sprintf("priority=550, in_port=%s, %s, %s_src=%s, %s_dst=%s, actions=ct(commit,zone=%d,table=2)", - 
ofPortHost, protoPrefix, protoPrefix, masqSubnet, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone)) { - nTable0UDNMasqFlows++ - } else if strings.Contains(flow, fmt.Sprintf("priority=100, table=2, actions=set_field:%s->eth_dst,output:%s", - bridgeMAC, defaultConfig.OfPortPatch)) { - nTable2Flows++ - } - } - - Expect(nTable0DefaultFlows).To(Equal(1)) - Expect(nTable0UDNMasqFlows).To(Equal(1)) - Expect(nTable2Flows).To(Equal(1)) -} diff --git a/go-controller/pkg/node/bridgeconfig/bridgeflows.go b/go-controller/pkg/node/bridgeconfig/bridgeflows.go deleted file mode 100644 index d03b88c8de..0000000000 --- a/go-controller/pkg/node/bridgeconfig/bridgeflows.go +++ /dev/null @@ -1,970 +0,0 @@ -package bridgeconfig - -import ( - "fmt" - "net" - - "k8s.io/klog/v2" - utilnet "k8s.io/utils/net" - - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" - nodetypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types" - nodeutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/util" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" -) - -func (b *BridgeConfiguration) DefaultBridgeFlows(hostSubnets []*net.IPNet, extraIPs []net.IP) ([]string, error) { - b.mutex.Lock() - defer b.mutex.Unlock() - dftFlows, err := b.flowsForDefaultBridge(extraIPs) - if err != nil { - return nil, err - } - dftCommonFlows, err := b.commonFlows(hostSubnets) - if err != nil { - return nil, err - } - return append(dftFlows, dftCommonFlows...), nil -} - -func (b *BridgeConfiguration) ExternalBridgeFlows(hostSubnets []*net.IPNet) ([]string, error) { - b.mutex.Lock() - defer b.mutex.Unlock() - return b.commonFlows(hostSubnets) -} - -// must be called with bridge.mutex held -func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string, error) { - // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure - // that dl_src is included 
in match criteria! - - ofPortPhys := b.ofPortPhys - bridgeMacAddress := b.macAddress.String() - ofPortHost := b.ofPortHost - bridgeIPs := b.ips - - var dftFlows []string - // 14 bytes of overhead for ethernet header (does not include VLAN) - maxPktLength := getMaxFrameLength() - - strip_vlan := "" - mod_vlan_id := "" - match_vlan := "" - if config.Gateway.VLANID != 0 { - strip_vlan = "strip_vlan," - match_vlan = fmt.Sprintf("dl_vlan=%d,", config.Gateway.VLANID) - mod_vlan_id = fmt.Sprintf("mod_vlan_vid:%d,", config.Gateway.VLANID) - } - - if config.IPv4Mode { - // table0, Geneve packets coming from external. Skip conntrack and go directly to host - // if dest mac is the shared mac send directly to host. - if ofPortPhys != "" { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=205, in_port=%s, dl_dst=%s, udp, udp_dst=%d, "+ - "actions=output:%s", nodetypes.DefaultOpenFlowCookie, ofPortPhys, bridgeMacAddress, config.Default.EncapPort, - ofPortHost)) - // perform NORMAL action otherwise. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp, udp_dst=%d, "+ - "actions=NORMAL", nodetypes.DefaultOpenFlowCookie, ofPortPhys, config.Default.EncapPort)) - - // table0, Geneve packets coming from LOCAL/Host OFPort. 
Skip conntrack and go directly to external - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp, udp_dst=%d, "+ - "actions=output:%s", nodetypes.DefaultOpenFlowCookie, ofPortHost, config.Default.EncapPort, ofPortPhys)) - } - physicalIP, err := util.MatchFirstIPNetFamily(false, bridgeIPs) - if err != nil { - return nil, fmt.Errorf("unable to determine IPv4 physical IP of host: %v", err) - } - for _, netConfig := range b.patchedNetConfigs() { - // table 0, SVC Hairpin from OVN destined to local host, DNAT and go to table 4 - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s, ip_src=%s,"+ - "actions=ct(commit,zone=%d,nat(dst=%s),table=4)", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String(), physicalIP.IP, - config.Default.HostMasqConntrackZone, physicalIP.IP)) - } - - // table 0, hairpin from OVN destined to local host (but an additional node IP), send to table 4 - for _, ip := range extraIPs { - if ip.To4() == nil { - continue - } - // not needed for the physical IP - if ip.Equal(physicalIP.IP) { - continue - } - - // not needed for special masquerade IP - if ip.Equal(config.Gateway.MasqueradeIPs.V4HostMasqueradeIP) { - continue - } - - for _, netConfig := range b.patchedNetConfigs() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s, ip_src=%s,"+ - "actions=ct(commit,zone=%d,table=4)", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, ip.String(), physicalIP.IP, - config.Default.HostMasqConntrackZone)) - } - } - - // table 0, Reply SVC traffic from Host -> OVN, unSNAT and goto table 5 - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s,"+ - "actions=ct(zone=%d,nat,table=5)", - nodetypes.DefaultOpenFlowCookie, ofPortHost, config.Gateway.MasqueradeIPs.V4OVNMasqueradeIP.String(), config.Default.OVNMasqConntrackZone)) - } - if 
config.IPv6Mode { - if ofPortPhys != "" { - // table0, Geneve packets coming from external. Skip conntrack and go directly to host - // if dest mac is the shared mac send directly to host. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=205, in_port=%s, dl_dst=%s, udp6, udp_dst=%d, "+ - "actions=output:%s", nodetypes.DefaultOpenFlowCookie, ofPortPhys, bridgeMacAddress, config.Default.EncapPort, - ofPortHost)) - // perform NORMAL action otherwise. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp6, udp_dst=%d, "+ - "actions=NORMAL", nodetypes.DefaultOpenFlowCookie, ofPortPhys, config.Default.EncapPort)) - - // table0, Geneve packets coming from LOCAL. Skip conntrack and send to external - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp6, udp_dst=%d, "+ - "actions=output:%s", nodetypes.DefaultOpenFlowCookie, nodetypes.OvsLocalPort, config.Default.EncapPort, ofPortPhys)) - } - - physicalIP, err := util.MatchFirstIPNetFamily(true, bridgeIPs) - if err != nil { - return nil, fmt.Errorf("unable to determine IPv6 physical IP of host: %v", err) - } - // table 0, SVC Hairpin from OVN destined to local host, DNAT to host, send to table 4 - for _, netConfig := range b.patchedNetConfigs() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s, ipv6_src=%s,"+ - "actions=ct(commit,zone=%d,nat(dst=%s),table=4)", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String(), physicalIP.IP, - config.Default.HostMasqConntrackZone, physicalIP.IP)) - } - - // table 0, hairpin from OVN destined to local host (but an additional node IP), send to table 4 - for _, ip := range extraIPs { - if ip.To4() != nil { - continue - } - // not needed for the physical IP - if ip.Equal(physicalIP.IP) { - continue - } - - // not needed for special masquerade IP - if 
ip.Equal(config.Gateway.MasqueradeIPs.V6HostMasqueradeIP) { - continue - } - - for _, netConfig := range b.patchedNetConfigs() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s, ipv6_src=%s,"+ - "actions=ct(commit,zone=%d,table=4)", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, ip.String(), physicalIP.IP, - config.Default.HostMasqConntrackZone)) - } - } - - // table 0, Reply SVC traffic from Host -> OVN, unSNAT and goto table 5 - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s,"+ - "actions=ct(zone=%d,nat,table=5)", - nodetypes.DefaultOpenFlowCookie, ofPortHost, config.Gateway.MasqueradeIPs.V6OVNMasqueradeIP.String(), config.Default.OVNMasqConntrackZone)) - } - - var protoPrefix, masqIP, masqSubnet string - - // table 0, packets coming from Host -> Service - for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { - if utilnet.IsIPv4CIDR(svcCIDR) { - protoPrefix = "ip" - masqIP = config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String() - masqSubnet = config.Gateway.V4MasqueradeSubnet - } else { - protoPrefix = "ipv6" - masqIP = config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String() - masqSubnet = config.Gateway.V6MasqueradeSubnet - } - - // table 0, Host (default network) -> OVN towards SVC, SNAT to special IP. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_dst=%s, "+ - "actions=ct(commit,zone=%d,nat(src=%s),table=2)", - nodetypes.DefaultOpenFlowCookie, ofPortHost, protoPrefix, protoPrefix, - svcCIDR, config.Default.HostMasqConntrackZone, masqIP)) - - if util.IsNetworkSegmentationSupportEnabled() { - // table 0, Host (UDNs) -> OVN towards SVC, SNAT to special IP. - // For packets originating from UDN, commit without NATing, those - // have already been SNATed to the masq IP of the UDN. 
- dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=550, in_port=%s, %s, %s_src=%s, %s_dst=%s, "+ - "actions=ct(commit,zone=%d,table=2)", - nodetypes.DefaultOpenFlowCookie, ofPortHost, protoPrefix, protoPrefix, - masqSubnet, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone)) - if util.IsRouteAdvertisementsEnabled() { - // If the UDN is advertised then instead of matching on the masqSubnet - // we match on the UDNPodSubnet itself and we also don't SNAT to 169.254.0.2 - // sample flow: cookie=0xdeff105, duration=1472.742s, table=0, n_packets=9, n_bytes=666, priority=550 - // ip,in_port=LOCAL,nw_src=103.103.0.0/16,nw_dst=10.96.0.0/16 actions=ct(commit,table=2,zone=64001) - for _, netConfig := range b.patchedNetConfigs() { - if netConfig.IsDefaultNetwork() { - continue - } - if netConfig.Advertised.Load() { - var udnAdvertisedSubnets []*net.IPNet - for _, clusterEntry := range netConfig.Subnets { - udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) - } - // Filter subnets based on the clusterIP service family - // NOTE: We don't support more than 1 subnet CIDR of same family type; we only pick the first one - matchingIPFamilySubnet, err := util.MatchFirstIPNetFamily(utilnet.IsIPv6CIDR(svcCIDR), udnAdvertisedSubnets) - if err != nil { - klog.Infof("Unable to determine UDN subnet for the provided family isIPV6: %t, %v", utilnet.IsIPv6CIDR(svcCIDR), err) - continue - } - - // Use the filtered subnet for the flow compute instead of the masqueradeIP - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=550, in_port=%s, %s, %s_src=%s, %s_dst=%s, "+ - "actions=ct(commit,zone=%d,table=2)", - nodetypes.DefaultOpenFlowCookie, ofPortHost, protoPrefix, protoPrefix, - matchingIPFamilySubnet.String(), protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone)) - } - } - } - } - - masqDst := masqIP - if util.IsNetworkSegmentationSupportEnabled() { - // In UDN match on the whole masquerade subnet to handle replies from UDN enabled 
services - masqDst = masqSubnet - } - for _, netConfig := range b.patchedNetConfigs() { - // table 0, Reply hairpin traffic to host, coming from OVN, unSNAT - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_src=%s, %s_dst=%s,"+ - "actions=ct(zone=%d,nat,table=3)", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, protoPrefix, protoPrefix, svcCIDR, - protoPrefix, masqDst, config.Default.HostMasqConntrackZone)) - // table 0, Reply traffic coming from OVN to outside, drop it if the DNAT wasn't done either - // at the GR load balancer or switch load balancer. It means the correct port wasn't provided. - // nodeCIDR->serviceCIDR traffic flow is internal and it shouldn't be carried to outside the cluster - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=115, in_port=%s, %s, %s_dst=%s,"+ - "actions=drop", nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, protoPrefix, protoPrefix, svcCIDR)) - } - } - - // table 0, add IP fragment reassembly flows, only needed in SGW mode with - // physical interface attached to bridge - if config.Gateway.Mode == config.GatewayModeShared && ofPortPhys != "" { - reassemblyFlows := generateIPFragmentReassemblyFlow(ofPortPhys) - dftFlows = append(dftFlows, reassemblyFlows...) 
- } - if ofPortPhys != "" { - for _, netConfig := range b.patchedNetConfigs() { - var actions string - if config.Gateway.Mode != config.GatewayModeLocal || config.Gateway.DisablePacketMTUCheck { - actions = fmt.Sprintf("output:%s", netConfig.OfPortPatch) - } else { - // packets larger than known acceptable MTU need to go to kernel for - // potential fragmentation - // introduced specifically for replies to egress traffic not routed - // through the host - actions = fmt.Sprintf("check_pkt_larger(%d)->reg0[0],resubmit(,11)", maxPktLength) - } - - if config.IPv4Mode { - // table 1, established and related connections in zone 64000 with ct_mark CtMarkOVN go to OVN - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, ip, ct_state=+trk+est, ct_mark=%s, "+ - "actions=%s", nodetypes.DefaultOpenFlowCookie, netConfig.MasqCTMark, actions)) - - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, ip, ct_state=+trk+rel, ct_mark=%s, "+ - "actions=%s", nodetypes.DefaultOpenFlowCookie, netConfig.MasqCTMark, actions)) - - } - - if config.IPv6Mode { - // table 1, established and related connections in zone 64000 with ct_mark CtMarkOVN go to OVN - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, ipv6, ct_state=+trk+est, ct_mark=%s, "+ - "actions=%s", nodetypes.DefaultOpenFlowCookie, netConfig.MasqCTMark, actions)) - - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, ipv6, ct_state=+trk+rel, ct_mark=%s, "+ - "actions=%s", nodetypes.DefaultOpenFlowCookie, netConfig.MasqCTMark, actions)) - } - } - if config.IPv4Mode { - // table 1, established and related connections in zone 64000 with ct_mark CtMarkHost go to host - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip, ct_state=+trk+est, ct_mark=%s, "+ - "actions=%soutput:%s", - nodetypes.DefaultOpenFlowCookie, match_vlan, nodetypes.CtMarkHost, strip_vlan, ofPortHost)) - - dftFlows = append(dftFlows, - 
fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip, ct_state=+trk+rel, ct_mark=%s, "+ - "actions=%soutput:%s", - nodetypes.DefaultOpenFlowCookie, match_vlan, nodetypes.CtMarkHost, strip_vlan, ofPortHost)) - - } - if config.IPv6Mode { - // table 1, established and related connections in zone 64000 with ct_mark CtMarkHost go to host - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip6, ct_state=+trk+est, ct_mark=%s, "+ - "actions=%soutput:%s", - nodetypes.DefaultOpenFlowCookie, match_vlan, nodetypes.CtMarkHost, strip_vlan, ofPortHost)) - - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip6, ct_state=+trk+rel, ct_mark=%s, "+ - "actions=%soutput:%s", - nodetypes.DefaultOpenFlowCookie, match_vlan, nodetypes.CtMarkHost, strip_vlan, ofPortHost)) - - } - - // table 1, we check to see if this dest mac is the shared mac, if so send to host - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=10, table=1, %s dl_dst=%s, actions=%soutput:%s", - nodetypes.DefaultOpenFlowCookie, match_vlan, bridgeMacAddress, strip_vlan, ofPortHost)) - } - - defaultNetConfig := b.netConfig[types.DefaultNetworkName] - - // table 2, dispatch from Host -> OVN - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=2, "+ - "actions=set_field:%s->eth_dst,%soutput:%s", nodetypes.DefaultOpenFlowCookie, - bridgeMacAddress, mod_vlan_id, defaultNetConfig.OfPortPatch)) - - // table 2, priority 200, dispatch from UDN -> Host -> OVN. These packets have - // already been SNATed to the UDN's masq IP or have been marked with the UDN's packet mark. 
- if config.IPv4Mode { - for _, netConfig := range b.patchedNetConfigs() { - if netConfig.IsDefaultNetwork() { - continue - } - srcIPOrSubnet := netConfig.V4MasqIPs.ManagementPort.IP.String() - if util.IsRouteAdvertisementsEnabled() && netConfig.Advertised.Load() { - var udnAdvertisedSubnets []*net.IPNet - for _, clusterEntry := range netConfig.Subnets { - udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) - } - // Filter subnets based on the clusterIP service family - // NOTE: We don't support more than 1 subnet CIDR of same family type; we only pick the first one - matchingIPFamilySubnet, err := util.MatchFirstIPNetFamily(false, udnAdvertisedSubnets) - if err != nil { - klog.Infof("Unable to determine IPV4 UDN subnet for the provided family isIPV6: %v", err) - continue - } - - // Use the filtered subnets for the flow compute instead of the masqueradeIP - srcIPOrSubnet = matchingIPFamilySubnet.String() - } - // Drop traffic coming from the masquerade IP or the UDN subnet(for advertised UDNs) to ensure that - // isolation between networks is enforced. This handles the case where a pod on the UDN subnet is sending traffic to - // a service in another UDN. 
- dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, table=2, ip, ip_src=%s, "+ - "actions=drop", - nodetypes.DefaultOpenFlowCookie, srcIPOrSubnet)) - - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=250, table=2, ip, pkt_mark=%s, "+ - "actions=set_field:%s->eth_dst,output:%s", - nodetypes.DefaultOpenFlowCookie, netConfig.PktMark, - bridgeMacAddress, netConfig.OfPortPatch)) - } - } - - if config.IPv6Mode { - for _, netConfig := range b.patchedNetConfigs() { - if netConfig.IsDefaultNetwork() { - continue - } - srcIPOrSubnet := netConfig.V6MasqIPs.ManagementPort.IP.String() - if util.IsRouteAdvertisementsEnabled() && netConfig.Advertised.Load() { - var udnAdvertisedSubnets []*net.IPNet - for _, clusterEntry := range netConfig.Subnets { - udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) - } - // Filter subnets based on the clusterIP service family - // NOTE: We don't support more than 1 subnet CIDR of same family type; we only pick the first one - matchingIPFamilySubnet, err := util.MatchFirstIPNetFamily(true, udnAdvertisedSubnets) - if err != nil { - klog.Infof("Unable to determine IPV6 UDN subnet for the provided family isIPV6: %v", err) - continue - } - - // Use the filtered subnets for the flow compute instead of the masqueradeIP - srcIPOrSubnet = matchingIPFamilySubnet.String() - } - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, table=2, ip6, ipv6_src=%s, "+ - "actions=drop", - nodetypes.DefaultOpenFlowCookie, srcIPOrSubnet)) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=250, table=2, ip6, pkt_mark=%s, "+ - "actions=set_field:%s->eth_dst,output:%s", - nodetypes.DefaultOpenFlowCookie, netConfig.PktMark, - bridgeMacAddress, netConfig.OfPortPatch)) - } - } - - // table 3, dispatch from OVN -> Host - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, table=3, %s "+ - "actions=move:NXM_OF_ETH_DST[]->NXM_OF_ETH_SRC[],set_field:%s->eth_dst,%soutput:%s", - 
nodetypes.DefaultOpenFlowCookie, match_vlan, bridgeMacAddress, strip_vlan, ofPortHost)) - - // table 4, hairpinned pkts that need to go from OVN -> Host - // We need to SNAT and masquerade OVN GR IP, send to table 3 for dispatch to Host - if config.IPv4Mode { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, table=4,ip,"+ - "actions=ct(commit,zone=%d,nat(src=%s),table=3)", - nodetypes.DefaultOpenFlowCookie, config.Default.OVNMasqConntrackZone, config.Gateway.MasqueradeIPs.V4OVNMasqueradeIP.String())) - } - if config.IPv6Mode { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, table=4,ipv6, "+ - "actions=ct(commit,zone=%d,nat(src=%s),table=3)", - nodetypes.DefaultOpenFlowCookie, config.Default.OVNMasqConntrackZone, config.Gateway.MasqueradeIPs.V6OVNMasqueradeIP.String())) - } - // table 5, Host Reply traffic to hairpinned svc, need to unDNAT, send to table 2 - if config.IPv4Mode { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, table=5, ip, "+ - "actions=ct(commit,zone=%d,nat,table=2)", - nodetypes.DefaultOpenFlowCookie, config.Default.HostMasqConntrackZone)) - } - if config.IPv6Mode { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, table=5, ipv6, "+ - "actions=ct(commit,zone=%d,nat,table=2)", - nodetypes.DefaultOpenFlowCookie, config.Default.HostMasqConntrackZone)) - } - return dftFlows, nil -} - -// getMaxFrameLength returns the maximum frame size (ignoring VLAN header) that a gateway can handle -func getMaxFrameLength() int { - return config.Default.MTU + 14 -} - -// generateIPFragmentReassemblyFlow adds flows in table 0 that send packets to a -// specific conntrack zone for reassembly with the same priority as node port -// flows that match on L4 fields. After reassembly packets are reinjected to -// table 0 again. This requires a conntrack immplementation that reassembles -// fragments. This reqreuiment is met for the kernel datapath with the netfilter -// module loaded. This reqreuiment is not met for the userspace datapath. 
-func generateIPFragmentReassemblyFlow(ofPortPhys string) []string { - flows := make([]string, 0, 2) - if config.IPv4Mode { - flows = append(flows, - fmt.Sprintf("cookie=%s, priority=110, table=0, in_port=%s, ip, nw_frag=yes, actions=ct(table=0,zone=%d)", - nodetypes.DefaultOpenFlowCookie, - ofPortPhys, - config.Default.ReassemblyConntrackZone, - ), - ) - } - if config.IPv6Mode { - flows = append(flows, - fmt.Sprintf("cookie=%s, priority=110, table=0, in_port=%s, ipv6, nw_frag=yes, actions=ct(table=0,zone=%d)", - nodetypes.DefaultOpenFlowCookie, - ofPortPhys, - config.Default.ReassemblyConntrackZone, - ), - ) - } - - return flows -} - -// must be called with bridge.mutex held -func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, error) { - // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure - // that dl_src is included in match criteria! - ofPortPhys := b.ofPortPhys - bridgeMacAddress := b.macAddress.String() - ofPortHost := b.ofPortHost - bridgeIPs := b.ips - - var dftFlows []string - - strip_vlan := "" - match_vlan := "" - mod_vlan_id := "" - if config.Gateway.VLANID != 0 { - strip_vlan = "strip_vlan," - match_vlan = fmt.Sprintf("dl_vlan=%d,", config.Gateway.VLANID) - mod_vlan_id = fmt.Sprintf("mod_vlan_vid:%d,", config.Gateway.VLANID) - } - - if ofPortPhys != "" { - // table 0, we check to see if this dest mac is the shared mac, if so flood to all ports - actions := "" - for _, netConfig := range b.patchedNetConfigs() { - actions += "output:" + netConfig.OfPortPatch + "," - } - - actions += strip_vlan + "output:" + ofPortHost - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=10, table=0, %s dl_dst=%s, actions=%s", - nodetypes.DefaultOpenFlowCookie, match_vlan, bridgeMacAddress, actions)) - } - - // table 0, check packets coming from OVN have the correct mac address. 
Low priority flows that are a catch all - // for non-IP packets that would normally be forwarded with NORMAL action (table 0, priority 0 flow). - for _, netConfig := range b.patchedNetConfigs() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=10, table=0, in_port=%s, dl_src=%s, actions=output:NORMAL", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress)) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=9, table=0, in_port=%s, actions=drop", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch)) - } - - if config.IPv4Mode { - physicalIP, err := util.MatchFirstIPNetFamily(false, bridgeIPs) - if err != nil { - return nil, fmt.Errorf("unable to determine IPv4 physical IP of host: %v", err) - } - if ofPortPhys != "" { - for _, netConfig := range b.patchedNetConfigs() { - // table0, packets coming from egressIP pods that have mark 1008 on them - // will be SNAT-ed a final time into nodeIP to maintain consistency in traffic even if the GR - // SNATs these into egressIP prior to reaching external bridge. - // egressService pods will also undergo this SNAT to nodeIP since these features are tied - // together at the OVN policy level on the distributed router. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ip, pkt_mark=%s "+ - "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)),output:%s", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, nodetypes.OvnKubeNodeSNATMark, - config.Default.ConntrackZone, physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) - - // table 0, packets coming from egressIP pods only from user defined networks. If an egressIP is assigned to - // this node, then all networks get a flow even if no pods on that network were selected for by this egressIP. 
- if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && - config.Gateway.Mode != config.GatewayModeDisabled && b.eipMarkIPs != nil { - if netConfig.MasqCTMark != nodetypes.CtMarkOVN { - for mark, eip := range b.eipMarkIPs.GetIPv4() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ip, pkt_mark=%d, "+ - "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, mark, - config.Default.ConntrackZone, eip, netConfig.MasqCTMark, ofPortPhys)) - } - } - } - - // table 0, packets coming from pods headed externally. Commit connections with ct_mark CtMarkOVN - // so that reverse direction goes back to the pods. - if netConfig.IsDefaultNetwork() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ip, "+ - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, config.Default.ConntrackZone, - netConfig.MasqCTMark, ofPortPhys)) - - // Allow (a) OVN->host traffic on the same node - // (b) host->host traffic on the same node - if config.Gateway.Mode == config.GatewayModeShared || config.Gateway.Mode == config.GatewayModeLocal { - dftFlows = append(dftFlows, hostNetworkNormalActionFlows(netConfig, bridgeMacAddress, hostSubnets, false)...) 
- } - } else { - // for UDN we additionally SNAT the packet from masquerade IP -> node IP - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ip, ip_src=%s, "+ - "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, netConfig.V4MasqIPs.GatewayRouter.IP, config.Default.ConntrackZone, - physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) - } - } - - // table 0, packets coming from host Commit connections with ct_mark CtMarkHost - // so that reverse direction goes back to the host. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, ip, "+ - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), %soutput:%s", - nodetypes.DefaultOpenFlowCookie, ofPortHost, config.Default.ConntrackZone, nodetypes.CtMarkHost, mod_vlan_id, ofPortPhys)) - } - if config.Gateway.Mode == config.GatewayModeLocal { - for _, netConfig := range b.patchedNetConfigs() { - // table 0, any packet coming from OVN send to host in LGW mode, host will take care of sending it outside if needed. - // exceptions are traffic for egressIP and egressGW features and ICMP related traffic which will hit the priority 100 flow instead of this. 
- dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=175, in_port=%s, tcp, nw_src=%s, "+ - "actions=ct(table=4,zone=%d)", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=175, in_port=%s, udp, nw_src=%s, "+ - "actions=ct(table=4,zone=%d)", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=175, in_port=%s, sctp, nw_src=%s, "+ - "actions=ct(table=4,zone=%d)", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) - // We send BFD traffic coming from OVN to outside directly using a higher priority flow - if ofPortPhys != "" { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=650, table=0, in_port=%s, dl_src=%s, udp, tp_dst=3784, actions=output:%s", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, ofPortPhys)) - } - } - } - - if ofPortPhys != "" { - // table 0, packets coming from external or other localnet ports. Send it through conntrack and - // resubmit to table 1 to know the state and mark of the connection. - // Note, there are higher priority rules that take care of traffic coming from LOCAL and OVN ports. 
- dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=50, ip, actions=ct(zone=%d, nat, table=1)", - nodetypes.DefaultOpenFlowCookie, config.Default.ConntrackZone)) - } - } - - if config.IPv6Mode { - physicalIP, err := util.MatchFirstIPNetFamily(true, bridgeIPs) - if err != nil { - return nil, fmt.Errorf("unable to determine IPv6 physical IP of host: %v", err) - } - if ofPortPhys != "" { - for _, netConfig := range b.patchedNetConfigs() { - // table0, packets coming from egressIP pods that have mark 1008 on them - // will be DNAT-ed a final time into nodeIP to maintain consistency in traffic even if the GR - // DNATs these into egressIP prior to reaching external bridge. - // egressService pods will also undergo this SNAT to nodeIP since these features are tied - // together at the OVN policy level on the distributed router. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ipv6, pkt_mark=%s "+ - "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)),output:%s", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, nodetypes.OvnKubeNodeSNATMark, - config.Default.ConntrackZone, physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) - - // table 0, packets coming from egressIP pods only from user defined networks. If an egressIP is assigned to - // this node, then all networks get a flow even if no pods on that network were selected for by this egressIP. 
- if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && - config.Gateway.Mode != config.GatewayModeDisabled && b.eipMarkIPs != nil { - if netConfig.MasqCTMark != nodetypes.CtMarkOVN { - for mark, eip := range b.eipMarkIPs.GetIPv6() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ipv6, pkt_mark=%d, "+ - "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, mark, - config.Default.ConntrackZone, eip, netConfig.MasqCTMark, ofPortPhys)) - } - } - } - - // table 0, packets coming from pods headed externally. Commit connections with ct_mark CtMarkOVN - // so that reverse direction goes back to the pods. - if netConfig.IsDefaultNetwork() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ipv6, "+ - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, config.Default.ConntrackZone, netConfig.MasqCTMark, ofPortPhys)) - - // Allow (a) OVN->host traffic on the same node - // (b) host->host traffic on the same node - if config.Gateway.Mode == config.GatewayModeShared || config.Gateway.Mode == config.GatewayModeLocal { - dftFlows = append(dftFlows, hostNetworkNormalActionFlows(netConfig, bridgeMacAddress, hostSubnets, true)...) 
- } - } else { - // for UDN we additionally SNAT the packet from masquerade IP -> node IP - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ipv6, ipv6_src=%s, "+ - "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, netConfig.V6MasqIPs.GatewayRouter.IP, config.Default.ConntrackZone, - physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) - } - } - - // table 0, packets coming from host. Commit connections with ct_mark CtMarkHost - // so that reverse direction goes back to the host. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, ipv6, "+ - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), %soutput:%s", - nodetypes.DefaultOpenFlowCookie, ofPortHost, config.Default.ConntrackZone, nodetypes.CtMarkHost, mod_vlan_id, ofPortPhys)) - - } - if config.Gateway.Mode == config.GatewayModeLocal { - for _, netConfig := range b.patchedNetConfigs() { - // table 0, any packet coming from OVN send to host in LGW mode, host will take care of sending it outside if needed. - // exceptions are traffic for egressIP and egressGW features and ICMP related traffic which will hit the priority 100 flow instead of this. 
- dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=175, in_port=%s, tcp6, ipv6_src=%s, "+ - "actions=ct(table=4,zone=%d)", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=175, in_port=%s, udp6, ipv6_src=%s, "+ - "actions=ct(table=4,zone=%d)", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=175, in_port=%s, sctp6, ipv6_src=%s, "+ - "actions=ct(table=4,zone=%d)", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) - if ofPortPhys != "" { - // We send BFD traffic coming from OVN to outside directly using a higher priority flow - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=650, table=0, in_port=%s, dl_src=%s, udp6, tp_dst=3784, actions=output:%s", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, ofPortPhys)) - } - } - } - if ofPortPhys != "" { - // table 0, packets coming from external. Send it through conntrack and - // resubmit to table 1 to know the state and mark of the connection. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=50, in_port=%s, ipv6, "+ - "actions=ct(zone=%d, nat, table=1)", nodetypes.DefaultOpenFlowCookie, ofPortPhys, config.Default.ConntrackZone)) - } - } - // Egress IP is often configured on a node different from the one hosting the affected pod. - // Due to the fact that ovn-controllers on different nodes apply the changes independently, - // there is a chance that the pod traffic will reach the egress node before it configures the SNAT flows. 
- // Drop pod traffic that is not SNATed, excluding local pods(required for ICNIv2) - defaultNetConfig := b.netConfig[types.DefaultNetworkName] - if config.OVNKubernetesFeature.EnableEgressIP { - for _, clusterEntry := range config.Default.ClusterSubnets { - cidr := clusterEntry.CIDR - ipv := getIPv(cidr) - // table 0, drop packets coming from pods headed externally that were not SNATed. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=104, in_port=%s, %s, %s_src=%s, actions=drop", - nodetypes.DefaultOpenFlowCookie, defaultNetConfig.OfPortPatch, ipv, ipv, cidr)) - } - for _, subnet := range defaultNetConfig.NodeSubnets { - ipv := getIPv(subnet) - if ofPortPhys != "" { - // table 0, commit connections from local pods. - // ICNIv2 requires that local pod traffic can leave the node without SNAT. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=109, in_port=%s, dl_src=%s, %s, %s_src=%s"+ - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", - nodetypes.DefaultOpenFlowCookie, defaultNetConfig.OfPortPatch, bridgeMacAddress, ipv, ipv, subnet, - config.Default.ConntrackZone, nodetypes.CtMarkOVN, ofPortPhys)) - } - } - } - - if ofPortPhys != "" { - for _, netConfig := range b.patchedNetConfigs() { - isNetworkAdvertised := netConfig.Advertised.Load() - // disableSNATMultipleGWs only applies to default network - disableSNATMultipleGWs := netConfig.IsDefaultNetwork() && config.Gateway.DisableSNATMultipleGWs - if !disableSNATMultipleGWs && !isNetworkAdvertised { - continue - } - output := netConfig.OfPortPatch - if isNetworkAdvertised && config.Gateway.Mode == config.GatewayModeLocal { - // except if advertised through BGP, go to kernel - // TODO: MEG enabled pods should still go through the patch port - // but holding this until - // https://issues.redhat.com/browse/FDP-646 is fixed, for now we - // are assuming MEG & BGP are not used together - output = nodetypes.OvsLocalPort - } - for _, clusterEntry := range 
netConfig.Subnets { - cidr := clusterEntry.CIDR - ipv := getIPv(cidr) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=15, table=1, %s, %s_dst=%s, "+ - "actions=output:%s", - nodetypes.DefaultOpenFlowCookie, ipv, ipv, cidr, output)) - } - if output == netConfig.OfPortPatch { - // except node management traffic - for _, subnet := range netConfig.NodeSubnets { - mgmtIP := util.GetNodeManagementIfAddr(subnet) - ipv := getIPv(mgmtIP) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=16, table=1, %s, %s_dst=%s, "+ - "actions=output:%s", - nodetypes.DefaultOpenFlowCookie, ipv, ipv, mgmtIP.IP, nodetypes.OvsLocalPort), - ) - } - } - } - - // table 1, we check to see if this dest mac is the shared mac, if so send to host - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=10, table=1, %s dl_dst=%s, actions=%soutput:%s", - nodetypes.DefaultOpenFlowCookie, match_vlan, bridgeMacAddress, strip_vlan, ofPortHost)) - - if config.IPv6Mode { - // REMOVEME(trozet) when https://bugzilla.kernel.org/show_bug.cgi?id=11797 is resolved - // must flood icmpv6 Route Advertisement and Neighbor Advertisement traffic as it fails to create a CT entry - for _, icmpType := range []int{types.RouteAdvertisementICMPType, types.NeighborAdvertisementICMPType} { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=14, table=1,icmp6,icmpv6_type=%d actions=FLOOD", - nodetypes.DefaultOpenFlowCookie, icmpType)) - } - if ofPortPhys != "" { - // We send BFD traffic both on the host and in ovn - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=13, table=1, in_port=%s, udp6, tp_dst=3784, actions=output:%s,output:%s", - nodetypes.DefaultOpenFlowCookie, ofPortPhys, defaultNetConfig.OfPortPatch, ofPortHost)) - } - } - - if config.IPv4Mode { - if ofPortPhys != "" { - // We send BFD traffic both on the host and in ovn - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=13, table=1, in_port=%s, udp, tp_dst=3784, 
actions=output:%s,output:%s", - nodetypes.DefaultOpenFlowCookie, ofPortPhys, defaultNetConfig.OfPortPatch, ofPortHost)) - } - } - - // packets larger than known acceptable MTU need to go to kernel for - // potential fragmentation - // introduced specifically for replies to egress traffic not routed - // through the host - if config.Gateway.Mode == config.GatewayModeLocal && !config.Gateway.DisablePacketMTUCheck { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=10, table=11, reg0=0x1, "+ - "actions=output:%s", nodetypes.DefaultOpenFlowCookie, ofPortHost)) - - // Send UDN destined traffic to right patch port - for _, netConfig := range b.patchedNetConfigs() { - if netConfig.MasqCTMark != nodetypes.CtMarkOVN { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=5, table=11, ct_mark=%s, "+ - "actions=output:%s", nodetypes.DefaultOpenFlowCookie, netConfig.MasqCTMark, netConfig.OfPortPatch)) - } - } - - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=1, table=11, "+ - "actions=output:%s", nodetypes.DefaultOpenFlowCookie, defaultNetConfig.OfPortPatch)) - } - - // table 1, all other connections do normal processing - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=0, table=1, actions=output:NORMAL", nodetypes.DefaultOpenFlowCookie)) - } - - return dftFlows, nil -} - -func (b *BridgeConfiguration) PMTUDDropFlows(ipAddrs []string) []string { - b.mutex.Lock() - defer b.mutex.Unlock() - var flows []string - if config.Gateway.Mode != config.GatewayModeShared { - return nil - } - for _, addr := range ipAddrs { - for _, netConfig := range b.patchedNetConfigs() { - flows = append(flows, - nodeutil.GenerateICMPFragmentationFlow(addr, nodetypes.OutputPortDrop, netConfig.OfPortPatch, nodetypes.PmtudOpenFlowCookie, 700)) - } - } - - return flows -} - -func getIPv(ipnet *net.IPNet) string { - prefix := "ip" - if utilnet.IsIPv6CIDR(ipnet) { - prefix = "ipv6" - } - return prefix -} - -// hostNetworkNormalActionFlows returns the 
flows that allow IP{v4,v6} traffic: -// a. from pods in the OVN network to pods in a localnet network, on the same node -// b. from pods on the host to pods in a localnet network, on the same node -// when the localnet is mapped to breth0. -// The expected srcMAC is the MAC address of breth0 and the expected hostSubnets is the host subnets found on the node -// primary interface. -func hostNetworkNormalActionFlows(netConfig *BridgeUDNConfiguration, srcMAC string, hostSubnets []*net.IPNet, isV6 bool) []string { - var flows []string - var ipFamily, ipFamilyDest string - - if isV6 { - ipFamily = "ipv6" - ipFamilyDest = "ipv6_dst" - } else { - ipFamily = "ip" - ipFamilyDest = "nw_dst" - } - - formatFlow := func(inPort, destIP, ctMark string) string { - // Matching IP traffic will be handled by the bridge instead of being output directly - // to the NIC by the existing flow at prio=100. - flowTemplate := "cookie=%s, priority=102, in_port=%s, dl_src=%s, %s, %s=%s, " + - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:NORMAL" - return fmt.Sprintf(flowTemplate, - nodetypes.DefaultOpenFlowCookie, - inPort, - srcMAC, - ipFamily, - ipFamilyDest, - destIP, - config.Default.ConntrackZone, - ctMark) - } - - // Traffic path (a): OVN->localnet for shared gw mode - if config.Gateway.Mode == config.GatewayModeShared { - for _, hostSubnet := range hostSubnets { - if utilnet.IsIPv6(hostSubnet.IP) != isV6 { - continue - } - flows = append(flows, formatFlow(netConfig.OfPortPatch, hostSubnet.String(), netConfig.MasqCTMark)) - } - } - - // Traffic path (a): OVN->localnet for local gw mode - // Traffic path (b): host->localnet for both gw modes - for _, hostSubnet := range hostSubnets { - if utilnet.IsIPv6(hostSubnet.IP) != isV6 { - continue - } - flows = append(flows, formatFlow(nodetypes.OvsLocalPort, hostSubnet.String(), nodetypes.CtMarkHost)) - } - - if isV6 { - // IPv6 neighbor discovery uses ICMPv6 messages sent to a special destination (ff02::1:ff00:0/104) - // 
that is unrelated to the host subnets matched in the prio=102 flow above. - // Allow neighbor discovery by matching against ICMP type and ingress port. - formatICMPFlow := func(inPort, ctMark string, icmpType int) string { - icmpFlowTemplate := "cookie=%s, priority=102, in_port=%s, dl_src=%s, icmp6, icmpv6_type=%d, " + - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:NORMAL" - return fmt.Sprintf(icmpFlowTemplate, - nodetypes.DefaultOpenFlowCookie, - inPort, - srcMAC, - icmpType, - config.Default.ConntrackZone, - ctMark) - } - - for _, icmpType := range []int{types.NeighborSolicitationICMPType, types.NeighborAdvertisementICMPType} { - // Traffic path (a) for ICMP: OVN-> localnet for shared gw mode - if config.Gateway.Mode == config.GatewayModeShared { - flows = append(flows, - formatICMPFlow(netConfig.OfPortPatch, netConfig.MasqCTMark, icmpType)) - } - - // Traffic path (a) for ICMP: OVN->localnet for local gw mode - // Traffic path (b) for ICMP: host->localnet for both gw modes - flows = append(flows, formatICMPFlow(nodetypes.OvsLocalPort, nodetypes.CtMarkHost, icmpType)) - } - } - return flows -} diff --git a/go-controller/pkg/node/default_node_network_controller.go b/go-controller/pkg/node/default_node_network_controller.go index f1281980a8..51dc1571e1 100644 --- a/go-controller/pkg/node/default_node_network_controller.go +++ b/go-controller/pkg/node/default_node_network_controller.go @@ -45,7 +45,6 @@ import ( nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/ovspinning" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" - nodetypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/controller/apbroute" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/healthcheck" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/retry" @@ -965,12 +964,8 @@ func (nc 
*DefaultNodeNetworkController) Init(ctx context.Context) error { // First part of gateway initialization. It will be completed by (nc *DefaultNodeNetworkController) Start() if config.OvnKubeNode.Mode != types.NodeModeDPUHost { - // IPv6 is not supported in DPU enabled nodes, error out if ovnkube is not set in IPv4 mode - if config.IPv6Mode && config.OvnKubeNode.Mode == types.NodeModeDPU { - return fmt.Errorf("IPv6 mode is not supported on a DPU enabled node") - } // Initialize gateway for OVS internal port or representor management port - gw, err := nc.initGatewayPreStart(subnets, nodeAnnotator, nc.mgmtPortController) + gw, err := nc.initGatewayPreStart(subnets, nodeAnnotator, nc.mgmtPortController, nodeAddr) if err != nil { return err } @@ -1063,7 +1058,7 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { netdevName = netdevs[0] config.Gateway.Interface = netdevName } - err = nc.initGatewayDPUHost(nc.nodeAddress, nodeAnnotator) + err = nc.initGatewayDPUHost(nc.nodeAddress) if err != nil { return err } @@ -1325,7 +1320,7 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { if config.OVNKubernetesFeature.EnableEgressService { wf := nc.watchFactory.(*factory.WatchFactory) - c, err := egressservice.NewController(nc.stopChan, nodetypes.OvnKubeNodeSNATMark, nc.name, + c, err := egressservice.NewController(nc.stopChan, ovnKubeNodeSNATMark, nc.name, wf.EgressServiceInformer(), wf.ServiceInformer(), wf.EndpointSliceInformer()) if err != nil { return err diff --git a/go-controller/pkg/node/default_node_network_controller_test.go b/go-controller/pkg/node/default_node_network_controller_test.go index a1413a7dd1..875b0da694 100644 --- a/go-controller/pkg/node/default_node_network_controller_test.go +++ b/go-controller/pkg/node/default_node_network_controller_test.go @@ -22,7 +22,6 @@ import ( adminpolicybasedrouteclient 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned/fake" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube/mocks" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/managementport" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" @@ -810,10 +809,17 @@ var _ = Describe("Node", func() { Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) + defaultNetConfig := &bridgeUDNConfiguration{ + ofPortPatch: "patch-breth0_ov", + } nc.Gateway = &gateway{ openflowManager: &openflowManager{ - flowCache: map[string][]string{}, - defaultBridge: bridgeconfig.TestDefaultBridgeConfig(), + flowCache: map[string][]string{}, + defaultBridge: &bridgeConfiguration{ + netConfig: map[string]*bridgeUDNConfiguration{ + types.DefaultNetworkName: defaultNetConfig, + }, + }, }, } @@ -915,10 +921,17 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { 169.254.254.61 } Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) + defaultNetConfig := &bridgeUDNConfiguration{ + ofPortPatch: "patch-breth0_ov", + } nc.Gateway = &gateway{ openflowManager: &openflowManager{ - flowCache: map[string][]string{}, - defaultBridge: bridgeconfig.TestDefaultBridgeConfig(), + flowCache: map[string][]string{}, + defaultBridge: &bridgeConfiguration{ + netConfig: map[string]*bridgeUDNConfiguration{ + types.DefaultNetworkName: defaultNetConfig, + }, + }, }, } @@ -1062,10 +1075,17 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { 169.254.253.61 } Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) + defaultNetConfig := &bridgeUDNConfiguration{ + ofPortPatch: 
"patch-breth0_ov", + } nc.Gateway = &gateway{ openflowManager: &openflowManager{ - flowCache: map[string][]string{}, - defaultBridge: bridgeconfig.TestDefaultBridgeConfig(), + flowCache: map[string][]string{}, + defaultBridge: &bridgeConfiguration{ + netConfig: map[string]*bridgeUDNConfiguration{ + types.DefaultNetworkName: defaultNetConfig, + }, + }, }, } @@ -1166,10 +1186,17 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2001:db8:1::4 } Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) + defaultNetConfig := &bridgeUDNConfiguration{ + ofPortPatch: "patch-breth0_ov", + } nc.Gateway = &gateway{ openflowManager: &openflowManager{ - flowCache: map[string][]string{}, - defaultBridge: bridgeconfig.TestDefaultBridgeConfig(), + flowCache: map[string][]string{}, + defaultBridge: &bridgeConfiguration{ + netConfig: map[string]*bridgeUDNConfiguration{ + types.DefaultNetworkName: defaultNetConfig, + }, + }, }, } @@ -1327,10 +1354,17 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2002:db8:1::4 } Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) + defaultNetConfig := &bridgeUDNConfiguration{ + ofPortPatch: "patch-breth0_ov", + } nc.Gateway = &gateway{ openflowManager: &openflowManager{ - flowCache: map[string][]string{}, - defaultBridge: bridgeconfig.TestDefaultBridgeConfig(), + flowCache: map[string][]string{}, + defaultBridge: &bridgeConfiguration{ + netConfig: map[string]*bridgeUDNConfiguration{ + types.DefaultNetworkName: defaultNetConfig, + }, + }, }, } @@ -1448,10 +1482,17 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2002:db8:1::4 } Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) + defaultNetConfig := &bridgeUDNConfiguration{ + ofPortPatch: "patch-breth0_ov", + } nc.Gateway = &gateway{ openflowManager: &openflowManager{ - flowCache: map[string][]string{}, - defaultBridge: 
bridgeconfig.TestDefaultBridgeConfig(), + flowCache: map[string][]string{}, + defaultBridge: &bridgeConfiguration{ + netConfig: map[string]*bridgeUDNConfiguration{ + types.DefaultNetworkName: defaultNetConfig, + }, + }, }, } diff --git a/go-controller/pkg/node/egress_service_test.go b/go-controller/pkg/node/egress_service_test.go index ca44ac311d..bb4e57f5ca 100644 --- a/go-controller/pkg/node/egress_service_test.go +++ b/go-controller/pkg/node/egress_service_test.go @@ -19,7 +19,6 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/controllers/egressservice" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" - nodetypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" util "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/mocks" @@ -300,7 +299,7 @@ var _ = Describe("Egress Service Operations", func() { c, err := egressservice.NewController( stopChan, - nodetypes.OvnKubeNodeSNATMark, + ovnKubeNodeSNATMark, "node", wf.EgressServiceInformer(), wf.ServiceInformer(), @@ -406,7 +405,7 @@ add element inet ovn-kubernetes egress-service-snat-v4 { 10.128.0.4 comment "nam c, err := egressservice.NewController( stopChan, - nodetypes.OvnKubeNodeSNATMark, + ovnKubeNodeSNATMark, "node", wf.EgressServiceInformer(), wf.ServiceInformer(), @@ -611,7 +610,7 @@ add element inet ovn-kubernetes egress-service-snat-v4 { 10.128.0.3 comment "nam c, err := egressservice.NewController( stopChan, - nodetypes.OvnKubeNodeSNATMark, + ovnKubeNodeSNATMark, "node", wf.EgressServiceInformer(), wf.ServiceInformer(), @@ -806,7 +805,7 @@ add element inet ovn-kubernetes egress-service-snat-v4 { 10.128.0.11 comment "na c, err := egressservice.NewController( stopChan, - nodetypes.OvnKubeNodeSNATMark, + ovnKubeNodeSNATMark, "node", 
wf.EgressServiceInformer(), wf.ServiceInformer(), @@ -965,7 +964,7 @@ add element inet ovn-kubernetes egress-service-snat-v4 { 10.128.0.11 comment "na c, err := egressservice.NewController( stopChan, - nodetypes.OvnKubeNodeSNATMark, + ovnKubeNodeSNATMark, "node", wf.EgressServiceInformer(), wf.ServiceInformer(), diff --git a/go-controller/pkg/node/egressip/gateway_egressip_suite_test.go b/go-controller/pkg/node/egressip/gateway_egressip_suite_test.go deleted file mode 100644 index d9d627c882..0000000000 --- a/go-controller/pkg/node/egressip/gateway_egressip_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package egressip - -import ( - "testing" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -func TestNodeSuite(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Node Gateway EgressIP Suite") -} diff --git a/go-controller/pkg/node/gateway.go b/go-controller/pkg/node/gateway.go index 9b43fc95a5..db1bcae279 100644 --- a/go-controller/pkg/node/gateway.go +++ b/go-controller/pkg/node/gateway.go @@ -17,8 +17,6 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/informer" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/egressip" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/retry" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -51,7 +49,7 @@ type gateway struct { nodePortWatcher informer.ServiceAndEndpointsEventHandler openflowManager *openflowManager nodeIPManager *addressManager - bridgeEIPAddrManager *egressip.BridgeEIPAddrManager + bridgeEIPAddrManager *bridgeEIPAddrManager initFunc func() error readyFunc func() (bool, error) @@ -60,8 +58,6 @@ type gateway struct { watchFactory *factory.WatchFactory // used for retry stopChan <-chan struct{} wg 
*sync.WaitGroup - - nextHops []net.IP } func (g *gateway) AddService(svc *corev1.Service) error { @@ -237,7 +233,7 @@ func (g *gateway) AddEgressIP(eip *egressipv1.EgressIP) error { if !util.IsNetworkSegmentationSupportEnabled() || !config.OVNKubernetesFeature.EnableInterconnect || config.Gateway.Mode == config.GatewayModeDisabled { return nil } - isSyncRequired, err := g.bridgeEIPAddrManager.AddEgressIP(eip) + isSyncRequired, err := g.bridgeEIPAddrManager.addEgressIP(eip) if err != nil { return err } @@ -253,7 +249,7 @@ func (g *gateway) UpdateEgressIP(oldEIP, newEIP *egressipv1.EgressIP) error { if !util.IsNetworkSegmentationSupportEnabled() || !config.OVNKubernetesFeature.EnableInterconnect || config.Gateway.Mode == config.GatewayModeDisabled { return nil } - isSyncRequired, err := g.bridgeEIPAddrManager.UpdateEgressIP(oldEIP, newEIP) + isSyncRequired, err := g.bridgeEIPAddrManager.updateEgressIP(oldEIP, newEIP) if err != nil { return err } @@ -269,7 +265,7 @@ func (g *gateway) DeleteEgressIP(eip *egressipv1.EgressIP) error { if !util.IsNetworkSegmentationSupportEnabled() || !config.OVNKubernetesFeature.EnableInterconnect || config.Gateway.Mode == config.GatewayModeDisabled { return nil } - isSyncRequired, err := g.bridgeEIPAddrManager.DeleteEgressIP(eip) + isSyncRequired, err := g.bridgeEIPAddrManager.deleteEgressIP(eip) if err != nil { return err } @@ -285,7 +281,7 @@ func (g *gateway) SyncEgressIP(eips []interface{}) error { if !util.IsNetworkSegmentationSupportEnabled() || !config.OVNKubernetesFeature.EnableInterconnect || config.Gateway.Mode == config.GatewayModeDisabled { return nil } - if err := g.bridgeEIPAddrManager.SyncEgressIP(eips); err != nil { + if err := g.bridgeEIPAddrManager.syncEgressIP(eips); err != nil { return err } if err := g.Reconcile(); err != nil { @@ -358,14 +354,14 @@ func setupUDPAggregationUplink(ifname string) error { func gatewayInitInternal(nodeName, gwIntf, egressGatewayIntf string, gwNextHops []net.IP, nodeSubnets, gwIPs 
[]*net.IPNet, advertised bool, nodeAnnotator kube.Annotator) ( - *bridgeconfig.BridgeConfiguration, *bridgeconfig.BridgeConfiguration, error) { - gatewayBridge, err := bridgeconfig.NewBridgeConfiguration(gwIntf, nodeName, types.PhysicalNetworkName, nodeSubnets, gwIPs, advertised) + *bridgeConfiguration, *bridgeConfiguration, error) { + gatewayBridge, err := bridgeForInterface(gwIntf, nodeName, types.PhysicalNetworkName, nodeSubnets, gwIPs, gwNextHops, advertised) if err != nil { return nil, nil, fmt.Errorf("bridge for interface failed for %s: %w", gwIntf, err) } - var egressGWBridge *bridgeconfig.BridgeConfiguration + var egressGWBridge *bridgeConfiguration if egressGatewayIntf != "" { - egressGWBridge, err = bridgeconfig.NewBridgeConfiguration(egressGatewayIntf, nodeName, types.PhysicalNetworkExGwName, nodeSubnets, nil, false) + egressGWBridge, err = bridgeForInterface(egressGatewayIntf, nodeName, types.PhysicalNetworkExGwName, nodeSubnets, nil, nil, false) if err != nil { return nil, nil, fmt.Errorf("bridge for interface failed for %s: %w", egressGatewayIntf, err) } @@ -384,7 +380,7 @@ func gatewayInitInternal(nodeName, gwIntf, egressGatewayIntf string, gwNextHops "IP fragmentation or large TCP/UDP payloads may not be forwarded correctly.") enableGatewayMTU = false } else { - chkPktLengthSupported, err := util.DetectCheckPktLengthSupport(gatewayBridge.GetBridgeName()) + chkPktLengthSupported, err := util.DetectCheckPktLengthSupport(gatewayBridge.bridgeName) if err != nil { return nil, nil, err } @@ -418,9 +414,9 @@ func gatewayInitInternal(nodeName, gwIntf, egressGatewayIntf string, gwNextHops } if config.Default.EnableUDPAggregation { - err = setupUDPAggregationUplink(gatewayBridge.GetUplinkName()) + err = setupUDPAggregationUplink(gatewayBridge.uplinkName) if err == nil && egressGWBridge != nil { - err = setupUDPAggregationUplink(egressGWBridge.GetUplinkName()) + err = setupUDPAggregationUplink(egressGWBridge.uplinkName) } if err != nil { klog.Warningf("Could 
not enable UDP packet aggregation on uplink interface (aggregation will be disabled): %v", err) @@ -429,37 +425,52 @@ func gatewayInitInternal(nodeName, gwIntf, egressGatewayIntf string, gwNextHops } // Set static FDB entry for LOCAL port - if err := util.SetStaticFDBEntry(gatewayBridge.GetBridgeName(), gatewayBridge.GetBridgeName(), gatewayBridge.GetMAC()); err != nil { + if err := util.SetStaticFDBEntry(gatewayBridge.bridgeName, gatewayBridge.bridgeName, gatewayBridge.macAddress); err != nil { return nil, nil, err } l3GwConfig := util.L3GatewayConfig{ Mode: config.Gateway.Mode, ChassisID: chassisID, - BridgeID: gatewayBridge.GetBridgeName(), - InterfaceID: gatewayBridge.GetInterfaceID(), - MACAddress: gatewayBridge.GetMAC(), - IPAddresses: gatewayBridge.GetIPs(), + BridgeID: gatewayBridge.bridgeName, + InterfaceID: gatewayBridge.interfaceID, + MACAddress: gatewayBridge.macAddress, + IPAddresses: gatewayBridge.ips, NextHops: gwNextHops, NodePortEnable: config.Gateway.NodeportEnable, VLANID: &config.Gateway.VLANID, } if egressGWBridge != nil { - l3GwConfig.EgressGWInterfaceID = egressGWBridge.GetInterfaceID() - l3GwConfig.EgressGWMACAddress = egressGWBridge.GetMAC() - l3GwConfig.EgressGWIPAddresses = egressGWBridge.GetIPs() + l3GwConfig.EgressGWInterfaceID = egressGWBridge.interfaceID + l3GwConfig.EgressGWMACAddress = egressGWBridge.macAddress + l3GwConfig.EgressGWIPAddresses = egressGWBridge.ips } err = util.SetL3GatewayConfig(nodeAnnotator, &l3GwConfig) return gatewayBridge, egressGWBridge, err } +func gatewayReady(patchPort string) (bool, error) { + // Get ofport of patchPort + ofport, _, err := util.GetOVSOfPort("--if-exists", "get", "interface", patchPort, "ofport") + if err != nil || len(ofport) == 0 { + return false, nil + } + klog.Info("Gateway is ready") + return true, nil +} + func (g *gateway) GetGatewayBridgeIface() string { return g.openflowManager.getDefaultBridgeName() } func (g *gateway) GetGatewayIface() string { - return 
g.openflowManager.defaultBridge.GetGatewayIface() + return g.openflowManager.defaultBridge.gwIface +} + +// getMaxFrameLength returns the maximum frame size (ignoring VLAN header) that a gateway can handle +func getMaxFrameLength() int { + return config.Default.MTU + 14 } // SetDefaultGatewayBridgeMAC updates the mac address for the OFM used to render flows with @@ -469,11 +480,11 @@ func (g *gateway) SetDefaultGatewayBridgeMAC(macAddr net.HardwareAddr) { } func (g *gateway) SetDefaultPodNetworkAdvertised(isPodNetworkAdvertised bool) { - g.openflowManager.defaultBridge.GetNetworkConfig(types.DefaultNetworkName).Advertised.Store(isPodNetworkAdvertised) + g.openflowManager.defaultBridge.netConfig[types.DefaultNetworkName].advertised.Store(isPodNetworkAdvertised) } func (g *gateway) GetDefaultPodNetworkAdvertised() bool { - return g.openflowManager.defaultBridge.GetNetworkConfig(types.DefaultNetworkName).Advertised.Load() + return g.openflowManager.defaultBridge.netConfig[types.DefaultNetworkName].advertised.Load() } // Reconcile handles triggering updates to different components of a gateway, like OFM, Services @@ -527,3 +538,202 @@ func (g *gateway) updateSNATRules() error { return addLocalGatewayPodSubnetNATRules(subnets...) 
} + +type bridgeConfiguration struct { + sync.Mutex + nodeName string + bridgeName string + uplinkName string + gwIface string + gwIfaceRep string + ips []*net.IPNet + interfaceID string + macAddress net.HardwareAddr + ofPortPhys string + ofPortHost string + netConfig map[string]*bridgeUDNConfiguration + eipMarkIPs *markIPsCache + nextHops []net.IP +} + +// updateInterfaceIPAddresses sets and returns the bridge's current ips +func (b *bridgeConfiguration) updateInterfaceIPAddresses(node *corev1.Node) ([]*net.IPNet, error) { + b.Lock() + defer b.Unlock() + ifAddrs, err := getNetworkInterfaceIPAddresses(b.gwIface) + if err != nil { + return nil, err + } + + // For DPU, here we need to use the DPU host's IP address which is the tenant cluster's + // host internal IP address instead of the DPU's external bridge IP address. + if config.OvnKubeNode.Mode == types.NodeModeDPU { + nodeAddrStr, err := util.GetNodePrimaryIP(node) + if err != nil { + return nil, err + } + nodeAddr := net.ParseIP(nodeAddrStr) + if nodeAddr == nil { + return nil, fmt.Errorf("failed to parse node IP address. 
%v", nodeAddrStr) + } + ifAddrs, err = getDPUHostPrimaryIPAddresses(nodeAddr, ifAddrs) + if err != nil { + return nil, err + } + } + + b.ips = ifAddrs + return ifAddrs, nil +} + +func bridgeForInterface(intfName, nodeName, + physicalNetworkName string, + nodeSubnets, gwIPs []*net.IPNet, + gwNextHops []net.IP, + advertised bool) (*bridgeConfiguration, error) { + var intfRep string + var err error + isGWAcclInterface := false + gwIntf := intfName + + defaultNetConfig := &bridgeUDNConfiguration{ + masqCTMark: ctMarkOVN, + subnets: config.Default.ClusterSubnets, + nodeSubnets: nodeSubnets, + } + res := bridgeConfiguration{ + nodeName: nodeName, + netConfig: map[string]*bridgeUDNConfiguration{ + types.DefaultNetworkName: defaultNetConfig, + }, + eipMarkIPs: newMarkIPsCache(), + } + if len(gwNextHops) > 0 { + res.nextHops = gwNextHops + } + res.netConfig[types.DefaultNetworkName].advertised.Store(advertised) + + if config.Gateway.GatewayAcceleratedInterface != "" { + // Try to get representor for the specified gateway device. + // If function succeeds, then it is either a valid switchdev VF or SF, and we can use this accelerated device + // for node IP, Host Ofport for Openflow etc. 
+ // If failed - error for improper configuration option + intfRep, err = getRepresentor(config.Gateway.GatewayAcceleratedInterface) + if err != nil { + return nil, fmt.Errorf("gateway accelerated interface %s is not valid: %w", config.Gateway.GatewayAcceleratedInterface, err) + } + gwIntf = config.Gateway.GatewayAcceleratedInterface + isGWAcclInterface = true + klog.Infof("For gateway accelerated interface %s representor: %s", config.Gateway.GatewayAcceleratedInterface, intfRep) + } else { + intfRep, err = getRepresentor(gwIntf) + if err == nil { + isGWAcclInterface = true + } + } + + if isGWAcclInterface { + bridgeName, _, err := util.RunOVSVsctl("port-to-br", intfRep) + if err != nil { + return nil, fmt.Errorf("failed to find bridge that has port %s: %w", intfRep, err) + } + link, err := util.GetNetLinkOps().LinkByName(gwIntf) + if err != nil { + return nil, fmt.Errorf("failed to get netdevice link for %s: %w", gwIntf, err) + } + uplinkName, err := util.GetNicName(bridgeName) + if err != nil { + return nil, fmt.Errorf("failed to find nic name for bridge %s: %w", bridgeName, err) + } + res.bridgeName = bridgeName + res.uplinkName = uplinkName + res.gwIfaceRep = intfRep + res.gwIface = gwIntf + res.macAddress = link.Attrs().HardwareAddr + } else if bridgeName, _, err := util.RunOVSVsctl("port-to-br", intfName); err == nil { + // This is an OVS bridge's internal port + uplinkName, err := util.GetNicName(bridgeName) + if err != nil { + return nil, fmt.Errorf("failed to find nic name for bridge %s: %w", bridgeName, err) + } + res.bridgeName = bridgeName + res.gwIface = bridgeName + res.uplinkName = uplinkName + gwIntf = bridgeName + } else if _, _, err := util.RunOVSVsctl("br-exists", intfName); err != nil { + // This is not a OVS bridge. We need to create a OVS bridge + // and add cluster.GatewayIntf as a port of that bridge. 
+ bridgeName, err := util.NicToBridge(intfName) + if err != nil { + return nil, fmt.Errorf("nicToBridge failed for %s: %w", intfName, err) + } + res.bridgeName = bridgeName + res.gwIface = bridgeName + res.uplinkName = intfName + gwIntf = bridgeName + } else { + // gateway interface is an OVS bridge + uplinkName, err := getIntfName(intfName) + if err != nil { + if config.Gateway.Mode == config.GatewayModeLocal && config.Gateway.AllowNoUplink { + klog.Infof("Could not find uplink for %s, setup gateway bridge with no uplink port, egress IP and egress GW will not work", intfName) + } else { + return nil, fmt.Errorf("failed to find intfName for %s: %w", intfName, err) + } + } else { + res.uplinkName = uplinkName + } + res.bridgeName = intfName + res.gwIface = intfName + } + // Now, we get IP addresses for the bridge + if len(gwIPs) > 0 { + // use gwIPs if provided + res.ips = gwIPs + } else { + // get IP addresses from OVS bridge. If IP does not exist, + // error out. + res.ips, err = getNetworkInterfaceIPAddresses(gwIntf) + if err != nil { + return nil, fmt.Errorf("failed to get interface details for %s: %w", gwIntf, err) + } + } + + if !isGWAcclInterface { // We do not have an accelerated device for Gateway interface + res.macAddress, err = util.GetOVSPortMACAddress(gwIntf) + if err != nil { + return nil, fmt.Errorf("failed to get MAC address for ovs port %s: %w", gwIntf, err) + } + } + + res.interfaceID, err = bridgedGatewayNodeSetup(nodeName, res.bridgeName, physicalNetworkName) + if err != nil { + return nil, fmt.Errorf("failed to set up shared interface gateway: %v", err) + } + + // the name of the patch port created by ovn-controller is of the form + // patch--to-br-int + defaultNetConfig.patchPort = (&util.DefaultNetInfo{}).GetNetworkScopedPatchPortName(res.bridgeName, nodeName) + + // for DPU we use the host MAC address for the Gateway configuration + if config.OvnKubeNode.Mode == types.NodeModeDPU { + hostRep, err := util.GetDPUHostInterface(res.bridgeName) + 
if err != nil { + return nil, err + } + res.macAddress, err = util.GetSriovnetOps().GetRepresentorPeerMacAddress(hostRep) + if err != nil { + return nil, err + } + } + return &res, nil +} + +func getRepresentor(intfName string) (string, error) { + deviceID, err := util.GetDeviceIDFromNetdevice(intfName) + if err != nil { + return "", err + } + + return util.GetFunctionRepresentorName(deviceID) +} diff --git a/go-controller/pkg/node/egressip/gateway_egressip.go b/go-controller/pkg/node/gateway_egressip.go similarity index 91% rename from go-controller/pkg/node/egressip/gateway_egressip.go rename to go-controller/pkg/node/gateway_egressip.go index 38bd2b058e..13e41c4542 100644 --- a/go-controller/pkg/node/egressip/gateway_egressip.go +++ b/go-controller/pkg/node/gateway_egressip.go @@ -1,4 +1,4 @@ -package egressip +package node import ( "encoding/json" @@ -75,15 +75,15 @@ func (e markIPs) containsIP(ip net.IP) bool { return false } -type MarkIPsCache struct { +type markIPsCache struct { mu sync.Mutex hasSyncOnce bool markToIPs markIPs IPToMark map[string]int } -func NewMarkIPsCache() *MarkIPsCache { - return &MarkIPsCache{ +func newMarkIPsCache() *markIPsCache { + return &markIPsCache{ mu: sync.Mutex{}, markToIPs: markIPs{ v4: make(map[int]string), @@ -93,7 +93,7 @@ func NewMarkIPsCache() *MarkIPsCache { } } -func (mic *MarkIPsCache) IsIPPresent(ip net.IP) bool { +func (mic *markIPsCache) IsIPPresent(ip net.IP) bool { mic.mu.Lock() defer mic.mu.Unlock() if ip == nil { @@ -103,7 +103,7 @@ func (mic *MarkIPsCache) IsIPPresent(ip net.IP) bool { return isFound } -func (mic *MarkIPsCache) insertMarkIP(pktMark util.EgressIPMark, ip net.IP) { +func (mic *markIPsCache) insertMarkIP(pktMark util.EgressIPMark, ip net.IP) { mic.mu.Lock() defer mic.mu.Unlock() if ip == nil { @@ -113,7 +113,7 @@ func (mic *MarkIPsCache) insertMarkIP(pktMark util.EgressIPMark, ip net.IP) { mic.IPToMark[ip.String()] = pktMark.ToInt() } -func (mic *MarkIPsCache) deleteMarkIP(pktMark 
util.EgressIPMark, ip net.IP) { +func (mic *markIPsCache) deleteMarkIP(pktMark util.EgressIPMark, ip net.IP) { mic.mu.Lock() defer mic.mu.Unlock() if ip == nil { @@ -123,7 +123,7 @@ func (mic *MarkIPsCache) deleteMarkIP(pktMark util.EgressIPMark, ip net.IP) { delete(mic.IPToMark, ip.String()) } -func (mic *MarkIPsCache) replaceAll(markIPs markIPs) { +func (mic *markIPsCache) replaceAll(markIPs markIPs) { mic.mu.Lock() mic.markToIPs = markIPs for mark, ipv4 := range markIPs.v4 { @@ -135,7 +135,7 @@ func (mic *MarkIPsCache) replaceAll(markIPs markIPs) { mic.mu.Unlock() } -func (mic *MarkIPsCache) GetIPv4() map[int]string { +func (mic *markIPsCache) GetIPv4() map[int]string { mic.mu.Lock() defer mic.mu.Unlock() dupe := make(map[int]string) @@ -148,7 +148,7 @@ func (mic *MarkIPsCache) GetIPv4() map[int]string { return dupe } -func (mic *MarkIPsCache) GetIPv6() map[int]string { +func (mic *markIPsCache) GetIPv6() map[int]string { mic.mu.Lock() defer mic.mu.Unlock() dupe := make(map[int]string) @@ -161,19 +161,19 @@ func (mic *MarkIPsCache) GetIPv6() map[int]string { return dupe } -func (mic *MarkIPsCache) HasSyncdOnce() bool { +func (mic *markIPsCache) HasSyncdOnce() bool { mic.mu.Lock() defer mic.mu.Unlock() return mic.hasSyncOnce } -func (mic *MarkIPsCache) setSyncdOnce() { +func (mic *markIPsCache) setSyncdOnce() { mic.mu.Lock() mic.hasSyncOnce = true mic.mu.Unlock() } -type BridgeEIPAddrManager struct { +type bridgeEIPAddrManager struct { nodeName string bridgeName string nodeAnnotationMu sync.Mutex @@ -182,18 +182,18 @@ type BridgeEIPAddrManager struct { nodeLister corev1listers.NodeLister kube kube.Interface addrManager *linkmanager.Controller - cache *MarkIPsCache + cache *markIPsCache } -// NewBridgeEIPAddrManager manages EgressIP IPs that must be added to ovs bridges to support EgressIP feature for user +// newBridgeEIPAddrManager manages EgressIP IPs that must be added to ovs bridges to support EgressIP feature for user // defined networks. 
It saves the assigned IPs to its respective Node annotation in-order to understand which IPs it assigned // prior to restarting. // It provides the assigned IPs info node IP handler. Node IP handler must not consider assigned EgressIP IPs as possible node IPs. // Openflow manager must generate the SNAT openflow conditional on packet marks and therefore needs access to EIP IPs and associated packet marks. -// BridgeEIPAddrManager must be able to force Openflow manager to resync if EgressIP assignment for the node changes. -func NewBridgeEIPAddrManager(nodeName, bridgeName string, linkManager *linkmanager.Controller, - kube kube.Interface, eIPInformer egressipinformers.EgressIPInformer, nodeInformer corev1informers.NodeInformer) *BridgeEIPAddrManager { - return &BridgeEIPAddrManager{ +// bridgeEIPAddrManager must be able to force Openflow manager to resync if EgressIP assignment for the node changes. +func newBridgeEIPAddrManager(nodeName, bridgeName string, linkManager *linkmanager.Controller, + kube kube.Interface, eIPInformer egressipinformers.EgressIPInformer, nodeInformer corev1informers.NodeInformer) *bridgeEIPAddrManager { + return &bridgeEIPAddrManager{ nodeName: nodeName, // k8 node name bridgeName: bridgeName, // bridge name for which EIP IPs are managed nodeAnnotationMu: sync.Mutex{}, // mu for updating Node annotation @@ -202,15 +202,15 @@ func NewBridgeEIPAddrManager(nodeName, bridgeName string, linkManager *linkmanag nodeLister: nodeInformer.Lister(), kube: kube, addrManager: linkManager, - cache: NewMarkIPsCache(), // cache to store pkt mark -> EIP IP. + cache: newMarkIPsCache(), // cache to store pkt mark -> EIP IP. 
} } -func (g *BridgeEIPAddrManager) GetCache() *MarkIPsCache { +func (g *bridgeEIPAddrManager) GetCache() *markIPsCache { return g.cache } -func (g *BridgeEIPAddrManager) AddEgressIP(eip *egressipv1.EgressIP) (bool, error) { +func (g *bridgeEIPAddrManager) addEgressIP(eip *egressipv1.EgressIP) (bool, error) { var isUpdated bool if !util.IsEgressIPMarkSet(eip.Annotations) { return isUpdated, nil @@ -237,7 +237,7 @@ func (g *BridgeEIPAddrManager) AddEgressIP(eip *egressipv1.EgressIP) (bool, erro return isUpdated, nil } -func (g *BridgeEIPAddrManager) UpdateEgressIP(oldEIP, newEIP *egressipv1.EgressIP) (bool, error) { +func (g *bridgeEIPAddrManager) updateEgressIP(oldEIP, newEIP *egressipv1.EgressIP) (bool, error) { var isUpdated bool // at most, one status item for this node will be found. for _, oldStatus := range oldEIP.Status.Items { @@ -293,7 +293,7 @@ func (g *BridgeEIPAddrManager) UpdateEgressIP(oldEIP, newEIP *egressipv1.EgressI return isUpdated, nil } -func (g *BridgeEIPAddrManager) DeleteEgressIP(eip *egressipv1.EgressIP) (bool, error) { +func (g *bridgeEIPAddrManager) deleteEgressIP(eip *egressipv1.EgressIP) (bool, error) { var isUpdated bool if !util.IsEgressIPMarkSet(eip.Annotations) { return isUpdated, nil @@ -322,7 +322,7 @@ func (g *BridgeEIPAddrManager) DeleteEgressIP(eip *egressipv1.EgressIP) (bool, e return isUpdated, nil } -func (g *BridgeEIPAddrManager) SyncEgressIP(objs []interface{}) error { +func (g *bridgeEIPAddrManager) syncEgressIP(objs []interface{}) error { // caller must synchronise annotIPs, err := g.getAnnotationIPs() if err != nil { @@ -380,7 +380,7 @@ func (g *BridgeEIPAddrManager) SyncEgressIP(objs []interface{}) error { // addIPToAnnotation adds an address to the collection of existing addresses stored in the nodes annotation. Caller // may repeat addition of addresses without care for duplicate addresses being added. 
-func (g *BridgeEIPAddrManager) addIPToAnnotation(candidateIP net.IP) error { +func (g *bridgeEIPAddrManager) addIPToAnnotation(candidateIP net.IP) error { g.nodeAnnotationMu.Lock() defer g.nodeAnnotationMu.Unlock() return retry.RetryOnConflict(retry.DefaultRetry, func() error { @@ -412,7 +412,7 @@ func (g *BridgeEIPAddrManager) addIPToAnnotation(candidateIP net.IP) error { // deleteIPsFromAnnotation deletes address from annotation. If multiple users, callers must synchronise. // deletion of address that doesn't exist will not cause an error. -func (g *BridgeEIPAddrManager) deleteIPsFromAnnotation(candidateIPs ...net.IP) error { +func (g *bridgeEIPAddrManager) deleteIPsFromAnnotation(candidateIPs ...net.IP) error { g.nodeAnnotationMu.Lock() defer g.nodeAnnotationMu.Unlock() return retry.RetryOnConflict(retry.DefaultRetry, func() error { @@ -446,7 +446,7 @@ func (g *BridgeEIPAddrManager) deleteIPsFromAnnotation(candidateIPs ...net.IP) e }) } -func (g *BridgeEIPAddrManager) addIPBridge(ip net.IP) error { +func (g *bridgeEIPAddrManager) addIPBridge(ip net.IP) error { link, err := util.GetNetLinkOps().LinkByName(g.bridgeName) if err != nil { return fmt.Errorf("failed to get link obj by name %s: %v", g.bridgeName, err) @@ -454,7 +454,7 @@ func (g *BridgeEIPAddrManager) addIPBridge(ip net.IP) error { return g.addrManager.AddAddress(getEIPBridgeNetlinkAddress(ip, link.Attrs().Index)) } -func (g *BridgeEIPAddrManager) deleteIPBridge(ip net.IP) error { +func (g *bridgeEIPAddrManager) deleteIPBridge(ip net.IP) error { link, err := util.GetNetLinkOps().LinkByName(g.bridgeName) if err != nil { return fmt.Errorf("failed to get link obj by name %s: %v", g.bridgeName, err) @@ -464,7 +464,7 @@ func (g *BridgeEIPAddrManager) deleteIPBridge(ip net.IP) error { // getAnnotationIPs retrieves the egress IP annotation from the current node Nodes object. If multiple users, callers must synchronise. 
// if annotation isn't present, empty set is returned -func (g *BridgeEIPAddrManager) getAnnotationIPs() ([]net.IP, error) { +func (g *bridgeEIPAddrManager) getAnnotationIPs() ([]net.IP, error) { node, err := g.nodeLister.Get(g.nodeName) if err != nil { return nil, fmt.Errorf("failed to get node %s from lister: %v", g.nodeName, err) diff --git a/go-controller/pkg/node/egressip/gateway_egressip_test.go b/go-controller/pkg/node/gateway_egressip_test.go similarity index 95% rename from go-controller/pkg/node/egressip/gateway_egressip_test.go rename to go-controller/pkg/node/gateway_egressip_test.go index 07a03a87b6..db43f7450a 100644 --- a/go-controller/pkg/node/egressip/gateway_egressip_test.go +++ b/go-controller/pkg/node/gateway_egressip_test.go @@ -1,4 +1,4 @@ -package egressip +package node import ( "fmt" @@ -67,7 +67,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, emptyAnnotation) defer stopFn() eip := getEIPAssignedToNode(nodeName, mark, ipV4Addr) - isUpdated, err := addrMgr.AddEgressIP(eip) + isUpdated, err := addrMgr.addEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) node, err := addrMgr.nodeLister.Get(nodeName) @@ -82,7 +82,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, emptyAnnotation) defer stopFn() eip := getEIPAssignedToNode(nodeName, "", ipV4Addr) - isUpdated, err := addrMgr.AddEgressIP(eip) + isUpdated, err := addrMgr.addEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeFalse()) node, err := addrMgr.nodeLister.Get(nodeName) @@ -97,7 +97,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, emptyAnnotation) defer stopFn() eip := 
getEIPAssignedToNode(nodeName, "not-an-integer", ipV4Addr) - isUpdated, err := addrMgr.AddEgressIP(eip) + isUpdated, err := addrMgr.addEgressIP(eip) gomega.Expect(err).Should(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeFalse()) node, err := addrMgr.nodeLister.Get(nodeName) @@ -117,7 +117,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, generateAnnotFromIPs(ipV4Addr2)) defer stopFn() eip := getEIPAssignedToNode(nodeName, mark, ipV4Addr) - isUpdated, err := addrMgr.AddEgressIP(eip) + isUpdated, err := addrMgr.addEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) node, err := addrMgr.nodeLister.Get(nodeName) @@ -140,7 +140,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { defer stopFn() assignedEIP := getEIPAssignedToNode(nodeName, mark, ipV4Addr) unassignedEIP := getEIPNotAssignedToNode(mark, ipV4Addr) - isUpdated, err := addrMgr.UpdateEgressIP(unassignedEIP, assignedEIP) + isUpdated, err := addrMgr.updateEgressIP(unassignedEIP, assignedEIP) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) node, err := addrMgr.nodeLister.Get(nodeName) @@ -162,10 +162,10 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { defer stopFn() assignedEIP := getEIPAssignedToNode(nodeName, mark, ipV4Addr) unassignedEIP := getEIPNotAssignedToNode(mark, ipV4Addr) - isUpdated, err := addrMgr.UpdateEgressIP(unassignedEIP, assignedEIP) + isUpdated, err := addrMgr.updateEgressIP(unassignedEIP, assignedEIP) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) - isUpdated, err = addrMgr.UpdateEgressIP(assignedEIP, unassignedEIP) + isUpdated, err = addrMgr.updateEgressIP(assignedEIP, unassignedEIP) 
gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) node, err := addrMgr.nodeLister.Get(nodeName) @@ -191,10 +191,10 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { unassignedEIP := getEIPNotAssignedToNode(mark, ipV4Addr) assignedEIP1 := getEIPAssignedToNode(nodeName, mark, ipV4Addr) assignedEIP2 := getEIPAssignedToNode(nodeName, mark2, ipV4Addr2) - isUpdated, err := addrMgr.UpdateEgressIP(unassignedEIP, assignedEIP1) + isUpdated, err := addrMgr.updateEgressIP(unassignedEIP, assignedEIP1) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) - isUpdated, err = addrMgr.UpdateEgressIP(assignedEIP1, assignedEIP2) + isUpdated, err = addrMgr.updateEgressIP(assignedEIP1, assignedEIP2) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) node, err := addrMgr.nodeLister.Get(nodeName) @@ -221,10 +221,10 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, emptyAnnotation) defer stopFn() eip := getEIPAssignedToNode(nodeName, mark, ipV4Addr) - isUpdated, err := addrMgr.AddEgressIP(eip) + isUpdated, err := addrMgr.addEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) - isUpdated, err = addrMgr.DeleteEgressIP(eip) + isUpdated, err = addrMgr.deleteEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) node, err := addrMgr.nodeLister.Get(nodeName) @@ -240,7 +240,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, generateAnnotFromIPs(ipV4Addr2)) defer stopFn() eip := getEIPNotAssignedToNode(mark, 
ipV4Addr) - isUpdated, err := addrMgr.DeleteEgressIP(eip) + isUpdated, err := addrMgr.deleteEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeFalse()) node, err := addrMgr.nodeLister.Get(nodeName) @@ -265,7 +265,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { eipAssigned1 := getEIPAssignedToNode(nodeName, mark, ipV4Addr) eipAssigned2 := getEIPAssignedToNode(nodeName, mark2, ipV4Addr2) eipUnassigned3 := getEIPNotAssignedToNode(mark3, ipV4Addr3) - err := addrMgr.SyncEgressIP([]interface{}{eipAssigned1, eipAssigned2, eipUnassigned3}) + err := addrMgr.syncEgressIP([]interface{}{eipAssigned1, eipAssigned2, eipUnassigned3}) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process valid EgressIPs") node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") @@ -289,7 +289,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { defer stopFn() eipAssigned1 := getEIPAssignedToNode(nodeName, mark, ipV4Addr) eipAssigned2 := getEIPAssignedToNode(nodeName, mark2, ipV4Addr2) - err := addrMgr.SyncEgressIP([]interface{}{eipAssigned1, eipAssigned2}) + err := addrMgr.syncEgressIP([]interface{}{eipAssigned1, eipAssigned2}) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process valid EgressIPs") node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") @@ -306,7 +306,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, emptyAnnotation) // previously configured IP defer stopFn() eipAssigned := getEIPAssignedToNode(nodeName, "", ipV4Addr) - err := addrMgr.SyncEgressIP([]interface{}{eipAssigned}) + err := addrMgr.syncEgressIP([]interface{}{eipAssigned}) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process valid EgressIPs") 
node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") @@ -315,7 +315,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { }) }) -func initBridgeEIPAddrManager(nodeName, bridgeName string, bridgeEIPAnnot string) (*BridgeEIPAddrManager, func()) { +func initBridgeEIPAddrManager(nodeName, bridgeName string, bridgeEIPAnnot string) (*bridgeEIPAddrManager, func()) { node := &corev1.Node{ ObjectMeta: metav1.ObjectMeta{Name: nodeName, Annotations: map[string]string{}}, } @@ -327,7 +327,7 @@ func initBridgeEIPAddrManager(nodeName, bridgeName string, bridgeEIPAnnot string gomega.Expect(watchFactory.Start()).Should(gomega.Succeed(), "watch factory should start") gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "watch factory creation must succeed") linkManager := linkmanager.NewController(nodeName, true, true, nil) - return NewBridgeEIPAddrManager(nodeName, bridgeName, linkManager, &kube.Kube{KClient: client}, watchFactory.EgressIPInformer(), watchFactory.NodeCoreInformer()), + return newBridgeEIPAddrManager(nodeName, bridgeName, linkManager, &kube.Kube{KClient: client}, watchFactory.EgressIPInformer(), watchFactory.NodeCoreInformer()), watchFactory.Shutdown } diff --git a/go-controller/pkg/node/gateway_init.go b/go-controller/pkg/node/gateway_init.go index b4d11d69cf..28e0fa669b 100644 --- a/go-controller/pkg/node/gateway_init.go +++ b/go-controller/pkg/node/gateway_init.go @@ -9,7 +9,6 @@ import ( "github.com/vishvananda/netlink" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" utilnet "k8s.io/utils/net" @@ -19,11 +18,96 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/managementport" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" - nodeutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/util" 
 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) +// bridgedGatewayNodeSetup enables forwarding on bridge interface, sets up the physical network name mappings for the bridge, +// and returns an ifaceID created from the bridge name and the node name +func bridgedGatewayNodeSetup(nodeName, bridgeName, physicalNetworkName string) (string, error) { + // IPv6 forwarding is enabled globally + if config.IPv4Mode { + // we use forward slash as path separator to allow dotted bridgeName e.g. foo.200 + stdout, stderr, err := util.RunSysctl("-w", fmt.Sprintf("net/ipv4/conf/%s/forwarding=1", bridgeName)) + // sysctl output enforces dot as path separator + if err != nil || stdout != fmt.Sprintf("net.ipv4.conf.%s.forwarding = 1", strings.ReplaceAll(bridgeName, ".", "/")) { + return "", fmt.Errorf("could not set the correct forwarding value for interface %s: stdout: %v, stderr: %v, err: %v", + bridgeName, stdout, stderr, err) + } + } + + // ovn-bridge-mappings maps a physical network name to a local ovs bridge + // that provides connectivity to that network. It is in the form of physnet1:br1,physnet2:br2.
+ // Note that there may be multiple ovs bridge mappings, be sure not to override + // the mappings for the other physical network + stdout, stderr, err := util.RunOVSVsctl("--if-exists", "get", "Open_vSwitch", ".", + "external_ids:ovn-bridge-mappings") + if err != nil { + return "", fmt.Errorf("failed to get ovn-bridge-mappings stderr:%s (%v)", stderr, err) + } + // skip the existing mapping setting for the specified physicalNetworkName + mapString := "" + bridgeMappings := strings.Split(stdout, ",") + for _, bridgeMapping := range bridgeMappings { + m := strings.Split(bridgeMapping, ":") + if network := m[0]; network != physicalNetworkName { + if len(mapString) != 0 { + mapString += "," + } + mapString += bridgeMapping + } + } + if len(mapString) != 0 { + mapString += "," + } + mapString += physicalNetworkName + ":" + bridgeName + + _, stderr, err = util.RunOVSVsctl("set", "Open_vSwitch", ".", + fmt.Sprintf("external_ids:ovn-bridge-mappings=%s", mapString)) + if err != nil { + return "", fmt.Errorf("failed to set ovn-bridge-mappings for ovs bridge %s"+ + ", stderr:%s (%v)", bridgeName, stderr, err) + } + + ifaceID := bridgeName + "_" + nodeName + return ifaceID, nil +} + +// getNetworkInterfaceIPAddresses returns the IP addresses for the network interface 'iface'. 
+func getNetworkInterfaceIPAddresses(iface string) ([]*net.IPNet, error) { + allIPs, err := util.GetFilteredInterfaceV4V6IPs(iface) + if err != nil { + return nil, fmt.Errorf("could not find IP addresses: %v", err) + } + + var ips []*net.IPNet + var foundIPv4 bool + var foundIPv6 bool + for _, ip := range allIPs { + if utilnet.IsIPv6CIDR(ip) { + if config.IPv6Mode && !foundIPv6 { + // For IPv6 addresses with 128 prefix, let's try to find an appropriate subnet + // in the routing table + subnetIP, err := util.GetIPv6OnSubnet(iface, ip) + if err != nil { + return nil, fmt.Errorf("could not find IPv6 address on subnet: %v", err) + } + ips = append(ips, subnetIP) + foundIPv6 = true + } + } else if config.IPv4Mode && !foundIPv4 { + ips = append(ips, ip) + foundIPv4 = true + } + } + if config.IPv4Mode && !foundIPv4 { + return nil, fmt.Errorf("failed to find IPv4 address on interface %s", iface) + } else if config.IPv6Mode && !foundIPv6 { + return nil, fmt.Errorf("failed to find IPv6 address on interface %s", iface) + } + return ips, nil +} + func getGatewayNextHops() ([]net.IP, string, error) { var gatewayNextHops []net.IP var needIPv4NextHop bool @@ -134,6 +218,52 @@ func getGatewayNextHops() ([]net.IP, string, error) { return gatewayNextHops, gatewayIntf, nil } +// getDPUHostPrimaryIPAddresses returns the DPU host IP/Network based on K8s Node IP +// and DPU IP subnet overridden by config config.Gateway.RouterSubnet +func getDPUHostPrimaryIPAddresses(k8sNodeIP net.IP, ifAddrs []*net.IPNet) ([]*net.IPNet, error) { + // Note(adrianc): No Dual-Stack support at this point as we rely on k8s node IP to derive gateway information + // for each node.
+ var gwIps []*net.IPNet + isIPv4 := utilnet.IsIPv4(k8sNodeIP) + + // override subnet mask via config + if config.Gateway.RouterSubnet != "" { + _, addr, err := net.ParseCIDR(config.Gateway.RouterSubnet) + if err != nil { + return nil, err + } + if utilnet.IsIPv4CIDR(addr) != isIPv4 { + return nil, fmt.Errorf("unexpected gateway router subnet provided (%s). "+ + "does not match Node IP address format", config.Gateway.RouterSubnet) + } + if !addr.Contains(k8sNodeIP) { + return nil, fmt.Errorf("unexpected gateway router subnet provided (%s). "+ + "subnet does not contain Node IP address (%s)", config.Gateway.RouterSubnet, k8sNodeIP) + } + addr.IP = k8sNodeIP + gwIps = append(gwIps, addr) + } else { + // Assume Host and DPU share the same subnet + // in this case just update the matching IPNet with the Host's IP address + for _, addr := range ifAddrs { + if utilnet.IsIPv4CIDR(addr) != isIPv4 { + continue + } + // expect k8s Node IP to be contained in the given subnet + if !addr.Contains(k8sNodeIP) { + continue + } + newAddr := *addr + newAddr.IP = k8sNodeIP + gwIps = append(gwIps, &newAddr) + } + if len(gwIps) == 0 { + return nil, fmt.Errorf("could not find subnet on DPU matching node IP %s", k8sNodeIP) + } + } + return gwIps, nil +} + // getInterfaceByIP retrieves Interface that has `ip` assigned to it func getInterfaceByIP(ip net.IP) (string, error) { links, err := util.GetNetLinkOps().LinkList() @@ -187,39 +317,6 @@ func configureSvcRouteViaInterface(routeManager *routemanager.Controller, iface return nil } -// getNodePrimaryIfAddrs returns the appropriate interface addresses based on the node mode -func getNodePrimaryIfAddrs(watchFactory factory.NodeWatchFactory, nodeName string, gatewayIntf string) ([]*net.IPNet, error) { - switch config.OvnKubeNode.Mode { - case types.NodeModeDPU: - // For DPU mode, use the host IP address from node annotation - node, err := watchFactory.GetNode(nodeName) - if err != nil { - return nil, fmt.Errorf("error retrieving node %s: %v", 
nodeName, err) - } - - // Extract the primary DPU address annotation from the node - nodeIfAddr, err := util.GetNodePrimaryDPUHostAddrAnnotation(node) - if err != nil { - return nil, err - } - - if nodeIfAddr.IPv4 == "" { - return nil, fmt.Errorf("node primary DPU address annotation is empty for node %s", nodeName) - } - - nodeIP, nodeAddrs, err := net.ParseCIDR(nodeIfAddr.IPv4) - if err != nil { - return nil, fmt.Errorf("failed to parse node IP address %s: %v", nodeIfAddr.IPv4, err) - } - - nodeAddrs.IP = nodeIP - return []*net.IPNet{nodeAddrs}, nil - default: - // For other modes, get network interface IP addresses directly - return nodeutil.GetNetworkInterfaceIPAddresses(gatewayIntf) - } -} - // initGatewayPreStart executes the first part of the gateway initialization for the node. // It creates the gateway object, the node IP manager, openflow manager and node port watcher // once OVN controller is ready and the patch port exists for this node. @@ -229,6 +326,7 @@ func (nc *DefaultNodeNetworkController) initGatewayPreStart( subnets []*net.IPNet, nodeAnnotator kube.Annotator, mgmtPort managementport.Interface, + kubeNodeIP net.IP, ) (*gateway, error) { klog.Info("Initializing Gateway Functionality for Gateway PreStart") @@ -247,12 +345,20 @@ func (nc *DefaultNodeNetworkController) initGatewayPreStart( egressGWInterface = interfaceForEXGW(config.Gateway.EgressGWInterface) } - // Get interface addresses based on node mode - ifAddrs, err = getNodePrimaryIfAddrs(nc.watchFactory, nc.name, gatewayIntf) + ifAddrs, err = getNetworkInterfaceIPAddresses(gatewayIntf) if err != nil { return nil, err } + // For DPU need to use the host IP addr which currently is assumed to be K8s Node cluster + // internal IP address. 
+ if config.OvnKubeNode.Mode == types.NodeModeDPU { + ifAddrs, err = getDPUHostPrimaryIPAddresses(kubeNodeIP, ifAddrs) + if err != nil { + return nil, err + } + } + if err := util.SetNodePrimaryIfAddrs(nodeAnnotator, ifAddrs); err != nil { klog.Errorf("Unable to set primary IP net label on node, err: %v", err) } @@ -368,7 +474,7 @@ func (nc *DefaultNodeNetworkController) initGatewayMainStart(gw *gateway, waiter // interfaceForEXGW takes the interface requested to act as exgw bridge // and returns the name of the bridge if exists, or the interface itself -// if the bridge needs to be created. In this last scenario, BridgeForInterface +// if the bridge needs to be created. In this last scenario, bridgeForInterface // will create the bridge. func interfaceForEXGW(intfName string) string { if _, _, err := util.RunOVSVsctl("br-exists", intfName); err == nil { @@ -384,7 +490,7 @@ func interfaceForEXGW(intfName string) string { return intfName } -func (nc *DefaultNodeNetworkController) initGatewayDPUHost(kubeNodeIP net.IP, nodeAnnotator kube.Annotator) error { +func (nc *DefaultNodeNetworkController) initGatewayDPUHost(kubeNodeIP net.IP) error { // A DPU host gateway is complementary to the shared gateway running // on the DPU embedded CPU. 
it performs some initializations and // watch on services for iptable rule updates and run a loadBalancerHealth checker @@ -392,71 +498,35 @@ func (nc *DefaultNodeNetworkController) initGatewayDPUHost(kubeNodeIP net.IP, no klog.Info("Initializing Shared Gateway Functionality on DPU host") var err error - // Find the network interface that has the Kubernetes node IP assigned to it - // This interface will be used for DPU host gateway operations - kubeIntf, err := getInterfaceByIP(kubeNodeIP) + // Force gateway interface to be the interface associated with kubeNodeIP + gwIntf, err := getInterfaceByIP(kubeNodeIP) if err != nil { return err } + config.Gateway.Interface = gwIntf - // Get all IP addresses (IPv4 and IPv6) configured on the detected interface - ifAddrs, err := nodeutil.GetNetworkInterfaceIPAddresses(kubeIntf) + _, gatewayIntf, err := getGatewayNextHops() if err != nil { return err } - // Extract the IPv4 address from the interface addresses for node annotation - nodeIPNet, _ := util.MatchFirstIPNetFamily(false, ifAddrs) - nodeAddrSet := sets.New[string](nodeIPNet.String()) - - // If no gateway interface is explicitly configured, use the detected interface - if config.Gateway.Interface == "" { - config.Gateway.Interface = kubeIntf - } - - // If a different gateway interface is configured than the one with used for the kubernetes node IP, - // get its addresses and add them to the node address set for routing purposes - if config.Gateway.Interface != kubeIntf { - ifAddrs, err = nodeutil.GetNetworkInterfaceIPAddresses(config.Gateway.Interface) - if err != nil { - return err - } - detectedIPNetv4, _ := util.MatchFirstIPNetFamily(false, ifAddrs) - nodeAddrSet.Insert(detectedIPNetv4.String()) - // Use the configured interface for the masquerade route instead of the auto-detected one - kubeIntf = config.Gateway.Interface - } - - // Set the primary DPU address annotation on the node with the interface addresses - if err := 
util.SetNodePrimaryDPUHostAddr(nodeAnnotator, ifAddrs); err != nil { - klog.Errorf("Unable to set primary IP net label on node, err: %v", err) - return err - } - - // Set the host CIDRs annotation to include all detected network addresses - // This helps with routing decisions for traffic coming from the host - if err := util.SetNodeHostCIDRs(nodeAnnotator, nodeAddrSet); err != nil { - klog.Errorf("Unable to set host-cidrs on node, err: %v", err) + ifAddrs, err := getNetworkInterfaceIPAddresses(gatewayIntf) + if err != nil { return err } - // Apply all node annotations to the Kubernetes node object - if err := nodeAnnotator.Run(); err != nil { - return fmt.Errorf("failed to set node %s annotations: %w", nc.name, err) - } - // Delete stale masquerade resources if there are any. This is to make sure that there // are no Linux resources with IP from old masquerade subnet when masquerade subnet // gets changed as part of day2 operation. - if err := deleteStaleMasqueradeResources(kubeIntf, nc.name, nc.watchFactory); err != nil { + if err := deleteStaleMasqueradeResources(gwIntf, nc.name, nc.watchFactory); err != nil { return fmt.Errorf("failed to remove stale masquerade resources: %w", err) } - if err := setNodeMasqueradeIPOnExtBridge(kubeIntf); err != nil { - return fmt.Errorf("failed to set the node masquerade IP on the ext bridge %s: %v", kubeIntf, err) + if err := setNodeMasqueradeIPOnExtBridge(gwIntf); err != nil { + return fmt.Errorf("failed to set the node masquerade IP on the ext bridge %s: %v", gwIntf, err) } - if err := addMasqueradeRoute(nc.routeManager, kubeIntf, nc.name, ifAddrs, nc.watchFactory); err != nil { + if err := addMasqueradeRoute(nc.routeManager, gwIntf, nc.name, ifAddrs, nc.watchFactory); err != nil { return fmt.Errorf("failed to set the node masquerade route to OVN: %v", err) } @@ -465,7 +535,7 @@ func (nc *DefaultNodeNetworkController) initGatewayDPUHost(kubeNodeIP net.IP, no return fmt.Errorf("failed to update masquerade subnet annotation on 
node: %s, error: %v", nc.name, err) } - err = configureSvcRouteViaInterface(nc.routeManager, config.Gateway.Interface, DummyNextHopIPs()) + err = configureSvcRouteViaInterface(nc.routeManager, gatewayIntf, DummyNextHopIPs()) if err != nil { return err } @@ -491,7 +561,7 @@ func (nc *DefaultNodeNetworkController) initGatewayDPUHost(kubeNodeIP net.IP, no gw.portClaimWatcher = portClaimWatcher } - if err := addHostMACBindings(kubeIntf); err != nil { + if err := addHostMACBindings(gwIntf); err != nil { return fmt.Errorf("failed to add MAC bindings for service routing") } @@ -535,7 +605,7 @@ func CleanupClusterNode(name string) error { func (nc *DefaultNodeNetworkController) updateGatewayMAC(link netlink.Link) error { // TBD-merge for dpu-host mode: if interface mac of the dpu-host interface that connects to the // gateway bridge on the dpu changes, we need to update dpu's gatewayBridge.macAddress L3 gateway - // annotation (see BridgeForInterface) + // annotation (see bridgeForInterface) if config.OvnKubeNode.Mode != types.NodeModeFull { return nil } diff --git a/go-controller/pkg/node/gateway_init_linux_test.go b/go-controller/pkg/node/gateway_init_linux_test.go index 79886dbf38..efa9f0f38f 100644 --- a/go-controller/pkg/node/gateway_init_linux_test.go +++ b/go-controller/pkg/node/gateway_init_linux_test.go @@ -572,7 +572,7 @@ func shareGatewayInterfaceDPUTest(app *cli.App, testNS ns.NetNS, // exec Mocks fexec := ovntest.NewLooseCompareFakeExec() // gatewayInitInternal - // BridgeForInterface + // bridgeForInterface fexec.AddFakeCmd(&ovntest.ExpectedCmd{ Cmd: "ovs-vsctl --timeout=15 port-to-br " + brphys, Err: fmt.Errorf(""), @@ -733,9 +733,6 @@ func shareGatewayInterfaceDPUTest(app *cli.App, testNS ns.NetNS, k := &kube.Kube{KClient: kubeFakeClient} nodeAnnotator := kube.NewNodeAnnotator(k, existingNode.Name) - err = util.SetNodePrimaryDPUHostAddr(nodeAnnotator, ovntest.MustParseIPNets(nodeSubnet)) - config.Gateway.RouterSubnet = nodeSubnet - 
Expect(err).NotTo(HaveOccurred()) err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, ovntest.MustParseIPNets(nodeSubnet)) Expect(err).NotTo(HaveOccurred()) @@ -904,11 +901,8 @@ func shareGatewayInterfaceDPUHostTest(app *cli.App, testNS ns.NetNS, uplinkName, err = testNS.Do(func(ns.NetNS) error { defer GinkgoRecover() - k := &kube.Kube{KClient: kubeFakeClient} - - nodeAnnotator := kube.NewNodeAnnotator(k, existingNode.Name) - err := nc.initGatewayDPUHost(net.ParseIP(hostIP), nodeAnnotator) + err := nc.initGatewayDPUHost(net.ParseIP(hostIP)) Expect(err).NotTo(HaveOccurred()) link, err := netlink.LinkByName(uplinkName) @@ -1677,6 +1671,47 @@ var _ = Describe("Gateway unit tests", func() { util.SetNetLinkOpMockInst(origNetlinkInst) }) + Context("getDPUHostPrimaryIPAddresses", func() { + + It("returns Gateway IP/Subnet for kubernetes node IP", func() { + _, dpuSubnet, _ := net.ParseCIDR("10.0.0.101/24") + nodeIP := net.ParseIP("10.0.0.11") + expectedGwSubnet := []*net.IPNet{ + {IP: nodeIP, Mask: net.CIDRMask(24, 32)}, + } + gwSubnet, err := getDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) + Expect(err).ToNot(HaveOccurred()) + Expect(gwSubnet).To(Equal(expectedGwSubnet)) + }) + + It("Fails if node IP is not in host subnets", func() { + _, dpuSubnet, _ := net.ParseCIDR("10.0.0.101/24") + nodeIP := net.ParseIP("10.0.1.11") + _, err := getDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) + Expect(err).To(HaveOccurred()) + }) + + It("returns node IP with config.Gateway.RouterSubnet subnet", func() { + config.Gateway.RouterSubnet = "10.1.0.0/16" + _, dpuSubnet, _ := net.ParseCIDR("10.0.0.101/24") + nodeIP := net.ParseIP("10.1.0.11") + expectedGwSubnet := []*net.IPNet{ + {IP: nodeIP, Mask: net.CIDRMask(16, 32)}, + } + gwSubnet, err := getDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) + Expect(err).ToNot(HaveOccurred()) + Expect(gwSubnet).To(Equal(expectedGwSubnet)) + }) + + It("Fails if node IP is not in config.Gateway.RouterSubnet subnet", 
func() { + config.Gateway.RouterSubnet = "10.1.0.0/16" + _, dpuSubnet, _ := net.ParseCIDR("10.0.0.101/24") + nodeIP := net.ParseIP("10.0.0.11") + _, err := getDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) + Expect(err).To(HaveOccurred()) + }) + }) + Context("getInterfaceByIP", func() { It("Finds correct interface", func() { lnk := &linkMock.Link{} diff --git a/go-controller/pkg/node/gateway_localnet_linux_test.go b/go-controller/pkg/node/gateway_localnet_linux_test.go index 49e4d1ee13..013234e1b1 100644 --- a/go-controller/pkg/node/gateway_localnet_linux_test.go +++ b/go-controller/pkg/node/gateway_localnet_linux_test.go @@ -21,7 +21,6 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/retry" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" @@ -57,8 +56,9 @@ func initFakeNodePortWatcher(iptV4, iptV6 util.IPTablesHelper) *nodePortWatcher gwMACParsed, _ := net.ParseMAC(gwMAC) - defaultBridge := bridgeconfig.TestDefaultBridgeConfig() - defaultBridge.SetMAC(gwMACParsed) + defaultNetConfig := &bridgeUDNConfiguration{ + ofPortPatch: "patch-breth0_ov", + } fNPW := nodePortWatcher{ ofportPhys: "eth0", @@ -66,11 +66,15 @@ func initFakeNodePortWatcher(iptV4, iptV6 util.IPTablesHelper) *nodePortWatcher gatewayIPv6: v6localnetGatewayIP, serviceInfo: make(map[k8stypes.NamespacedName]*serviceConfig), ofm: &openflowManager{ - flowCache: map[string][]string{}, - defaultBridge: defaultBridge, + flowCache: map[string][]string{}, + defaultBridge: &bridgeConfiguration{ + macAddress: gwMACParsed, + netConfig: map[string]*bridgeUDNConfiguration{ + types.DefaultNetworkName: defaultNetConfig, + }, + }, }, 
networkManager: networkmanager.Default().Interface(), - gwBridge: bridgeconfig.TestBridgeConfig(""), } return &fNPW } diff --git a/go-controller/pkg/node/gateway_nftables.go b/go-controller/pkg/node/gateway_nftables.go index 842bb417d1..6e341466ab 100644 --- a/go-controller/pkg/node/gateway_nftables.go +++ b/go-controller/pkg/node/gateway_nftables.go @@ -12,7 +12,6 @@ import ( utilnet "k8s.io/utils/net" "sigs.k8s.io/knftables" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -69,10 +68,10 @@ func getNoSNATLoadBalancerIPRules(svcPort corev1.ServicePort, localEndpoints []s // getUDNNodePortMarkNFTRule returns a verdict map element (nftablesUDNMarkNodePortsMap) // with a key composed of the svcPort protocol and port. // The value is a jump to the UDN chain mark if netInfo is provided, or nil that is useful for map entry removal. -func getUDNNodePortMarkNFTRule(svcPort corev1.ServicePort, netInfo *bridgeconfig.BridgeUDNConfiguration) *knftables.Element { +func getUDNNodePortMarkNFTRule(svcPort corev1.ServicePort, netInfo *bridgeUDNConfiguration) *knftables.Element { var val []string if netInfo != nil { - val = []string{fmt.Sprintf("jump %s", GetUDNMarkChain(netInfo.PktMark))} + val = []string{fmt.Sprintf("jump %s", GetUDNMarkChain(netInfo.pktMark))} } return &knftables.Element{ Map: nftablesUDNMarkNodePortsMap, @@ -85,12 +84,12 @@ func getUDNNodePortMarkNFTRule(svcPort corev1.ServicePort, netInfo *bridgeconfig // getUDNExternalIPsMarkNFTRules returns a verdict map elements (nftablesUDNMarkExternalIPsV4Map or nftablesUDNMarkExternalIPsV6Map) // with a key composed of the external IP, svcPort protocol and port. // The value is a jump to the UDN chain mark if netInfo is provided, or nil that is useful for map entry removal. 
-func getUDNExternalIPsMarkNFTRules(svcPort corev1.ServicePort, externalIPs []string, netInfo *bridgeconfig.BridgeUDNConfiguration) []*knftables.Element { +func getUDNExternalIPsMarkNFTRules(svcPort corev1.ServicePort, externalIPs []string, netInfo *bridgeUDNConfiguration) []*knftables.Element { var nftRules []*knftables.Element var val []string if netInfo != nil { - val = []string{fmt.Sprintf("jump %s", GetUDNMarkChain(netInfo.PktMark))} + val = []string{fmt.Sprintf("jump %s", GetUDNMarkChain(netInfo.pktMark))} } for _, externalIP := range externalIPs { mapName := nftablesUDNMarkExternalIPsV4Map @@ -176,7 +175,7 @@ func getGatewayNFTRules(service *corev1.Service, localEndpoints []string, svcHas // getUDNNFTRules generates nftables rules for a UDN service. // If netConfig is nil, the resulting map elements will have empty values, // suitable only for entry removal. -func getUDNNFTRules(service *corev1.Service, netConfig *bridgeconfig.BridgeUDNConfiguration) []*knftables.Element { +func getUDNNFTRules(service *corev1.Service, netConfig *bridgeUDNConfiguration) []*knftables.Element { rules := make([]*knftables.Element, 0) for _, svcPort := range service.Spec.Ports { if util.ServiceTypeHasNodePort(service) { diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index bd83448ba4..535ca7db2b 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -26,25 +26,36 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/egressip" nodeipt "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/iptables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/linkmanager" 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/managementport" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" - nodetypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types" - nodeutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" utilerrors "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/errors" ) const ( + // defaultOpenFlowCookie identifies default open flow rules added to the host OVS bridge. + // The hex number 0xdeff105, aka defflos, is meant to sound like default flows. + defaultOpenFlowCookie = "0xdeff105" // etpSvcOpenFlowCookie identifies constant open flow rules added to the host OVS // bridge to move packets between host and external for etp=local traffic. // The hex number 0xe745ecf105, represents etp(e74)-service(5ec)-flows which makes it easier for debugging. etpSvcOpenFlowCookie = "0xe745ecf105" + // pmtudOpenFlowCookie identifies the flows used to drop ICMP type (3) destination unreachable, + // fragmentation-needed (4) + pmtudOpenFlowCookie = "0x0304" + // ovsLocalPort is the name of the OVS bridge local port + ovsLocalPort = "LOCAL" + // ctMarkOVN is the conntrack mark value for OVN traffic + ctMarkOVN = "0x1" + // ctMarkHost is the conntrack mark value for host traffic + ctMarkHost = "0x2" + // ovnKubeNodeSNATMark is used to mark packets that need to be SNAT-ed to nodeIP for + // traffic originating from egressIP and egressService controlled pods towards other nodes in the cluster. + ovnKubeNodeSNATMark = "0x3f0" // nftablesUDNServicePreroutingChain is a base chain registered into the prerouting hook, // and it contains one rule that jumps to nftablesUDNServiceMarkChain. @@ -81,6 +92,10 @@ const ( // to the appropriate network. 
nftablesUDNMarkExternalIPsV4Map = "udn-mark-external-ips-v4" nftablesUDNMarkExternalIPsV6Map = "udn-mark-external-ips-v6" + + // outputPortDrop is used to signify that there is no output port for an openflow action and the + // rendered action should result in a drop + outputPortDrop = "output-port-drop" ) // configureUDNServicesNFTables configures the nftables chains, rules, and verdict maps @@ -190,7 +205,7 @@ type nodePortWatcher struct { gatewayIPv6 string gatewayIPLock sync.Mutex ofportPhys string - gwBridge *bridgeconfig.BridgeConfiguration + gwBridge string // Map of service name to programmed iptables/OF rules serviceInfo map[ktypes.NamespacedName]*serviceConfig serviceInfoLock sync.Mutex @@ -216,9 +231,11 @@ type cidrAndFlags struct { validLifetime int } -func (npw *nodePortWatcher) updateGatewayIPs() { +func (npw *nodePortWatcher) updateGatewayIPs(addressManager *addressManager) { // Get Physical IPs of Node, Can be IPV4 IPV6 or both - gatewayIPv4, gatewayIPv6 := getGatewayFamilyAddrs(npw.gwBridge.GetIPs()) + addressManager.gatewayBridge.Lock() + gatewayIPv4, gatewayIPv6 := getGatewayFamilyAddrs(addressManager.gatewayBridge.ips) + addressManager.gatewayBridge.Unlock() npw.gatewayIPLock.Lock() defer npw.gatewayIPLock.Unlock() @@ -247,7 +264,7 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI return nil } - var netConfig *bridgeconfig.BridgeUDNConfiguration + var netConfig *bridgeUDNConfiguration var actions string if add { @@ -255,7 +272,7 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI if netConfig == nil { return fmt.Errorf("failed to get active network config for network %s", netInfo.GetNetworkName()) } - actions = fmt.Sprintf("output:%s", netConfig.OfPortPatch) + actions = fmt.Sprintf("output:%s", netConfig.ofPortPatch) } // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure @@ -335,7 +352,7 @@ func (npw *nodePortWatcher) 
updateServiceFlowCache(service *corev1.Service, netI // table=0, matches on return traffic from service nodePort and sends it out to primary node interface (br-ex) fmt.Sprintf("cookie=%s, priority=110, in_port=%s, dl_src=%s, %s, tp_src=%d, "+ "actions=output:%s", - cookie, netConfig.OfPortPatch, npw.ofm.getDefaultBridgeMAC(), flowProtocol, svcPort.NodePort, npw.ofportPhys)}) + cookie, netConfig.ofPortPatch, npw.ofm.getDefaultBridgeMAC(), flowProtocol, svcPort.NodePort, npw.ofportPhys)}) } } } @@ -368,11 +385,11 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI var ofPorts []string // don't get the ports unless we need to as it is a costly operation if (len(extParsedIPs) > 0 || len(ingParsedIPs) > 0) && add { - ofPorts, err = util.GetOpenFlowPorts(npw.gwBridge.GetGatewayIface(), false) + ofPorts, err = util.GetOpenFlowPorts(npw.gwBridge, false) if err != nil { // in the odd case that getting all ports from the bridge should not work, // simply output to LOCAL (this should work well in the vast majority of cases, anyway) - klog.Warningf("Unable to get port list from bridge. Using OvsLocalPort as output only: error: %v", + klog.Warningf("Unable to get port list from bridge. 
Using ovsLocalPort as output only: error: %v", err) } } @@ -405,14 +422,14 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI ipPrefix = "ipv6" } // table 2, user-defined network host -> OVN towards default cluster network services - defaultNetConfig := npw.ofm.defaultBridge.GetActiveNetworkBridgeConfigCopy(types.DefaultNetworkName) + defaultNetConfig := npw.ofm.defaultBridge.getActiveNetworkBridgeConfigCopy(types.DefaultNetworkName) // sample flow: cookie=0xdeff105, duration=2319.685s, table=2, n_packets=496, n_bytes=67111, priority=300, // ip,nw_dst=10.96.0.1 actions=mod_dl_dst:02:42:ac:12:00:03,output:"patch-breth0_ov" // This flow is used for UDNs and advertised UDNs to be able to reach kapi and dns services alone on default network flows := []string{fmt.Sprintf("cookie=%s, priority=300, table=2, %s, %s_dst=%s, "+ "actions=set_field:%s->eth_dst,output:%s", - nodetypes.DefaultOpenFlowCookie, ipPrefix, ipPrefix, service.Spec.ClusterIP, - npw.ofm.getDefaultBridgeMAC().String(), defaultNetConfig.OfPortPatch)} + defaultOpenFlowCookie, ipPrefix, ipPrefix, service.Spec.ClusterIP, + npw.ofm.getDefaultBridgeMAC().String(), defaultNetConfig.ofPortPatch)} if util.IsRouteAdvertisementsEnabled() { // if the network is advertised, then for the reply from kapi and dns services to go back // into the UDN's VRF we need flows that statically send this to the local port @@ -425,7 +442,7 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI // sample flow for non-advertised UDNs: cookie=0xdeff105, duration=684.087s, table=0, n_packets=0, n_bytes=0, // idle_age=684, priority=500,ip,in_port=2,nw_src=10.96.0.0/16,nw_dst=169.254.0.0/17 actions=ct(table=3,zone=64001,nat) flows = append(flows, fmt.Sprintf("cookie=%s, priority=490, in_port=%s, ip, ip_src=%s,actions=ct(zone=%d,nat,table=3)", - nodetypes.DefaultOpenFlowCookie, defaultNetConfig.OfPortPatch, service.Spec.ClusterIP, config.Default.HostMasqConntrackZone)) + 
defaultOpenFlowCookie, defaultNetConfig.ofPortPatch, service.Spec.ClusterIP, config.Default.HostMasqConntrackZone)) } npw.ofm.updateFlowCacheEntry(key, flows) } @@ -452,7 +469,7 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI // `actions`: "send to patchport" // `externalIPOrLBIngressIP` is either externalIP.IP or LB.status.ingress.IP // `ipType` is either "External" or "Ingress" -func (npw *nodePortWatcher) createLbAndExternalSvcFlows(service *corev1.Service, netConfig *bridgeconfig.BridgeUDNConfiguration, svcPort *corev1.ServicePort, add bool, +func (npw *nodePortWatcher) createLbAndExternalSvcFlows(service *corev1.Service, netConfig *bridgeUDNConfiguration, svcPort *corev1.ServicePort, add bool, hasLocalHostNetworkEp bool, protocol string, actions string, externalIPOrLBIngressIPs []string, ipType string, ofPorts []string) error { for _, externalIPOrLBIngressIP := range externalIPOrLBIngressIPs { @@ -483,7 +500,7 @@ func (npw *nodePortWatcher) createLbAndExternalSvcFlows(service *corev1.Service, continue } // add the ARP bypass flow regardless of service type or gateway modes since its applicable in all scenarios. - arpFlow := npw.generateARPBypassFlow(ofPorts, netConfig.OfPortPatch, externalIPOrLBIngressIP, cookie) + arpFlow := npw.generateARPBypassFlow(ofPorts, netConfig.ofPortPatch, externalIPOrLBIngressIP, cookie) externalIPFlows = append(externalIPFlows, arpFlow) // This allows external traffic ingress when the svc's ExternalTrafficPolicy is // set to Local, and the backend pod is HostNetworked. We need to add @@ -520,7 +537,7 @@ func (npw *nodePortWatcher) createLbAndExternalSvcFlows(service *corev1.Service, etpSvcOpenFlowCookie, npw.ofportPhys)) } else if config.Gateway.Mode == config.GatewayModeShared { // add the ICMP Fragmentation flow for shared gateway mode. 
- icmpFlow := nodeutil.GenerateICMPFragmentationFlow(externalIPOrLBIngressIP, netConfig.OfPortPatch, npw.ofportPhys, cookie, 110) + icmpFlow := generateICMPFragmentationFlow(externalIPOrLBIngressIP, netConfig.ofPortPatch, npw.ofportPhys, cookie, 110) externalIPFlows = append(externalIPFlows, icmpFlow) // case2 (see function description for details) externalIPFlows = append(externalIPFlows, @@ -531,7 +548,7 @@ func (npw *nodePortWatcher) createLbAndExternalSvcFlows(service *corev1.Service, // table=0, matches on return traffic from service externalIP or LB ingress and sends it out to primary node interface (br-ex) fmt.Sprintf("cookie=%s, priority=110, in_port=%s, dl_src=%s, %s, %s=%s, tp_src=%d, "+ "actions=output:%s", - cookie, netConfig.OfPortPatch, npw.ofm.getDefaultBridgeMAC(), flowProtocol, nwSrc, externalIPOrLBIngressIP, svcPort.Port, npw.ofportPhys)) + cookie, netConfig.ofPortPatch, npw.ofm.getDefaultBridgeMAC(), flowProtocol, nwSrc, externalIPOrLBIngressIP, svcPort.Port, npw.ofportPhys)) } npw.ofm.updateFlowCacheEntry(key, externalIPFlows) } @@ -556,7 +573,7 @@ func (npw *nodePortWatcher) generateARPBypassFlow(ofPorts []string, ofPortPatch, // simply output to LOCAL (this should work well in the vast majority of cases, anyway) arpFlow = fmt.Sprintf("cookie=%s, priority=110, in_port=%s, %s, %s=%s, "+ "actions=output:%s", - cookie, npw.ofportPhys, addrResProto, addrResDst, ipAddr, nodetypes.OvsLocalPort) + cookie, npw.ofportPhys, addrResProto, addrResDst, ipAddr, ovsLocalPort) } else { // cover the case where breth0 has more than 3 ports, e.g. 
if an admin adds a 4th port // and the ExternalIP would be on that port @@ -586,6 +603,31 @@ func (npw *nodePortWatcher) generateARPBypassFlow(ofPorts []string, ofPortPatch, return arpFlow } +func generateICMPFragmentationFlow(ipAddr, outputPort, inPort, cookie string, priority int) string { + // we send any ICMP destination unreachable, fragmentation needed to the OVN pipeline too so that + // path MTU discovery continues to work. + icmpMatch := "icmp" + icmpType := 3 + icmpCode := 4 + nwDst := "nw_dst" + if utilnet.IsIPv6String(ipAddr) { + icmpMatch = "icmp6" + icmpType = 2 + icmpCode = 0 + nwDst = "ipv6_dst" + } + + action := fmt.Sprintf("output:%s", outputPort) + if outputPort == outputPortDrop { + action = "drop" + } + + icmpFragmentationFlow := fmt.Sprintf("cookie=%s, priority=%d, in_port=%s, %s, %s=%s, icmp_type=%d, "+ + "icmp_code=%d, actions=%s", + cookie, priority, inPort, icmpMatch, nwDst, ipAddr, icmpType, icmpCode, action) + return icmpFragmentationFlow +} + // getAndDeleteServiceInfo returns the serviceConfig for a service and if it exists and then deletes the entry func (npw *nodePortWatcher) getAndDeleteServiceInfo(index ktypes.NamespacedName) (out *serviceConfig, exists bool) { npw.serviceInfoLock.Lock() @@ -664,7 +706,7 @@ func addServiceRules(service *corev1.Service, netInfo util.NetInfo, localEndpoin // For dpu or Full mode var err error var errors []error - var activeNetwork *bridgeconfig.BridgeUDNConfiguration + var activeNetwork *bridgeUDNConfiguration if npw != nil { if err = npw.updateServiceFlowCache(service, netInfo, true, svcHasLocalHostNetEndPnt); err != nil { errors = append(errors, err) @@ -1409,6 +1451,942 @@ func (npwipt *nodePortWatcherIptables) SyncServices(services []interface{}) erro return utilerrors.Join(errors...) 
} +func flowsForDefaultBridge(bridge *bridgeConfiguration, extraIPs []net.IP) ([]string, error) { + // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure + // that dl_src is included in match criteria! + + ofPortPhys := bridge.ofPortPhys + bridgeMacAddress := bridge.macAddress.String() + ofPortHost := bridge.ofPortHost + bridgeIPs := bridge.ips + + var dftFlows []string + // 14 bytes of overhead for ethernet header (does not include VLAN) + maxPktLength := getMaxFrameLength() + + strip_vlan := "" + mod_vlan_id := "" + match_vlan := "" + if config.Gateway.VLANID != 0 { + strip_vlan = "strip_vlan," + match_vlan = fmt.Sprintf("dl_vlan=%d,", config.Gateway.VLANID) + mod_vlan_id = fmt.Sprintf("mod_vlan_vid:%d,", config.Gateway.VLANID) + } + + if config.IPv4Mode { + // table0, Geneve packets coming from external. Skip conntrack and go directly to host + // if dest mac is the shared mac send directly to host. + if ofPortPhys != "" { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=205, in_port=%s, dl_dst=%s, udp, udp_dst=%d, "+ + "actions=output:%s", defaultOpenFlowCookie, ofPortPhys, bridgeMacAddress, config.Default.EncapPort, + ofPortHost)) + // perform NORMAL action otherwise. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp, udp_dst=%d, "+ + "actions=NORMAL", defaultOpenFlowCookie, ofPortPhys, config.Default.EncapPort)) + + // table0, Geneve packets coming from LOCAL/Host OFPort. 
Skip conntrack and go directly to external + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp, udp_dst=%d, "+ + "actions=output:%s", defaultOpenFlowCookie, ofPortHost, config.Default.EncapPort, ofPortPhys)) + } + physicalIP, err := util.MatchFirstIPNetFamily(false, bridgeIPs) + if err != nil { + return nil, fmt.Errorf("unable to determine IPv4 physical IP of host: %v", err) + } + for _, netConfig := range bridge.patchedNetConfigs() { + // table 0, SVC Hairpin from OVN destined to local host, DNAT and go to table 4 + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s, ip_src=%s,"+ + "actions=ct(commit,zone=%d,nat(dst=%s),table=4)", + defaultOpenFlowCookie, netConfig.ofPortPatch, config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String(), physicalIP.IP, + config.Default.HostMasqConntrackZone, physicalIP.IP)) + } + + // table 0, hairpin from OVN destined to local host (but an additional node IP), send to table 4 + for _, ip := range extraIPs { + if ip.To4() == nil { + continue + } + // not needed for the physical IP + if ip.Equal(physicalIP.IP) { + continue + } + + // not needed for special masquerade IP + if ip.Equal(config.Gateway.MasqueradeIPs.V4HostMasqueradeIP) { + continue + } + + for _, netConfig := range bridge.patchedNetConfigs() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s, ip_src=%s,"+ + "actions=ct(commit,zone=%d,table=4)", + defaultOpenFlowCookie, netConfig.ofPortPatch, ip.String(), physicalIP.IP, + config.Default.HostMasqConntrackZone)) + } + } + + // table 0, Reply SVC traffic from Host -> OVN, unSNAT and goto table 5 + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s,"+ + "actions=ct(zone=%d,nat,table=5)", + defaultOpenFlowCookie, ofPortHost, config.Gateway.MasqueradeIPs.V4OVNMasqueradeIP.String(), config.Default.OVNMasqConntrackZone)) + } + if config.IPv6Mode { + if ofPortPhys != 
"" { + // table0, Geneve packets coming from external. Skip conntrack and go directly to host + // if dest mac is the shared mac send directly to host. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=205, in_port=%s, dl_dst=%s, udp6, udp_dst=%d, "+ + "actions=output:%s", defaultOpenFlowCookie, ofPortPhys, bridgeMacAddress, config.Default.EncapPort, + ofPortHost)) + // perform NORMAL action otherwise. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp6, udp_dst=%d, "+ + "actions=NORMAL", defaultOpenFlowCookie, ofPortPhys, config.Default.EncapPort)) + + // table0, Geneve packets coming from LOCAL. Skip conntrack and send to external + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp6, udp_dst=%d, "+ + "actions=output:%s", defaultOpenFlowCookie, ovsLocalPort, config.Default.EncapPort, ofPortPhys)) + } + + physicalIP, err := util.MatchFirstIPNetFamily(true, bridgeIPs) + if err != nil { + return nil, fmt.Errorf("unable to determine IPv6 physical IP of host: %v", err) + } + // table 0, SVC Hairpin from OVN destined to local host, DNAT to host, send to table 4 + for _, netConfig := range bridge.patchedNetConfigs() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s, ipv6_src=%s,"+ + "actions=ct(commit,zone=%d,nat(dst=%s),table=4)", + defaultOpenFlowCookie, netConfig.ofPortPatch, config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String(), physicalIP.IP, + config.Default.HostMasqConntrackZone, physicalIP.IP)) + } + + // table 0, hairpin from OVN destined to local host (but an additional node IP), send to table 4 + for _, ip := range extraIPs { + if ip.To4() != nil { + continue + } + // not needed for the physical IP + if ip.Equal(physicalIP.IP) { + continue + } + + // not needed for special masquerade IP + if ip.Equal(config.Gateway.MasqueradeIPs.V6HostMasqueradeIP) { + continue + } + + for _, netConfig := range bridge.patchedNetConfigs() { 
+ dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s, ipv6_src=%s,"+ + "actions=ct(commit,zone=%d,table=4)", + defaultOpenFlowCookie, netConfig.ofPortPatch, ip.String(), physicalIP.IP, + config.Default.HostMasqConntrackZone)) + } + } + + // table 0, Reply SVC traffic from Host -> OVN, unSNAT and goto table 5 + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s,"+ + "actions=ct(zone=%d,nat,table=5)", + defaultOpenFlowCookie, ofPortHost, config.Gateway.MasqueradeIPs.V6OVNMasqueradeIP.String(), config.Default.OVNMasqConntrackZone)) + } + + var protoPrefix, masqIP, masqSubnet string + + // table 0, packets coming from Host -> Service + for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { + if utilnet.IsIPv4CIDR(svcCIDR) { + protoPrefix = "ip" + masqIP = config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String() + masqSubnet = config.Gateway.V4MasqueradeSubnet + } else { + protoPrefix = "ipv6" + masqIP = config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String() + masqSubnet = config.Gateway.V6MasqueradeSubnet + } + + // table 0, Host (default network) -> OVN towards SVC, SNAT to special IP. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_dst=%s, "+ + "actions=ct(commit,zone=%d,nat(src=%s),table=2)", + defaultOpenFlowCookie, ofPortHost, protoPrefix, protoPrefix, + svcCIDR, config.Default.HostMasqConntrackZone, masqIP)) + + if util.IsNetworkSegmentationSupportEnabled() { + // table 0, Host (UDNs) -> OVN towards SVC, SNAT to special IP. + // For packets originating from UDN, commit without NATing, those + // have already been SNATed to the masq IP of the UDN. 
+ dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=550, in_port=%s, %s, %s_src=%s, %s_dst=%s, "+ + "actions=ct(commit,zone=%d,table=2)", + defaultOpenFlowCookie, ofPortHost, protoPrefix, protoPrefix, + masqSubnet, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone)) + if util.IsRouteAdvertisementsEnabled() { + // If the UDN is advertised then instead of matching on the masqSubnet + // we match on the UDNPodSubnet itself and we also don't SNAT to 169.254.0.2 + // sample flow: cookie=0xdeff105, duration=1472.742s, table=0, n_packets=9, n_bytes=666, priority=550 + // ip,in_port=LOCAL,nw_src=103.103.0.0/16,nw_dst=10.96.0.0/16 actions=ct(commit,table=2,zone=64001) + for _, netConfig := range bridge.patchedNetConfigs() { + if netConfig.isDefaultNetwork() { + continue + } + if netConfig.advertised.Load() { + var udnAdvertisedSubnets []*net.IPNet + for _, clusterEntry := range netConfig.subnets { + udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) + } + // Filter subnets based on the clusterIP service family + // NOTE: We don't support more than 1 subnet CIDR of same family type; we only pick the first one + matchingIPFamilySubnet, err := util.MatchFirstIPNetFamily(utilnet.IsIPv6CIDR(svcCIDR), udnAdvertisedSubnets) + if err != nil { + klog.Infof("Unable to determine UDN subnet for the provided family isIPV6: %t, %v", utilnet.IsIPv6CIDR(svcCIDR), err) + continue + } + + // Use the filtered subnet for the flow compute instead of the masqueradeIP + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=550, in_port=%s, %s, %s_src=%s, %s_dst=%s, "+ + "actions=ct(commit,zone=%d,table=2)", + defaultOpenFlowCookie, ofPortHost, protoPrefix, protoPrefix, + matchingIPFamilySubnet.String(), protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone)) + } + } + } + } + + masqDst := masqIP + if util.IsNetworkSegmentationSupportEnabled() { + // In UDN match on the whole masquerade subnet to handle replies from UDN enabled services + 
masqDst = masqSubnet + } + for _, netConfig := range bridge.patchedNetConfigs() { + // table 0, Reply hairpin traffic to host, coming from OVN, unSNAT + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_src=%s, %s_dst=%s,"+ + "actions=ct(zone=%d,nat,table=3)", + defaultOpenFlowCookie, netConfig.ofPortPatch, protoPrefix, protoPrefix, svcCIDR, + protoPrefix, masqDst, config.Default.HostMasqConntrackZone)) + // table 0, Reply traffic coming from OVN to outside, drop it if the DNAT wasn't done either + // at the GR load balancer or switch load balancer. It means the correct port wasn't provided. + // nodeCIDR->serviceCIDR traffic flow is internal and it shouldn't be carried to outside the cluster + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=115, in_port=%s, %s, %s_dst=%s,"+ + "actions=drop", defaultOpenFlowCookie, netConfig.ofPortPatch, protoPrefix, protoPrefix, svcCIDR)) + } + } + + // table 0, add IP fragment reassembly flows, only needed in SGW mode with + // physical interface attached to bridge + if config.Gateway.Mode == config.GatewayModeShared && ofPortPhys != "" { + reassemblyFlows := generateIPFragmentReassemblyFlow(ofPortPhys) + dftFlows = append(dftFlows, reassemblyFlows...) 
+ } + if ofPortPhys != "" { + for _, netConfig := range bridge.patchedNetConfigs() { + var actions string + if config.Gateway.Mode != config.GatewayModeLocal || config.Gateway.DisablePacketMTUCheck { + actions = fmt.Sprintf("output:%s", netConfig.ofPortPatch) + } else { + // packets larger than known acceptable MTU need to go to kernel for + // potential fragmentation + // introduced specifically for replies to egress traffic not routed + // through the host + actions = fmt.Sprintf("check_pkt_larger(%d)->reg0[0],resubmit(,11)", maxPktLength) + } + + if config.IPv4Mode { + // table 1, established and related connections in zone 64000 with ct_mark ctMarkOVN go to OVN + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, ip, ct_state=+trk+est, ct_mark=%s, "+ + "actions=%s", defaultOpenFlowCookie, netConfig.masqCTMark, actions)) + + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, ip, ct_state=+trk+rel, ct_mark=%s, "+ + "actions=%s", defaultOpenFlowCookie, netConfig.masqCTMark, actions)) + + } + + if config.IPv6Mode { + // table 1, established and related connections in zone 64000 with ct_mark ctMarkOVN go to OVN + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, ipv6, ct_state=+trk+est, ct_mark=%s, "+ + "actions=%s", defaultOpenFlowCookie, netConfig.masqCTMark, actions)) + + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, ipv6, ct_state=+trk+rel, ct_mark=%s, "+ + "actions=%s", defaultOpenFlowCookie, netConfig.masqCTMark, actions)) + } + } + if config.IPv4Mode { + // table 1, established and related connections in zone 64000 with ct_mark ctMarkHost go to host + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip, ct_state=+trk+est, ct_mark=%s, "+ + "actions=%soutput:%s", + defaultOpenFlowCookie, match_vlan, ctMarkHost, strip_vlan, ofPortHost)) + + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip, 
ct_state=+trk+rel, ct_mark=%s, "+ + "actions=%soutput:%s", + defaultOpenFlowCookie, match_vlan, ctMarkHost, strip_vlan, ofPortHost)) + + } + if config.IPv6Mode { + // table 1, established and related connections in zone 64000 with ct_mark ctMarkHost go to host + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip6, ct_state=+trk+est, ct_mark=%s, "+ + "actions=%soutput:%s", + defaultOpenFlowCookie, match_vlan, ctMarkHost, strip_vlan, ofPortHost)) + + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip6, ct_state=+trk+rel, ct_mark=%s, "+ + "actions=%soutput:%s", + defaultOpenFlowCookie, match_vlan, ctMarkHost, strip_vlan, ofPortHost)) + + } + + // table 1, we check to see if this dest mac is the shared mac, if so send to host + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=10, table=1, %s dl_dst=%s, actions=%soutput:%s", + defaultOpenFlowCookie, match_vlan, bridgeMacAddress, strip_vlan, ofPortHost)) + } + + defaultNetConfig := bridge.netConfig[types.DefaultNetworkName] + + // table 2, dispatch from Host -> OVN + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=2, "+ + "actions=set_field:%s->eth_dst,%soutput:%s", defaultOpenFlowCookie, + bridgeMacAddress, mod_vlan_id, defaultNetConfig.ofPortPatch)) + + // table 2, priority 200, dispatch from UDN -> Host -> OVN. These packets have + // already been SNATed to the UDN's masq IP or have been marked with the UDN's packet mark. 
+ if config.IPv4Mode { + for _, netConfig := range bridge.patchedNetConfigs() { + if netConfig.isDefaultNetwork() { + continue + } + srcIPOrSubnet := netConfig.v4MasqIPs.ManagementPort.IP.String() + if util.IsRouteAdvertisementsEnabled() && netConfig.advertised.Load() { + var udnAdvertisedSubnets []*net.IPNet + for _, clusterEntry := range netConfig.subnets { + udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) + } + // Filter subnets based on the clusterIP service family + // NOTE: We don't support more than 1 subnet CIDR of same family type; we only pick the first one + matchingIPFamilySubnet, err := util.MatchFirstIPNetFamily(false, udnAdvertisedSubnets) + if err != nil { + klog.Infof("Unable to determine IPV4 UDN subnet for the provided family isIPV6: %v", err) + continue + } + + // Use the filtered subnets for the flow compute instead of the masqueradeIP + srcIPOrSubnet = matchingIPFamilySubnet.String() + } + + // Drop traffic coming from the masquerade IP or the UDN subnet(for advertised UDNs) to ensure that + // isolation between networks is enforced. This handles the case where a pod on the UDN subnet is sending traffic to + // a service in another UDN. 
+ dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=200, table=2, ip, ip_src=%s, "+ + "actions=drop", + defaultOpenFlowCookie, srcIPOrSubnet)) + + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=250, table=2, ip, pkt_mark=%s, "+ + "actions=set_field:%s->eth_dst,output:%s", + defaultOpenFlowCookie, netConfig.pktMark, + bridgeMacAddress, netConfig.ofPortPatch)) + } + } + + if config.IPv6Mode { + for _, netConfig := range bridge.patchedNetConfigs() { + if netConfig.isDefaultNetwork() { + continue + } + srcIPOrSubnet := netConfig.v6MasqIPs.ManagementPort.IP.String() + if util.IsRouteAdvertisementsEnabled() && netConfig.advertised.Load() { + var udnAdvertisedSubnets []*net.IPNet + for _, clusterEntry := range netConfig.subnets { + udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) + } + // Filter subnets based on the clusterIP service family + // NOTE: We don't support more than 1 subnet CIDR of same family type; we only pick the first one + matchingIPFamilySubnet, err := util.MatchFirstIPNetFamily(true, udnAdvertisedSubnets) + if err != nil { + klog.Infof("Unable to determine IPV6 UDN subnet for the provided family isIPV6: %v", err) + continue + } + + // Use the filtered subnets for the flow compute instead of the masqueradeIP + srcIPOrSubnet = matchingIPFamilySubnet.String() + } + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=200, table=2, ip6, ipv6_src=%s, "+ + "actions=drop", + defaultOpenFlowCookie, srcIPOrSubnet)) + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=250, table=2, ip6, pkt_mark=%s, "+ + "actions=set_field:%s->eth_dst,output:%s", + defaultOpenFlowCookie, netConfig.pktMark, + bridgeMacAddress, netConfig.ofPortPatch)) + } + } + + // table 3, dispatch from OVN -> Host + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, table=3, %s "+ + "actions=move:NXM_OF_ETH_DST[]->NXM_OF_ETH_SRC[],set_field:%s->eth_dst,%soutput:%s", + defaultOpenFlowCookie, match_vlan, bridgeMacAddress, 
strip_vlan, ofPortHost)) + + // table 4, hairpinned pkts that need to go from OVN -> Host + // We need to SNAT and masquerade OVN GR IP, send to table 3 for dispatch to Host + if config.IPv4Mode { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, table=4,ip,"+ + "actions=ct(commit,zone=%d,nat(src=%s),table=3)", + defaultOpenFlowCookie, config.Default.OVNMasqConntrackZone, config.Gateway.MasqueradeIPs.V4OVNMasqueradeIP.String())) + } + if config.IPv6Mode { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, table=4,ipv6, "+ + "actions=ct(commit,zone=%d,nat(src=%s),table=3)", + defaultOpenFlowCookie, config.Default.OVNMasqConntrackZone, config.Gateway.MasqueradeIPs.V6OVNMasqueradeIP.String())) + } + // table 5, Host Reply traffic to hairpinned svc, need to unDNAT, send to table 2 + if config.IPv4Mode { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, table=5, ip, "+ + "actions=ct(commit,zone=%d,nat,table=2)", + defaultOpenFlowCookie, config.Default.HostMasqConntrackZone)) + } + if config.IPv6Mode { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, table=5, ipv6, "+ + "actions=ct(commit,zone=%d,nat,table=2)", + defaultOpenFlowCookie, config.Default.HostMasqConntrackZone)) + } + return dftFlows, nil +} + +func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]string, error) { + // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure + // that dl_src is included in match criteria! 
+ ofPortPhys := bridge.ofPortPhys + bridgeMacAddress := bridge.macAddress.String() + ofPortHost := bridge.ofPortHost + bridgeIPs := bridge.ips + + var dftFlows []string + + strip_vlan := "" + match_vlan := "" + mod_vlan_id := "" + if config.Gateway.VLANID != 0 { + strip_vlan = "strip_vlan," + match_vlan = fmt.Sprintf("dl_vlan=%d,", config.Gateway.VLANID) + mod_vlan_id = fmt.Sprintf("mod_vlan_vid:%d,", config.Gateway.VLANID) + } + + if ofPortPhys != "" { + // table 0, we check to see if this dest mac is the shared mac, if so flood to all ports + actions := "" + for _, netConfig := range bridge.patchedNetConfigs() { + actions += "output:" + netConfig.ofPortPatch + "," + } + actions += strip_vlan + "NORMAL" + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=10, table=0, %s dl_dst=%s, actions=%s", + defaultOpenFlowCookie, match_vlan, bridgeMacAddress, actions)) + } + + // table 0, check packets coming from OVN have the correct mac address. Low priority flows that are a catch all + // for non-IP packets that would normally be forwarded with NORMAL action (table 0, priority 0 flow). 
+ for _, netConfig := range bridge.patchedNetConfigs() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=10, table=0, in_port=%s, dl_src=%s, actions=output:NORMAL", + defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress)) + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=9, table=0, in_port=%s, actions=drop", + defaultOpenFlowCookie, netConfig.ofPortPatch)) + } + + if config.IPv4Mode { + physicalIP, err := util.MatchFirstIPNetFamily(false, bridgeIPs) + if err != nil { + return nil, fmt.Errorf("unable to determine IPv4 physical IP of host: %v", err) + } + if ofPortPhys != "" { + for _, netConfig := range bridge.patchedNetConfigs() { + // table0, packets coming from egressIP pods that have mark 1008 on them + // will be SNAT-ed a final time into nodeIP to maintain consistency in traffic even if the GR + // SNATs these into egressIP prior to reaching external bridge. + // egressService pods will also undergo this SNAT to nodeIP since these features are tied + // together at the OVN policy level on the distributed router. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ip, pkt_mark=%s "+ + "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)),output:%s", + defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, ovnKubeNodeSNATMark, + config.Default.ConntrackZone, physicalIP.IP, netConfig.masqCTMark, ofPortPhys)) + + // table 0, packets coming from egressIP pods only from user defined networks. If an egressIP is assigned to + // this node, then all networks get a flow even if no pods on that network were selected for by this egressIP. 
+ if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && + config.Gateway.Mode != config.GatewayModeDisabled && bridge.eipMarkIPs != nil { + if netConfig.masqCTMark != ctMarkOVN { + for mark, eip := range bridge.eipMarkIPs.GetIPv4() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ip, pkt_mark=%d, "+ + "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", + defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, mark, + config.Default.ConntrackZone, eip, netConfig.masqCTMark, ofPortPhys)) + } + } + } + + // table 0, packets coming from pods headed externally. Commit connections with ct_mark ctMarkOVN + // so that reverse direction goes back to the pods. + if netConfig.isDefaultNetwork() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ip, "+ + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", + defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, config.Default.ConntrackZone, + netConfig.masqCTMark, ofPortPhys)) + + // Allow (a) OVN->host traffic on the same node + // (b) host->host traffic on the same node + if config.Gateway.Mode == config.GatewayModeShared || config.Gateway.Mode == config.GatewayModeLocal { + dftFlows = append(dftFlows, hostNetworkNormalActionFlows(netConfig, bridgeMacAddress, hostSubnets, false)...) 
+ } + } else { + // for UDN we additionally SNAT the packet from masquerade IP -> node IP + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ip, ip_src=%s, "+ + "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", + defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, netConfig.v4MasqIPs.GatewayRouter.IP, config.Default.ConntrackZone, + physicalIP.IP, netConfig.masqCTMark, ofPortPhys)) + } + } + + // table 0, packets coming from host Commit connections with ct_mark ctMarkHost + // so that reverse direction goes back to the host. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, ip, "+ + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), %soutput:%s", + defaultOpenFlowCookie, ofPortHost, config.Default.ConntrackZone, ctMarkHost, mod_vlan_id, ofPortPhys)) + } + if config.Gateway.Mode == config.GatewayModeLocal { + for _, netConfig := range bridge.patchedNetConfigs() { + // table 0, any packet coming from OVN send to host in LGW mode, host will take care of sending it outside if needed. + // exceptions are traffic for egressIP and egressGW features and ICMP related traffic which will hit the priority 100 flow instead of this. 
+ dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=175, in_port=%s, tcp, nw_src=%s, "+ + "actions=ct(table=4,zone=%d)", + defaultOpenFlowCookie, netConfig.ofPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=175, in_port=%s, udp, nw_src=%s, "+ + "actions=ct(table=4,zone=%d)", + defaultOpenFlowCookie, netConfig.ofPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=175, in_port=%s, sctp, nw_src=%s, "+ + "actions=ct(table=4,zone=%d)", + defaultOpenFlowCookie, netConfig.ofPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + // We send BFD traffic coming from OVN to outside directly using a higher priority flow + if ofPortPhys != "" { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=650, table=0, in_port=%s, dl_src=%s, udp, tp_dst=3784, actions=output:%s", + defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, ofPortPhys)) + } + } + } + + if ofPortPhys != "" { + // table 0, packets coming from external or other localnet ports. Send it through conntrack and + // resubmit to table 1 to know the state and mark of the connection. + // Note, there are higher priority rules that take care of traffic coming from LOCAL and OVN ports. 
+ dftFlows = append(dftFlows,
+ fmt.Sprintf("cookie=%s, priority=50, ip, actions=ct(zone=%d, nat, table=1)",
+ defaultOpenFlowCookie, config.Default.ConntrackZone))
+ }
+ }
+
+ if config.IPv6Mode {
+ physicalIP, err := util.MatchFirstIPNetFamily(true, bridgeIPs)
+ if err != nil {
+ return nil, fmt.Errorf("unable to determine IPv6 physical IP of host: %v", err)
+ }
+ if ofPortPhys != "" {
+ for _, netConfig := range bridge.patchedNetConfigs() {
+ // table0, packets coming from egressIP pods that have mark 1008 on them
+ // will be SNAT-ed a final time into nodeIP to maintain consistency in traffic even if the GR
+ // SNATs these into egressIP prior to reaching external bridge.
+ // egressService pods will also undergo this SNAT to nodeIP since these features are tied
+ // together at the OVN policy level on the distributed router.
+ dftFlows = append(dftFlows,
+ fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ipv6, pkt_mark=%s "+
+ "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)),output:%s",
+ defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, ovnKubeNodeSNATMark,
+ config.Default.ConntrackZone, physicalIP.IP, netConfig.masqCTMark, ofPortPhys))
+
+ // table 0, packets coming from egressIP pods only from user defined networks. If an egressIP is assigned to
+ // this node, then all networks get a flow even if no pods on that network were selected for by this egressIP.
+ if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && + config.Gateway.Mode != config.GatewayModeDisabled && bridge.eipMarkIPs != nil { + if netConfig.masqCTMark != ctMarkOVN { + for mark, eip := range bridge.eipMarkIPs.GetIPv6() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ipv6, pkt_mark=%d, "+ + "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", + defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, mark, + config.Default.ConntrackZone, eip, netConfig.masqCTMark, ofPortPhys)) + } + } + } + + // table 0, packets coming from pods headed externally. Commit connections with ct_mark ctMarkOVN + // so that reverse direction goes back to the pods. + if netConfig.isDefaultNetwork() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ipv6, "+ + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", + defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, config.Default.ConntrackZone, netConfig.masqCTMark, ofPortPhys)) + + // Allow (a) OVN->host traffic on the same node + // (b) host->host traffic on the same node + if config.Gateway.Mode == config.GatewayModeShared || config.Gateway.Mode == config.GatewayModeLocal { + dftFlows = append(dftFlows, hostNetworkNormalActionFlows(netConfig, bridgeMacAddress, hostSubnets, true)...) + } + } else { + // for UDN we additionally SNAT the packet from masquerade IP -> node IP + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ipv6, ipv6_src=%s, "+ + "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", + defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, netConfig.v6MasqIPs.GatewayRouter.IP, config.Default.ConntrackZone, + physicalIP.IP, netConfig.masqCTMark, ofPortPhys)) + } + } + + // table 0, packets coming from host. 
Commit connections with ct_mark ctMarkHost + // so that reverse direction goes back to the host. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, ipv6, "+ + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), %soutput:%s", + defaultOpenFlowCookie, ofPortHost, config.Default.ConntrackZone, ctMarkHost, mod_vlan_id, ofPortPhys)) + + } + if config.Gateway.Mode == config.GatewayModeLocal { + for _, netConfig := range bridge.patchedNetConfigs() { + // table 0, any packet coming from OVN send to host in LGW mode, host will take care of sending it outside if needed. + // exceptions are traffic for egressIP and egressGW features and ICMP related traffic which will hit the priority 100 flow instead of this. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=175, in_port=%s, tcp6, ipv6_src=%s, "+ + "actions=ct(table=4,zone=%d)", + defaultOpenFlowCookie, netConfig.ofPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=175, in_port=%s, udp6, ipv6_src=%s, "+ + "actions=ct(table=4,zone=%d)", + defaultOpenFlowCookie, netConfig.ofPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=175, in_port=%s, sctp6, ipv6_src=%s, "+ + "actions=ct(table=4,zone=%d)", + defaultOpenFlowCookie, netConfig.ofPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + if ofPortPhys != "" { + // We send BFD traffic coming from OVN to outside directly using a higher priority flow + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=650, table=0, in_port=%s, dl_src=%s, udp6, tp_dst=3784, actions=output:%s", + defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, ofPortPhys)) + } + } + } + if ofPortPhys != "" { + // table 0, packets coming from external. Send it through conntrack and + // resubmit to table 1 to know the state and mark of the connection. 
+ dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=50, in_port=%s, ipv6, "+ + "actions=ct(zone=%d, nat, table=1)", defaultOpenFlowCookie, ofPortPhys, config.Default.ConntrackZone)) + } + } + // Egress IP is often configured on a node different from the one hosting the affected pod. + // Due to the fact that ovn-controllers on different nodes apply the changes independently, + // there is a chance that the pod traffic will reach the egress node before it configures the SNAT flows. + // Drop pod traffic that is not SNATed, excluding local pods(required for ICNIv2) + defaultNetConfig := bridge.netConfig[types.DefaultNetworkName] + if config.OVNKubernetesFeature.EnableEgressIP { + for _, clusterEntry := range config.Default.ClusterSubnets { + cidr := clusterEntry.CIDR + ipv := getIPv(cidr) + // table 0, drop packets coming from pods headed externally that were not SNATed. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=104, in_port=%s, %s, %s_src=%s, actions=drop", + defaultOpenFlowCookie, defaultNetConfig.ofPortPatch, ipv, ipv, cidr)) + } + for _, subnet := range defaultNetConfig.nodeSubnets { + ipv := getIPv(subnet) + if ofPortPhys != "" { + // table 0, commit connections from local pods. + // ICNIv2 requires that local pod traffic can leave the node without SNAT. 
+ dftFlows = append(dftFlows,
+ fmt.Sprintf("cookie=%s, priority=109, in_port=%s, dl_src=%s, %s, %s_src=%s, "+
+ "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s",
+ defaultOpenFlowCookie, defaultNetConfig.ofPortPatch, bridgeMacAddress, ipv, ipv, subnet,
+ config.Default.ConntrackZone, ctMarkOVN, ofPortPhys))
+ }
+ }
+ }
+
+ if ofPortPhys != "" {
+ for _, netConfig := range bridge.patchedNetConfigs() {
+ isNetworkAdvertised := netConfig.advertised.Load()
+ // disableSNATMultipleGWs only applies to default network
+ disableSNATMultipleGWs := netConfig.isDefaultNetwork() && config.Gateway.DisableSNATMultipleGWs
+ if !disableSNATMultipleGWs && !isNetworkAdvertised {
+ continue
+ }
+ output := netConfig.ofPortPatch
+ if isNetworkAdvertised && config.Gateway.Mode == config.GatewayModeLocal {
+ // except if advertised through BGP, go to kernel
+ // TODO: MEG enabled pods should still go through the patch port
+ // but holding this until
+ // https://issues.redhat.com/browse/FDP-646 is fixed, for now we
+ // are assuming MEG & BGP are not used together
+ output = ovsLocalPort
+ }
+ for _, clusterEntry := range netConfig.subnets {
+ cidr := clusterEntry.CIDR
+ ipv := getIPv(cidr)
+ dftFlows = append(dftFlows,
+ fmt.Sprintf("cookie=%s, priority=15, table=1, %s, %s_dst=%s, "+
+ "actions=output:%s",
+ defaultOpenFlowCookie, ipv, ipv, cidr, output))
+ }
+ if output == netConfig.ofPortPatch {
+ // except node management traffic
+ for _, subnet := range netConfig.nodeSubnets {
+ mgmtIP := util.GetNodeManagementIfAddr(subnet)
+ ipv := getIPv(mgmtIP)
+ dftFlows = append(dftFlows,
+ fmt.Sprintf("cookie=%s, priority=16, table=1, %s, %s_dst=%s, "+
+ "actions=output:%s",
+ defaultOpenFlowCookie, ipv, ipv, mgmtIP.IP, ovsLocalPort),
+ )
+ }
+ }
+ }
+
+ // table 1, we check to see if this dest mac is the shared mac, if so send to host
+ dftFlows = append(dftFlows,
+ fmt.Sprintf("cookie=%s, priority=10, table=1, %s dl_dst=%s, actions=%soutput:%s",
+ 
defaultOpenFlowCookie, match_vlan, bridgeMacAddress, strip_vlan, ofPortHost)) + + if config.IPv6Mode { + // REMOVEME(trozet) when https://bugzilla.kernel.org/show_bug.cgi?id=11797 is resolved + // must flood icmpv6 Route Advertisement and Neighbor Advertisement traffic as it fails to create a CT entry + for _, icmpType := range []int{types.RouteAdvertisementICMPType, types.NeighborAdvertisementICMPType} { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=14, table=1,icmp6,icmpv6_type=%d actions=FLOOD", + defaultOpenFlowCookie, icmpType)) + } + if ofPortPhys != "" { + // We send BFD traffic both on the host and in ovn + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=13, table=1, in_port=%s, udp6, tp_dst=3784, actions=output:%s,output:%s", + defaultOpenFlowCookie, ofPortPhys, defaultNetConfig.ofPortPatch, ofPortHost)) + } + } + + if config.IPv4Mode { + if ofPortPhys != "" { + // We send BFD traffic both on the host and in ovn + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=13, table=1, in_port=%s, udp, tp_dst=3784, actions=output:%s,output:%s", + defaultOpenFlowCookie, ofPortPhys, defaultNetConfig.ofPortPatch, ofPortHost)) + } + } + + // packets larger than known acceptable MTU need to go to kernel for + // potential fragmentation + // introduced specifically for replies to egress traffic not routed + // through the host + if config.Gateway.Mode == config.GatewayModeLocal && !config.Gateway.DisablePacketMTUCheck { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=10, table=11, reg0=0x1, "+ + "actions=output:%s", defaultOpenFlowCookie, ofPortHost)) + + // Send UDN destined traffic to right patch port + for _, netConfig := range bridge.patchedNetConfigs() { + if netConfig.masqCTMark != ctMarkOVN { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=5, table=11, ct_mark=%s, "+ + "actions=output:%s", defaultOpenFlowCookie, netConfig.masqCTMark, netConfig.ofPortPatch)) + } + } + + dftFlows = 
append(dftFlows, + fmt.Sprintf("cookie=%s, priority=1, table=11, "+ + "actions=output:%s", defaultOpenFlowCookie, defaultNetConfig.ofPortPatch)) + } + + // table 1, all other connections do normal processing + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=0, table=1, actions=output:NORMAL", defaultOpenFlowCookie)) + } + + return dftFlows, nil +} + +func pmtudDropFlows(bridge *bridgeConfiguration, ipAddrs []string) []string { + var flows []string + if config.Gateway.Mode != config.GatewayModeShared { + return nil + } + for _, addr := range ipAddrs { + for _, netConfig := range bridge.patchedNetConfigs() { + flows = append(flows, + generateICMPFragmentationFlow(addr, outputPortDrop, netConfig.ofPortPatch, pmtudOpenFlowCookie, 700)) + } + } + + return flows +} + +// hostNetworkNormalActionFlows returns the flows that allow IP{v4,v6} traffic: +// a. from pods in the OVN network to pods in a localnet network, on the same node +// b. from pods on the host to pods in a localnet network, on the same node +// when the localnet is mapped to breth0. +// The expected srcMAC is the MAC address of breth0 and the expected hostSubnets is the host subnets found on the node +// primary interface. +func hostNetworkNormalActionFlows(netConfig *bridgeUDNConfiguration, srcMAC string, hostSubnets []*net.IPNet, isV6 bool) []string { + var flows []string + var ipFamily, ipFamilyDest string + + if isV6 { + ipFamily = "ipv6" + ipFamilyDest = "ipv6_dst" + } else { + ipFamily = "ip" + ipFamilyDest = "nw_dst" + } + + formatFlow := func(inPort, destIP, ctMark string) string { + // Matching IP traffic will be handled by the bridge instead of being output directly + // to the NIC by the existing flow at prio=100. 
+ flowTemplate := "cookie=%s, priority=102, in_port=%s, dl_src=%s, %s, %s=%s, " + + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:NORMAL" + return fmt.Sprintf(flowTemplate, + defaultOpenFlowCookie, + inPort, + srcMAC, + ipFamily, + ipFamilyDest, + destIP, + config.Default.ConntrackZone, + ctMark) + } + + // Traffic path (a): OVN->localnet for shared gw mode + if config.Gateway.Mode == config.GatewayModeShared { + for _, hostSubnet := range hostSubnets { + if utilnet.IsIPv6(hostSubnet.IP) != isV6 { + continue + } + flows = append(flows, formatFlow(netConfig.ofPortPatch, hostSubnet.String(), netConfig.masqCTMark)) + } + } + + // Traffic path (a): OVN->localnet for local gw mode + // Traffic path (b): host->localnet for both gw modes + for _, hostSubnet := range hostSubnets { + if utilnet.IsIPv6(hostSubnet.IP) != isV6 { + continue + } + flows = append(flows, formatFlow(ovsLocalPort, hostSubnet.String(), ctMarkHost)) + } + + if isV6 { + // IPv6 neighbor discovery uses ICMPv6 messages sent to a special destination (ff02::1:ff00:0/104) + // that is unrelated to the host subnets matched in the prio=102 flow above. + // Allow neighbor discovery by matching against ICMP type and ingress port. 
+ formatICMPFlow := func(inPort, ctMark string, icmpType int) string { + icmpFlowTemplate := "cookie=%s, priority=102, in_port=%s, dl_src=%s, icmp6, icmpv6_type=%d, " + + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:NORMAL" + return fmt.Sprintf(icmpFlowTemplate, + defaultOpenFlowCookie, + inPort, + srcMAC, + icmpType, + config.Default.ConntrackZone, + ctMark) + } + + for _, icmpType := range []int{types.NeighborSolicitationICMPType, types.NeighborAdvertisementICMPType} { + // Traffic path (a) for ICMP: OVN-> localnet for shared gw mode + if config.Gateway.Mode == config.GatewayModeShared { + flows = append(flows, + formatICMPFlow(netConfig.ofPortPatch, netConfig.masqCTMark, icmpType)) + } + + // Traffic path (a) for ICMP: OVN->localnet for local gw mode + // Traffic path (b) for ICMP: host->localnet for both gw modes + flows = append(flows, formatICMPFlow(ovsLocalPort, ctMarkHost, icmpType)) + } + } + return flows +} + +func setBridgeOfPorts(bridge *bridgeConfiguration) error { + bridge.Lock() + defer bridge.Unlock() + // Get ofport of patchPort + for _, netConfig := range bridge.netConfig { + if err := netConfig.setBridgeNetworkOfPortsInternal(); err != nil { + return fmt.Errorf("error setting bridge openflow ports for network with patchport %v: err: %v", netConfig.patchPort, err) + } + } + + if bridge.uplinkName != "" { + // Get ofport of physical interface + ofportPhys, stderr, err := util.GetOVSOfPort("get", "interface", bridge.uplinkName, "ofport") + if err != nil { + return fmt.Errorf("failed to get ofport of %s, stderr: %q, error: %v", + bridge.uplinkName, stderr, err) + } + bridge.ofPortPhys = ofportPhys + } + + // Get ofport representing the host. That is, host representor port in case of DPUs, ovsLocalPort otherwise. 
+ if config.OvnKubeNode.Mode == types.NodeModeDPU { + var stderr string + hostRep, err := util.GetDPUHostInterface(bridge.bridgeName) + if err != nil { + return err + } + + bridge.ofPortHost, stderr, err = util.RunOVSVsctl("get", "interface", hostRep, "ofport") + if err != nil { + return fmt.Errorf("failed to get ofport of host interface %s, stderr: %q, error: %v", + hostRep, stderr, err) + } + } else { + var err error + if bridge.gwIfaceRep != "" { + bridge.ofPortHost, _, err = util.RunOVSVsctl("get", "interface", bridge.gwIfaceRep, "ofport") + if err != nil { + return fmt.Errorf("failed to get ofport of bypass rep %s, error: %v", bridge.gwIfaceRep, err) + } + } else { + bridge.ofPortHost = ovsLocalPort + } + } + + return nil +} + func newGateway( nodeName string, subnets []*net.IPNet, @@ -1425,9 +2403,7 @@ func newGateway( gatewayMode config.GatewayMode, ) (*gateway, error) { klog.Info("Creating new gateway") - gw := &gateway{ - nextHops: gwNextHops, - } + gw := &gateway{} if gatewayMode == config.GatewayModeLocal { if err := initLocalGateway(subnets, mgmtPort); err != nil { @@ -1444,19 +2420,37 @@ func newGateway( if exGwBridge != nil { gw.readyFunc = func() (bool, error) { - if !gwBridge.IsGatewayReady() { - return false, nil + gwBridge.Lock() + for _, netConfig := range gwBridge.netConfig { + ready, err := gatewayReady(netConfig.patchPort) + if err != nil || !ready { + gwBridge.Unlock() + return false, err + } } - if !exGwBridge.IsGatewayReady() { - return false, nil + gwBridge.Unlock() + exGwBridge.Lock() + for _, netConfig := range exGwBridge.netConfig { + exGWReady, err := gatewayReady(netConfig.patchPort) + if err != nil || !exGWReady { + exGwBridge.Unlock() + return false, err + } } + exGwBridge.Unlock() return true, nil } } else { gw.readyFunc = func() (bool, error) { - if !gwBridge.IsGatewayReady() { - return false, nil + gwBridge.Lock() + for _, netConfig := range gwBridge.netConfig { + ready, err := gatewayReady(netConfig.patchPort) + if err != nil || 
!ready { + gwBridge.Unlock() + return false, err + } } + gwBridge.Unlock() return true, nil } } @@ -1471,19 +2465,19 @@ func newGateway( // Program cluster.GatewayIntf to let non-pod traffic to go to host // stack klog.Info("Creating Gateway Openflow Manager") - err := gwBridge.SetOfPorts() + err := setBridgeOfPorts(gwBridge) if err != nil { return err } if exGwBridge != nil { - err = exGwBridge.SetOfPorts() + err = setBridgeOfPorts(exGwBridge) if err != nil { return err } } if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && config.Gateway.Mode != config.GatewayModeDisabled { - gw.bridgeEIPAddrManager = egressip.NewBridgeEIPAddrManager(nodeName, gwBridge.GetBridgeName(), linkManager, kube, watchFactory.EgressIPInformer(), watchFactory.NodeCoreInformer()) - gwBridge.SetEIPMarkIPs(gw.bridgeEIPAddrManager.GetCache()) + gw.bridgeEIPAddrManager = newBridgeEIPAddrManager(nodeName, gwBridge.bridgeName, linkManager, kube, watchFactory.EgressIPInformer(), watchFactory.NodeCoreInformer()) + gwBridge.eipMarkIPs = gw.bridgeEIPAddrManager.GetCache() } gw.nodeIPManager = newAddressManager(nodeName, kube, mgmtPort, watchFactory, gwBridge) @@ -1491,15 +2485,15 @@ func newGateway( // Delete stale masquerade resources if there are any. This is to make sure that there // are no Linux resources with IP from old masquerade subnet when masquerade subnet // gets changed as part of day2 operation. 
- if err := deleteStaleMasqueradeResources(gwBridge.GetGatewayIface(), nodeName, watchFactory); err != nil { + if err := deleteStaleMasqueradeResources(gwBridge.gwIface, nodeName, watchFactory); err != nil { return fmt.Errorf("failed to remove stale masquerade resources: %w", err) } - if err := setNodeMasqueradeIPOnExtBridge(gwBridge.GetGatewayIface()); err != nil { - return fmt.Errorf("failed to set the node masquerade IP on the ext bridge %s: %v", gwBridge.GetGatewayIface(), err) + if err := setNodeMasqueradeIPOnExtBridge(gwBridge.gwIface); err != nil { + return fmt.Errorf("failed to set the node masquerade IP on the ext bridge %s: %v", gwBridge.gwIface, err) } - if err := addMasqueradeRoute(routeManager, gwBridge.GetGatewayIface(), nodeName, gwIPs, watchFactory); err != nil { + if err := addMasqueradeRoute(routeManager, gwBridge.gwIface, nodeName, gwIPs, watchFactory); err != nil { return fmt.Errorf("failed to set the node masquerade route to OVN: %v", err) } @@ -1523,7 +2517,7 @@ func newGateway( } if gw.nodePortWatcher != nil { npw, _ := gw.nodePortWatcher.(*nodePortWatcher) - npw.updateGatewayIPs() + npw.updateGatewayIPs(gw.nodeIPManager) } // Services create OpenFlow flows as well, need to update them all if gw.servicesRetryFramework != nil { @@ -1546,7 +2540,7 @@ func newGateway( gw.openflowManager.requestFlowSync() } - if err := addHostMACBindings(gwBridge.GetGatewayIface()); err != nil { + if err := addHostMACBindings(gwBridge.gwIface); err != nil { return fmt.Errorf("failed to add MAC bindings for service routing: %w", err) } @@ -1558,7 +2552,7 @@ func newGateway( } func newNodePortWatcher( - gwBridge *bridgeconfig.BridgeConfiguration, + gwBridge *bridgeConfiguration, ofm *openflowManager, nodeIPManager *addressManager, watchFactory factory.NodeWatchFactory, @@ -1567,10 +2561,10 @@ func newNodePortWatcher( // Get ofport of physical interface ofportPhys, stderr, err := util.GetOVSOfPort("--if-exists", "get", - "interface", gwBridge.GetUplinkName(), 
"ofport") + "interface", gwBridge.uplinkName, "ofport") if err != nil { return nil, fmt.Errorf("failed to get ofport of %s, stderr: %q, error: %v", - gwBridge.GetUplinkName(), stderr, err) + gwBridge.uplinkName, stderr, err) } // In the shared gateway mode, the NodePort service is handled by the OpenFlow flows configured @@ -1608,11 +2602,11 @@ func newNodePortWatcher( subnets = append(subnets, config.Kubernetes.ServiceCIDRs...) if config.Gateway.DisableForwarding { if err := initExternalBridgeServiceForwardingRules(subnets); err != nil { - return nil, fmt.Errorf("failed to add accept rules in forwarding table for bridge %s: err %v", gwBridge.GetGatewayIface(), err) + return nil, fmt.Errorf("failed to add accept rules in forwarding table for bridge %s: err %v", gwBridge.gwIface, err) } } else { if err := delExternalBridgeServiceForwardingRules(subnets); err != nil { - return nil, fmt.Errorf("failed to delete accept rules in forwarding table for bridge %s: err %v", gwBridge.GetGatewayIface(), err) + return nil, fmt.Errorf("failed to delete accept rules in forwarding table for bridge %s: err %v", gwBridge.gwIface, err) } } @@ -1623,14 +2617,14 @@ func newNodePortWatcher( } // Get Physical IPs of Node, Can be IPV4 IPV6 or both - gatewayIPv4, gatewayIPv6 := getGatewayFamilyAddrs(gwBridge.GetIPs()) + gatewayIPv4, gatewayIPv6 := getGatewayFamilyAddrs(gwBridge.ips) npw := &nodePortWatcher{ dpuMode: dpuMode, gatewayIPv4: gatewayIPv4, gatewayIPv6: gatewayIPv6, ofportPhys: ofportPhys, - gwBridge: gwBridge, + gwBridge: gwBridge.bridgeName, serviceInfo: make(map[ktypes.NamespacedName]*serviceConfig), nodeIPManager: nodeIPManager, ofm: ofm, @@ -1891,6 +2885,36 @@ func updateMasqueradeAnnotation(nodeName string, kube kube.Interface) error { return nil } +// generateIPFragmentReassemblyFlow adds flows in table 0 that send packets to a +// specific conntrack zone for reassembly with the same priority as node port +// flows that match on L4 fields. 
After reassembly packets are reinjected to +// table 0 again. This requires a conntrack immplementation that reassembles +// fragments. This reqreuiment is met for the kernel datapath with the netfilter +// module loaded. This reqreuiment is not met for the userspace datapath. +func generateIPFragmentReassemblyFlow(ofPortPhys string) []string { + flows := make([]string, 0, 2) + if config.IPv4Mode { + flows = append(flows, + fmt.Sprintf("cookie=%s, priority=110, table=0, in_port=%s, ip, nw_frag=yes, actions=ct(table=0,zone=%d)", + defaultOpenFlowCookie, + ofPortPhys, + config.Default.ReassemblyConntrackZone, + ), + ) + } + if config.IPv6Mode { + flows = append(flows, + fmt.Sprintf("cookie=%s, priority=110, table=0, in_port=%s, ipv6, nw_frag=yes, actions=ct(table=0,zone=%d)", + defaultOpenFlowCookie, + ofPortPhys, + config.Default.ReassemblyConntrackZone, + ), + ) + } + + return flows +} + // deleteStaleMasqueradeResources removes stale Linux resources when config.Gateway.V4MasqueradeSubnet // or config.Gateway.V6MasqueradeSubnet gets changed at day 2. func deleteStaleMasqueradeResources(bridgeName, nodeName string, wf factory.NodeWatchFactory) error { @@ -2024,6 +3048,14 @@ func deleteMasqueradeResources(link netlink.Link, staleMasqueradeIPs *config.Mas return utilerrors.Join(aggregatedErrors...) } +func getIPv(ipnet *net.IPNet) string { + prefix := "ip" + if utilnet.IsIPv6CIDR(ipnet) { + prefix = "ipv6" + } + return prefix +} + // configureAdvertisedUDNIsolationNFTables configures nftables to drop traffic generated locally towards advertised UDN subnets. // It sets up a nftables chain named nftablesUDNBGPOutputChain in the output hook with filter priority which drops // traffic originating from the local node destined to nftablesAdvertisedUDNsSet. 
diff --git a/go-controller/pkg/node/gateway_udn.go b/go-controller/pkg/node/gateway_udn.go index 026ecd94fc..d991fc74eb 100644 --- a/go-controller/pkg/node/gateway_udn.go +++ b/go-controller/pkg/node/gateway_udn.go @@ -6,6 +6,7 @@ import ( "net" "slices" "strings" + "sync/atomic" "time" "github.com/vishvananda/netlink" @@ -91,6 +92,149 @@ type UserDefinedNetworkGateway struct { gwInterfaceIndex int } +// UTILS Needed for UDN (also leveraged for default netInfo) in bridgeConfiguration + +// getBridgePortConfigurations returns a slice of Network port configurations along with the +// uplinkName and physical port's ofport value +func (b *bridgeConfiguration) getBridgePortConfigurations() ([]*bridgeUDNConfiguration, string, string) { + b.Lock() + defer b.Unlock() + var netConfigs []*bridgeUDNConfiguration + for _, netConfig := range b.netConfig { + netConfigs = append(netConfigs, netConfig.shallowCopy()) + } + return netConfigs, b.uplinkName, b.ofPortPhys +} + +// addNetworkBridgeConfig adds the patchport and ctMark value for the provided netInfo into the bridge configuration cache +func (b *bridgeConfiguration) addNetworkBridgeConfig( + nInfo util.NetInfo, + nodeSubnets []*net.IPNet, + masqCTMark, pktMark uint, + v6MasqIPs, v4MasqIPs *udn.MasqueradeIPs) error { + b.Lock() + defer b.Unlock() + + netName := nInfo.GetNetworkName() + patchPort := nInfo.GetNetworkScopedPatchPortName(b.bridgeName, b.nodeName) + + _, found := b.netConfig[netName] + if !found { + netConfig := &bridgeUDNConfiguration{ + patchPort: patchPort, + masqCTMark: fmt.Sprintf("0x%x", masqCTMark), + pktMark: fmt.Sprintf("0x%x", pktMark), + v4MasqIPs: v4MasqIPs, + v6MasqIPs: v6MasqIPs, + subnets: nInfo.Subnets(), + nodeSubnets: nodeSubnets, + } + netConfig.advertised.Store(util.IsPodNetworkAdvertisedAtNode(nInfo, b.nodeName)) + + b.netConfig[netName] = netConfig + } else { + klog.Warningf("Trying to update bridge config for network %s which already"+ + "exists in cache...networks are not 
mutable...ignoring update", nInfo.GetNetworkName()) + } + return nil +} + +// delNetworkBridgeConfig deletes the provided netInfo from the bridge configuration cache +func (b *bridgeConfiguration) delNetworkBridgeConfig(nInfo util.NetInfo) { + b.Lock() + defer b.Unlock() + + delete(b.netConfig, nInfo.GetNetworkName()) +} + +func (b *bridgeConfiguration) getNetworkBridgeConfig(networkName string) *bridgeUDNConfiguration { + b.Lock() + defer b.Unlock() + return b.netConfig[networkName] +} + +// getActiveNetworkBridgeConfigCopy returns a shallow copy of the network configuration corresponding to the +// provided netInfo. +// +// NOTE: if the network configuration can't be found or if the network is not patched by OVN +// yet this returns nil. +func (b *bridgeConfiguration) getActiveNetworkBridgeConfigCopy(networkName string) *bridgeUDNConfiguration { + b.Lock() + defer b.Unlock() + + if netConfig, found := b.netConfig[networkName]; found && netConfig.ofPortPatch != "" { + return netConfig.shallowCopy() + } + return nil +} + +func (b *bridgeConfiguration) patchedNetConfigs() []*bridgeUDNConfiguration { + result := make([]*bridgeUDNConfiguration, 0, len(b.netConfig)) + for _, netConfig := range b.netConfig { + if netConfig.ofPortPatch == "" { + continue + } + result = append(result, netConfig) + } + return result +} + +// END UDN UTILs for bridgeConfiguration + +// bridgeUDNConfiguration holds the patchport and ctMark +// information for a given network +type bridgeUDNConfiguration struct { + patchPort string + ofPortPatch string + masqCTMark string + pktMark string + v4MasqIPs *udn.MasqueradeIPs + v6MasqIPs *udn.MasqueradeIPs + subnets []config.CIDRNetworkEntry + nodeSubnets []*net.IPNet + advertised atomic.Bool +} + +func (netConfig *bridgeUDNConfiguration) shallowCopy() *bridgeUDNConfiguration { + copy := &bridgeUDNConfiguration{ + patchPort: netConfig.patchPort, + ofPortPatch: netConfig.ofPortPatch, + masqCTMark: netConfig.masqCTMark, + pktMark: netConfig.pktMark, + 
v4MasqIPs: netConfig.v4MasqIPs, + v6MasqIPs: netConfig.v6MasqIPs, + subnets: netConfig.subnets, + nodeSubnets: netConfig.nodeSubnets, + } + netConfig.advertised.Store(netConfig.advertised.Load()) + return copy +} + +func (netConfig *bridgeUDNConfiguration) isDefaultNetwork() bool { + return netConfig.masqCTMark == ctMarkOVN +} + +func (netConfig *bridgeUDNConfiguration) setBridgeNetworkOfPortsInternal() error { + ofportPatch, stderr, err := util.GetOVSOfPort("get", "Interface", netConfig.patchPort, "ofport") + if err != nil { + return fmt.Errorf("failed while waiting on patch port %q to be created by ovn-controller and "+ + "while getting ofport. stderr: %v, error: %v", netConfig.patchPort, stderr, err) + } + netConfig.ofPortPatch = ofportPatch + return nil +} + +func setBridgeNetworkOfPorts(bridge *bridgeConfiguration, netName string) error { + bridge.Lock() + defer bridge.Unlock() + + netConfig, found := bridge.netConfig[netName] + if !found { + return fmt.Errorf("failed to find network %s configuration on bridge %s", netName, bridge.bridgeName) + } + return netConfig.setBridgeNetworkOfPortsInternal() +} + func NewUserDefinedNetworkGateway(netInfo util.NetInfo, node *corev1.Node, nodeLister listers.NodeLister, kubeInterface kube.Interface, vrfManager *vrfmanager.Controller, ruleManager *iprulemanager.Controller, defaultNetworkGateway Gateway) (*UserDefinedNetworkGateway, error) { @@ -124,7 +268,7 @@ func NewUserDefinedNetworkGateway(netInfo util.NetInfo, node *corev1.Node, nodeL if gw.openflowManager == nil { return nil, fmt.Errorf("openflow manager has not been provided for network: %s", netInfo.GetNetworkName()) } - intfName := gw.openflowManager.defaultBridge.GetGatewayIface() + intfName := gw.openflowManager.defaultBridge.gwIface link, err := util.GetNetLinkOps().LinkByName(intfName) if err != nil { return nil, fmt.Errorf("unable to get link for %s, error: %v", intfName, err) @@ -162,9 +306,7 @@ func (udng *UserDefinedNetworkGateway) delMarkChain() error { 
chain := &knftables.Chain{ Name: GetUDNMarkChain(fmt.Sprintf("0x%x", udng.pktMark)), } - // Delete would return an error if we tried to delete a chain that didn't exist, so - // we do an Add first (which is a no-op if the chain already exists) and then Delete. - tx.Add(chain) + tx.Flush(chain) tx.Delete(chain) return nft.Run(context.TODO(), tx) } @@ -256,12 +398,12 @@ func (udng *UserDefinedNetworkGateway) AddNetwork() error { waiter := newStartupWaiterWithTimeout(waitForPatchPortTimeout) readyFunc := func() (bool, error) { - if err := udng.openflowManager.defaultBridge.SetNetworkOfPatchPort(udng.GetNetworkName()); err != nil { + if err := setBridgeNetworkOfPorts(udng.openflowManager.defaultBridge, udng.GetNetworkName()); err != nil { klog.V(3).Infof("Failed to set network %s's openflow ports for default bridge; error: %v", udng.GetNetworkName(), err) return false, nil } if udng.openflowManager.externalGatewayBridge != nil { - if err := udng.openflowManager.externalGatewayBridge.SetNetworkOfPatchPort(udng.GetNetworkName()); err != nil { + if err := setBridgeNetworkOfPorts(udng.openflowManager.externalGatewayBridge, udng.GetNetworkName()); err != nil { klog.V(3).Infof("Failed to set network %s's openflow ports for secondary bridge; error: %v", udng.GetNetworkName(), err) return false, nil } @@ -601,7 +743,7 @@ func (udng *UserDefinedNetworkGateway) getDefaultRoute(isNetworkAdvertised bool) var retVal []netlink.Route var defaultAnyCIDR *net.IPNet - for _, nextHop := range udng.gateway.nextHops { + for _, nextHop := range udng.gateway.openflowManager.defaultBridge.nextHops { isV6 := utilnet.IsIPv6(nextHop) _, defaultAnyCIDR, _ = net.ParseCIDR("0.0.0.0/0") if isV6 { @@ -793,11 +935,11 @@ func (udng *UserDefinedNetworkGateway) doReconcile() error { // update bridge configuration isNetworkAdvertised := util.IsPodNetworkAdvertisedAtNode(udng.NetInfo, udng.node.Name) - netConfig := udng.openflowManager.defaultBridge.GetNetworkConfig(udng.GetNetworkName()) + netConfig := 
udng.openflowManager.defaultBridge.getNetworkBridgeConfig(udng.GetNetworkName()) if netConfig == nil { return fmt.Errorf("missing bridge configuration for network %s", udng.GetNetworkName()) } - netConfig.Advertised.Store(isNetworkAdvertised) + netConfig.advertised.Store(isNetworkAdvertised) if err := udng.updateUDNVRFIPRules(isNetworkAdvertised); err != nil { return fmt.Errorf("error while updating ip rule for UDN %s: %s", udng.GetNetworkName(), err) diff --git a/go-controller/pkg/node/gateway_udn_test.go b/go-controller/pkg/node/gateway_udn_test.go index 34848faf7e..575d8bc9c8 100644 --- a/go-controller/pkg/node/gateway_udn_test.go +++ b/go-controller/pkg/node/gateway_udn_test.go @@ -21,6 +21,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" + utilnet "k8s.io/utils/net" "k8s.io/utils/ptr" "sigs.k8s.io/knftables" @@ -31,7 +32,6 @@ import ( factoryMocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory/mocks" kubemocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube/mocks" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/iprulemanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/managementport" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" @@ -237,13 +237,115 @@ func openflowManagerCheckPorts(ofMgr *openflowManager) { GinkgoHelper() netConfigs, uplink, ofPortPhys := ofMgr.getDefaultBridgePortConfigurations() sort.SliceStable(netConfigs, func(i, j int) bool { - return netConfigs[i].PatchPort < netConfigs[j].PatchPort + return netConfigs[i].patchPort < netConfigs[j].patchPort }) Expect(checkPorts(netConfigs, uplink, ofPortPhys)).To(Succeed()) } +func checkDefaultSvcIsolationOVSFlows(flows []string, defaultConfig *bridgeUDNConfiguration, ofPortHost, bridgeMAC string, svcCIDR 
*net.IPNet) { + By(fmt.Sprintf("Checking default service isolation flows for %s", svcCIDR.String())) + + var masqIP string + var masqSubnet string + var protoPrefix string + if utilnet.IsIPv4CIDR(svcCIDR) { + protoPrefix = "ip" + masqIP = config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String() + masqSubnet = config.Gateway.V4MasqueradeSubnet + } else { + protoPrefix = "ip6" + masqIP = config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String() + masqSubnet = config.Gateway.V6MasqueradeSubnet + } + + var nTable0DefaultFlows int + var nTable0UDNMasqFlows int + var nTable2Flows int + for _, flow := range flows { + if strings.Contains(flow, fmt.Sprintf("priority=500, in_port=%s, %s, %s_dst=%s, actions=ct(commit,zone=%d,nat(src=%s),table=2)", + ofPortHost, protoPrefix, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone, + masqIP)) { + nTable0DefaultFlows++ + } else if strings.Contains(flow, fmt.Sprintf("priority=550, in_port=%s, %s, %s_src=%s, %s_dst=%s, actions=ct(commit,zone=%d,table=2)", + ofPortHost, protoPrefix, protoPrefix, masqSubnet, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone)) { + nTable0UDNMasqFlows++ + } else if strings.Contains(flow, fmt.Sprintf("priority=100, table=2, actions=set_field:%s->eth_dst,output:%s", + bridgeMAC, defaultConfig.ofPortPatch)) { + nTable2Flows++ + } + } + + Expect(nTable0DefaultFlows).To(Equal(1)) + Expect(nTable0UDNMasqFlows).To(Equal(1)) + Expect(nTable2Flows).To(Equal(1)) +} + +func checkAdvertisedUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeUDNConfiguration, netName string, svcCIDR *net.IPNet, expectedNFlows int) { + By(fmt.Sprintf("Checking advertsised UDN %s service isolation flows for %s; expected %d flows", + netName, svcCIDR.String(), expectedNFlows)) + + var matchingIPFamilySubnet *net.IPNet + var protoPrefix string + var udnAdvertisedSubnets []*net.IPNet + var err error + for _, clusterEntry := range netConfig.subnets { + udnAdvertisedSubnets = append(udnAdvertisedSubnets, 
clusterEntry.CIDR) + } + if utilnet.IsIPv4CIDR(svcCIDR) { + matchingIPFamilySubnet, err = util.MatchFirstIPNetFamily(false, udnAdvertisedSubnets) + Expect(err).ToNot(HaveOccurred()) + protoPrefix = "ip" + } else { + matchingIPFamilySubnet, err = util.MatchFirstIPNetFamily(false, udnAdvertisedSubnets) + Expect(err).ToNot(HaveOccurred()) + protoPrefix = "ip6" + } + + var nFlows int + for _, flow := range flows { + if strings.Contains(flow, fmt.Sprintf("priority=200, table=2, %s, %s_src=%s, actions=drop", + protoPrefix, protoPrefix, matchingIPFamilySubnet)) { + nFlows++ + } + if strings.Contains(flow, fmt.Sprintf("priority=550, in_port=LOCAL, %s, %s_src=%s, %s_dst=%s, actions=ct(commit,zone=64001,table=2)", + protoPrefix, protoPrefix, matchingIPFamilySubnet, protoPrefix, svcCIDR)) { + nFlows++ + } + } + + Expect(nFlows).To(Equal(expectedNFlows)) +} + +func checkUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeUDNConfiguration, netName string, svcCIDR *net.IPNet, expectedNFlows int) { + By(fmt.Sprintf("Checking UDN %s service isolation flows for %s; expected %d flows", + netName, svcCIDR.String(), expectedNFlows)) + + var mgmtMasqIP string + var protoPrefix string + if utilnet.IsIPv4CIDR(svcCIDR) { + mgmtMasqIP = netConfig.v4MasqIPs.ManagementPort.IP.String() + protoPrefix = "ip" + } else { + mgmtMasqIP = netConfig.v6MasqIPs.ManagementPort.IP.String() + protoPrefix = "ip6" + } + + var nFlows int + for _, flow := range flows { + if strings.Contains(flow, fmt.Sprintf("priority=200, table=2, %s, %s_src=%s, actions=drop", + protoPrefix, protoPrefix, mgmtMasqIP)) { + nFlows++ + } + } + + Expect(nFlows).To(Equal(expectedNFlows)) +} + func getDummyOpenflowManager() *openflowManager { - gwBridge := bridgeconfig.TestBridgeConfig("breth0") + gwBridge := &bridgeConfiguration{ + gwIface: "breth0", + bridgeName: "breth0", + } ofm := &openflowManager{ defaultBridge: gwBridge, } @@ -667,22 +769,22 @@ var _ = Describe("UserDefinedNetworkGateway", func() { } } 
Expect(udnFlows).To(Equal(0)) - Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(1)) // only default network + Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(1)) // only default network Expect(udnGateway.AddNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(64)) // 18 UDN Flows are added by default - Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(2)) // default network + UDN network - defaultUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("default") - bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("bluenet") - bridgeMAC := udnGateway.openflowManager.defaultBridge.GetMAC().String() - ofPortHost := udnGateway.openflowManager.defaultBridge.GetOfPortHost() + Expect(flowMap["DEFAULT"]).To(HaveLen(64)) // 18 UDN Flows are added by default + Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(2)) // default network + UDN network + defaultUdnConfig := udnGateway.openflowManager.defaultBridge.netConfig["default"] + bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.netConfig["bluenet"] + bridgeMAC := udnGateway.openflowManager.defaultBridge.macAddress.String() + ofPortHost := udnGateway.openflowManager.defaultBridge.ofPortHost for _, flows := range flowMap { for _, flow := range flows { if strings.Contains(flow, fmt.Sprintf("0x%x", udnGateway.masqCTMark)) { // UDN Flow udnFlows++ - } else if strings.Contains(flow, fmt.Sprintf("in_port=%s", bridgeUdnConfig.OfPortPatch)) { + } else if strings.Contains(flow, fmt.Sprintf("in_port=%s", bridgeUdnConfig.ofPortPatch)) { udnFlows++ } } @@ -692,10 +794,10 @@ var _ = Describe("UserDefinedNetworkGateway", func() { for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { // Check flows for default network service CIDR. 
- bridgeconfig.CheckDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) + checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect exactly one flow per UDN for table 2 for service isolation. - bridgeconfig.CheckUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 1) + checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 1) } // The second call to checkPorts() will return no ofPort for the UDN - simulating a deletion that already was @@ -707,8 +809,8 @@ var _ = Describe("UserDefinedNetworkGateway", func() { kubeMock.On("UpdateNodeStatus", cnode).Return(nil) // check if network key gets deleted from annotation Expect(udnGateway.DelNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present - Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(1)) // default network only + Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present + Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(1)) // default network only udnFlows = 0 for _, flows := range flowMap { for _, flow := range flows { @@ -722,10 +824,10 @@ var _ = Describe("UserDefinedNetworkGateway", func() { for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { // Check flows for default network service CIDR. - bridgeconfig.CheckDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) + checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect no more flows per UDN for table 2 for service isolation. 
- bridgeconfig.CheckUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 0) + checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 0) } return nil }) @@ -898,22 +1000,22 @@ var _ = Describe("UserDefinedNetworkGateway", func() { } } Expect(udnFlows).To(Equal(0)) - Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(1)) // only default network + Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(1)) // only default network Expect(udnGateway.AddNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(64)) // 18 UDN Flows are added by default - Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(2)) // default network + UDN network - defaultUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("default") - bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("bluenet") - bridgeMAC := udnGateway.openflowManager.defaultBridge.GetMAC().String() - ofPortHost := udnGateway.openflowManager.defaultBridge.GetOfPortHost() + Expect(flowMap["DEFAULT"]).To(HaveLen(64)) // 18 UDN Flows are added by default + Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(2)) // default network + UDN network + defaultUdnConfig := udnGateway.openflowManager.defaultBridge.netConfig["default"] + bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.netConfig["bluenet"] + bridgeMAC := udnGateway.openflowManager.defaultBridge.macAddress.String() + ofPortHost := udnGateway.openflowManager.defaultBridge.ofPortHost for _, flows := range flowMap { for _, flow := range flows { if strings.Contains(flow, fmt.Sprintf("0x%x", udnGateway.masqCTMark)) { // UDN Flow udnFlows++ - } else if strings.Contains(flow, fmt.Sprintf("in_port=%s", bridgeUdnConfig.OfPortPatch)) { + } else if strings.Contains(flow, fmt.Sprintf("in_port=%s", bridgeUdnConfig.ofPortPatch)) { 
udnFlows++ } } @@ -923,10 +1025,10 @@ var _ = Describe("UserDefinedNetworkGateway", func() { for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { // Check flows for default network service CIDR. - bridgeconfig.CheckDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) + checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect exactly one flow per UDN for tables 0 and 2 for service isolation. - bridgeconfig.CheckUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 1) + checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 1) } // The second call to checkPorts() will return no ofPort for the UDN - simulating a deletion that already was @@ -938,8 +1040,8 @@ var _ = Describe("UserDefinedNetworkGateway", func() { kubeMock.On("UpdateNodeStatus", cnode).Return(nil) // check if network key gets deleted from annotation Expect(udnGateway.DelNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present - Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(1)) // default network only + Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present + Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(1)) // default network only udnFlows = 0 for _, flows := range flowMap { for _, flow := range flows { @@ -953,10 +1055,10 @@ var _ = Describe("UserDefinedNetworkGateway", func() { for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { // Check flows for default network service CIDR. 
- bridgeconfig.CheckDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) + checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect no more flows per UDN for tables 0 and 2 for service isolation. - bridgeconfig.CheckUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 0) + checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 0) } return nil }) @@ -1139,22 +1241,22 @@ var _ = Describe("UserDefinedNetworkGateway", func() { } } Expect(udnFlows).To(Equal(0)) - Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(1)) // only default network + Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(1)) // only default network Expect(udnGateway.AddNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(69)) // 18 UDN Flows and 5 advertisedUDN flows are added by default - Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(2)) // default network + UDN network - defaultUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("default") - bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("bluenet") - bridgeMAC := udnGateway.openflowManager.defaultBridge.GetMAC().String() - ofPortHost := udnGateway.openflowManager.defaultBridge.GetOfPortHost() + Expect(flowMap["DEFAULT"]).To(HaveLen(69)) // 18 UDN Flows and 5 advertisedUDN flows are added by default + Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(2)) // default network + UDN network + defaultUdnConfig := udnGateway.openflowManager.defaultBridge.netConfig["default"] + bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.netConfig["bluenet"] + bridgeMAC := udnGateway.openflowManager.defaultBridge.macAddress.String() + ofPortHost := 
udnGateway.openflowManager.defaultBridge.ofPortHost for _, flows := range flowMap { for _, flow := range flows { if strings.Contains(flow, fmt.Sprintf("0x%x", udnGateway.masqCTMark)) { // UDN Flow udnFlows++ - } else if strings.Contains(flow, fmt.Sprintf("in_port=%s", bridgeUdnConfig.OfPortPatch)) { + } else if strings.Contains(flow, fmt.Sprintf("in_port=%s", bridgeUdnConfig.ofPortPatch)) { udnFlows++ } } @@ -1164,10 +1266,10 @@ var _ = Describe("UserDefinedNetworkGateway", func() { for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { // Check flows for default network service CIDR. - bridgeconfig.CheckDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) + checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect exactly one flow per advertised UDN for table 2 and table 0 for service isolation. - bridgeconfig.CheckAdvertisedUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 2) + checkAdvertisedUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 2) } // The second call to checkPorts() will return no ofPort for the UDN - simulating a deletion that already was @@ -1179,8 +1281,8 @@ var _ = Describe("UserDefinedNetworkGateway", func() { kubeMock.On("UpdateNodeStatus", cnode).Return(nil) // check if network key gets deleted from annotation Expect(udnGateway.DelNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present - Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(1)) // default network only + Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present + Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(1)) // default network only udnFlows = 0 for _, flows := range flowMap { for _, flow := range flows { @@ -1194,10 +1296,10 @@ 
var _ = Describe("UserDefinedNetworkGateway", func() { for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { // Check flows for default network service CIDR. - bridgeconfig.CheckDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) + checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect no more flows per UDN for table 2 and table0 for service isolation. - bridgeconfig.CheckAdvertisedUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 0) + checkAdvertisedUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 0) } return nil }) @@ -1380,8 +1482,8 @@ var _ = Describe("UserDefinedNetworkGateway", func() { err = testNS.Do(func(ns.NetNS) error { defer GinkgoRecover() ofm := getDummyOpenflowManager() - udnGateway, err := NewUserDefinedNetworkGateway(mutableNetInfo, node, nil, nil, vrf, nil, - &gateway{openflowManager: ofm, nextHops: ovntest.MustParseIPs(config.Gateway.NextHop)}) + ofm.defaultBridge.nextHops = ovntest.MustParseIPs(config.Gateway.NextHop) + udnGateway, err := NewUserDefinedNetworkGateway(mutableNetInfo, node, nil, nil, vrf, nil, &gateway{openflowManager: ofm}) Expect(err).NotTo(HaveOccurred()) mplink, err := netlink.LinkByName(mgtPort) Expect(err).NotTo(HaveOccurred()) diff --git a/go-controller/pkg/node/helper_linux.go b/go-controller/pkg/node/helper_linux.go index 8b46f05315..5e55173a4a 100644 --- a/go-controller/pkg/node/helper_linux.go +++ b/go-controller/pkg/node/helper_linux.go @@ -153,6 +153,23 @@ func getDefaultGatewayInterfaceByFamily(family int, gwIface string) (string, net return "", net.IP{}, nil } +func getIntfName(gatewayIntf string) (string, error) { + // The given (or autodetected) interface is an OVS bridge and this could be + // created by us using util.NicToBridge() or it was pre-created by the user. + + // Is intfName a port of gatewayIntf? 
+ intfName, err := util.GetNicName(gatewayIntf) + if err != nil { + return "", err + } + _, stderr, err := util.RunOVSVsctl("get", "interface", intfName, "ofport") + if err != nil { + return "", fmt.Errorf("failed to get ofport of %s, stderr: %q, error: %v", + intfName, stderr, err) + } + return intfName, nil +} + // filterRoutesByIfIndex is a helper function that will sieve the provided routes and check // if they match the provided index. This used to be implemented with netlink.RT_FILTER_OIF, // however the problem is that this filtered out MultiPath IPv6 routes which have a LinkIndex of 0. diff --git a/go-controller/pkg/node/node_ip_handler_linux.go b/go-controller/pkg/node/node_ip_handler_linux.go index dcbbbfc7d6..a0c5ab21e8 100644 --- a/go-controller/pkg/node/node_ip_handler_linux.go +++ b/go-controller/pkg/node/node_ip_handler_linux.go @@ -20,7 +20,6 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/managementport" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -38,21 +37,21 @@ type addressManager struct { syncPeriod time.Duration // compare node primary IP change nodePrimaryAddr net.IP - gatewayBridge *bridgeconfig.BridgeConfiguration + gatewayBridge *bridgeConfiguration OnChanged func() sync.Mutex } // initializes a new address manager which will hold all the IPs on a node -func newAddressManager(nodeName string, k kube.Interface, mgmtPort managementport.Interface, watchFactory factory.NodeWatchFactory, gwBridge *bridgeconfig.BridgeConfiguration) *addressManager { +func newAddressManager(nodeName string, k kube.Interface, mgmtPort managementport.Interface, watchFactory factory.NodeWatchFactory, gwBridge 
*bridgeConfiguration) *addressManager { return newAddressManagerInternal(nodeName, k, mgmtPort, watchFactory, gwBridge, true) } // newAddressManagerInternal creates a new address manager; this function is // only expose for testcases to disable netlink subscription to ensure // reproducibility of unit tests. -func newAddressManagerInternal(nodeName string, k kube.Interface, mgmtPort managementport.Interface, watchFactory factory.NodeWatchFactory, gwBridge *bridgeconfig.BridgeConfiguration, useNetlink bool) *addressManager { +func newAddressManagerInternal(nodeName string, k kube.Interface, mgmtPort managementport.Interface, watchFactory factory.NodeWatchFactory, gwBridge *bridgeConfiguration, useNetlink bool) *addressManager { mgr := &addressManager{ nodeName: nodeName, watchFactory: watchFactory, @@ -65,11 +64,27 @@ func newAddressManagerInternal(nodeName string, k kube.Interface, mgmtPort manag } mgr.nodeAnnotator = kube.NewNodeAnnotator(k, nodeName) if config.OvnKubeNode.Mode == types.NodeModeDPU { - if err := mgr.updateHostCIDRs(); err != nil { + var ifAddrs []*net.IPNet + + // update k8s.ovn.org/host-cidrs + node, err := watchFactory.GetNode(nodeName) + if err != nil { + klog.Errorf("Failed to get node %s: %v", nodeName, err) + return nil + } + if useNetlink { + // get updated interface IP addresses for the gateway bridge + ifAddrs, err = gwBridge.updateInterfaceIPAddresses(node) + if err != nil { + klog.Errorf("Failed to obtain interface IP addresses for node %s: %v", nodeName, err) + return nil + } + } + if err = mgr.updateHostCIDRs(ifAddrs); err != nil { klog.Errorf("Failed to update host-cidrs annotations on node %s: %v", nodeName, err) return nil } - if err := mgr.nodeAnnotator.Run(); err != nil { + if err = mgr.nodeAnnotator.Run(); err != nil { klog.Errorf("Failed to set host-cidrs annotations on node %s: %v", nodeName, err) return nil } @@ -263,14 +278,14 @@ func (c *addressManager) updateNodeAddressAnnotations() error { if c.useNetlink { // get updated 
interface IP addresses for the gateway bridge - ifAddrs, err = c.gatewayBridge.UpdateInterfaceIPAddresses(node) + ifAddrs, err = c.gatewayBridge.updateInterfaceIPAddresses(node) if err != nil { return err } } // update k8s.ovn.org/host-cidrs - if err = c.updateHostCIDRs(); err != nil { + if err = c.updateHostCIDRs(ifAddrs); err != nil { return err } @@ -300,10 +315,14 @@ func (c *addressManager) updateNodeAddressAnnotations() error { return nil } -func (c *addressManager) updateHostCIDRs() error { +func (c *addressManager) updateHostCIDRs(ifAddrs []*net.IPNet) error { if config.OvnKubeNode.Mode == types.NodeModeDPU { - // For DPU mode, we don't need to update the host-cidrs annotation. - return nil + // For DPU mode, here we need to use the DPU host's IP address which is the tenant cluster's + // host internal IP address instead. + // Currently we are only intentionally supporting IPv4 for DPU here. + nodeIPNetv4, _ := util.MatchFirstIPNetFamily(false, ifAddrs) + nodeAddrSet := sets.New[string](nodeIPNetv4.String()) + return util.SetNodeHostCIDRs(c.nodeAnnotator, nodeAddrSet) } return util.SetNodeHostCIDRs(c.nodeAnnotator, c.cidrs) @@ -418,8 +437,7 @@ func (c *addressManager) isValidNodeIP(addr net.IP, linkIndex int) bool { if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && config.Gateway.Mode != config.GatewayModeDisabled { // Two methods to lookup EIPs assigned to the gateway bridge. Fast path from a shared cache or slow path from node annotations. 
// At startup, gateway bridge cache gets sync - eipMarkIPs := c.gatewayBridge.GetEIPMarkIPs() - if eipMarkIPs != nil && eipMarkIPs.HasSyncdOnce() && eipMarkIPs.IsIPPresent(addr) { + if c.gatewayBridge.eipMarkIPs != nil && c.gatewayBridge.eipMarkIPs.HasSyncdOnce() && c.gatewayBridge.eipMarkIPs.IsIPPresent(addr) { return false } else { if eipAddresses, err := c.getPrimaryHostEgressIPs(); err != nil { diff --git a/go-controller/pkg/node/node_ip_handler_linux_test.go b/go-controller/pkg/node/node_ip_handler_linux_test.go index aa819cdb8a..ee10bbfc41 100644 --- a/go-controller/pkg/node/node_ip_handler_linux_test.go +++ b/go-controller/pkg/node/node_ip_handler_linux_test.go @@ -21,7 +21,6 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" nodemocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/mocks/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node" @@ -402,7 +401,7 @@ func configureKubeOVNContext(nodeName string, useNetlink bool) *testCtx { mpmock := &nodemocks.ManagementPort{} mpmock.On("GetAddresses").Return([]*net.IPNet{tc.mgmtPortIP4, tc.mgmtPortIP6}) - fakeBridgeConfiguration := bridgeconfig.TestBridgeConfig("breth0") + fakeBridgeConfiguration := &bridgeConfiguration{bridgeName: "breth0", gwIface: "breth0"} k := &kube.Kube{KClient: tc.fakeClient} tc.ipManager = newAddressManagerInternal(nodeName, k, mpmock, tc.watchFactory, fakeBridgeConfiguration, useNetlink) diff --git a/go-controller/pkg/node/openflow_manager.go b/go-controller/pkg/node/openflow_manager.go index de3a721519..96b55a52e1 100644 --- a/go-controller/pkg/node/openflow_manager.go +++ 
b/go-controller/pkg/node/openflow_manager.go @@ -13,15 +13,13 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/generator/udn" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" - nodetypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) type openflowManager struct { - defaultBridge *bridgeconfig.BridgeConfiguration - externalGatewayBridge *bridgeconfig.BridgeConfiguration + defaultBridge *bridgeConfiguration + externalGatewayBridge *bridgeConfiguration // flow cache, use map instead of array for readability when debugging flowCache map[string][]string flowMutex sync.Mutex @@ -33,20 +31,20 @@ type openflowManager struct { // UTILs Needed for UDN (also leveraged for default netInfo) in openflowmanager -func (c *openflowManager) getDefaultBridgePortConfigurations() ([]*bridgeconfig.BridgeUDNConfiguration, string, string) { - return c.defaultBridge.GetPortConfigurations() +func (c *openflowManager) getDefaultBridgePortConfigurations() ([]*bridgeUDNConfiguration, string, string) { + return c.defaultBridge.getBridgePortConfigurations() } -func (c *openflowManager) getExGwBridgePortConfigurations() ([]*bridgeconfig.BridgeUDNConfiguration, string, string) { - return c.externalGatewayBridge.GetPortConfigurations() +func (c *openflowManager) getExGwBridgePortConfigurations() ([]*bridgeUDNConfiguration, string, string) { + return c.externalGatewayBridge.getBridgePortConfigurations() } func (c *openflowManager) addNetwork(nInfo util.NetInfo, nodeSubnets []*net.IPNet, masqCTMark, pktMark uint, v6MasqIPs, v4MasqIPs *udn.MasqueradeIPs) error { - if err := c.defaultBridge.AddNetworkConfig(nInfo, nodeSubnets, masqCTMark, pktMark, v6MasqIPs, v4MasqIPs); err != nil { + if err := c.defaultBridge.addNetworkBridgeConfig(nInfo, nodeSubnets, masqCTMark, 
pktMark, v6MasqIPs, v4MasqIPs); err != nil { return err } if c.externalGatewayBridge != nil { - if err := c.externalGatewayBridge.AddNetworkConfig(nInfo, nodeSubnets, masqCTMark, pktMark, v6MasqIPs, v4MasqIPs); err != nil { + if err := c.externalGatewayBridge.addNetworkBridgeConfig(nInfo, nodeSubnets, masqCTMark, pktMark, v6MasqIPs, v4MasqIPs); err != nil { return err } } @@ -54,28 +52,34 @@ func (c *openflowManager) addNetwork(nInfo util.NetInfo, nodeSubnets []*net.IPNe } func (c *openflowManager) delNetwork(nInfo util.NetInfo) { - c.defaultBridge.DelNetworkConfig(nInfo) + c.defaultBridge.delNetworkBridgeConfig(nInfo) if c.externalGatewayBridge != nil { - c.externalGatewayBridge.DelNetworkConfig(nInfo) + c.externalGatewayBridge.delNetworkBridgeConfig(nInfo) } } -func (c *openflowManager) getActiveNetwork(nInfo util.NetInfo) *bridgeconfig.BridgeUDNConfiguration { - return c.defaultBridge.GetActiveNetworkBridgeConfigCopy(nInfo.GetNetworkName()) +func (c *openflowManager) getActiveNetwork(nInfo util.NetInfo) *bridgeUDNConfiguration { + return c.defaultBridge.getActiveNetworkBridgeConfigCopy(nInfo.GetNetworkName()) } // END UDN UTILs func (c *openflowManager) getDefaultBridgeName() string { - return c.defaultBridge.GetBridgeName() + c.defaultBridge.Lock() + defer c.defaultBridge.Unlock() + return c.defaultBridge.bridgeName } func (c *openflowManager) getDefaultBridgeMAC() net.HardwareAddr { - return c.defaultBridge.GetMAC() + c.defaultBridge.Lock() + defer c.defaultBridge.Unlock() + return c.defaultBridge.macAddress } func (c *openflowManager) setDefaultBridgeMAC(macAddr net.HardwareAddr) { - c.defaultBridge.SetMAC(macAddr) + c.defaultBridge.Lock() + defer c.defaultBridge.Unlock() + c.defaultBridge.macAddress = macAddr } func (c *openflowManager) updateFlowCacheEntry(key string, flows []string) { @@ -112,6 +116,10 @@ func (c *openflowManager) requestFlowSync() { } func (c *openflowManager) syncFlows() { + // protect gwBridge config from being updated by 
gw.nodeIPManager + c.defaultBridge.Lock() + defer c.defaultBridge.Unlock() + c.flowMutex.Lock() defer c.flowMutex.Unlock() @@ -120,12 +128,15 @@ func (c *openflowManager) syncFlows() { flows = append(flows, entry...) } - _, stderr, err := util.ReplaceOFFlows(c.defaultBridge.GetBridgeName(), flows) + _, stderr, err := util.ReplaceOFFlows(c.defaultBridge.bridgeName, flows) if err != nil { klog.Errorf("Failed to add flows, error: %v, stderr, %s, flows: %s", err, stderr, c.flowCache) } if c.externalGatewayBridge != nil { + c.externalGatewayBridge.Lock() + defer c.externalGatewayBridge.Unlock() + c.exGWFlowMutex.Lock() defer c.exGWFlowMutex.Unlock() @@ -134,7 +145,7 @@ func (c *openflowManager) syncFlows() { flows = append(flows, entry...) } - _, stderr, err := util.ReplaceOFFlows(c.externalGatewayBridge.GetBridgeName(), flows) + _, stderr, err := util.ReplaceOFFlows(c.externalGatewayBridge.bridgeName, flows) if err != nil { klog.Errorf("Failed to add flows, error: %v, stderr, %s, flows: %s", err, stderr, c.exGWFlowCache) } @@ -149,7 +160,7 @@ func (c *openflowManager) syncFlows() { // // -- to handle host -> service access, via masquerading from the host to OVN GR // -- to handle external -> service(ExternalTrafficPolicy: Local) -> host access without SNAT -func newGatewayOpenFlowManager(gwBridge, exGWBridge *bridgeconfig.BridgeConfiguration) (*openflowManager, error) { +func newGatewayOpenFlowManager(gwBridge, exGWBridge *bridgeConfiguration) (*openflowManager, error) { // add health check function to check default OpenFlow flows are on the shared gateway bridge ofm := &openflowManager{ defaultBridge: gwBridge, @@ -200,10 +211,16 @@ func (c *openflowManager) Run(stopChan <-chan struct{}, doneWg *sync.WaitGroup) } func (c *openflowManager) updateBridgePMTUDFlowCache(key string, ipAddrs []string) { - dftFlows := c.defaultBridge.PMTUDDropFlows(ipAddrs) + // protect defaultBridge config from being updated by gw.nodeIPManager + c.defaultBridge.Lock() + defer 
c.defaultBridge.Unlock() + + dftFlows := pmtudDropFlows(c.defaultBridge, ipAddrs) c.updateFlowCacheEntry(key, dftFlows) if c.externalGatewayBridge != nil { - exGWBridgeDftFlows := c.externalGatewayBridge.PMTUDDropFlows(ipAddrs) + c.externalGatewayBridge.Lock() + defer c.externalGatewayBridge.Unlock() + exGWBridgeDftFlows := pmtudDropFlows(c.externalGatewayBridge, ipAddrs) c.updateExBridgeFlowCacheEntry(key, exGWBridgeDftFlows) } } @@ -211,49 +228,59 @@ func (c *openflowManager) updateBridgePMTUDFlowCache(key string, ipAddrs []strin // updateBridgeFlowCache generates the "static" per-bridge flows // note: this is shared between shared and local gateway modes func (c *openflowManager) updateBridgeFlowCache(hostIPs []net.IP, hostSubnets []*net.IPNet) error { + // protect defaultBridge config from being updated by gw.nodeIPManager + c.defaultBridge.Lock() + defer c.defaultBridge.Unlock() + // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure // that dl_src is included in match criteria! - dftFlows, err := c.defaultBridge.DefaultBridgeFlows(hostSubnets, hostIPs) + dftFlows, err := flowsForDefaultBridge(c.defaultBridge, hostIPs) + if err != nil { + return err + } + dftCommonFlows, err := commonFlows(hostSubnets, c.defaultBridge) if err != nil { return err } + dftFlows = append(dftFlows, dftCommonFlows...) 
c.updateFlowCacheEntry("NORMAL", []string{fmt.Sprintf("table=0,priority=0,actions=%s\n", util.NormalAction)}) c.updateFlowCacheEntry("DEFAULT", dftFlows) // we consume ex gw bridge flows only if that is enabled if c.externalGatewayBridge != nil { - exGWBridgeDftFlows, err := c.externalGatewayBridge.ExternalBridgeFlows(hostSubnets) + c.externalGatewayBridge.Lock() + defer c.externalGatewayBridge.Unlock() + c.updateExBridgeFlowCacheEntry("NORMAL", []string{fmt.Sprintf("table=0,priority=0,actions=%s\n", util.NormalAction)}) + exGWBridgeDftFlows, err := commonFlows(hostSubnets, c.externalGatewayBridge) if err != nil { return err } - - c.updateExBridgeFlowCacheEntry("NORMAL", []string{fmt.Sprintf("table=0,priority=0,actions=%s\n", util.NormalAction)}) c.updateExBridgeFlowCacheEntry("DEFAULT", exGWBridgeDftFlows) } return nil } -func checkPorts(netConfigs []*bridgeconfig.BridgeUDNConfiguration, physIntf, ofPortPhys string) error { +func checkPorts(netConfigs []*bridgeUDNConfiguration, physIntf, ofPortPhys string) error { // it could be that the ovn-controller recreated the patch between the host OVS bridge and // the integration bridge, as a result the ofport number changed for that patch interface for _, netConfig := range netConfigs { - if netConfig.OfPortPatch == "" { + if netConfig.ofPortPatch == "" { continue } - curOfportPatch, stderr, err := util.GetOVSOfPort("--if-exists", "get", "Interface", netConfig.PatchPort, "ofport") + curOfportPatch, stderr, err := util.GetOVSOfPort("--if-exists", "get", "Interface", netConfig.patchPort, "ofport") if err != nil { - return fmt.Errorf("failed to get ofport of %s, stderr: %q: %w", netConfig.PatchPort, stderr, err) + return fmt.Errorf("failed to get ofport of %s, stderr: %q: %w", netConfig.patchPort, stderr, err) } - if netConfig.OfPortPatch != curOfportPatch { - if netConfig.IsDefaultNetwork() { + if netConfig.ofPortPatch != curOfportPatch { + if netConfig.isDefaultNetwork() { klog.Errorf("Fatal error: patch port %s ofport 
changed from %s to %s", - netConfig.PatchPort, netConfig.OfPortPatch, curOfportPatch) + netConfig.patchPort, netConfig.ofPortPatch, curOfportPatch) os.Exit(1) } else { - klog.Warningf("UDN patch port %s changed for existing network from %v to %v. Expecting bridge config update.", netConfig.PatchPort, netConfig.OfPortPatch, curOfportPatch) + klog.Warningf("UDN patch port %s changed for existing network from %v to %v. Expecting bridge config update.", netConfig.patchPort, netConfig.ofPortPatch, curOfportPatch) } } } @@ -335,10 +362,10 @@ func bootstrapOVSFlows(nodeName string) error { // for non-IP packets that would normally be forwarded with NORMAL action (table 0, priority 0 flow). dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=10, table=0, in_port=%s, dl_src=%s, actions=output:NORMAL", - nodetypes.DefaultOpenFlowCookie, ofportPatch, bridgeMACAddress)) + defaultOpenFlowCookie, ofportPatch, bridgeMACAddress)) dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=9, table=0, in_port=%s, actions=drop", - nodetypes.DefaultOpenFlowCookie, ofportPatch)) + defaultOpenFlowCookie, ofportPatch)) dftFlows = append(dftFlows, "priority=0, table=0, actions=output:NORMAL") _, stderr, err = util.ReplaceOFFlows(bridge, dftFlows) diff --git a/go-controller/pkg/node/types/const.go b/go-controller/pkg/node/types/const.go deleted file mode 100644 index bdf9c388bf..0000000000 --- a/go-controller/pkg/node/types/const.go +++ /dev/null @@ -1,22 +0,0 @@ -package types - -const ( - // CtMarkOVN is the conntrack mark value for OVN traffic - CtMarkOVN = "0x1" - // OvsLocalPort is the name of the OVS bridge local port - OvsLocalPort = "LOCAL" - // DefaultOpenFlowCookie identifies default open flow rules added to the host OVS bridge. - // The hex number 0xdeff105, aka defflos, is meant to sound like default flows. 
- DefaultOpenFlowCookie = "0xdeff105" - // OutputPortDrop is used to signify that there is no output port for an openflow action and the - // rendered action should result in a drop - OutputPortDrop = "output-port-drop" - // OvnKubeNodeSNATMark is used to mark packets that need to be SNAT-ed to nodeIP for - // traffic originating from egressIP and egressService controlled pods towards other nodes in the cluster. - OvnKubeNodeSNATMark = "0x3f0" - // PmtudOpenFlowCookie identifies the flows used to drop ICMP type (3) destination unreachable, - // fragmentation-needed (4) - PmtudOpenFlowCookie = "0x0304" - // CtMarkHost is the conntrack mark value for host traffic - CtMarkHost = "0x2" -) diff --git a/go-controller/pkg/node/util/util.go b/go-controller/pkg/node/util/util.go deleted file mode 100644 index e04be61b39..0000000000 --- a/go-controller/pkg/node/util/util.go +++ /dev/null @@ -1,118 +0,0 @@ -package util - -import ( - "fmt" - "net" - - net2 "k8s.io/utils/net" - - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" - nodetypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types" - pkgutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" -) - -// GetNetworkInterfaceIPAddresses returns the IP addresses for the network interface 'iface'. 
-func GetNetworkInterfaceIPAddresses(iface string) ([]*net.IPNet, error) { - allIPs, err := pkgutil.GetFilteredInterfaceV4V6IPs(iface) - if err != nil { - return nil, fmt.Errorf("could not find IP addresses: %v", err) - } - - var ips []*net.IPNet - var foundIPv4 bool - var foundIPv6 bool - for _, ip := range allIPs { - if net2.IsIPv6CIDR(ip) { - if config.IPv6Mode && !foundIPv6 { - // For IPv6 addresses with 128 prefix, let's try to find an appropriate subnet - // in the routing table - subnetIP, err := pkgutil.GetIPv6OnSubnet(iface, ip) - if err != nil { - return nil, fmt.Errorf("could not find IPv6 address on subnet: %v", err) - } - ips = append(ips, subnetIP) - foundIPv6 = true - } - } else if config.IPv4Mode && !foundIPv4 { - ips = append(ips, ip) - foundIPv4 = true - } - } - if config.IPv4Mode && !foundIPv4 { - return nil, fmt.Errorf("failed to find IPv4 address on interface %s", iface) - } else if config.IPv6Mode && !foundIPv6 { - return nil, fmt.Errorf("failed to find IPv6 address on interface %s", iface) - } - return ips, nil -} - -// GetDPUHostPrimaryIPAddresses returns the DPU host IP/Network based on K8s Node IP -// and DPU IP subnet overriden by config config.Gateway.RouterSubnet -func GetDPUHostPrimaryIPAddresses(k8sNodeIP net.IP, ifAddrs []*net.IPNet) ([]*net.IPNet, error) { - // Note(adrianc): No Dual-Stack support at this point as we rely on k8s node IP to derive gateway information - // for each node. - var gwIps []*net.IPNet - isIPv4 := net2.IsIPv4(k8sNodeIP) - - // override subnet mask via config - if config.Gateway.RouterSubnet != "" { - _, addr, err := net.ParseCIDR(config.Gateway.RouterSubnet) - if err != nil { - return nil, err - } - if net2.IsIPv4CIDR(addr) != isIPv4 { - return nil, fmt.Errorf("unexpected gateway router subnet provided (%s). "+ - "does not match Node IP address format", config.Gateway.RouterSubnet) - } - if !addr.Contains(k8sNodeIP) { - return nil, fmt.Errorf("unexpected gateway router subnet provided (%s). 
"+ - "subnet does not contain Node IP address (%s)", config.Gateway.RouterSubnet, k8sNodeIP) - } - addr.IP = k8sNodeIP - gwIps = append(gwIps, addr) - } else { - // Assume Host and DPU share the same subnet - // in this case just update the matching IPNet with the Host's IP address - for _, addr := range ifAddrs { - if net2.IsIPv4CIDR(addr) != isIPv4 { - continue - } - // expect k8s Node IP to be contained in the given subnet - if !addr.Contains(k8sNodeIP) { - continue - } - newAddr := *addr - newAddr.IP = k8sNodeIP - gwIps = append(gwIps, &newAddr) - } - if len(gwIps) == 0 { - return nil, fmt.Errorf("could not find subnet on DPU matching node IP %s", k8sNodeIP) - } - } - return gwIps, nil -} - -func GenerateICMPFragmentationFlow(ipAddr, outputPort, inPort, cookie string, priority int) string { - // we send any ICMP destination unreachable, fragmentation needed to the OVN pipeline too so that - // path MTU discovery continues to work. - icmpMatch := "icmp" - icmpType := 3 - icmpCode := 4 - nwDst := "nw_dst" - if net2.IsIPv6String(ipAddr) { - icmpMatch = "icmp6" - icmpType = 2 - icmpCode = 0 - nwDst = "ipv6_dst" - } - - action := fmt.Sprintf("output:%s", outputPort) - if outputPort == nodetypes.OutputPortDrop { - action = "drop" - } - - icmpFragmentationFlow := fmt.Sprintf("cookie=%s, priority=%d, in_port=%s, %s, %s=%s, icmp_type=%d, "+ - "icmp_code=%d, actions=%s", - cookie, priority, inPort, icmpMatch, nwDst, ipAddr, icmpType, icmpCode, action) - return icmpFragmentationFlow -} diff --git a/go-controller/pkg/node/util/util_suite_test.go b/go-controller/pkg/node/util/util_suite_test.go deleted file mode 100644 index dc2d625792..0000000000 --- a/go-controller/pkg/node/util/util_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package util - -import ( - "testing" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -func TestNodeSuite(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Node util Suite") -} diff --git a/go-controller/pkg/node/util/util_test.go b/go-controller/pkg/node/util/util_test.go deleted file mode 100644 index 5ca6cc80a3..0000000000 --- a/go-controller/pkg/node/util/util_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package util - -import ( - "net" - - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -var _ = Describe("node util tests", func() { - BeforeEach(func() { - Expect(config.PrepareTestConfig()).To(Succeed()) - }) - - Context("GetDPUHostPrimaryIPAddresses", func() { - - It("returns Gateway IP/Subnet for kubernetes node IP", func() { - _, dpuSubnet, _ := net.ParseCIDR("10.0.0.101/24") - nodeIP := net.ParseIP("10.0.0.11") - expectedGwSubnet := []*net.IPNet{ - {IP: nodeIP, Mask: net.CIDRMask(24, 32)}, - } - gwSubnet, err := GetDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) - Expect(err).ToNot(HaveOccurred()) - Expect(gwSubnet).To(Equal(expectedGwSubnet)) - }) - - It("Fails if node IP is not in host subnets", func() { - _, dpuSubnet, _ := net.ParseCIDR("10.0.0.101/24") - nodeIP := net.ParseIP("10.0.1.11") - _, err := GetDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) - Expect(err).To(HaveOccurred()) - }) - - It("returns node IP with config.Gateway.RouterSubnet subnet", func() { - config.Gateway.RouterSubnet = "10.1.0.0/16" - _, dpuSubnet, _ := net.ParseCIDR("10.0.0.101/24") - nodeIP := net.ParseIP("10.1.0.11") - expectedGwSubnet := []*net.IPNet{ - {IP: nodeIP, Mask: net.CIDRMask(16, 32)}, - } - gwSubnet, err := GetDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) - Expect(err).ToNot(HaveOccurred()) - Expect(gwSubnet).To(Equal(expectedGwSubnet)) - }) - - It("Fails if node IP is not in config.Gateway.RouterSubnet subnet", func() { - config.Gateway.RouterSubnet = "10.1.0.0/16" - _, dpuSubnet, _ := 
net.ParseCIDR("10.0.0.101/24") - nodeIP := net.ParseIP("10.0.0.11") - _, err := GetDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) - Expect(err).To(HaveOccurred()) - }) - }) -}) diff --git a/go-controller/pkg/ovn/controller/apbroute/external_controller.go b/go-controller/pkg/ovn/controller/apbroute/external_controller.go index 73f6208e96..cd034d67b7 100644 --- a/go-controller/pkg/ovn/controller/apbroute/external_controller.go +++ b/go-controller/pkg/ovn/controller/apbroute/external_controller.go @@ -22,7 +22,6 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" - v1pod "k8s.io/kubernetes/pkg/api/v1/pod" adminpolicybasedrouteapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1" adminpolicybasedrouteinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/adminpolicybasedroute/v1" @@ -566,14 +565,10 @@ func (m *externalPolicyManager) onPodUpdate(oldObj, newObj interface{}) { utilruntime.HandleError(errors.New("invalid Pod provided to onPodUpdate()")) return } - // if labels AND assigned Pod IPs AND the multus network status annotations AND - // pod PodReady condition AND deletion timestamp (PodTerminating) are - // the same, skip processing changes to the pod. + // if labels AND assigned Pod IPs AND the multus network status annotations are the same, skip processing changes to the pod. 
if reflect.DeepEqual(o.Labels, n.Labels) && reflect.DeepEqual(o.Status.PodIPs, n.Status.PodIPs) && - reflect.DeepEqual(o.Annotations[nettypes.NetworkStatusAnnot], n.Annotations[nettypes.NetworkStatusAnnot]) && - reflect.DeepEqual(v1pod.GetPodReadyCondition(o.Status), v1pod.GetPodReadyCondition(n.Status)) && - reflect.DeepEqual(o.DeletionTimestamp, n.DeletionTimestamp) { + reflect.DeepEqual(o.Annotations[nettypes.NetworkStatusAnnot], n.Annotations[nettypes.NetworkStatusAnnot]) { return } m.podQueue.Add(n) diff --git a/go-controller/pkg/ovn/controller/apbroute/external_controller_namespace_test.go b/go-controller/pkg/ovn/controller/apbroute/external_controller_namespace_test.go index 6f521bf2bb..57ab01d93b 100644 --- a/go-controller/pkg/ovn/controller/apbroute/external_controller_namespace_test.go +++ b/go-controller/pkg/ovn/controller/apbroute/external_controller_namespace_test.go @@ -201,32 +201,14 @@ var _ = Describe("OVN External Gateway namespace", func() { "k8s.ovn.org/routing-network": "", nettypes.NetworkStatusAnnot: fmt.Sprintf(network_status, annotatedPodIP)}, }, - Status: corev1.PodStatus{ - PodIPs: []corev1.PodIP{{IP: annotatedPodIP}}, - Phase: corev1.PodRunning, - Conditions: []corev1.PodCondition{ - { - Type: corev1.PodReady, - Status: corev1.ConditionTrue, - }, - }, - }, + Status: corev1.PodStatus{PodIPs: []corev1.PodIP{{IP: annotatedPodIP}}, Phase: corev1.PodRunning}, } podGW = &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "pod", Namespace: namespaceGW.Name, Labels: map[string]string{"name": "pod"}, Annotations: map[string]string{nettypes.NetworkStatusAnnot: fmt.Sprintf(network_status, dynamicHopHostNetPodIP)}}, - Status: corev1.PodStatus{ - PodIPs: []corev1.PodIP{{IP: dynamicHopHostNetPodIP}}, - Phase: corev1.PodRunning, - Conditions: []corev1.PodCondition{ - { - Type: corev1.PodReady, - Status: corev1.ConditionTrue, - }, - }, - }, + Status: corev1.PodStatus{PodIPs: []corev1.PodIP{{IP: dynamicHopHostNetPodIP}}, Phase: corev1.PodRunning}, } 
namespaceTargetWithPod, namespaceTarget2WithPod, namespaceTarget2WithoutPod, namespaceGWWithPod *namespaceWithPods ) diff --git a/go-controller/pkg/ovn/controller/apbroute/external_controller_pod.go b/go-controller/pkg/ovn/controller/apbroute/external_controller_pod.go index 2b2915f521..9c49c474ba 100644 --- a/go-controller/pkg/ovn/controller/apbroute/external_controller_pod.go +++ b/go-controller/pkg/ovn/controller/apbroute/external_controller_pod.go @@ -11,10 +11,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" - v1pod "k8s.io/kubernetes/pkg/api/v1/pod" utilnet "k8s.io/utils/net" - - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) func (m *externalPolicyManager) syncPod(pod *corev1.Pod, routeQueue workqueue.TypedRateLimitingInterface[string]) error { @@ -31,13 +28,6 @@ func (m *externalPolicyManager) syncPod(pod *corev1.Pod, routeQueue workqueue.Ty } func getExGwPodIPs(gatewayPod *corev1.Pod, networkName string) (sets.Set[string], error) { - // If an external gateway pod is in terminating or not ready state then don't return the - // IPs for the external gateway pod - if util.PodTerminating(gatewayPod) || !v1pod.IsPodReadyConditionTrue(gatewayPod.Status) { - klog.Warningf("External gateway pod cannot serve traffic; it's in terminating or not ready state: %s/%s", gatewayPod.Namespace, gatewayPod.Name) - return nil, nil - } - if networkName != "" { return getMultusIPsFromNetworkName(gatewayPod, networkName) } diff --git a/go-controller/pkg/ovn/controller/apbroute/external_controller_pod_test.go b/go-controller/pkg/ovn/controller/apbroute/external_controller_pod_test.go index 7cbbcd7430..509940c730 100644 --- a/go-controller/pkg/ovn/controller/apbroute/external_controller_pod_test.go +++ b/go-controller/pkg/ovn/controller/apbroute/external_controller_pod_test.go @@ -448,163 +448,6 @@ var _ = Describe("OVN External Gateway pod", func() { }) }) - - var _ = Context("When pod goes into terminating or not 
ready state", func() { - - DescribeTable("reconciles a pod gateway in terminating or not ready state that matches two policies", func( - terminating bool, - ) { - initController([]runtime.Object{namespaceGW, namespaceTarget, namespaceTarget2, targetPod1, targetPod2, pod1}, - []runtime.Object{dynamicPolicy, dynamicPolicyDiffTargetNS}) - - expectedPolicy1, expectedRefs1 := expectedPolicyStateAndRefs( - []*namespaceWithPods{namespaceTarget2WithPod}, - nil, - []*namespaceWithPods{namespaceGWWithPod}, false) - - expectedPolicy2, expectedRefs2 := expectedPolicyStateAndRefs( - []*namespaceWithPods{namespaceTargetWithPod}, - nil, - []*namespaceWithPods{namespaceGWWithPod}, false) - - eventuallyExpectNumberOfPolicies(2) - eventuallyExpectConfig(dynamicPolicy.Name, expectedPolicy1, expectedRefs1) - eventuallyExpectConfig(dynamicPolicyDiffTargetNS.Name, expectedPolicy2, expectedRefs2) - - if terminating { - By("Setting deletion timestamp for the ex gw pod") - setPodDeletionTimestamp(pod1, &metav1.Time{Time: time.Now().Add(1000 * time.Second)}, fakeClient) - } else { - By("Updating the ex gw pod status to mark it as not ready") - setPodConditionReady(pod1, corev1.ConditionFalse, fakeClient) - } - - expectedPolicy1, expectedRefs1 = expectedPolicyStateAndRefs( - []*namespaceWithPods{namespaceTarget2WithPod}, - nil, - []*namespaceWithPods{namespaceGWWithoutPod}, false) - - expectedPolicy2, expectedRefs2 = expectedPolicyStateAndRefs( - []*namespaceWithPods{namespaceTargetWithPod}, - nil, - []*namespaceWithPods{namespaceGWWithoutPod}, false) - - eventuallyExpectNumberOfPolicies(2) - eventuallyExpectConfig(dynamicPolicy.Name, expectedPolicy1, expectedRefs1) - eventuallyExpectConfig(dynamicPolicyDiffTargetNS.Name, expectedPolicy2, expectedRefs2) - }, - Entry("Gateway pod in terminating state", true), - Entry("Gateway pod in not ready state", false), - ) - - DescribeTable("reconciles a pod gateway in terminating or not ready state that does not match any policy", func( - terminating 
bool, - ) { - noMatchPolicy := newPolicy( - "noMatchPolicy", - &metav1.LabelSelector{MatchLabels: targetNamespace1Match}, - nil, - &metav1.LabelSelector{MatchLabels: gatewayNamespaceMatch}, - &metav1.LabelSelector{MatchLabels: map[string]string{"key": "nomatch"}}, - false, - ) - initController([]runtime.Object{namespaceGW, namespaceTarget, pod1}, []runtime.Object{noMatchPolicy}) - - expectedPolicy, expectedRefs := expectedPolicyStateAndRefs( - []*namespaceWithPods{namespaceTargetWithoutPod}, - nil, - []*namespaceWithPods{namespaceGWWithoutPod}, false) - - eventuallyExpectNumberOfPolicies(1) - eventuallyExpectConfig(noMatchPolicy.Name, expectedPolicy, expectedRefs) - - if terminating { - By("Setting deletion timestamp for the ex gw pod") - setPodDeletionTimestamp(pod1, &metav1.Time{Time: time.Now().Add(1000 * time.Second)}, fakeClient) - } else { - By("Updating the ex gw pod status to mark it as not ready") - setPodConditionReady(pod1, corev1.ConditionFalse, fakeClient) - } - // make sure pod event is handled - time.Sleep(100 * time.Millisecond) - - eventuallyExpectNumberOfPolicies(1) - eventuallyExpectConfig(noMatchPolicy.Name, expectedPolicy, expectedRefs) - }, - Entry("Gateway pod in terminating state", true), - Entry("Gateway pod in not ready state", false), - ) - - DescribeTable("reconciles a pod gateway in terminating or not ready state that is one of two pods that matches two policies", func( - terminating bool, - ) { - initController([]runtime.Object{namespaceGW, namespaceTarget, namespaceTarget2, targetPod1, targetPod2, pod1, pod2}, - []runtime.Object{dynamicPolicy, dynamicPolicyDiffTargetNS}) - namespaceGWWith2Pods := newNamespaceWithPods(namespaceGW.Name, pod1, pod2) - expectedPolicy1, expectedRefs1 := expectedPolicyStateAndRefs( - []*namespaceWithPods{namespaceTarget2WithPod}, - nil, - []*namespaceWithPods{namespaceGWWith2Pods}, false) - - expectedPolicy2, expectedRefs2 := expectedPolicyStateAndRefs( - []*namespaceWithPods{namespaceTargetWithPod}, - nil, 
- []*namespaceWithPods{namespaceGWWith2Pods}, false) - - eventuallyExpectNumberOfPolicies(2) - eventuallyExpectConfig(dynamicPolicy.Name, expectedPolicy1, expectedRefs1) - eventuallyExpectConfig(dynamicPolicyDiffTargetNS.Name, expectedPolicy2, expectedRefs2) - - if terminating { - By("Setting deletion timestamp for the ex gw pod") - setPodDeletionTimestamp(pod1, &metav1.Time{Time: time.Now().Add(1000 * time.Second)}, fakeClient) - } else { - By("Updating the ex gw pod status to mark it as not ready") - setPodConditionReady(pod1, corev1.ConditionFalse, fakeClient) - } - - namespaceGWWith1Pod := newNamespaceWithPods(namespaceGW.Name, pod2) - - expectedPolicy1, expectedRefs1 = expectedPolicyStateAndRefs( - []*namespaceWithPods{namespaceTarget2WithPod}, - nil, - []*namespaceWithPods{namespaceGWWith1Pod}, false) - - expectedPolicy2, expectedRefs2 = expectedPolicyStateAndRefs( - []*namespaceWithPods{namespaceTargetWithPod}, - nil, - []*namespaceWithPods{namespaceGWWith1Pod}, false) - - eventuallyExpectNumberOfPolicies(2) - eventuallyExpectConfig(dynamicPolicy.Name, expectedPolicy1, expectedRefs1) - eventuallyExpectConfig(dynamicPolicyDiffTargetNS.Name, expectedPolicy2, expectedRefs2) - - if terminating { - By("Removing deletion timestamp for the ex gw pod") - setPodDeletionTimestamp(pod1, nil, fakeClient) - } else { - By("Updating the ex gw pod status to mark it as ready") - setPodConditionReady(pod1, corev1.ConditionTrue, fakeClient) - } - - expectedPolicy1, expectedRefs1 = expectedPolicyStateAndRefs( - []*namespaceWithPods{namespaceTarget2WithPod}, - nil, - []*namespaceWithPods{namespaceGWWith2Pods}, false) - - expectedPolicy2, expectedRefs2 = expectedPolicyStateAndRefs( - []*namespaceWithPods{namespaceTargetWithPod}, - nil, - []*namespaceWithPods{namespaceGWWith2Pods}, false) - - eventuallyExpectNumberOfPolicies(2) - eventuallyExpectConfig(dynamicPolicy.Name, expectedPolicy1, expectedRefs1) - eventuallyExpectConfig(dynamicPolicyDiffTargetNS.Name, expectedPolicy2, 
expectedRefs2) - }, - Entry("Gateway pod in terminating state", true), - Entry("Gateway pod in not ready state", false), - ) - }) }) func deletePod(pod *corev1.Pod, fakeClient *fake.Clientset) { @@ -635,36 +478,6 @@ func updatePodStatus(pod *corev1.Pod, podStatus corev1.PodStatus) { Expect(err).NotTo(HaveOccurred()) } -func setPodDeletionTimestamp(pod *corev1.Pod, deletionTimestamp *metav1.Time, fakeClient *fake.Clientset) { - p, err := fakeClient.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - incrementResourceVersion(p) - p.DeletionTimestamp = deletionTimestamp - _, err = fakeClient.CoreV1().Pods(pod.Namespace).Update(context.Background(), p, metav1.UpdateOptions{}) - Expect(err).NotTo(HaveOccurred()) -} - -func setPodConditionReady(pod *corev1.Pod, condStatus corev1.ConditionStatus, fakeClient *fake.Clientset) { - p, err := fakeClient.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - incrementResourceVersion(p) - if p.Status.Conditions != nil { - for i := range p.Status.Conditions { - if p.Status.Conditions[i].Type == corev1.PodReady { - p.Status.Conditions[i].Status = condStatus - } - } - } else { - notReadyCondition := corev1.PodCondition{ - Type: corev1.PodReady, - Status: corev1.ConditionFalse, - } - p.Status.Conditions = []corev1.PodCondition{notReadyCondition} - } - _, err = fakeClient.CoreV1().Pods(pod.Namespace).Update(context.Background(), p, metav1.UpdateOptions{}) - Expect(err).NotTo(HaveOccurred()) -} - func incrementResourceVersion(obj metav1.Object) { var rs int64 if obj.GetResourceVersion() != "" { diff --git a/go-controller/pkg/ovn/controller/apbroute/external_controller_policy_test.go b/go-controller/pkg/ovn/controller/apbroute/external_controller_policy_test.go index 266312ce2c..2605fad7bc 100644 --- a/go-controller/pkg/ovn/controller/apbroute/external_controller_policy_test.go +++ 
b/go-controller/pkg/ovn/controller/apbroute/external_controller_policy_test.go @@ -40,16 +40,8 @@ func newPodWithPhaseAndIP(podName, namespace string, phase corev1.PodPhase, podI p := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: namespace, Labels: labels}, - Spec: corev1.PodSpec{NodeName: "node"}, - Status: corev1.PodStatus{ - Phase: phase, - Conditions: []corev1.PodCondition{ - { - Type: corev1.PodReady, - Status: corev1.ConditionTrue, - }, - }, - }, + Spec: corev1.PodSpec{NodeName: "node"}, + Status: corev1.PodStatus{Phase: phase}, } if len(podIP) > 0 { p.Annotations = map[string]string{nettypes.NetworkStatusAnnot: fmt.Sprintf(network_status, podIP)} diff --git a/go-controller/pkg/ovn/controller/services/lb_config.go b/go-controller/pkg/ovn/controller/services/lb_config.go index e7c1372f20..2c47b1092b 100644 --- a/go-controller/pkg/ovn/controller/services/lb_config.go +++ b/go-controller/pkg/ovn/controller/services/lb_config.go @@ -105,16 +105,25 @@ func makeNodeRouterTargetIPs(service *corev1.Service, node *nodeInfo, c *lbConfi targetIPsV6 = localIPsV6 } - // TODO: For all scenarios the lbAddress should be set to hostAddressesStr but this is breaking CI needs more investigation - lbAddresses := node.hostAddressesStr() - if config.OvnKubeNode.Mode == types.NodeModeFull { - lbAddresses = node.l3gatewayAddressesStr() + // OCP HACK BEGIN + if _, set := service.Annotations[localWithFallbackAnnotation]; set && c.externalTrafficLocal { + // if service is annotated and is ETP=local, fallback to ETP=cluster on nodes with no local endpoints: + // include endpoints from other nodes + if len(targetIPsV4) == 0 { + zeroRouterLocalEndpointsV4 = true + targetIPsV4 = c.clusterEndpoints.V4IPs + } + if len(targetIPsV6) == 0 { + zeroRouterLocalEndpointsV6 = true + targetIPsV6 = c.clusterEndpoints.V6IPs + } } + // OCP HACK END // Any targets local to the node need to have a special // harpin IP added, but only for the router LB - targetIPsV4, v4Updated := 
util.UpdateIPsSlice(targetIPsV4, lbAddresses, []string{hostMasqueradeIPV4}) - targetIPsV6, v6Updated := util.UpdateIPsSlice(targetIPsV6, lbAddresses, []string{hostMasqueradeIPV6}) + targetIPsV4, v4Updated := util.UpdateIPsSlice(targetIPsV4, node.l3gatewayAddressesStr(), []string{hostMasqueradeIPV4}) + targetIPsV6, v6Updated := util.UpdateIPsSlice(targetIPsV6, node.l3gatewayAddressesStr(), []string{hostMasqueradeIPV6}) // Local endpoints are a subset of cluster endpoints, so it is enough to compare their length v4Changed = len(targetIPsV4) != len(c.clusterEndpoints.V4IPs) || v4Updated diff --git a/go-controller/pkg/ovn/default_network_controller.go b/go-controller/pkg/ovn/default_network_controller.go index 26ad651206..ed79067e8e 100644 --- a/go-controller/pkg/ovn/default_network_controller.go +++ b/go-controller/pkg/ovn/default_network_controller.go @@ -949,7 +949,6 @@ func (h *defaultNetworkControllerEventHandler) UpdateResource(oldObj, newObj int zoneClusterChanged := h.oc.nodeZoneClusterChanged(oldNode, newNode, newNodeIsLocalZoneNode, types.DefaultNetworkName) nodeSubnetChange := nodeSubnetChanged(oldNode, newNode, types.DefaultNetworkName) nodeEncapIPsChanged := util.NodeEncapIPsChanged(oldNode, newNode) - nodePrimaryDPUHostAddrChanged := util.NodePrimaryDPUHostAddrAnnotationChanged(oldNode, newNode) var aggregatedErrors []error if newNodeIsLocalZoneNode { @@ -1007,18 +1006,11 @@ func (h *defaultNetworkControllerEventHandler) UpdateResource(oldObj, newObj int // Also check if node subnet changed, so static routes are properly set // Also check if the node is used to be a hybrid overlay node syncZoneIC = syncZoneIC || h.oc.isLocalZoneNode(oldNode) || nodeSubnetChange || zoneClusterChanged || - switchToOvnNode || nodeEncapIPsChanged || nodePrimaryDPUHostAddrChanged + switchToOvnNode || nodeEncapIPsChanged if syncZoneIC { klog.Infof("Node %q in remote zone %q, network %q, needs interconnect zone sync up. 
Zone cluster changed: %v", newNode.Name, util.GetNodeZone(newNode), h.oc.GetNetworkName(), zoneClusterChanged) } - // Reprovisioning the DPU (including OVS), which is pinned to a host, will change the system ID but not the node. - if config.OvnKubeNode.Mode == types.NodeModeDPU && nodeChassisChanged(oldNode, newNode) { - if err := h.oc.zoneChassisHandler.DeleteRemoteZoneNode(oldNode); err != nil { - aggregatedErrors = append(aggregatedErrors, err) - } - syncZoneIC = true - } if err := h.oc.addUpdateRemoteNodeEvent(newNode, syncZoneIC); err != nil { aggregatedErrors = append(aggregatedErrors, err) } diff --git a/go-controller/pkg/ovn/egressgw.go b/go-controller/pkg/ovn/egressgw.go index b607a3b253..2b8e939585 100644 --- a/go-controller/pkg/ovn/egressgw.go +++ b/go-controller/pkg/ovn/egressgw.go @@ -15,7 +15,6 @@ import ( ktypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - v1pod "k8s.io/kubernetes/pkg/api/v1/pod" utilnet "k8s.io/utils/net" libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" @@ -50,13 +49,6 @@ func (oc *DefaultNetworkController) addPodExternalGW(pod *corev1.Pod) error { klog.Infof("External gateway pod: %s, detected for namespace(s) %s", pod.Name, podRoutingNamespaceAnno) - // If an external gateway pod is in terminating or not ready state then don't add the - // routes for the external gateway pod - if util.PodTerminating(pod) || !v1pod.IsPodReadyConditionTrue(pod.Status) { - klog.Warningf("External gateway pod cannot serve traffic; it's in terminating or not ready state: %s/%s", pod.Namespace, pod.Name) - return nil - } - foundGws, err := getExGwPodIPs(pod) if err != nil { klog.Errorf("Error getting exgw IPs for pod: %s, error: %v", pod.Name, err) diff --git a/go-controller/pkg/ovn/egressgw_test.go b/go-controller/pkg/ovn/egressgw_test.go index 420f2f26e1..9696d4192b 100644 --- a/go-controller/pkg/ovn/egressgw_test.go +++ b/go-controller/pkg/ovn/egressgw_test.go @@ -6,7 +6,6 @@ import ( "fmt" "net" 
"sync" - "time" nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" "github.com/onsi/ginkgo/v2" @@ -1819,591 +1818,6 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, ), ) - ginkgo.DescribeTable("reconciles a host networked pod in terminating or not ready state acting as a exgw for another namespace for existing pod", - func(bfd bool, - terminating bool, - beforeUpdateNB []libovsdbtest.TestData, - afterUpdateNB []libovsdbtest.TestData, - expectedNamespaceAnnotation string, - apbExternalRouteCRList *adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList) { - app.Action = func(*cli.Context) error { - - namespaceT := *newNamespace(namespaceName) - namespaceX := *newNamespace(namespace2Name) - t := newTPod( - "node1", - "10.128.1.0/24", - "10.128.1.2", - "10.128.1.1", - "myPod", - "10.128.1.3", - "0a:58:0a:80:01:03", - namespaceT.Name, - ) - gwPod := *newPod(namespaceX.Name, gwPodName, "node2", "9.0.0.1") - gwPod.Annotations = map[string]string{"k8s.ovn.org/routing-namespaces": namespaceT.Name} - if bfd { - gwPod.Annotations["k8s.ovn.org/bfd-enabled"] = "" - } - gwPod.Spec.HostNetwork = true - fakeOvn.startWithDBSetup( - libovsdbtest.TestSetup{ - NBData: []libovsdbtest.TestData{ - &nbdb.LogicalSwitch{ - UUID: "node1", - Name: "node1", - }, - &nbdb.LogicalSwitch{ - UUID: "node2", - Name: "node2", - }, - &nbdb.LogicalRouter{ - UUID: "GR_node1-UUID", - Name: "GR_node1", - }, - }, - }, - &corev1.NamespaceList{ - Items: []corev1.Namespace{ - namespaceT, namespaceX, - }, - }, - &corev1.NodeList{ - Items: []corev1.Node{ - *newNode("node1", "192.168.126.202/24"), - *newNode("node2", "192.168.126.50/24"), - }, - }, - &corev1.PodList{ - Items: []corev1.Pod{ - *newPod(t.namespace, t.podName, t.nodeName, t.podIP), - }, - }, - apbExternalRouteCRList, - ) - t.populateLogicalSwitchCache(fakeOvn) - err := fakeOvn.controller.lsManager.AddOrUpdateSwitch("node2", 
[]*net.IPNet{ovntest.MustParseIPNet("10.128.2.0/24")}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - injectNode(fakeOvn) - err = fakeOvn.controller.WatchNamespaces() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = fakeOvn.controller.WatchPods() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - fakeOvn.RunAPBExternalPolicyController() - - _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(namespaceX.Name).Create(context.TODO(), &gwPod, metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(beforeUpdateNB)) - gomega.Eventually(func() string { - return getNamespaceAnnotations(fakeOvn.fakeClient.KubeClient, namespaceT.Name)[util.ExternalGatewayPodIPsAnnotation] - }).Should(gomega.Equal("9.0.0.1")) - - if terminating { - ginkgo.By("Setting deletion timestamp for the ex gw pod") - gwPod.DeletionTimestamp = &metav1.Time{Time: time.Now().Add(1000 * time.Second)} - _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(namespaceX.Name).Update(context.TODO(), &gwPod, metav1.UpdateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } else { - ginkgo.By("Updating the ex gw pod status to mark it as not ready") - notReadyCondition := corev1.PodCondition{ - Type: corev1.PodReady, - Status: corev1.ConditionFalse, - } - gwPod.Status.Conditions = []corev1.PodCondition{notReadyCondition} - _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(namespaceX.Name).UpdateStatus(context.TODO(), &gwPod, metav1.UpdateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - - gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(afterUpdateNB)) - gomega.Eventually(func() string { - return getNamespaceAnnotations(fakeOvn.fakeClient.KubeClient, namespaceT.Name)[util.ExternalGatewayPodIPsAnnotation] - }).Should(gomega.Equal(expectedNamespaceAnnotation)) - for _, apbRoutePolicy := range apbExternalRouteCRList.Items { - checkAPBRouteStatus(fakeOvn, 
apbRoutePolicy.Name, false) - } - return nil - } - - err := app.Run([]string{app.Name}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }, - ginkgo.Entry("No BFD with ex gw pod in terminating state", false, true, - []libovsdbtest.TestData{ - &nbdb.LogicalSwitchPort{ - UUID: "lsp1", - Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - ExternalIDs: map[string]string{ - "pod": "true", - "namespace": namespaceName, - }, - Name: "namespace1_myPod", - Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", - }, - PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node1", - Name: "node1", - Ports: []string{"lsp1"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node2", - Name: "node2", - }, - &nbdb.LogicalRouterStaticRoute{ - UUID: "static-route-1-UUID", - IPPrefix: "10.128.1.3/32", - Nexthop: "9.0.0.1", - Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, - OutputPort: &logicalRouterPort, - Options: map[string]string{ - "ecmp_symmetric_reply": "true", - }, - }, - &nbdb.LogicalRouter{ - UUID: "GR_node1-UUID", - Name: "GR_node1", - StaticRoutes: []string{"static-route-1-UUID"}, - }, - }, - []libovsdbtest.TestData{ - &nbdb.LogicalSwitchPort{ - UUID: "lsp1", - Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - ExternalIDs: map[string]string{ - "pod": "true", - "namespace": namespaceName, - }, - Name: "namespace1_myPod", - Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", - }, - PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node1", - Name: "node1", - Ports: []string{"lsp1"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node2", - Name: "node2", - }, - &nbdb.LogicalRouter{ - UUID: "GR_node1-UUID", - Name: "GR_node1", - StaticRoutes: []string{}, - }, - }, - "", - &adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList{}, - ), - ginkgo.Entry("No BFD with ex gw pod in not ready state", false, false, - []libovsdbtest.TestData{ - 
&nbdb.LogicalSwitchPort{ - UUID: "lsp1", - Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - ExternalIDs: map[string]string{ - "pod": "true", - "namespace": namespaceName, - }, - Name: "namespace1_myPod", - Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", - }, - PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node1", - Name: "node1", - Ports: []string{"lsp1"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node2", - Name: "node2", - }, - &nbdb.LogicalRouterStaticRoute{ - UUID: "static-route-1-UUID", - IPPrefix: "10.128.1.3/32", - Nexthop: "9.0.0.1", - Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, - OutputPort: &logicalRouterPort, - Options: map[string]string{ - "ecmp_symmetric_reply": "true", - }, - }, - &nbdb.LogicalRouter{ - UUID: "GR_node1-UUID", - Name: "GR_node1", - StaticRoutes: []string{"static-route-1-UUID"}, - }, - }, - []libovsdbtest.TestData{ - &nbdb.LogicalSwitchPort{ - UUID: "lsp1", - Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - ExternalIDs: map[string]string{ - "pod": "true", - "namespace": namespaceName, - }, - Name: "namespace1_myPod", - Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", - }, - PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node1", - Name: "node1", - Ports: []string{"lsp1"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node2", - Name: "node2", - }, - &nbdb.LogicalRouter{ - UUID: "GR_node1-UUID", - Name: "GR_node1", - StaticRoutes: []string{}, - }, - }, - "", - &adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList{}, - ), - ginkgo.Entry("BFD Enabled with ex gw pod in terminating state", true, true, []libovsdbtest.TestData{ - &nbdb.LogicalSwitchPort{ - UUID: "lsp1", - Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - ExternalIDs: map[string]string{ - "pod": "true", - "namespace": namespaceName, - }, - Name: "namespace1_myPod", - Options: map[string]string{ - 
"iface-id-ver": "myPod", - "requested-chassis": "node1", - }, - PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node1", - Name: "node1", - Ports: []string{"lsp1"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node2", - Name: "node2", - }, - &nbdb.BFD{ - UUID: bfd1NamedUUID, - DstIP: "9.0.0.1", - LogicalPort: "rtoe-GR_node1", - }, - &nbdb.LogicalRouterStaticRoute{ - UUID: "static-route-1-UUID", - IPPrefix: "10.128.1.3/32", - Nexthop: "9.0.0.1", - BFD: &bfd1NamedUUID, - Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, - OutputPort: &logicalRouterPort, - Options: map[string]string{ - "ecmp_symmetric_reply": "true", - }, - }, - &nbdb.LogicalRouter{ - UUID: "GR_node1-UUID", - Name: "GR_node1", - StaticRoutes: []string{"static-route-1-UUID"}, - }, - }, - []libovsdbtest.TestData{ - &nbdb.LogicalSwitchPort{ - UUID: "lsp1", - Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - ExternalIDs: map[string]string{ - "pod": "true", - "namespace": namespaceName, - }, - Name: "namespace1_myPod", - Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", - }, - PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node1", - Name: "node1", - Ports: []string{"lsp1"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node2", - Name: "node2", - }, - &nbdb.LogicalRouter{ - UUID: "GR_node1-UUID", - Name: "GR_node1", - StaticRoutes: []string{}, - }, - }, - "", - &adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList{}, - ), - ginkgo.Entry("BFD Enabled with ex gw pod in not ready state", true, false, []libovsdbtest.TestData{ - &nbdb.LogicalSwitchPort{ - UUID: "lsp1", - Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - ExternalIDs: map[string]string{ - "pod": "true", - "namespace": namespaceName, - }, - Name: "namespace1_myPod", - Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", - }, - PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - }, - 
&nbdb.LogicalSwitch{ - UUID: "node1", - Name: "node1", - Ports: []string{"lsp1"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node2", - Name: "node2", - }, - &nbdb.BFD{ - UUID: bfd1NamedUUID, - DstIP: "9.0.0.1", - LogicalPort: "rtoe-GR_node1", - }, - &nbdb.LogicalRouterStaticRoute{ - UUID: "static-route-1-UUID", - IPPrefix: "10.128.1.3/32", - Nexthop: "9.0.0.1", - BFD: &bfd1NamedUUID, - Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, - OutputPort: &logicalRouterPort, - Options: map[string]string{ - "ecmp_symmetric_reply": "true", - }, - }, - &nbdb.LogicalRouter{ - UUID: "GR_node1-UUID", - Name: "GR_node1", - StaticRoutes: []string{"static-route-1-UUID"}, - }, - }, - []libovsdbtest.TestData{ - &nbdb.LogicalSwitchPort{ - UUID: "lsp1", - Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - ExternalIDs: map[string]string{ - "pod": "true", - "namespace": namespaceName, - }, - Name: "namespace1_myPod", - Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", - }, - PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node1", - Name: "node1", - Ports: []string{"lsp1"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node2", - Name: "node2", - }, - &nbdb.LogicalRouter{ - UUID: "GR_node1-UUID", - Name: "GR_node1", - StaticRoutes: []string{}, - }, - }, - "", - &adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList{}, - ), - ginkgo.Entry("No BFD with ex gw pod in terminating state and with overlapping APB External Route CR and annotation", false, true, - []libovsdbtest.TestData{ - &nbdb.LogicalSwitchPort{ - UUID: "lsp1", - Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - ExternalIDs: map[string]string{ - "pod": "true", - "namespace": namespaceName, - }, - Name: "namespace1_myPod", - Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", - }, - PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node1", - Name: "node1", - Ports: 
[]string{"lsp1"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node2", - Name: "node2", - }, - &nbdb.LogicalRouterStaticRoute{ - UUID: "static-route-1-UUID", - IPPrefix: "10.128.1.3/32", - Nexthop: "9.0.0.1", - Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, - OutputPort: &logicalRouterPort, - Options: map[string]string{ - "ecmp_symmetric_reply": "true", - }, - }, - &nbdb.LogicalRouter{ - UUID: "GR_node1-UUID", - Name: "GR_node1", - StaticRoutes: []string{"static-route-1-UUID"}, - }, - }, - []libovsdbtest.TestData{ - &nbdb.LogicalSwitchPort{ - UUID: "lsp1", - Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - ExternalIDs: map[string]string{ - "pod": "true", - "namespace": namespaceName, - }, - Name: "namespace1_myPod", - Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", - }, - PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node1", - Name: "node1", - Ports: []string{"lsp1"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node2", - Name: "node2", - }, - &nbdb.LogicalRouter{ - UUID: "GR_node1-UUID", - Name: "GR_node1", - StaticRoutes: []string{}, - }, - }, - "", - &adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList{ - Items: []adminpolicybasedrouteapi.AdminPolicyBasedExternalRoute{ - newPolicy("policy", - &metav1.LabelSelector{MatchLabels: map[string]string{"name": namespaceName}}, - nil, - false, - &metav1.LabelSelector{MatchLabels: map[string]string{"name": namespace2Name}}, - &metav1.LabelSelector{MatchLabels: map[string]string{"name": gwPodName}}, - false, - ""), - }, - }, - ), - ginkgo.Entry("No BFD with ex gw pod in not ready state and with overlapping APB External Route CR and annotation", false, false, - []libovsdbtest.TestData{ - &nbdb.LogicalSwitchPort{ - UUID: "lsp1", - Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - ExternalIDs: map[string]string{ - "pod": "true", - "namespace": namespaceName, - }, - Name: "namespace1_myPod", - Options: map[string]string{ - 
"iface-id-ver": "myPod", - "requested-chassis": "node1", - }, - PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node1", - Name: "node1", - Ports: []string{"lsp1"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node2", - Name: "node2", - }, - &nbdb.LogicalRouterStaticRoute{ - UUID: "static-route-1-UUID", - IPPrefix: "10.128.1.3/32", - Nexthop: "9.0.0.1", - Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, - OutputPort: &logicalRouterPort, - Options: map[string]string{ - "ecmp_symmetric_reply": "true", - }, - }, - &nbdb.LogicalRouter{ - UUID: "GR_node1-UUID", - Name: "GR_node1", - StaticRoutes: []string{"static-route-1-UUID"}, - }, - }, - []libovsdbtest.TestData{ - &nbdb.LogicalSwitchPort{ - UUID: "lsp1", - Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - ExternalIDs: map[string]string{ - "pod": "true", - "namespace": namespaceName, - }, - Name: "namespace1_myPod", - Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", - }, - PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node1", - Name: "node1", - Ports: []string{"lsp1"}, - }, - &nbdb.LogicalSwitch{ - UUID: "node2", - Name: "node2", - }, - &nbdb.LogicalRouter{ - UUID: "GR_node1-UUID", - Name: "GR_node1", - StaticRoutes: []string{}, - }, - }, - "", - &adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList{ - Items: []adminpolicybasedrouteapi.AdminPolicyBasedExternalRoute{ - newPolicy("policy", - &metav1.LabelSelector{MatchLabels: map[string]string{"name": namespaceName}}, - nil, - false, - &metav1.LabelSelector{MatchLabels: map[string]string{"name": namespace2Name}}, - &metav1.LabelSelector{MatchLabels: map[string]string{"name": gwPodName}}, - false, - ""), - }, - }, - ), - ) }) ginkgo.Context("on using bfd", func() { ginkgo.It("should enable bfd only on the namespace gw when set", func() { diff --git a/go-controller/pkg/ovn/ovn.go b/go-controller/pkg/ovn/ovn.go index 07b7b6a83b..293e23f4aa 
100644 --- a/go-controller/pkg/ovn/ovn.go +++ b/go-controller/pkg/ovn/ovn.go @@ -16,7 +16,6 @@ import ( listers "k8s.io/client-go/listers/core/v1" ref "k8s.io/client-go/tools/reference" "k8s.io/klog/v2" - v1pod "k8s.io/kubernetes/pkg/api/v1/pod" libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" @@ -118,10 +117,6 @@ func networkStatusAnnotationsChanged(oldPod, newPod *corev1.Pod) bool { return oldPod.Annotations[nettypes.NetworkStatusAnnot] != newPod.Annotations[nettypes.NetworkStatusAnnot] } -func podBecameReady(oldPod, newPod *corev1.Pod) bool { - return !v1pod.IsPodReadyConditionTrue(oldPod.Status) && v1pod.IsPodReadyConditionTrue(newPod.Status) -} - // ensurePod tries to set up a pod. It returns nil on success and error on failure; failure // indicates the pod set up should be retried later. func (oc *DefaultNetworkController) ensurePod(oldPod, pod *corev1.Pod, addPort bool) error { @@ -136,14 +131,6 @@ func (oc *DefaultNetworkController) ensurePod(oldPod, pod *corev1.Pod, addPort b return oc.ensureRemotePodIP(oldPod, pod, addPort) } - // If an external gateway pod is in terminating or not ready state then remove the - // routes for the external gateway pod - if util.PodTerminating(pod) || !v1pod.IsPodReadyConditionTrue(pod.Status) { - if err := oc.deletePodExternalGW(pod); err != nil { - return fmt.Errorf("ensurePod failed %s/%s: %w", pod.Namespace, pod.Name, err) - } - } - if oc.isPodScheduledinLocalZone(pod) { klog.V(5).Infof("Ensuring zone local for Pod %s/%s in node %s", pod.Namespace, pod.Name, pod.Spec.NodeName) return oc.ensureLocalZonePod(oldPod, pod, addPort) @@ -183,7 +170,7 @@ func (oc *DefaultNetworkController) ensureLocalZonePod(oldPod, pod *corev1.Pod, } } else { // either pod is host-networked or its an update for a normal pod (addPort=false case) - if oldPod == nil || exGatewayAnnotationsChanged(oldPod, pod) || networkStatusAnnotationsChanged(oldPod, pod) || podBecameReady(oldPod, pod) { + if oldPod == nil || 
exGatewayAnnotationsChanged(oldPod, pod) || networkStatusAnnotationsChanged(oldPod, pod) { if err := oc.addPodExternalGW(pod); err != nil { return fmt.Errorf("addPodExternalGW failed for %s/%s: %w", pod.Namespace, pod.Name, err) } @@ -250,7 +237,7 @@ func (oc *DefaultNetworkController) ensureRemoteZonePod(oldPod, pod *corev1.Pod, } // either pod is host-networked or its an update for a normal pod (addPort=false case) - if oldPod == nil || exGatewayAnnotationsChanged(oldPod, pod) || networkStatusAnnotationsChanged(oldPod, pod) || podBecameReady(oldPod, pod) { + if oldPod == nil || exGatewayAnnotationsChanged(oldPod, pod) || networkStatusAnnotationsChanged(oldPod, pod) { // check if this remote pod is serving as an external GW. If so add the routes in the namespace // associated with this remote pod if err := oc.addPodExternalGW(pod); err != nil { diff --git a/go-controller/pkg/ovn/pods_test.go b/go-controller/pkg/ovn/pods_test.go index 590d34bf3a..cf1caae6e7 100644 --- a/go-controller/pkg/ovn/pods_test.go +++ b/go-controller/pkg/ovn/pods_test.go @@ -124,12 +124,6 @@ func newPod(namespace, name, node, podIP string) *corev1.Pod { Phase: corev1.PodRunning, PodIP: podIP, PodIPs: podIPs, - Conditions: []corev1.PodCondition{ - { - Type: corev1.PodReady, - Status: corev1.ConditionTrue, - }, - }, }, } } diff --git a/go-controller/pkg/util/node_annotations.go b/go-controller/pkg/util/node_annotations.go index cddd754d60..4e9a984748 100644 --- a/go-controller/pkg/util/node_annotations.go +++ b/go-controller/pkg/util/node_annotations.go @@ -97,9 +97,6 @@ const ( // OVNNodeHostCIDRs is used to track the different host IP addresses and subnet masks on the node OVNNodeHostCIDRs = "k8s.ovn.org/host-cidrs" - // OVNNodePrimaryDPUHostAddr is used to track the primary DPU host address on the node - OVNNodePrimaryDPUHostAddr = "k8s.ovn.org/primary-dpu-host-addr" - // OVNNodeSecondaryHostEgressIPs contains EgressIP addresses that aren't managed by OVN. 
The EIP addresses are assigned to // standard linux interfaces and not interfaces of type OVS. OVNNodeSecondaryHostEgressIPs = "k8s.ovn.org/secondary-host-egress-ips" @@ -1537,39 +1534,3 @@ func ParseNodeEncapIPsAnnotation(node *corev1.Node) ([]string, error) { func NodeEncapIPsChanged(oldNode, newNode *corev1.Node) bool { return oldNode.Annotations[OVNNodeEncapIPs] != newNode.Annotations[OVNNodeEncapIPs] } - -// SetNodePrimaryDPUHostAddr sets the primary DPU host address annotation on a node -func SetNodePrimaryDPUHostAddr(nodeAnnotator kube.Annotator, ifAddrs []*net.IPNet) error { - nodeIPNetv4, _ := MatchFirstIPNetFamily(false, ifAddrs) - nodeIPNetv6, _ := MatchFirstIPNetFamily(true, ifAddrs) - - ifAddrAnnotation := ifAddr{} - if nodeIPNetv4 != nil { - ifAddrAnnotation.IPv4 = nodeIPNetv4.String() - } - if nodeIPNetv6 != nil { - ifAddrAnnotation.IPv6 = nodeIPNetv6.String() - } - return nodeAnnotator.Set(OVNNodePrimaryDPUHostAddr, ifAddrAnnotation) -} - -// NodePrimaryDPUHostAddrAnnotationChanged returns true if the primary DPU host address annotation changed -func NodePrimaryDPUHostAddrAnnotationChanged(oldNode, newNode *corev1.Node) bool { - return oldNode.Annotations[OVNNodePrimaryDPUHostAddr] != newNode.Annotations[OVNNodePrimaryDPUHostAddr] -} - -// GetNodePrimaryDPUHostAddrAnnotation returns the raw primary DPU host address annotation from a node -func GetNodePrimaryDPUHostAddrAnnotation(node *corev1.Node) (*ifAddr, error) { - addrAnnotation, ok := node.Annotations[OVNNodePrimaryDPUHostAddr] - if !ok { - return nil, newAnnotationNotSetError("%s annotation not found for node %q", OVNNodePrimaryDPUHostAddr, node.Name) - } - nodeIfAddr := &ifAddr{} - if err := json.Unmarshal([]byte(addrAnnotation), nodeIfAddr); err != nil { - return nil, fmt.Errorf("failed to unmarshal annotation: %s for node %q, err: %v", OVNNodePrimaryDPUHostAddr, node.Name, err) - } - if nodeIfAddr.IPv4 == "" && nodeIfAddr.IPv6 == "" { - return nil, fmt.Errorf("node: %q does not have any IP 
information set", node.Name) - } - return nodeIfAddr, nil -} diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/clientset.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/clientset.go index 6f4518f097..f374a5c511 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/clientset.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2025 The Kubernetes Authors +Copyright 2024 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,8 +19,8 @@ limitations under the License. package versioned import ( - fmt "fmt" - http "net/http" + "fmt" + "net/http" k8sv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1" discovery "k8s.io/client-go/discovery" diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/clientset_generated.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/clientset_generated.go index eb8da4c265..a67d14acb8 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/clientset_generated.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2025 The Kubernetes Authors +Copyright 2024 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this 
file except in compliance with the License. @@ -31,12 +31,8 @@ import ( // NewSimpleClientset returns a clientset that will respond with the provided objects. // It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement +// without applying any validations and/or defaults. It shouldn't be considered a replacement // for a real clientset and is mostly useful in simple unit tests. -// -// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves -// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. -// via --with-applyconfig). func NewSimpleClientset(objects ...runtime.Object) *Clientset { o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) for _, obj := range objects { diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/doc.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/doc.go index 64c6b6be35..44e8061b76 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/doc.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2025 The Kubernetes Authors +Copyright 2024 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/register.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/register.go index e6f64d71b9..3cdc1ac5b1 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/register.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2025 The Kubernetes Authors +Copyright 2024 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/doc.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/doc.go index 8514bb55f2..743391c14b 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/doc.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2025 The Kubernetes Authors +Copyright 2024 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/register.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/register.go index 522a30ca3e..d6a1737fdb 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/register.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2025 The Kubernetes Authors +Copyright 2024 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/doc.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/doc.go index 19ad6aefe7..faa8377ce2 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/doc.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2025 The Kubernetes Authors +Copyright 2024 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/doc.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/doc.go index 33fd99c15d..b38fd4c55d 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/doc.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2025 The Kubernetes Authors +Copyright 2024 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaim.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaim.go index e410e0b7e3..00db990cf9 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaim.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaim.go @@ -1,5 +1,5 @@ /* -Copyright 2025 The Kubernetes Authors +Copyright 2024 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,32 +19,123 @@ limitations under the License. 
package fake import ( + "context" + v1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" - ipamclaimsv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1" - gentype "k8s.io/client-go/gentype" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" ) -// fakeIPAMClaims implements IPAMClaimInterface -type fakeIPAMClaims struct { - *gentype.FakeClientWithList[*v1alpha1.IPAMClaim, *v1alpha1.IPAMClaimList] +// FakeIPAMClaims implements IPAMClaimInterface +type FakeIPAMClaims struct { Fake *FakeK8sV1alpha1 + ns string +} + +var ipamclaimsResource = v1alpha1.SchemeGroupVersion.WithResource("ipamclaims") + +var ipamclaimsKind = v1alpha1.SchemeGroupVersion.WithKind("IPAMClaim") + +// Get takes name of the iPAMClaim, and returns the corresponding iPAMClaim object, and an error if there is any. +func (c *FakeIPAMClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IPAMClaim, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(ipamclaimsResource, c.ns, name), &v1alpha1.IPAMClaim{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.IPAMClaim), err +} + +// List takes label and field selectors, and returns the list of IPAMClaims that match those selectors. +func (c *FakeIPAMClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IPAMClaimList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(ipamclaimsResource, ipamclaimsKind, c.ns, opts), &v1alpha1.IPAMClaimList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.IPAMClaimList{ListMeta: obj.(*v1alpha1.IPAMClaimList).ListMeta} + for _, item := range obj.(*v1alpha1.IPAMClaimList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested iPAMClaims. +func (c *FakeIPAMClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(ipamclaimsResource, c.ns, opts)) + +} + +// Create takes the representation of a iPAMClaim and creates it. Returns the server's representation of the iPAMClaim, and an error, if there is any. +func (c *FakeIPAMClaims) Create(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.CreateOptions) (result *v1alpha1.IPAMClaim, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(ipamclaimsResource, c.ns, iPAMClaim), &v1alpha1.IPAMClaim{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.IPAMClaim), err +} + +// Update takes the representation of a iPAMClaim and updates it. Returns the server's representation of the iPAMClaim, and an error, if there is any. +func (c *FakeIPAMClaims) Update(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.UpdateOptions) (result *v1alpha1.IPAMClaim, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(ipamclaimsResource, c.ns, iPAMClaim), &v1alpha1.IPAMClaim{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.IPAMClaim), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+func (c *FakeIPAMClaims) UpdateStatus(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.UpdateOptions) (*v1alpha1.IPAMClaim, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(ipamclaimsResource, "status", c.ns, iPAMClaim), &v1alpha1.IPAMClaim{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.IPAMClaim), err +} + +// Delete takes name of the iPAMClaim and deletes it. Returns an error if one occurs. +func (c *FakeIPAMClaims) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(ipamclaimsResource, c.ns, name, opts), &v1alpha1.IPAMClaim{}) + + return err } -func newFakeIPAMClaims(fake *FakeK8sV1alpha1, namespace string) ipamclaimsv1alpha1.IPAMClaimInterface { - return &fakeIPAMClaims{ - gentype.NewFakeClientWithList[*v1alpha1.IPAMClaim, *v1alpha1.IPAMClaimList]( - fake.Fake, - namespace, - v1alpha1.SchemeGroupVersion.WithResource("ipamclaims"), - v1alpha1.SchemeGroupVersion.WithKind("IPAMClaim"), - func() *v1alpha1.IPAMClaim { return &v1alpha1.IPAMClaim{} }, - func() *v1alpha1.IPAMClaimList { return &v1alpha1.IPAMClaimList{} }, - func(dst, src *v1alpha1.IPAMClaimList) { dst.ListMeta = src.ListMeta }, - func(list *v1alpha1.IPAMClaimList) []*v1alpha1.IPAMClaim { return gentype.ToPointerSlice(list.Items) }, - func(list *v1alpha1.IPAMClaimList, items []*v1alpha1.IPAMClaim) { - list.Items = gentype.FromPointerSlice(items) - }, - ), - fake, +// DeleteCollection deletes a collection of objects. +func (c *FakeIPAMClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(ipamclaimsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.IPAMClaimList{}) + return err +} + +// Patch applies the patch and returns the patched iPAMClaim. 
+func (c *FakeIPAMClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAMClaim, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(ipamclaimsResource, c.ns, name, pt, data, subresources...), &v1alpha1.IPAMClaim{}) + + if obj == nil { + return nil, err } + return obj.(*v1alpha1.IPAMClaim), err } diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaims_client.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaims_client.go index 65c4b4c979..adc0c545ed 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaims_client.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaims_client.go @@ -1,5 +1,5 @@ /* -Copyright 2025 The Kubernetes Authors +Copyright 2024 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -29,7 +29,7 @@ type FakeK8sV1alpha1 struct { } func (c *FakeK8sV1alpha1) IPAMClaims(namespace string) v1alpha1.IPAMClaimInterface { - return newFakeIPAMClaims(c, namespace) + return &FakeIPAMClaims{c, namespace} } // RESTClient returns a RESTClient that is used to communicate diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/generated_expansion.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/generated_expansion.go index b70abd3102..c5c3006e82 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/generated_expansion.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2025 The Kubernetes Authors +Copyright 2024 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaim.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaim.go index f4d088c1b9..bfc26c0c5a 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaim.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaim.go @@ -1,5 +1,5 @@ /* -Copyright 2025 The Kubernetes Authors +Copyright 2024 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,14 +19,15 @@ limitations under the License. package v1alpha1 import ( - context "context" + "context" + "time" - ipamclaimsv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" + v1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" scheme "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - gentype "k8s.io/client-go/gentype" + rest "k8s.io/client-go/rest" ) // IPAMClaimsGetter has a method to return a IPAMClaimInterface. @@ -37,34 +38,158 @@ type IPAMClaimsGetter interface { // IPAMClaimInterface has methods to work with IPAMClaim resources. 
type IPAMClaimInterface interface { - Create(ctx context.Context, iPAMClaim *ipamclaimsv1alpha1.IPAMClaim, opts v1.CreateOptions) (*ipamclaimsv1alpha1.IPAMClaim, error) - Update(ctx context.Context, iPAMClaim *ipamclaimsv1alpha1.IPAMClaim, opts v1.UpdateOptions) (*ipamclaimsv1alpha1.IPAMClaim, error) - // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, iPAMClaim *ipamclaimsv1alpha1.IPAMClaim, opts v1.UpdateOptions) (*ipamclaimsv1alpha1.IPAMClaim, error) + Create(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.CreateOptions) (*v1alpha1.IPAMClaim, error) + Update(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.UpdateOptions) (*v1alpha1.IPAMClaim, error) + UpdateStatus(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.UpdateOptions) (*v1alpha1.IPAMClaim, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*ipamclaimsv1alpha1.IPAMClaim, error) - List(ctx context.Context, opts v1.ListOptions) (*ipamclaimsv1alpha1.IPAMClaimList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.IPAMClaim, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.IPAMClaimList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ipamclaimsv1alpha1.IPAMClaim, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAMClaim, err error) IPAMClaimExpansion } // iPAMClaims implements IPAMClaimInterface type iPAMClaims struct { - *gentype.ClientWithList[*ipamclaimsv1alpha1.IPAMClaim, *ipamclaimsv1alpha1.IPAMClaimList] + client 
rest.Interface + ns string } // newIPAMClaims returns a IPAMClaims func newIPAMClaims(c *K8sV1alpha1Client, namespace string) *iPAMClaims { return &iPAMClaims{ - gentype.NewClientWithList[*ipamclaimsv1alpha1.IPAMClaim, *ipamclaimsv1alpha1.IPAMClaimList]( - "ipamclaims", - c.RESTClient(), - scheme.ParameterCodec, - namespace, - func() *ipamclaimsv1alpha1.IPAMClaim { return &ipamclaimsv1alpha1.IPAMClaim{} }, - func() *ipamclaimsv1alpha1.IPAMClaimList { return &ipamclaimsv1alpha1.IPAMClaimList{} }, - ), + client: c.RESTClient(), + ns: namespace, } } + +// Get takes name of the iPAMClaim, and returns the corresponding iPAMClaim object, and an error if there is any. +func (c *iPAMClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IPAMClaim, err error) { + result = &v1alpha1.IPAMClaim{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ipamclaims"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of IPAMClaims that match those selectors. +func (c *iPAMClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IPAMClaimList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.IPAMClaimList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ipamclaims"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested iPAMClaims. +func (c *iPAMClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("ipamclaims"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a iPAMClaim and creates it. Returns the server's representation of the iPAMClaim, and an error, if there is any. +func (c *iPAMClaims) Create(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.CreateOptions) (result *v1alpha1.IPAMClaim, err error) { + result = &v1alpha1.IPAMClaim{} + err = c.client.Post(). + Namespace(c.ns). + Resource("ipamclaims"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(iPAMClaim). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a iPAMClaim and updates it. Returns the server's representation of the iPAMClaim, and an error, if there is any. +func (c *iPAMClaims) Update(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.UpdateOptions) (result *v1alpha1.IPAMClaim, err error) { + result = &v1alpha1.IPAMClaim{} + err = c.client.Put(). + Namespace(c.ns). + Resource("ipamclaims"). + Name(iPAMClaim.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(iPAMClaim). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *iPAMClaims) UpdateStatus(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.UpdateOptions) (result *v1alpha1.IPAMClaim, err error) { + result = &v1alpha1.IPAMClaim{} + err = c.client.Put(). + Namespace(c.ns). + Resource("ipamclaims"). + Name(iPAMClaim.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(iPAMClaim). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the iPAMClaim and deletes it. Returns an error if one occurs. +func (c *iPAMClaims) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("ipamclaims"). + Name(name). + Body(&opts). + Do(ctx). 
+ Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *iPAMClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("ipamclaims"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched iPAMClaim. +func (c *iPAMClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAMClaim, err error) { + result = &v1alpha1.IPAMClaim{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("ipamclaims"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaims_client.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaims_client.go index 3545777356..d6b8684d89 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaims_client.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaims_client.go @@ -1,5 +1,5 @@ /* -Copyright 2025 The Kubernetes Authors +Copyright 2024 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - http "net/http" + "net/http" - ipamclaimsv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" - scheme "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme" + v1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" + "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *K8sV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := ipamclaimsv1alpha1.SchemeGroupVersion + gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/factory.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/factory.go index 7efe7e95a6..8ba00a69fc 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/factory.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2025 The Kubernetes Authors +Copyright 2024 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -42,7 +42,6 @@ type sharedInformerFactory struct { lock sync.Mutex defaultResync time.Duration customResync map[reflect.Type]time.Duration - transform cache.TransformFunc informers map[reflect.Type]cache.SharedIndexInformer // startedInformers is used for tracking which informers have been started. @@ -81,14 +80,6 @@ func WithNamespace(namespace string) SharedInformerOption { } } -// WithTransform sets a transform on all informers. -func WithTransform(transform cache.TransformFunc) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - factory.transform = transform - return factory - } -} - // NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { return NewSharedInformerFactoryWithOptions(client, defaultResync) @@ -193,7 +184,6 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal } informer = newFunc(f.client, resyncPeriod) - informer.SetTransform(f.transform) f.informers[informerType] = informer return informer @@ -228,7 +218,6 @@ type SharedInformerFactory interface { // Start initializes all requested informers. They are handled in goroutines // which run until the stop channel gets closed. - // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync. Start(stopCh <-chan struct{}) // Shutdown marks a factory as shutting down. 
At that point no new diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/generic.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/generic.go index d5dabd6983..94f709e9bb 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/generic.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2025 The Kubernetes Authors +Copyright 2024 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,7 +19,7 @@ limitations under the License. package externalversions import ( - fmt "fmt" + "fmt" v1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/internalinterfaces/factory_interfaces.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/internalinterfaces/factory_interfaces.go index cb5a445987..8d1429d5f3 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2025 The Kubernetes Authors +Copyright 2024 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except 
in compliance with the License. diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/interface.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/interface.go index b2cad1c067..c93d99e4be 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/interface.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2025 The Kubernetes Authors +Copyright 2024 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/interface.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/interface.go index 455310ee4d..1ab51a9ed7 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/interface.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2025 The Kubernetes Authors +Copyright 2024 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/ipamclaim.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/ipamclaim.go index 8caa586ce5..fd46dc78b7 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/ipamclaim.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/ipamclaim.go @@ -1,5 +1,5 @@ /* -Copyright 2025 The Kubernetes Authors +Copyright 2024 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,13 +19,13 @@ limitations under the License. package v1alpha1 import ( - context "context" + "context" time "time" - crdipamclaimsv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" + ipamclaimsv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" versioned "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned" internalinterfaces "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/internalinterfaces" - ipamclaimsv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1" + v1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -36,7 +36,7 @@ import ( // IPAMClaims. 
type IPAMClaimInformer interface { Informer() cache.SharedIndexInformer - Lister() ipamclaimsv1alpha1.IPAMClaimLister + Lister() v1alpha1.IPAMClaimLister } type iPAMClaimInformer struct { @@ -71,7 +71,7 @@ func NewFilteredIPAMClaimInformer(client versioned.Interface, namespace string, return client.K8sV1alpha1().IPAMClaims(namespace).Watch(context.TODO(), options) }, }, - &crdipamclaimsv1alpha1.IPAMClaim{}, + &ipamclaimsv1alpha1.IPAMClaim{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *iPAMClaimInformer) defaultInformer(client versioned.Interface, resyncPe } func (f *iPAMClaimInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&crdipamclaimsv1alpha1.IPAMClaim{}, f.defaultInformer) + return f.factory.InformerFor(&ipamclaimsv1alpha1.IPAMClaim{}, f.defaultInformer) } -func (f *iPAMClaimInformer) Lister() ipamclaimsv1alpha1.IPAMClaimLister { - return ipamclaimsv1alpha1.NewIPAMClaimLister(f.Informer().GetIndexer()) +func (f *iPAMClaimInformer) Lister() v1alpha1.IPAMClaimLister { + return v1alpha1.NewIPAMClaimLister(f.Informer().GetIndexer()) } diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/expansion_generated.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/expansion_generated.go index bb37e41381..086ab4ab65 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/expansion_generated.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2025 The Kubernetes Authors +Copyright 2024 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/ipamclaim.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/ipamclaim.go index 474e11b48e..409fc70d06 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/ipamclaim.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/ipamclaim.go @@ -1,5 +1,5 @@ /* -Copyright 2025 The Kubernetes Authors +Copyright 2024 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - ipamclaimsv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" - labels "k8s.io/apimachinery/pkg/labels" - listers "k8s.io/client-go/listers" - cache "k8s.io/client-go/tools/cache" + v1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" ) // IPAMClaimLister helps list IPAMClaims. @@ -30,7 +30,7 @@ import ( type IPAMClaimLister interface { // List lists all IPAMClaims in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*ipamclaimsv1alpha1.IPAMClaim, err error) + List(selector labels.Selector) (ret []*v1alpha1.IPAMClaim, err error) // IPAMClaims returns an object that can list and get IPAMClaims. IPAMClaims(namespace string) IPAMClaimNamespaceLister IPAMClaimListerExpansion @@ -38,17 +38,25 @@ type IPAMClaimLister interface { // iPAMClaimLister implements the IPAMClaimLister interface. 
type iPAMClaimLister struct { - listers.ResourceIndexer[*ipamclaimsv1alpha1.IPAMClaim] + indexer cache.Indexer } // NewIPAMClaimLister returns a new IPAMClaimLister. func NewIPAMClaimLister(indexer cache.Indexer) IPAMClaimLister { - return &iPAMClaimLister{listers.New[*ipamclaimsv1alpha1.IPAMClaim](indexer, ipamclaimsv1alpha1.Resource("ipamclaim"))} + return &iPAMClaimLister{indexer: indexer} +} + +// List lists all IPAMClaims in the indexer. +func (s *iPAMClaimLister) List(selector labels.Selector) (ret []*v1alpha1.IPAMClaim, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.IPAMClaim)) + }) + return ret, err } // IPAMClaims returns an object that can list and get IPAMClaims. func (s *iPAMClaimLister) IPAMClaims(namespace string) IPAMClaimNamespaceLister { - return iPAMClaimNamespaceLister{listers.NewNamespaced[*ipamclaimsv1alpha1.IPAMClaim](s.ResourceIndexer, namespace)} + return iPAMClaimNamespaceLister{indexer: s.indexer, namespace: namespace} } // IPAMClaimNamespaceLister helps list and get IPAMClaims. @@ -56,15 +64,36 @@ func (s *iPAMClaimLister) IPAMClaims(namespace string) IPAMClaimNamespaceLister type IPAMClaimNamespaceLister interface { // List lists all IPAMClaims in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*ipamclaimsv1alpha1.IPAMClaim, err error) + List(selector labels.Selector) (ret []*v1alpha1.IPAMClaim, err error) // Get retrieves the IPAMClaim from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*ipamclaimsv1alpha1.IPAMClaim, error) + Get(name string) (*v1alpha1.IPAMClaim, error) IPAMClaimNamespaceListerExpansion } // iPAMClaimNamespaceLister implements the IPAMClaimNamespaceLister // interface. 
type iPAMClaimNamespaceLister struct { - listers.ResourceIndexer[*ipamclaimsv1alpha1.IPAMClaim] + indexer cache.Indexer + namespace string +} + +// List lists all IPAMClaims in the indexer for a given namespace. +func (s iPAMClaimNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.IPAMClaim, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.IPAMClaim)) + }) + return ret, err +} + +// Get retrieves the IPAMClaim from the indexer for a given namespace and name. +func (s iPAMClaimNamespaceLister) Get(name string) (*v1alpha1.IPAMClaim, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("ipamclaim"), name) + } + return obj.(*v1alpha1.IPAMClaim), nil } diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/types.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/types.go index bb4fc0e97d..ca94219215 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/types.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/types.go @@ -4,13 +4,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -//go:generate go run sigs.k8s.io/controller-tools/cmd/controller-gen@v0.16.5 paths=./... object crd output:artifacts:code=./,config=../../../../artifacts +//go:generate go run sigs.k8s.io/controller-tools/cmd/controller-gen@v0.13.0 paths=./... 
object crd output:artifacts:code=./,config=../../../../artifacts -//go:generate go run k8s.io/code-generator/cmd/client-gen@v0.32.5 --go-header-file ../../../../hack/custom-boilerplate.go.txt --clientset-name versioned --input-base "" --input github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1 --output-pkg github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset --output-dir ./apis/clientset .. +//go:generate go run k8s.io/code-generator/cmd/client-gen@v0.28.0 client-gen --go-header-file ../../../../hack/custom-boilerplate.go.txt --clientset-name versioned --input-base "" --input github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1 --output-package github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset .. -//go:generate go run k8s.io/code-generator/cmd/lister-gen@v0.32.5 --go-header-file ../../../../hack/custom-boilerplate.go.txt --output-pkg github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers --output-dir ./apis/listers ./ +//go:generate go run k8s.io/code-generator/cmd/lister-gen@v0.28.0 lister-gen --go-header-file ../../../../hack/custom-boilerplate.go.txt --input-dirs github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1 --output-package github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers .. 
-//go:generate go run k8s.io/code-generator/cmd/informer-gen@v0.32.5 --go-header-file ../../../../hack/custom-boilerplate.go.txt --versioned-clientset-package github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned --listers-package github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers --output-pkg github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers --output-dir ./apis/informers ./ +//go:generate go run k8s.io/code-generator/cmd/informer-gen@v0.28.0 informer-gen --go-header-file ../../../../hack/custom-boilerplate.go.txt --input-dirs github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1 --versioned-clientset-package github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned --listers-package github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers --output-package github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers .. // +genclient // +kubebuilder:object:root=true @@ -35,14 +35,9 @@ type IPAMClaimSpec struct { Interface string `json:"interface"` } -// IPAMClaimStatus contains the observed status of the IPAMClaim. 
type IPAMClaimStatus struct { // The list of IP addresses (v4, v6) that were allocated for the pod interface IPs []string `json:"ips"` - // The name of the pod holding the IPAMClaim - OwnerPod OwnerPod `json:"ownerPod,omitempty"` - // Conditions contains details for one aspect of the current state of this API Resource - Conditions []metav1.Condition `json:"conditions,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -52,7 +47,3 @@ type IPAMClaimList struct { metav1.ListMeta `json:"metadata,omitempty"` Items []IPAMClaim `json:"items"` } - -type OwnerPod struct { - Name string `json:"name"` -} diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/zz_generated.deepcopy.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/zz_generated.deepcopy.go index d68e38c3ee..737efd7a84 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/zz_generated.deepcopy.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/zz_generated.deepcopy.go @@ -5,7 +5,6 @@ package v1alpha1 import ( - "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -91,14 +90,6 @@ func (in *IPAMClaimStatus) DeepCopyInto(out *IPAMClaimStatus) { *out = make([]string, len(*in)) copy(*out, *in) } - out.OwnerPod = in.OwnerPod - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMClaimStatus. @@ -110,18 +101,3 @@ func (in *IPAMClaimStatus) DeepCopy() *IPAMClaimStatus { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *OwnerPod) DeepCopyInto(out *OwnerPod) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OwnerPod. -func (in *OwnerPod) DeepCopy() *OwnerPod { - if in == nil { - return nil - } - out := new(OwnerPod) - in.DeepCopyInto(out) - return out -} diff --git a/go-controller/vendor/golang.org/x/oauth2/README.md b/go-controller/vendor/golang.org/x/oauth2/README.md index 48dbb9d84c..781770c204 100644 --- a/go-controller/vendor/golang.org/x/oauth2/README.md +++ b/go-controller/vendor/golang.org/x/oauth2/README.md @@ -5,6 +5,15 @@ oauth2 package contains a client implementation for OAuth 2.0 spec. +## Installation + +~~~~ +go get golang.org/x/oauth2 +~~~~ + +Or you can manually git clone the repository to +`$(go env GOPATH)/src/golang.org/x/oauth2`. + See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) @@ -24,11 +33,7 @@ The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://go.dev/doc/contribute. - -The git repository is https://go.googlesource.com/oauth2. - -Note: +this repository, see https://golang.org/doc/contribute.html. In particular: * Excluding trivial changes, all contributions should be connected to an existing issue. * API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. diff --git a/go-controller/vendor/golang.org/x/oauth2/oauth2.go b/go-controller/vendor/golang.org/x/oauth2/oauth2.go index 74f052aa9f..09f6a49b80 100644 --- a/go-controller/vendor/golang.org/x/oauth2/oauth2.go +++ b/go-controller/vendor/golang.org/x/oauth2/oauth2.go @@ -56,7 +56,7 @@ type Config struct { // the OAuth flow, after the resource owner's URLs. RedirectURL string - // Scopes specifies optional requested permissions. 
+ // Scope specifies optional requested permissions. Scopes []string // authStyleCache caches which auth style to use when Endpoint.AuthStyle is diff --git a/go-controller/vendor/golang.org/x/oauth2/pkce.go b/go-controller/vendor/golang.org/x/oauth2/pkce.go index 6a95da975c..50593b6dfe 100644 --- a/go-controller/vendor/golang.org/x/oauth2/pkce.go +++ b/go-controller/vendor/golang.org/x/oauth2/pkce.go @@ -21,7 +21,7 @@ const ( // // A fresh verifier should be generated for each authorization. // S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL -// (or Config.DeviceAuth) and VerifierOption(verifier) to Config.Exchange +// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange // (or Config.DeviceAccessToken). func GenerateVerifier() string { // "RECOMMENDED that the output of a suitable random number generator be @@ -51,7 +51,7 @@ func S256ChallengeFromVerifier(verifier string) string { } // S256ChallengeOption derives a PKCE code challenge derived from verifier with -// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAuth +// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess // only. func S256ChallengeOption(verifier string) AuthCodeOption { return challengeOption{ diff --git a/go-controller/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go b/go-controller/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go deleted file mode 100644 index c2fe519714..0000000000 --- a/go-controller/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go +++ /dev/null @@ -1,418 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package pod - -import ( - "fmt" - "time" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" -) - -// FindPort locates the container port for the given pod and portName. If the -// targetPort is a number, use that. If the targetPort is a string, look that -// string up in all named ports in all containers in the target pod. If no -// match is found, fail. -func FindPort(pod *v1.Pod, svcPort *v1.ServicePort) (int, error) { - portName := svcPort.TargetPort - switch portName.Type { - case intstr.String: - name := portName.StrVal - for _, container := range pod.Spec.Containers { - for _, port := range container.Ports { - if port.Name == name && port.Protocol == svcPort.Protocol { - return int(port.ContainerPort), nil - } - } - } - // also support sidecar container (initContainer with restartPolicy=Always) - for _, container := range pod.Spec.InitContainers { - if container.RestartPolicy == nil || *container.RestartPolicy != v1.ContainerRestartPolicyAlways { - continue - } - for _, port := range container.Ports { - if port.Name == name && port.Protocol == svcPort.Protocol { - return int(port.ContainerPort), nil - } - } - } - case intstr.Int: - return portName.IntValue(), nil - } - - return 0, fmt.Errorf("no suitable port for manifest: %s", pod.UID) -} - -// ContainerType signifies container type -type ContainerType int - -const ( - // Containers is for normal containers - Containers ContainerType = 1 << iota - // InitContainers is for init containers - InitContainers - // EphemeralContainers is for 
ephemeral containers - EphemeralContainers -) - -// AllContainers specifies that all containers be visited -const AllContainers ContainerType = InitContainers | Containers | EphemeralContainers - -// AllFeatureEnabledContainers returns a ContainerType mask which includes all container -// types except for the ones guarded by feature gate. -func AllFeatureEnabledContainers() ContainerType { - return AllContainers -} - -// ContainerVisitor is called with each container spec, and returns true -// if visiting should continue. -type ContainerVisitor func(container *v1.Container, containerType ContainerType) (shouldContinue bool) - -// Visitor is called with each object name, and returns true if visiting should continue -type Visitor func(name string) (shouldContinue bool) - -func skipEmptyNames(visitor Visitor) Visitor { - return func(name string) bool { - if len(name) == 0 { - // continue visiting - return true - } - // delegate to visitor - return visitor(name) - } -} - -// VisitContainers invokes the visitor function with a pointer to every container -// spec in the given pod spec with type set in mask. If visitor returns false, -// visiting is short-circuited. VisitContainers returns true if visiting completes, -// false if visiting was short-circuited. 
-func VisitContainers(podSpec *v1.PodSpec, mask ContainerType, visitor ContainerVisitor) bool { - if mask&InitContainers != 0 { - for i := range podSpec.InitContainers { - if !visitor(&podSpec.InitContainers[i], InitContainers) { - return false - } - } - } - if mask&Containers != 0 { - for i := range podSpec.Containers { - if !visitor(&podSpec.Containers[i], Containers) { - return false - } - } - } - if mask&EphemeralContainers != 0 { - for i := range podSpec.EphemeralContainers { - if !visitor((*v1.Container)(&podSpec.EphemeralContainers[i].EphemeralContainerCommon), EphemeralContainers) { - return false - } - } - } - return true -} - -// VisitPodSecretNames invokes the visitor function with the name of every secret -// referenced by the pod spec. If visitor returns false, visiting is short-circuited. -// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited. -// Returns true if visiting completed, false if visiting was short-circuited. -func VisitPodSecretNames(pod *v1.Pod, visitor Visitor) bool { - visitor = skipEmptyNames(visitor) - for _, reference := range pod.Spec.ImagePullSecrets { - if !visitor(reference.Name) { - return false - } - } - VisitContainers(&pod.Spec, AllContainers, func(c *v1.Container, containerType ContainerType) bool { - return visitContainerSecretNames(c, visitor) - }) - var source *v1.VolumeSource - - for i := range pod.Spec.Volumes { - source = &pod.Spec.Volumes[i].VolumeSource - switch { - case source.AzureFile != nil: - if len(source.AzureFile.SecretName) > 0 && !visitor(source.AzureFile.SecretName) { - return false - } - case source.CephFS != nil: - if source.CephFS.SecretRef != nil && !visitor(source.CephFS.SecretRef.Name) { - return false - } - case source.Cinder != nil: - if source.Cinder.SecretRef != nil && !visitor(source.Cinder.SecretRef.Name) { - return false - } - case source.FlexVolume != nil: - if source.FlexVolume.SecretRef != nil && !visitor(source.FlexVolume.SecretRef.Name) { - return false - } - case 
source.Projected != nil: - for j := range source.Projected.Sources { - if source.Projected.Sources[j].Secret != nil { - if !visitor(source.Projected.Sources[j].Secret.Name) { - return false - } - } - } - case source.RBD != nil: - if source.RBD.SecretRef != nil && !visitor(source.RBD.SecretRef.Name) { - return false - } - case source.Secret != nil: - if !visitor(source.Secret.SecretName) { - return false - } - case source.ScaleIO != nil: - if source.ScaleIO.SecretRef != nil && !visitor(source.ScaleIO.SecretRef.Name) { - return false - } - case source.ISCSI != nil: - if source.ISCSI.SecretRef != nil && !visitor(source.ISCSI.SecretRef.Name) { - return false - } - case source.StorageOS != nil: - if source.StorageOS.SecretRef != nil && !visitor(source.StorageOS.SecretRef.Name) { - return false - } - case source.CSI != nil: - if source.CSI.NodePublishSecretRef != nil && !visitor(source.CSI.NodePublishSecretRef.Name) { - return false - } - } - } - return true -} - -// visitContainerSecretNames returns true unless the visitor returned false when invoked with a secret reference -func visitContainerSecretNames(container *v1.Container, visitor Visitor) bool { - for _, env := range container.EnvFrom { - if env.SecretRef != nil { - if !visitor(env.SecretRef.Name) { - return false - } - } - } - for _, envVar := range container.Env { - if envVar.ValueFrom != nil && envVar.ValueFrom.SecretKeyRef != nil { - if !visitor(envVar.ValueFrom.SecretKeyRef.Name) { - return false - } - } - } - return true -} - -// VisitPodConfigmapNames invokes the visitor function with the name of every configmap -// referenced by the pod spec. If visitor returns false, visiting is short-circuited. -// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited. -// Returns true if visiting completed, false if visiting was short-circuited. 
-func VisitPodConfigmapNames(pod *v1.Pod, visitor Visitor) bool { - visitor = skipEmptyNames(visitor) - VisitContainers(&pod.Spec, AllContainers, func(c *v1.Container, containerType ContainerType) bool { - return visitContainerConfigmapNames(c, visitor) - }) - var source *v1.VolumeSource - for i := range pod.Spec.Volumes { - source = &pod.Spec.Volumes[i].VolumeSource - switch { - case source.Projected != nil: - for j := range source.Projected.Sources { - if source.Projected.Sources[j].ConfigMap != nil { - if !visitor(source.Projected.Sources[j].ConfigMap.Name) { - return false - } - } - } - case source.ConfigMap != nil: - if !visitor(source.ConfigMap.Name) { - return false - } - } - } - return true -} - -// visitContainerConfigmapNames returns true unless the visitor returned false when invoked with a configmap reference -func visitContainerConfigmapNames(container *v1.Container, visitor Visitor) bool { - for _, env := range container.EnvFrom { - if env.ConfigMapRef != nil { - if !visitor(env.ConfigMapRef.Name) { - return false - } - } - } - for _, envVar := range container.Env { - if envVar.ValueFrom != nil && envVar.ValueFrom.ConfigMapKeyRef != nil { - if !visitor(envVar.ValueFrom.ConfigMapKeyRef.Name) { - return false - } - } - } - return true -} - -// GetContainerStatus extracts the status of container "name" from "statuses". -// It returns true if "name" exists, else returns false. -func GetContainerStatus(statuses []v1.ContainerStatus, name string) (v1.ContainerStatus, bool) { - for i := range statuses { - if statuses[i].Name == name { - return statuses[i], true - } - } - return v1.ContainerStatus{}, false -} - -// GetExistingContainerStatus extracts the status of container "name" from "statuses", -// It also returns if "name" exists. 
-func GetExistingContainerStatus(statuses []v1.ContainerStatus, name string) v1.ContainerStatus { - status, _ := GetContainerStatus(statuses, name) - return status -} - -// GetIndexOfContainerStatus gets the index of status of container "name" from "statuses", -// It returns (index, true) if "name" exists, else returns (0, false). -func GetIndexOfContainerStatus(statuses []v1.ContainerStatus, name string) (int, bool) { - for i := range statuses { - if statuses[i].Name == name { - return i, true - } - } - return 0, false -} - -// IsPodAvailable returns true if a pod is available; false otherwise. -// Precondition for an available pod is that it must be ready. On top -// of that, there are two cases when a pod can be considered available: -// 1. minReadySeconds == 0, or -// 2. LastTransitionTime (is set) + minReadySeconds < current time -func IsPodAvailable(pod *v1.Pod, minReadySeconds int32, now metav1.Time) bool { - if !IsPodReady(pod) { - return false - } - - c := GetPodReadyCondition(pod.Status) - minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second - if minReadySeconds == 0 || (!c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time)) { - return true - } - return false -} - -// IsPodReady returns true if a pod is ready; false otherwise. -func IsPodReady(pod *v1.Pod) bool { - return IsPodReadyConditionTrue(pod.Status) -} - -// IsPodTerminal returns true if a pod is terminal, all containers are stopped and cannot ever regress. -func IsPodTerminal(pod *v1.Pod) bool { - return IsPodPhaseTerminal(pod.Status.Phase) -} - -// IsPodPhaseTerminal returns true if the pod's phase is terminal. -func IsPodPhaseTerminal(phase v1.PodPhase) bool { - return phase == v1.PodFailed || phase == v1.PodSucceeded -} - -// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise. 
-func IsPodReadyConditionTrue(status v1.PodStatus) bool { - condition := GetPodReadyCondition(status) - return condition != nil && condition.Status == v1.ConditionTrue -} - -// IsContainersReadyConditionTrue returns true if a pod is ready; false otherwise. -func IsContainersReadyConditionTrue(status v1.PodStatus) bool { - condition := GetContainersReadyCondition(status) - return condition != nil && condition.Status == v1.ConditionTrue -} - -// GetPodReadyCondition extracts the pod ready condition from the given status and returns that. -// Returns nil if the condition is not present. -func GetPodReadyCondition(status v1.PodStatus) *v1.PodCondition { - _, condition := GetPodCondition(&status, v1.PodReady) - return condition -} - -// GetContainersReadyCondition extracts the containers ready condition from the given status and returns that. -// Returns nil if the condition is not present. -func GetContainersReadyCondition(status v1.PodStatus) *v1.PodCondition { - _, condition := GetPodCondition(&status, v1.ContainersReady) - return condition -} - -// GetPodCondition extracts the provided condition from the given status and returns that. -// Returns nil and -1 if the condition is not present, and the index of the located condition. -func GetPodCondition(status *v1.PodStatus, conditionType v1.PodConditionType) (int, *v1.PodCondition) { - if status == nil { - return -1, nil - } - return GetPodConditionFromList(status.Conditions, conditionType) -} - -// GetPodConditionFromList extracts the provided condition from the given list of condition and -// returns the index of the condition and the condition. Returns -1 and nil if the condition is not present. 
-func GetPodConditionFromList(conditions []v1.PodCondition, conditionType v1.PodConditionType) (int, *v1.PodCondition) { - if conditions == nil { - return -1, nil - } - for i := range conditions { - if conditions[i].Type == conditionType { - return i, &conditions[i] - } - } - return -1, nil -} - -// UpdatePodCondition updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the -// status has changed. -// Returns true if pod condition has changed or has been added. -func UpdatePodCondition(status *v1.PodStatus, condition *v1.PodCondition) bool { - condition.LastTransitionTime = metav1.Now() - // Try to find this pod condition. - conditionIndex, oldCondition := GetPodCondition(status, condition.Type) - - if oldCondition == nil { - // We are adding new pod condition. - status.Conditions = append(status.Conditions, *condition) - return true - } - // We are updating an existing condition, so we need to check if it has changed. - if condition.Status == oldCondition.Status { - condition.LastTransitionTime = oldCondition.LastTransitionTime - } - - isEqual := condition.Status == oldCondition.Status && - condition.Reason == oldCondition.Reason && - condition.Message == oldCondition.Message && - condition.LastProbeTime.Equal(&oldCondition.LastProbeTime) && - condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime) - - status.Conditions[conditionIndex] = *condition - // Return true if one of the fields have changed. - return !isEqual -} - -// IsRestartableInitContainer returns true if the container has ContainerRestartPolicyAlways. -// This function is not checking if the container passed to it is indeed an init container. -// It is just checking if the container restart policy has been set to always. 
-func IsRestartableInitContainer(initContainer *v1.Container) bool { - if initContainer == nil || initContainer.RestartPolicy == nil { - return false - } - return *initContainer.RestartPolicy == v1.ContainerRestartPolicyAlways -} diff --git a/go-controller/vendor/modules.txt b/go-controller/vendor/modules.txt index 7636490960..5732a53975 100644 --- a/go-controller/vendor/modules.txt +++ b/go-controller/vendor/modules.txt @@ -197,8 +197,8 @@ github.com/juju/errors # github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47 ## explicit; go 1.17 github.com/k8snetworkplumbingwg/govdpa/pkg/kvdpa -# github.com/k8snetworkplumbingwg/ipamclaims v0.5.0-alpha -## explicit; go 1.23.0 +# github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha +## explicit; go 1.20 github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1 github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake @@ -504,8 +504,8 @@ golang.org/x/net/ipv6 golang.org/x/net/proxy golang.org/x/net/trace golang.org/x/net/websocket -# golang.org/x/oauth2 v0.27.0 -## explicit; go 1.23.0 +# golang.org/x/oauth2 v0.23.0 +## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal # golang.org/x/sync v0.12.0 @@ -677,7 +677,7 @@ gopkg.in/warnings.v0 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.32.5 +# k8s.io/api v0.32.3 ## explicit; go 1.23.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -742,7 +742,7 @@ k8s.io/api/storagemigration/v1alpha1 ## explicit; go 1.23.0 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 -# k8s.io/apimachinery v0.32.5 +# k8s.io/apimachinery v0.32.3 ## explicit; go 1.23.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -807,7 +807,7 @@ k8s.io/apimachinery/pkg/watch 
k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/client-go v0.32.5 +# k8s.io/client-go v0.32.3 ## explicit; go 1.23.0 k8s.io/client-go/applyconfigurations k8s.io/client-go/applyconfigurations/admissionregistration/v1 @@ -1176,7 +1176,6 @@ k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/validation/spec # k8s.io/kubernetes v1.32.6 ## explicit; go 1.23.0 -k8s.io/kubernetes/pkg/api/v1/pod k8s.io/kubernetes/pkg/apis/core k8s.io/kubernetes/pkg/probe k8s.io/kubernetes/pkg/probe/http diff --git a/test/conformance/go.mod b/test/conformance/go.mod index de64ed280e..b3763a3068 100644 --- a/test/conformance/go.mod +++ b/test/conformance/go.mod @@ -39,11 +39,12 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/testify v1.8.4 // indirect golang.org/x/net v0.38.0 // indirect - golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/oauth2 v0.12.0 // indirect golang.org/x/sys v0.31.0 // indirect golang.org/x/term v0.30.0 // indirect golang.org/x/text v0.23.0 // indirect golang.org/x/time v0.3.0 // indirect + google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/test/conformance/go.sum b/test/conformance/go.sum index 175ec601cc..1e5b55a8e9 100644 --- a/test/conformance/go.sum +++ b/test/conformance/go.sum @@ -23,6 +23,7 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf 
v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -105,13 +106,14 @@ golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2F golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= -golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= -golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= +golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -123,6 +125,7 @@ golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.30.0 
h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= @@ -138,6 +141,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= diff --git a/test/e2e/deploymentconfig/api/api.go b/test/e2e/deploymentconfig/api/api.go index dc43e87c9b..573ced8cb8 100644 --- a/test/e2e/deploymentconfig/api/api.go +++ b/test/e2e/deploymentconfig/api/api.go @@ -4,7 +4,6 @@ package api // Remove when OVN-Kubernetes exposes its config via an API. 
type DeploymentConfig interface { OVNKubernetesNamespace() string - FRRK8sNamespace() string ExternalBridgeName() string PrimaryInterfaceName() string } diff --git a/test/e2e/deploymentconfig/configs/kind/kind.go b/test/e2e/deploymentconfig/configs/kind/kind.go index d05c6a7061..be3f35aa73 100644 --- a/test/e2e/deploymentconfig/configs/kind/kind.go +++ b/test/e2e/deploymentconfig/configs/kind/kind.go @@ -33,10 +33,6 @@ func (k kind) OVNKubernetesNamespace() string { return "ovn-kubernetes" } -func (k kind) FRRK8sNamespace() string { - return "frr-k8s-system" -} - func (k kind) ExternalBridgeName() string { return "breth0" } diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index e5bbde7d42..be1b46bf75 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -712,7 +712,7 @@ var _ = ginkgo.Describe("e2e control plane", func() { } secondaryExternalContainerPort := infraprovider.Get().GetExternalContainerPort() secondaryExternalContainerSpec := infraapi.ExternalContainer{Name: "e2e-ovn-k", Image: images.AgnHost(), - Network: secondaryProviderNetwork, CmdArgs: getAgnHostHTTPPortBindCMDArgs(secondaryExternalContainerPort), ExtPort: secondaryExternalContainerPort} + Network: secondaryProviderNetwork, Args: getAgnHostHTTPPortBindCMDArgs(secondaryExternalContainerPort), ExtPort: secondaryExternalContainerPort} ginkgo.By("creating container on secondary provider network") secondaryExternalContainer, err = providerCtx.CreateExternalContainer(secondaryExternalContainerSpec) framework.ExpectNoError(err, "failed to create external container") @@ -1275,7 +1275,7 @@ var _ = ginkgo.Describe("e2e ingress traffic validation", func() { framework.ExpectNoError(err, "failed to get primary network") externalContainerPort := infraprovider.Get().GetExternalContainerPort() externalContainer = infraapi.ExternalContainer{Name: "e2e-ingress", Image: images.AgnHost(), Network: primaryProviderNetwork, - CmdArgs: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} + 
Args: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "failed to create external service", externalContainer.String()) }) @@ -1672,7 +1672,7 @@ var _ = ginkgo.Describe("e2e ingress traffic validation", func() { framework.ExpectNoError(err, "failed to get primary network") externalContainerPort := infraprovider.Get().GetExternalContainerPort() externalContainer = infraapi.ExternalContainer{Name: "e2e-ingress-add-more", Image: images.AgnHost(), Network: primaryProviderNetwork, - CmdArgs: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} + Args: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "external container %s must be created successfully", externalContainer.Name) @@ -1834,7 +1834,7 @@ var _ = ginkgo.Describe("e2e ingress to host-networked pods traffic validation", framework.ExpectNoError(err, "failed to get primary network") externalContainerPort := infraprovider.Get().GetExternalContainerPort() externalContainer = infraapi.ExternalContainer{Name: clientContainerName, Image: images.AgnHost(), Network: primaryProviderNetwork, - CmdArgs: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} + Args: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "external container %s must be created successfully", externalContainer.Name) }) @@ -1943,7 +1943,7 @@ var _ = ginkgo.Describe("e2e br-int flow monitoring export validation", func() { primaryProviderNetwork, err := infraprovider.Get().PrimaryNetwork() framework.ExpectNoError(err, "failed to get primary network") 
collectorExternalContainer := infraapi.ExternalContainer{Name: getContainerName(collectorPort), Image: "cloudflare/goflow", - Network: primaryProviderNetwork, CmdArgs: []string{"-kafka=false"}, ExtPort: collectorPort} + Network: primaryProviderNetwork, Args: []string{"-kafka=false"}, ExtPort: collectorPort} collectorExternalContainer, err = providerCtx.CreateExternalContainer(collectorExternalContainer) if err != nil { framework.Failf("failed to start flow collector container %s: %v", getContainerName(collectorPort), err) diff --git a/test/e2e/egress_firewall.go b/test/e2e/egress_firewall.go index abbc26b524..32974beb1c 100644 --- a/test/e2e/egress_firewall.go +++ b/test/e2e/egress_firewall.go @@ -197,7 +197,7 @@ var _ = ginkgo.Describe("e2e egress firewall policy validation", feature.EgressF Name: externalContainerName1, Image: images.AgnHost(), Network: primaryProviderNetwork, - CmdArgs: []string{"netexec", fmt.Sprintf("--http-port=%d", externalContainer1Port)}, + Args: []string{"netexec", fmt.Sprintf("--http-port=%d", externalContainer1Port)}, ExtPort: externalContainer1Port, } externalContainer1, err = providerCtx.CreateExternalContainer(externalContainer1Spec) @@ -210,7 +210,7 @@ var _ = ginkgo.Describe("e2e egress firewall policy validation", feature.EgressF Name: externalContainerName2, Image: images.AgnHost(), Network: primaryProviderNetwork, - CmdArgs: []string{"netexec", fmt.Sprintf("--http-port=%d", externalContainer2Port)}, + Args: []string{"netexec", fmt.Sprintf("--http-port=%d", externalContainer2Port)}, ExtPort: externalContainer2Port, } externalContainer2, err = providerCtx.CreateExternalContainer(externalContainer2Spec) diff --git a/test/e2e/egress_services.go b/test/e2e/egress_services.go index ee2fec30f4..2afcb2edc8 100644 --- a/test/e2e/egress_services.go +++ b/test/e2e/egress_services.go @@ -85,7 +85,7 @@ var _ = ginkgo.Describe("EgressService", feature.EgressService, func() { framework.ExpectNoError(err, "failed to get primary provider 
network") externalContainer = infraapi.ExternalContainer{Name: externalContainerName, Image: images.AgnHost(), Network: primaryProviderNetwork, ExtPort: 8080, - CmdArgs: getAgnHostHTTPPortBindCMDArgs(8080)} + Args: getAgnHostHTTPPortBindCMDArgs(8080)} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "failed to create external container") }) @@ -1239,7 +1239,7 @@ metadata: ginkgo.By(fmt.Sprintf("Creating container %s", net.containerName)) // Setting the --hostname here is important since later we poke the container's /hostname endpoint extContainerSecondaryNet := infraapi.ExternalContainer{Name: net.containerName, Image: images.AgnHost(), Network: network, - CmdArgs: []string{"netexec", "--http-port=8080"}, ExtPort: 8080} + Args: []string{"netexec", "--http-port=8080"}, ExtPort: 8080} extContainerSecondaryNet, err = providerCtx.CreateExternalContainer(extContainerSecondaryNet) ginkgo.By(fmt.Sprintf("Adding a listener for the shared IPv4 %s on %s", sharedIPv4, net.containerName)) out, err := infraprovider.Get().ExecExternalContainerCommand(extContainerSecondaryNet, []string{"ip", "address", "add", sharedIPv4 + "/32", "dev", "lo"}) diff --git a/test/e2e/egressip.go b/test/e2e/egressip.go index b2f75254f7..d9d281aa7b 100644 --- a/test/e2e/egressip.go +++ b/test/e2e/egressip.go @@ -219,7 +219,7 @@ func isSupportedAgnhostForEIP(externalContainer infraapi.ExternalContainer) bool if externalContainer.Image != images.AgnHost() { return false } - if !util.SliceHasStringItem(externalContainer.CmdArgs, "netexec") { + if !util.SliceHasStringItem(externalContainer.Args, "netexec") { return false } return true @@ -754,13 +754,13 @@ var _ = ginkgo.DescribeTableSubtree("e2e egress IP validation", feature.EgressIP // attach containers to the primary network primaryTargetExternalContainerPort := infraprovider.Get().GetExternalContainerPort() primaryTargetExternalContainerSpec := infraapi.ExternalContainer{Name: 
targetNodeName, Image: images.AgnHost(), - Network: primaryProviderNetwork, CmdArgs: getAgnHostHTTPPortBindCMDArgs(primaryTargetExternalContainerPort), ExtPort: primaryTargetExternalContainerPort} + Network: primaryProviderNetwork, Args: getAgnHostHTTPPortBindCMDArgs(primaryTargetExternalContainerPort), ExtPort: primaryTargetExternalContainerPort} primaryTargetExternalContainer, err = providerCtx.CreateExternalContainer(primaryTargetExternalContainerSpec) framework.ExpectNoError(err, "failed to create external target container on primary network", primaryTargetExternalContainerSpec.String()) primaryDeniedExternalContainerPort := infraprovider.Get().GetExternalContainerPort() primaryDeniedExternalContainerSpec := infraapi.ExternalContainer{Name: deniedTargetNodeName, Image: images.AgnHost(), - Network: primaryProviderNetwork, CmdArgs: getAgnHostHTTPPortBindCMDArgs(primaryDeniedExternalContainerPort), ExtPort: primaryDeniedExternalContainerPort} + Network: primaryProviderNetwork, Args: getAgnHostHTTPPortBindCMDArgs(primaryDeniedExternalContainerPort), ExtPort: primaryDeniedExternalContainerPort} primaryDeniedExternalContainer, err = providerCtx.CreateExternalContainer(primaryDeniedExternalContainerSpec) framework.ExpectNoError(err, "failed to create external denied container on primary network", primaryDeniedExternalContainer.String()) @@ -791,7 +791,7 @@ var _ = ginkgo.DescribeTableSubtree("e2e egress IP validation", feature.EgressIP Name: targetSecondaryNodeName, Image: images.AgnHost(), Network: secondaryProviderNetwork, - CmdArgs: getAgnHostHTTPPortBindCMDArgs(secondaryTargetExternalContainerPort), + Args: getAgnHostHTTPPortBindCMDArgs(secondaryTargetExternalContainerPort), ExtPort: secondaryTargetExternalContainerPort, } secondaryTargetExternalContainer, err = providerCtx.CreateExternalContainer(secondaryTargetExternalContainerSpec) @@ -972,7 +972,7 @@ spec: if isClusterDefaultNetwork(netConfigParams) { pod2IP = getPodAddress(pod2Name, f.Namespace.Name) } else { 
- pod2IP, err = getPodAnnotationIPsForAttachmentByIndex( + pod2IP, err = podIPsForUserDefinedPrimaryNetwork( f.ClientSet, f.Namespace.Name, pod2Name, @@ -2125,7 +2125,7 @@ spec: providerPrimaryNetwork, err := infraprovider.Get().PrimaryNetwork() framework.ExpectNoError(err, "failed to get providers primary network") externalContainerPrimary := infraapi.ExternalContainer{Name: "external-container-for-egressip-mtu-test", Image: images.AgnHost(), - Network: providerPrimaryNetwork, CmdArgs: []string{"pause"}, ExtPort: externalContainerPrimaryPort} + Network: providerPrimaryNetwork, Args: []string{"pause"}, ExtPort: externalContainerPrimaryPort} externalContainerPrimary, err = providerCtx.CreateExternalContainer(externalContainerPrimary) framework.ExpectNoError(err, "failed to create external container: %s", externalContainerPrimary.String()) diff --git a/test/e2e/external_gateways.go b/test/e2e/external_gateways.go index c3b2f12198..4a119ae96b 100644 --- a/test/e2e/external_gateways.go +++ b/test/e2e/external_gateways.go @@ -42,16 +42,6 @@ const ( anyLink = "any" ) -// GatewayRemovalType defines ways to remove pod as external gateway -type GatewayRemovalType string - -const ( - GatewayUpdate GatewayRemovalType = "GatewayUpdate" - GatewayDelete GatewayRemovalType = "GatewayDelete" - GatewayDeletionTimestamp GatewayRemovalType = "GatewayDeletionTimestamp" - GatewayNotReady GatewayRemovalType = "GatewayNotReady" -) - func getOverrideNetwork() (string, string, string) { // When the env variable is specified, we use a different docker network for // containers acting as external gateways. 
@@ -144,7 +134,7 @@ var _ = ginkgo.Describe("External Gateway", feature.ExternalGateway, func() { } externalContainerPort := infraprovider.Get().GetExternalContainerPort() externalContainer := infraapi.ExternalContainer{Name: getContainerName(gwContainerNameTemplate, externalContainerPort), - Image: images.AgnHost(), Network: network, ExtPort: externalContainerPort, CmdArgs: []string{"pause"}} + Image: images.AgnHost(), Network: network, ExtPort: externalContainerPort, Args: []string{"pause"}} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "failed to start external gateway test container") if network.Name() == "host" { @@ -238,7 +228,7 @@ var _ = ginkgo.Describe("External Gateway", feature.ExternalGateway, func() { // start the container that will act as a new external gateway that the tests will be updated to use externalContainer2Port := infraprovider.Get().GetExternalContainerPort() externalContainer2 := infraapi.ExternalContainer{Name: getContainerName(gwContainerNameTemplate2, externalContainerPort), - Image: images.AgnHost(), Network: network, ExtPort: externalContainer2Port, CmdArgs: []string{"pause"}} + Image: images.AgnHost(), Network: network, ExtPort: externalContainer2Port, Args: []string{"pause"}} externalContainer2, err = providerCtx.CreateExternalContainer(externalContainer2) framework.ExpectNoError(err, "failed to start external gateway test container %s", getContainerName(gwContainerNameTemplate2, externalContainerPort)) if network.Name() == "host" { @@ -365,7 +355,7 @@ var _ = ginkgo.Describe("External Gateway", feature.ExternalGateway, func() { } externalContainerPort := infraprovider.Get().GetExternalContainerPort() externalContainer := infraapi.ExternalContainer{Name: getContainerName(gwContainerTemplate, externalContainerPort), Image: images.AgnHost(), Network: network, - CmdArgs: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} + Args: 
getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "failed to start external gateway test container %s", getContainerName(gwContainerTemplate, externalContainerPort)) if network.Name() == "host" { @@ -885,15 +875,10 @@ var _ = ginkgo.Describe("External Gateway", feature.ExternalGateway, func() { ginkgo.Entry("IPV6 udp", &addressesv6, "udp"), ginkgo.Entry("IPV6 tcp", &addressesv6, "tcp")) - ginkgo.DescribeTable("ExternalGWPod annotation: Should validate conntrack entry deletion for TCP/UDP traffic via multiple external gateways a.k.a ECMP routes", func(addresses *gatewayTestIPs, protocol string, removalType GatewayRemovalType) { + ginkgo.DescribeTable("ExternalGWPod annotation: Should validate conntrack entry deletion for TCP/UDP traffic via multiple external gateways a.k.a ECMP routes", func(addresses *gatewayTestIPs, protocol string, deletePod bool) { if addresses.srcPodIP == "" || addresses.nodeIP == "" { skipper.Skipf("Skipping as pod ip / node ip are not set pod ip %s node ip %s", addresses.srcPodIP, addresses.nodeIP) } - - if removalType == GatewayNotReady { - recreatePodWithReadinessProbe(f, gatewayPodName2, nodes.Items[1].Name, servingNamespace, sleepCommand, nil) - } - ginkgo.By("Annotate the external gw pods to manage the src app pod namespace") for i, gwPod := range []string{gatewayPodName1, gatewayPodName2} { networkIPs := fmt.Sprintf("\"%s\"", addresses.gatewayIPs[i]) @@ -940,9 +925,15 @@ var _ = ginkgo.Describe("External Gateway", feature.ExternalGateway, func() { totalPodConnEntries := pokeConntrackEntries(nodeName, addresses.srcPodIP, protocol, nil) gomega.Expect(totalPodConnEntries).To(gomega.Equal(6)) // total conntrack entries for this pod/protocol - cleanUpFn := handleGatewayPodRemoval(f, removalType, gatewayPodName2, servingNamespace, addresses.gatewayIPs[1], true) - if cleanUpFn != nil { - defer 
cleanUpFn() + if deletePod { + ginkgo.By(fmt.Sprintf("Delete second external gateway pod %s from ns %s", gatewayPodName2, servingNamespace)) + err = f.ClientSet.CoreV1().Pods(servingNamespace).Delete(context.TODO(), gatewayPodName2, metav1.DeleteOptions{}) + framework.ExpectNoError(err, "Delete the gateway pod failed: %v", err) + // give some time to handle pod delete event + time.Sleep(5 * time.Second) + } else { + ginkgo.By("Remove second external gateway pod's routing-namespace annotation") + annotatePodForGateway(gatewayPodName2, servingNamespace, "", addresses.gatewayIPs[1], false) } // ensure the conntrack deletion tracker annotation is updated @@ -982,20 +973,12 @@ var _ = ginkgo.Describe("External Gateway", feature.ExternalGateway, func() { gomega.Expect(podConnEntriesWithMACLabelsSet).To(gomega.Equal(0)) // we don't have any remaining gateways left gomega.Expect(totalPodConnEntries).To(gomega.Equal(4)) // 6-2 }, - ginkgo.Entry("IPV4 udp + pod annotation update", &addressesv4, "udp", GatewayUpdate), - ginkgo.Entry("IPV4 tcp + pod annotation update", &addressesv4, "tcp", GatewayUpdate), - ginkgo.Entry("IPV6 udp + pod annotation update", &addressesv6, "udp", GatewayUpdate), - ginkgo.Entry("IPV6 tcp + pod annotation update", &addressesv6, "tcp", GatewayUpdate), - ginkgo.Entry("IPV4 udp + pod delete", &addressesv4, "udp", GatewayDelete), - ginkgo.Entry("IPV6 tcp + pod delete", &addressesv6, "tcp", GatewayDelete), - ginkgo.Entry("IPV4 udp + pod deletion timestamp", &addressesv4, "udp", GatewayDeletionTimestamp), - ginkgo.Entry("IPV4 tcp + pod deletion timestamp", &addressesv4, "tcp", GatewayDeletionTimestamp), - ginkgo.Entry("IPV6 udp + pod deletion timestamp", &addressesv6, "udp", GatewayDeletionTimestamp), - ginkgo.Entry("IPV6 tcp + pod deletion timestamp", &addressesv6, "tcp", GatewayDeletionTimestamp), - ginkgo.Entry("IPV4 udp + pod not ready", &addressesv4, "udp", GatewayNotReady), - ginkgo.Entry("IPV4 tcp + pod not ready", &addressesv4, "tcp", 
GatewayNotReady), - ginkgo.Entry("IPV6 udp + pod not ready", &addressesv6, "udp", GatewayNotReady), - ginkgo.Entry("IPV6 tcp + pod not ready", &addressesv6, "tcp", GatewayNotReady), + ginkgo.Entry("IPV4 udp", &addressesv4, "udp", false), + ginkgo.Entry("IPV4 tcp", &addressesv4, "tcp", false), + ginkgo.Entry("IPV6 udp", &addressesv6, "udp", false), + ginkgo.Entry("IPV6 tcp", &addressesv6, "tcp", false), + ginkgo.Entry("IPV4 udp + pod delete", &addressesv4, "udp", true), + ginkgo.Entry("IPV6 tcp + pod delete", &addressesv6, "tcp", true), ) }) @@ -2000,15 +1983,11 @@ var _ = ginkgo.Describe("External Gateway", feature.ExternalGateway, func() { ginkgo.Entry("IPV6 udp", &addressesv6, "udp"), ginkgo.Entry("IPV6 tcp", &addressesv6, "tcp")) - ginkgo.DescribeTable("Dynamic Hop: Should validate conntrack entry deletion for TCP/UDP traffic via multiple external gateways a.k.a ECMP routes", func(addresses *gatewayTestIPs, protocol string, removalType GatewayRemovalType) { + ginkgo.DescribeTable("Dynamic Hop: Should validate conntrack entry deletion for TCP/UDP traffic via multiple external gateways a.k.a ECMP routes", func(addresses *gatewayTestIPs, protocol string) { if addresses.srcPodIP == "" || addresses.nodeIP == "" { skipper.Skipf("Skipping as pod ip / node ip are not set pod ip %s node ip %s", addresses.srcPodIP, addresses.nodeIP) } - if removalType == GatewayNotReady { - recreatePodWithReadinessProbe(f, gatewayPodName2, nodes.Items[1].Name, servingNamespace, sleepCommand, map[string]string{"name": gatewayPodName2, "gatewayPod": "true"}) - } - for i, gwPod := range []string{gatewayPodName1, gatewayPodName2} { annotateMultusNetworkStatusInPodGateway(gwPod, servingNamespace, []string{addresses.gatewayIPs[i], addresses.gatewayIPs[i]}) } @@ -2047,10 +2026,10 @@ var _ = ginkgo.Describe("External Gateway", feature.ExternalGateway, func() { }, time.Minute, 5).Should(gomega.Equal(podConnEntriesWithMACLabelsSet)) gomega.Expect(pokeConntrackEntries(nodeName, addresses.srcPodIP, 
protocol, nil)).To(gomega.Equal(totalPodConnEntries)) // total conntrack entries for this pod/protocol - cleanUpFn := handleGatewayPodRemoval(f, removalType, gatewayPodName2, servingNamespace, addresses.gatewayIPs[1], false) - if cleanUpFn != nil { - defer cleanUpFn() - } + ginkgo.By("Remove second external gateway pod's routing-namespace annotation") + p := getGatewayPod(f, servingNamespace, gatewayPodName2) + p.Labels = map[string]string{"name": gatewayPodName2} + updatePod(f, p) ginkgo.By("Check if conntrack entries for ECMP routes are removed for the deleted external gateway if traffic is UDP") @@ -2065,7 +2044,7 @@ var _ = ginkgo.Describe("External Gateway", feature.ExternalGateway, func() { gomega.Expect(pokeConntrackEntries(nodeName, addresses.srcPodIP, protocol, nil)).To(gomega.Equal(totalPodConnEntries)) ginkgo.By("Remove first external gateway pod's routing-namespace annotation") - p := getGatewayPod(f, servingNamespace, gatewayPodName1) + p = getGatewayPod(f, servingNamespace, gatewayPodName1) p.Labels = map[string]string{"name": gatewayPodName1} updatePod(f, p) @@ -2081,19 +2060,11 @@ var _ = ginkgo.Describe("External Gateway", feature.ExternalGateway, func() { gomega.Expect(pokeConntrackEntries(nodeName, addresses.srcPodIP, protocol, nil)).To(gomega.Equal(totalPodConnEntries)) checkAPBExternalRouteStatus(defaultPolicyName) }, - ginkgo.Entry("IPV4 udp + pod annotation update", &addressesv4, "udp", GatewayUpdate), - ginkgo.Entry("IPV4 tcp + pod annotation update", &addressesv4, "tcp", GatewayUpdate), - ginkgo.Entry("IPV6 udp + pod annotation update", &addressesv6, "udp", GatewayUpdate), - ginkgo.Entry("IPV6 tcp + pod annotation update", &addressesv6, "tcp", GatewayUpdate), - ginkgo.Entry("IPV4 udp + pod deletion timestamp", &addressesv4, "udp", GatewayDeletionTimestamp), - ginkgo.Entry("IPV4 tcp + pod deletion timestamp", &addressesv4, "tcp", GatewayDeletionTimestamp), - ginkgo.Entry("IPV6 udp + pod deletion timestamp", &addressesv6, "udp", 
GatewayDeletionTimestamp), - ginkgo.Entry("IPV6 tcp + pod deletion timestamp", &addressesv6, "tcp", GatewayDeletionTimestamp), - ginkgo.Entry("IPV4 udp + pod not ready", &addressesv4, "udp", GatewayNotReady), - ginkgo.Entry("IPV4 tcp + pod not ready", &addressesv4, "tcp", GatewayNotReady), - ginkgo.Entry("IPV6 udp + pod not ready", &addressesv6, "udp", GatewayNotReady), - ginkgo.Entry("IPV6 tcp + pod not ready", &addressesv6, "tcp", GatewayNotReady), - ) + ginkgo.Entry("IPV4 udp", &addressesv4, "udp"), + ginkgo.Entry("IPV4 tcp", &addressesv4, "tcp"), + ginkgo.Entry("IPV6 udp", &addressesv6, "udp"), + ginkgo.Entry("IPV6 tcp", &addressesv6, "tcp")) + }) // BFD Tests are dual of external gateway. The only difference is that they enable BFD on ovn and @@ -2922,9 +2893,9 @@ func setupGatewayContainers(f *framework.Framework, providerCtx infraapi.Context var err error externalContainer1 := infraapi.ExternalContainer{Name: getContainerName(container1Template, uint16(gwTCPPort)), - Image: externalContainerImage, Network: network, CmdArgs: []string{}, ExtPort: uint16(gwTCPPort)} + Image: externalContainerImage, Network: network, Args: []string{}, ExtPort: uint16(gwTCPPort)} externalContainer2 := infraapi.ExternalContainer{Name: getContainerName(container2Template, uint16(gwTCPPort)), - Image: externalContainerImage, Network: network, CmdArgs: []string{}, ExtPort: uint16(gwTCPPort)} + Image: externalContainerImage, Network: network, Args: []string{}, ExtPort: uint16(gwTCPPort)} gwContainers := []infraapi.ExternalContainer{externalContainer1, externalContainer2} addressesv4 := gatewayTestIPs{targetIPs: make([]string, 0)} @@ -3175,12 +3146,12 @@ func setupGatewayContainersForConntrackTest(f *framework.Framework, providerCtx addressesv6 := gatewayTestIPs{gatewayIPs: make([]string, 2)} ginkgo.By("Creating the gateway containers for the UDP test") gwExternalContainer1 := infraapi.ExternalContainer{Name: getContainerName(gwContainer1Template, 12345), - Image: images.IPerf3(), 
Network: network, CmdArgs: []string{}, ExtPort: 12345} + Image: images.IPerf3(), Network: network, Args: []string{}, ExtPort: 12345} gwExternalContainer1, err = providerCtx.CreateExternalContainer(gwExternalContainer1) framework.ExpectNoError(err, "failed to create external container (%s)", gwExternalContainer1) gwExternalContainer2 := infraapi.ExternalContainer{Name: getContainerName(gwContainer2Template, 12345), - Image: images.IPerf3(), Network: network, CmdArgs: []string{}, ExtPort: 12345} + Image: images.IPerf3(), Network: network, Args: []string{}, ExtPort: 12345} gwExternalContainer2, err = providerCtx.CreateExternalContainer(gwExternalContainer2) framework.ExpectNoError(err, "failed to create external container (%s)", gwExternalContainer2) if network.Name() == "host" { @@ -3624,133 +3595,3 @@ func resetGatewayAnnotations(f *framework.Framework) { annotation}...) } } - -func setupPodWithReadinessProbe(f *framework.Framework, podName, nodeSelector, namespace string, command []string, labels map[string]string) (*corev1.Pod, error) { - // Handle bash -c commands specially to preserve argument structure - if len(command) >= 3 && command[0] == "bash" && command[1] == "-c" { - // Extract the script part and wrap it to preserve logic - script := strings.Join(command[2:], " ") - command = []string{"bash", "-c", "touch /tmp/ready && (" + script + ")"} - } else { - // For non-bash commands, preserve their structure - var quotedArgs []string - for _, arg := range command { - // Escape single quotes and wrap in single quotes - escaped := strings.ReplaceAll(arg, "'", "'\"'\"'") - quotedArgs = append(quotedArgs, "'"+escaped+"'") - } - command = []string{"bash", "-c", "touch /tmp/ready && " + strings.Join(quotedArgs, " ")} - } - return createPod(f, podName, nodeSelector, namespace, command, labels, func(p *corev1.Pod) { - p.Spec.Containers[0].ReadinessProbe = &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - Exec: &corev1.ExecAction{ - Command: []string{"cat", 
"/tmp/ready"}, - }, - }, - InitialDelaySeconds: 5, - PeriodSeconds: 5, - FailureThreshold: 1, - } - }) -} - -func recreatePodWithReadinessProbe(f *framework.Framework, podName, nodeSelector, namespace string, command []string, labels map[string]string) { - ginkgo.By(fmt.Sprintf("Delete second external gateway pod %s from ns %s", podName, namespace)) - err := deletePodWithWaitByName(context.TODO(), f.ClientSet, podName, namespace) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Delete second external gateway pod %s from ns %s, failed: %v", podName, namespace, err)) - - ginkgo.By(fmt.Sprintf("Create second external gateway pod %s from ns %s with readiness probe", podName, namespace)) - _, err = setupPodWithReadinessProbe(f, podName, nodeSelector, namespace, command, labels) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Create second external gateway pod %s from ns %s with readiness probe, failed: %v", podName, namespace, err)) - gomega.Eventually(func() bool { - var p *corev1.Pod - p, err = f.ClientSet.CoreV1().Pods(namespace).Get(context.Background(), podName, metav1.GetOptions{}) - if err != nil { - return false - } - for _, condition := range p.Status.Conditions { - if condition.Type == corev1.PodReady { - return condition.Status == corev1.ConditionTrue - } - } - return false - }).Should(gomega.Equal(true), fmt.Sprintf("Readiness probe for second external gateway pod %s from ns %s, failed: %v", podName, namespace, err)) -} - -func handleGatewayPodRemoval(f *framework.Framework, removalType GatewayRemovalType, gatewayPodName, servingNamespace, gatewayIP string, isAnnotated bool) func() { - var err error - switch removalType { - case GatewayDelete: - ginkgo.By(fmt.Sprintf("Delete second external gateway pod %s from ns %s", gatewayPodName, servingNamespace)) - err := deletePodWithWaitByName(context.TODO(), f.ClientSet, gatewayPodName, servingNamespace) - framework.ExpectNoError(err, "Delete the gateway pod failed: %v", err) - return 
nil - case GatewayUpdate: - if isAnnotated { - ginkgo.By("Remove second external gateway pod's routing-namespace annotation") - annotatePodForGateway(gatewayPodName, servingNamespace, "", gatewayIP, false) - return nil - } - - ginkgo.By("Updating external gateway pod labels") - p := getGatewayPod(f, servingNamespace, gatewayPodName) - p.Labels = map[string]string{"name": gatewayPodName} - updatePod(f, p) - return nil - case GatewayDeletionTimestamp: - ginkgo.By("Setting finalizer then deleting external gateway pod with grace period to set deletion timestamp") - p := getGatewayPod(f, servingNamespace, gatewayPodName) - p.Finalizers = append(p.Finalizers, "k8s.ovn.org/external-gw-pod-finalizer") - updatePod(f, p) - gomega.Eventually(func() bool { - p, err = f.ClientSet.CoreV1().Pods(servingNamespace).Get(context.Background(), gatewayPodName, metav1.GetOptions{}) - if err != nil { - return false - } - return strings.Contains(strings.Join(p.GetFinalizers(), ","), "k8s.ovn.org/external-gw-pod-finalizer") - }).Should(gomega.Equal(true), fmt.Sprintf("Update second external gateway pod %s from ns %s with finalizer, failed: %v", gatewayPodName, servingNamespace, err)) - - p = getGatewayPod(f, servingNamespace, gatewayPodName) - err = e2epod.DeletePodWithGracePeriod(context.Background(), f.ClientSet, p, 1000) - framework.ExpectNoError(err, fmt.Sprintf("unable to delete pod with grace period: %s, err: %v", p.Name, err)) - - gomega.Eventually(func() bool { - p, err = f.ClientSet.CoreV1().Pods(servingNamespace).Get(context.Background(), gatewayPodName, metav1.GetOptions{}) - if err != nil { - return false - } - return p.DeletionTimestamp != nil - }).Should(gomega.BeTrue(), fmt.Sprintf("Gateway pod %s in ns %s should have deletion timestamp, failed: %v", gatewayPodName, servingNamespace, err)) - - // return a function to remove the finalizer - return func() { - p = getGatewayPod(f, servingNamespace, gatewayPodName) - p.Finalizers = []string{} - updatePod(f, p) - } - case 
GatewayNotReady: - ginkgo.By("Remove /tmp/ready in external gateway pod so that readiness probe fails") - _, err = e2ekubectl.RunKubectl(servingNamespace, "exec", gatewayPodName, "--", "rm", "/tmp/ready") - framework.ExpectNoError(err, fmt.Sprintf("unable to remove /tmp/ready in pod: %s, err: %v", gatewayPodName, err)) - gomega.Eventually(func() bool { - var p *corev1.Pod - p, err = f.ClientSet.CoreV1().Pods(servingNamespace).Get(context.Background(), gatewayPodName, metav1.GetOptions{}) - if err != nil { - return false - } - podReadyStatus := corev1.ConditionTrue - for _, condition := range p.Status.Conditions { - if condition.Type == corev1.PodReady { - podReadyStatus = condition.Status - break - } - } - return podReadyStatus == corev1.ConditionFalse - }).WithTimeout(5*time.Minute).Should(gomega.Equal(true), fmt.Sprintf("Mark second external gateway pod %s from ns %s not ready, failed: %v", gatewayPodName, servingNamespace, err)) - return nil - default: - framework.Failf("unexpected GatewayRemovalType passed: %s", removalType) - return nil - } -} diff --git a/test/e2e/feature/features.go b/test/e2e/feature/features.go index e7c3920477..842b0474e6 100644 --- a/test/e2e/feature/features.go +++ b/test/e2e/feature/features.go @@ -23,7 +23,6 @@ var ( MultiHoming = New("MultiHoming") NodeIPMACMigration = New("NodeIPMACMigration") OVSCPUPin = New("OVSCPUPin") - RouteAdvertisements = New("RouteAdvertisements") Unidle = New("Unidle") ) diff --git a/test/e2e/go.mod b/test/e2e/go.mod index d9d67fb0c4..95ac4ff6ae 100644 --- a/test/e2e/go.mod +++ b/test/e2e/go.mod @@ -6,16 +6,16 @@ toolchain go1.23.6 require ( github.com/google/go-cmp v0.6.0 - github.com/k8snetworkplumbingwg/ipamclaims v0.5.0-alpha + github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha github.com/k8snetworkplumbingwg/multi-networkpolicy v1.0.1 github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.6.0 github.com/onsi/ginkgo/v2 v2.22.0 github.com/onsi/gomega v1.36.1 github.com/pkg/errors 
v0.9.1 golang.org/x/sync v0.12.0 - k8s.io/api v0.32.5 - k8s.io/apimachinery v0.32.5 - k8s.io/client-go v0.32.5 + k8s.io/api v0.32.3 + k8s.io/apimachinery v0.32.3 + k8s.io/client-go v0.32.3 k8s.io/klog v1.0.0 k8s.io/kubernetes v1.32.6 k8s.io/pod-security-admission v0.32.3 @@ -148,7 +148,7 @@ require ( golang.org/x/crypto v0.36.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/net v0.38.0 // indirect - golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sys v0.31.0 // indirect golang.org/x/term v0.30.0 // indirect golang.org/x/text v0.23.0 // indirect diff --git a/test/e2e/go.sum b/test/e2e/go.sum index 900d7aa612..6838af0973 100644 --- a/test/e2e/go.sum +++ b/test/e2e/go.sum @@ -331,8 +331,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47 h1:iSncnlC+rtlNOIpPa3fbqQMhpTscGJIlkiWaPl1VcS4= github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47/go.mod h1:SPaDIyUmwN03Bgn0u/mhoiE4o/+koeKh11VUsdsUX0U= -github.com/k8snetworkplumbingwg/ipamclaims v0.5.0-alpha h1:b3iHeks/KTzhG2dNanaUZcFEJwJbYBZY16jxCaVv9i8= -github.com/k8snetworkplumbingwg/ipamclaims v0.5.0-alpha/go.mod h1:MGaMX1tJ7MlHDee4/xmqp3guQh+eDiuCLAauqD9K11Q= +github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha h1:ss+EP77GlQmh90hGKpnAG4Q3VVxRlB7GoncemaPtO4g= +github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha/go.mod h1:qlR+sKxQ2OGfwhFCuXSd7rJ/GgC38vQBeHKQ7f2YnpI= github.com/k8snetworkplumbingwg/multi-networkpolicy v1.0.1 h1:Egj1hEVYNXWFlKpgzAXxe/2o8VNiVcAJLrKzlinILQo= github.com/k8snetworkplumbingwg/multi-networkpolicy v1.0.1/go.mod h1:kEJ4WM849yNmXekuSXLRwb+LaZ9usC06O8JgoAIq+f4= github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.6.0 h1:BT3ghAY0q7lWib9rz+tVXDFkm27dJV6SLCn7TunZwo4= 
@@ -676,8 +676,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= -golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -986,19 +986,19 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.22.7/go.mod h1:7hejA1BgBEiSsWljUyRkIjj+AISXO16IwsaDgFjJsQE= k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= -k8s.io/api v0.32.5 h1:uqjjsYo1kTJr5NIcoIaP9F+TgXgADH7nKQx91FDAhtk= -k8s.io/api v0.32.5/go.mod h1:bXXFU3fGCZ/eFMZvfHZC69PeGbXEL4zzjuPVzOxHF64= +k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= +k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY= k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss= k8s.io/apimachinery v0.22.7/go.mod h1:ZvVLP5iLhwVFg2Yx9Gh5W0um0DUauExbRhe+2Z8I1EU= k8s.io/apimachinery 
v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/apimachinery v0.32.5 h1:6We3aJ6crC0ap8EhsEXcgX3LpI6SEjubpiOMXLROwPM= -k8s.io/apimachinery v0.32.5/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= +k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/apiserver v0.32.3 h1:kOw2KBuHOA+wetX1MkmrxgBr648ksz653j26ESuWNY8= k8s.io/apiserver v0.32.3/go.mod h1:q1x9B8E/WzShF49wh3ADOh6muSfpmFL0I2t+TG0Zdgc= k8s.io/client-go v0.22.7/go.mod h1:pGU/tWSzzvsYT7M3npHhoZ3Jh9qJTTIvFvDtWuW31dw= -k8s.io/client-go v0.32.5 h1:huFmQMzgWu0z4kbWsuZci+Gt4Fo72I4CcrvhToZ/Qp0= -k8s.io/client-go v0.32.5/go.mod h1:Qchw6f9WIVrur7DKojAHpRgGLcANT0RLIvF39Jz58xA= +k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= +k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= k8s.io/cloud-provider v0.32.3 h1:WC7KhWrqXsU4b0E4tjS+nBectGiJbr1wuc1TpWXvtZM= k8s.io/cloud-provider v0.32.3/go.mod h1:/fwBfgRPuh16n8vLHT+PPT+Bc4LAEaJYj38opO2wsYY= k8s.io/code-generator v0.22.7/go.mod h1:iOZwYADSgFPNGWfqHFfg1V0TNJnl1t0WyZluQp4baqU= diff --git a/test/e2e/infraprovider/api/api.go b/test/e2e/infraprovider/api/api.go index 1d2d3466fb..5ef104b7f3 100644 --- a/test/e2e/infraprovider/api/api.go +++ b/test/e2e/infraprovider/api/api.go @@ -4,8 +4,6 @@ import ( "errors" "fmt" "strings" - - "k8s.io/kubernetes/test/e2e/framework" ) // Provider represents the infrastructure provider @@ -39,21 +37,6 @@ type Provider interface { GetK8HostPort() uint16 // supported K8 host ports } -// Underlay represents the configuration for an underlay network. -// Note: The physical network referenced by PhysicalNetworkName must be pre-created and available. -type Underlay struct { - // PhysicalNetworkName is the name of the pre-created physical network to use. - PhysicalNetworkName string - // LogicalNetworkName is the logical network name to be used. 
- LogicalNetworkName string - // BridgeName is the name of the bridge associated with the underlay. - BridgeName string - // PortName is the name of the port on the bridge. - PortName string - // VlanID is the VLAN identifier for the underlay network. - VlanID int -} - type Context interface { CreateExternalContainer(container ExternalContainer) (ExternalContainer, error) DeleteExternalContainer(container ExternalContainer) error @@ -63,7 +46,6 @@ type Context interface { AttachNetwork(network Network, instance string) (NetworkInterface, error) DetachNetwork(network Network, instance string) error GetAttachedNetworks() (Networks, error) - SetupUnderlay(f *framework.Framework, underlay Underlay) error AddCleanUpFn(func() error) } @@ -182,15 +164,13 @@ func (n NetworkInterface) GetMAC() string { } type ExternalContainer struct { - Name string - Image string - Network Network - Entrypoint string - CmdArgs []string - ExtPort uint16 - IPv4 string - IPv6 string - RuntimeArgs []string + Name string + Image string + Network Network + Args []string + ExtPort uint16 + IPv4 string + IPv6 string } func (ec ExternalContainer) GetName() string { @@ -228,7 +208,7 @@ func (ec ExternalContainer) IsIPv6() bool { } func (ec ExternalContainer) String() string { - str := fmt.Sprintf("Name: %q, Image: %q, Network: %q, RuntimeArgs: %q, Command: %q", ec.Name, ec.Image, ec.Network, strings.Join(ec.RuntimeArgs, " "), strings.Join(ec.CmdArgs, " ")) + str := fmt.Sprintf("Name: %q, Image: %q, Network: %q, Command: %q", ec.Name, ec.Image, ec.Network, strings.Join(ec.Args, " ")) if ec.IsIPv4() { str = fmt.Sprintf("%s, IPv4 address: %q", str, ec.GetIPv4()) } @@ -249,6 +229,9 @@ func (ec ExternalContainer) IsValidPreCreateContainer() (bool, error) { if ec.Network.String() == "" { errs = append(errs, errors.New("network is not set")) } + if ec.ExtPort == 0 { + errs = append(errs, errors.New("port is not set")) + } if len(errs) == 0 { return true, nil } diff --git 
a/test/e2e/infraprovider/providers/kind/kind.go b/test/e2e/infraprovider/providers/kind/kind.go index 8c068c7411..f58a5bc746 100644 --- a/test/e2e/infraprovider/providers/kind/kind.go +++ b/test/e2e/infraprovider/providers/kind/kind.go @@ -13,12 +13,10 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/ovn-org/ovn-kubernetes/test/e2e/containerengine" - "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/portalloc" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" utilnet "k8s.io/utils/net" @@ -147,13 +145,9 @@ func (c *contextKind) createExternalContainer(container api.ExternalContainer) ( return container, fmt.Errorf("container %s already exists", container.Name) } cmd := []string{"run", "-itd", "--privileged", "--name", container.Name, "--network", container.Network.Name(), "--hostname", container.Name} - if container.Entrypoint != "" { - cmd = append(cmd, "--entrypoint", container.Entrypoint) - } - cmd = append(cmd, container.RuntimeArgs...) cmd = append(cmd, container.Image) - if len(container.CmdArgs) > 0 { - cmd = append(cmd, container.CmdArgs...) + if len(container.Args) > 0 { + cmd = append(cmd, container.Args...) 
} else { if images.AgnHost() == container.Image { cmd = append(cmd, "pause") @@ -365,83 +359,6 @@ func (c *contextKind) getAttachedNetworks() (api.Networks, error) { return attachedNetworks, nil } -func (c *contextKind) SetupUnderlay(f *framework.Framework, underlay api.Underlay) error { - if underlay.LogicalNetworkName == "" { - return fmt.Errorf("underlay logical network name must be set") - } - - if underlay.PhysicalNetworkName == "" { - underlay.PhysicalNetworkName = "underlay" - } - - if underlay.BridgeName == "" { - underlay.BridgeName = secondaryBridge - } - - const ( - ovsKubeNodeLabel = "app=ovnkube-node" - ) - - ovsPodList, err := f.ClientSet.CoreV1().Pods(deploymentconfig.Get().OVNKubernetesNamespace()).List( - context.Background(), - metav1.ListOptions{LabelSelector: ovsKubeNodeLabel}, - ) - if err != nil { - return fmt.Errorf("failed to list OVS pods with label %q at namespace %q: %w", ovsKubeNodeLabel, deploymentconfig.Get().OVNKubernetesNamespace(), err) - } - - if len(ovsPodList.Items) == 0 { - return fmt.Errorf("no pods with label %q in namespace %q", ovsKubeNodeLabel, deploymentconfig.Get().OVNKubernetesNamespace()) - } - for _, ovsPod := range ovsPodList.Items { - if underlay.BridgeName != deploymentconfig.Get().ExternalBridgeName() { - underlayInterface, err := getNetworkInterface(ovsPod.Spec.NodeName, underlay.PhysicalNetworkName) - if err != nil { - return fmt.Errorf("failed to get underlay interface for network %s on node %s: %w", underlay.PhysicalNetworkName, ovsPod.Spec.NodeName, err) - } - c.AddCleanUpFn(func() error { - if err := removeOVSBridge(ovsPod.Namespace, ovsPod.Name, underlay.BridgeName); err != nil { - return fmt.Errorf("failed to remove OVS bridge %s for pod %s/%s during cleanup: %w", underlay.BridgeName, ovsPod.Namespace, ovsPod.Name, err) - } - return nil - }) - if err := ensureOVSBridge(ovsPod.Namespace, ovsPod.Name, underlay.BridgeName); err != nil { - return fmt.Errorf("failed to add OVS bridge %s for pod %s/%s: %w", 
underlay.BridgeName, ovsPod.Namespace, ovsPod.Name, err) - } - - if err := ovsAttachPortToBridge(ovsPod.Namespace, ovsPod.Name, underlay.BridgeName, underlayInterface.InfName); err != nil { - return fmt.Errorf("failed to attach port %s to bridge %s for pod %s/%s: %w", underlayInterface.InfName, underlay.BridgeName, ovsPod.Namespace, ovsPod.Name, err) - } - if underlay.VlanID > 0 { - if err := ovsEnableVLANAccessPort(ovsPod.Namespace, ovsPod.Name, underlay.BridgeName, underlayInterface.InfName, underlay.VlanID); err != nil { - return fmt.Errorf("failed to enable VLAN %d on port %s for bridge %s for pod %s/%s: %w", underlay.VlanID, underlayInterface.InfName, underlay.BridgeName, ovsPod.Namespace, ovsPod.Name, err) - } - } - } - c.AddCleanUpFn(func() error { - if err := configureBridgeMappings( - ovsPod.Namespace, - ovsPod.Name, - defaultNetworkBridgeMapping(), - ); err != nil { - return fmt.Errorf("failed to restore default bridge mappings for pod %s/%s during cleanup: %w", ovsPod.Namespace, ovsPod.Name, err) - } - return nil - }) - - if err := configureBridgeMappings( - ovsPod.Namespace, - ovsPod.Name, - defaultNetworkBridgeMapping(), - bridgeMapping(underlay.LogicalNetworkName, underlay.BridgeName), - ); err != nil { - return fmt.Errorf("failed to configure bridge mappings for pod %s/%s for logical network %s to bridge %s: %w", ovsPod.Namespace, ovsPod.Name, underlay.LogicalNetworkName, underlay.BridgeName, err) - } - } - return nil - -} - func (c *contextKind) AddCleanUpFn(cleanUpFn func() error) { c.Lock() defer c.Unlock() @@ -497,13 +414,13 @@ func (c *contextKind) cleanUp() error { const ( nameFormat = "{{.Name}}" - inspectNetworkIPv4GWKeyStr = "{{ with index .NetworkSettings.Networks %q }}{{ .Gateway }}{{ end }}" - inspectNetworkIPv4AddrKeyStr = "{{ with index .NetworkSettings.Networks %q }}{{ .IPAddress }}{{ end }}" - inspectNetworkIPv4PrefixKeyStr = "{{ with index .NetworkSettings.Networks %q }}{{ .IPPrefixLen }}{{ end }}" - inspectNetworkIPv6GWKeyStr = "{{ 
with index .NetworkSettings.Networks %q }}{{ .IPv6Gateway }}{{ end }}" - inspectNetworkIPv6AddrKeyStr = "{{ with index .NetworkSettings.Networks %q }}{{ .GlobalIPv6Address }}{{ end }}" - inspectNetworkIPv6PrefixKeyStr = "{{ with index .NetworkSettings.Networks %q }}{{ .GlobalIPv6PrefixLen }}{{ end }}" - inspectNetworkMACKeyStr = "{{ with index .NetworkSettings.Networks %q }}{{ .MacAddress }}{{ end }}" + inspectNetworkIPv4GWKeyStr = "{{ .NetworkSettings.Networks.%s.Gateway }}" + inspectNetworkIPv4AddrKeyStr = "{{ .NetworkSettings.Networks.%s.IPAddress }}" + inspectNetworkIPv4PrefixKeyStr = "{{ .NetworkSettings.Networks.%s.IPPrefixLen }}" + inspectNetworkIPv6GWKeyStr = "{{ .NetworkSettings.Networks.%s.IPv6Gateway }}" + inspectNetworkIPv6AddrKeyStr = "{{ .NetworkSettings.Networks.%s.GlobalIPv6Address }}" + inspectNetworkIPv6PrefixKeyStr = "{{ .NetworkSettings.Networks.%s.GlobalIPv6PrefixLen }}" + inspectNetworkMACKeyStr = "{{ .NetworkSettings.Networks.%s.MacAddress }}" inspectNetworkContainersKeyStr = "{{ range $key, $value := .Containers }}{{ printf \"%s\\n\" $value.Name}}{{ end }}'" emptyValue = "" ) diff --git a/test/e2e/infraprovider/providers/kind/ovs.go b/test/e2e/infraprovider/providers/kind/ovs.go deleted file mode 100644 index 337ae4e702..0000000000 --- a/test/e2e/infraprovider/providers/kind/ovs.go +++ /dev/null @@ -1,93 +0,0 @@ -package kind - -import ( - "fmt" - "strings" - "time" - - "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" - - e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" -) - -const ( - secondaryBridge = "ovsbr1" -) - -func ensureOVSBridge(podNamespace, podName string, bridgeName string) error { - cmd := fmt.Sprintf("ovs-vsctl br-exists %[1]s || ovs-vsctl add-br %[1]s", bridgeName) - if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { - return fmt.Errorf("failed to add ovs bridge %q: %v", bridgeName, err) - } - return nil -} - -func 
removeOVSBridge(podNamespace, podName string, bridgeName string) error { - cmd := fmt.Sprintf("if ovs-vsctl br-exists %[1]s; then ovs-vsctl del-br %[1]s; fi", bridgeName) - if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { - return fmt.Errorf("failed to remove ovs bridge %q: %v", bridgeName, err) - } - return nil -} - -func ovsAttachPortToBridge(podNamespace, podName string, bridgeName string, portName string) error { - cmd := fmt.Sprintf("ovs-vsctl list port %[2]s || ovs-vsctl add-port %[1]s %[2]s", bridgeName, portName) - if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { - return fmt.Errorf("failed to addadd port %s from OVS bridge %s: %v", portName, bridgeName, err) - } - return nil -} - -func ovsEnableVLANAccessPort(podNamespace, podName string, bridgeName string, portName string, vlanID int) error { - cmd := fmt.Sprintf("ovs-vsctl set port %[1]s tag=%[2]d vlan_mode=access", portName, vlanID) - if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { - return fmt.Errorf("failed to enable vlan access port %s from OVS bridge %s: %v", portName, bridgeName, err) - } - return nil -} - -type BridgeMapping struct { - physnet string - ovsBridge string -} - -func (bm BridgeMapping) String() string { - return fmt.Sprintf("%s:%s", bm.physnet, bm.ovsBridge) -} - -type BridgeMappings []BridgeMapping - -func (bms BridgeMappings) String() string { - return strings.Join(Map(bms, func(bm BridgeMapping) string { return bm.String() }), ",") -} - -func Map[T, V any](items []T, fn func(T) V) []V { - result := make([]V, len(items)) - for i, t := range items { - result[i] = fn(t) - } - return result -} - -func configureBridgeMappings(podNamespace, podName string, mappings ...BridgeMapping) error { - mappingsString := fmt.Sprintf("external_ids:ovn-bridge-mappings=%s", 
BridgeMappings(mappings).String()) - cmd := strings.Join([]string{"ovs-vsctl", "set", "open", ".", mappingsString}, " ") - if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { - return fmt.Errorf("failed to configure bridge mappings %q: %v", mappingsString, err) - } - return nil -} - -func defaultNetworkBridgeMapping() BridgeMapping { - return BridgeMapping{ - physnet: "physnet", - ovsBridge: deploymentconfig.Get().ExternalBridgeName(), - } -} - -func bridgeMapping(physnet, ovsBridge string) BridgeMapping { - return BridgeMapping{ - physnet: physnet, - ovsBridge: ovsBridge, - } -} diff --git a/test/e2e/kubevirt.go b/test/e2e/kubevirt.go index 67ab2e290a..4ae12854a2 100644 --- a/test/e2e/kubevirt.go +++ b/test/e2e/kubevirt.go @@ -8,6 +8,7 @@ import ( "net" "net/netip" "os" + "os/exec" "strings" "sync" "time" @@ -102,7 +103,6 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun fr = wrappedTestFramework("kv-live-migration") d = diagnostics.New(fr) crClient crclient.Client - virtClient *kubevirt.Client namespace string iperf3DefaultPort = int32(5201) tcpServerPort = int32(9900) @@ -300,7 +300,7 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun output := "" Eventually(func() error { var err error - output, err = virtClient.RunCommand(vmi, fmt.Sprintf("curl http://%s", net.JoinHostPort(podIP, "8000")), polling) + output, err = kubevirt.RunCommand(vmi, fmt.Sprintf("curl http://%s", net.JoinHostPort(podIP, "8000")), polling) return err }). WithPolling(polling). 
@@ -316,7 +316,7 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun polling := 15 * time.Second for podName, serverPodIPs := range serverPodIPsByName { for _, serverPodIP := range serverPodIPs { - output, err := virtClient.RunCommand(vmi, fmt.Sprintf("iperf3 -t 0 -c %[2]s --logfile /tmp/%[1]s_%[2]s_iperf3.log &", podName, serverPodIP), polling) + output, err := kubevirt.RunCommand(vmi, fmt.Sprintf("iperf3 -t 0 -c %[2]s --logfile /tmp/%[1]s_%[2]s_iperf3.log &", podName, serverPodIP), polling) if err != nil { return fmt.Errorf("%s: %w", output, err) } @@ -361,7 +361,7 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun for _, podIP := range podIPs { iperfLogFile := fmt.Sprintf("/tmp/%s_%s_iperf3.log", podName, podIP) execFn := func(cmd string) (string, error) { - return virtClient.RunCommand(vmi, cmd, 2*time.Second) + return kubevirt.RunCommand(vmi, cmd, 2*time.Second) } checkIperfTraffic(iperfLogFile, execFn, stage) } @@ -393,10 +393,10 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun return nil } - startNorthSouthIngressIperfTraffic = func(container infraapi.ExternalContainer, addresses []string, port int32, stage string) error { + startNorthSouthIngressIperfTraffic = func(containerName string, addresses []string, port int32, stage string) error { GinkgoHelper() execFn := func(cmd string) (string, error) { - return infraprovider.Get().ExecExternalContainerCommand(container, []string{"bash", "-c", cmd}) + return infraprovider.Get().ExecExternalContainerCommand(infraapi.ExternalContainer{Name: containerName}, []string{"bash", "-c", cmd}) } return startNorthSouthIperfTraffic(execFn, addresses, port, "ingress", stage) } @@ -404,18 +404,18 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun startNorthSouthEgressIperfTraffic = func(vmi *kubevirtv1.VirtualMachineInstance, addresses []string, port int32, stage string) error { GinkgoHelper() 
execFn := func(cmd string) (string, error) { - return virtClient.RunCommand(vmi, cmd, 5*time.Second) + return kubevirt.RunCommand(vmi, cmd, 5*time.Second) } return startNorthSouthIperfTraffic(execFn, addresses, port, "egress", stage) } - checkNorthSouthIngressIperfTraffic = func(container infraapi.ExternalContainer, addresses []string, port int32, stage string) { + checkNorthSouthIngressIperfTraffic = func(containerName string, addresses []string, port int32, stage string) { GinkgoHelper() Expect(addresses).NotTo(BeEmpty()) for _, ip := range addresses { iperfLogFile := fmt.Sprintf("/tmp/ingress_test_%s_%d_iperf3.log", ip, port) execFn := func(cmd string) (string, error) { - return infraprovider.Get().ExecExternalContainerCommand(container, []string{"bash", "-c", cmd}) + return infraprovider.Get().ExecExternalContainerCommand(infraapi.ExternalContainer{Name: containerName}, []string{"bash", "-c", cmd}) } checkIperfTraffic(iperfLogFile, execFn, stage) } @@ -431,7 +431,7 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun for _, ip := range addresses { iperfLogFile := fmt.Sprintf("/tmp/egress_test_%s_%d_iperf3.log", ip, port) execFn := func(cmd string) (string, error) { - return virtClient.RunCommand(vmi, cmd, 5*time.Second) + return kubevirt.RunCommand(vmi, cmd, 5*time.Second) } checkIperfTraffic(iperfLogFile, execFn, stage) } @@ -446,7 +446,7 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun continue } cmd := fmt.Sprintf("ping -c 3 -W 2 %s", ip) - stdout, err := virtClient.RunCommand(vmi, cmd, 5*time.Second) + stdout, err := kubevirt.RunCommand(vmi, cmd, 5*time.Second) Expect(err).NotTo(HaveOccurred()) Expect(stdout).To(ContainSubstring(" 0% packet loss")) } @@ -472,7 +472,7 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun networkStatuses, err = podNetworkStatus(pod, networkStatusPredicate) return networkStatuses, err }). - WithTimeout(15 * time.Second). 
+ WithTimeout(5 * time.Second). WithPolling(200 * time.Millisecond). Should(HaveLen(1)) for _, ip := range networkStatuses[0].IPs { @@ -514,7 +514,7 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun step = by(vmName, stage+": Check n/s tcp traffic") output := "" Eventually(func() error { - output, err = virtClient.RunCommand(vmi, "curl -kL https://kubernetes.default.svc.cluster.local", polling) + output, err = kubevirt.RunCommand(vmi, "curl -kL https://kubernetes.default.svc.cluster.local", polling) return err }). WithPolling(polling). @@ -730,7 +730,7 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun addressByFamily = func(familyFn func(iface kubevirt.Interface) []kubevirt.Address, vmi *kubevirtv1.VirtualMachineInstance) func() ([]kubevirt.Address, error) { return func() ([]kubevirt.Address, error) { - networkState, err := kubevirt.RetrieveNetworkState(virtClient, vmi) + networkState, err := kubevirt.RetrieveNetworkState(vmi) if err != nil { return nil, err } @@ -831,14 +831,14 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun addresses, err := addressByFamily(ipv4, vmi)() Expect(err).NotTo(HaveOccurred()) if isDualStack() { - output, err := virtClient.RunCommand(vmi, `echo '{"interfaces":[{"name":"enp1s0","type":"ethernet","state":"up","ipv4":{"enabled":true,"dhcp":true},"ipv6":{"enabled":true,"dhcp":true,"autoconf":false}}],"routes":{"config":[{"destination":"::/0","next-hop-interface":"enp1s0","next-hop-address":"fe80::1"}]}}' |nmstatectl apply`, 5*time.Second) + output, err := kubevirt.RunCommand(vmi, `echo '{"interfaces":[{"name":"enp1s0","type":"ethernet","state":"up","ipv4":{"enabled":true,"dhcp":true},"ipv6":{"enabled":true,"dhcp":true,"autoconf":false}}],"routes":{"config":[{"destination":"::/0","next-hop-interface":"enp1s0","next-hop-address":"fe80::1"}]}}' |nmstatectl apply`, 5*time.Second) Expect(err).NotTo(HaveOccurred(), output) step = by(vmi.Name, 
"Wait for virtual machine to receive IPv6 address from DHCP") Eventually(addressByFamily(ipv6, vmi)). WithPolling(time.Second). WithTimeout(5*time.Minute). Should(HaveLen(2), func() string { - output, _ := virtClient.RunCommand(vmi, "journalctl -u nmstate", 2*time.Second) + output, _ := kubevirt.RunCommand(vmi, "journalctl -u nmstate", 2*time.Second) return step + " -> journal nmstate: " + output }) ipv6Addresses, err := addressByFamily(ipv6, vmi)() @@ -1076,7 +1076,7 @@ passwd: } err := crClient.Get(context.TODO(), crclient.ObjectKeyFromObject(vmi), vmi) Expect(err).NotTo(HaveOccurred()) - Expect(virtClient.LoginToFedora(vmi, "core", "fedora")).To(Succeed(), step) + Expect(kubevirt.LoginToFedora(vmi, "core", "fedora")).To(Succeed(), step) waitVirtualMachineAddresses(vmi) @@ -1084,7 +1084,7 @@ passwd: svc, err := fr.ClientSet.CoreV1().Services(namespace).Create(context.TODO(), composeService("tcpserver", vm.Name, tcpServerPort), metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred(), step) defer func() { - output, err := virtClient.RunCommand(vmi, "podman logs tcpserver", 10*time.Second) + output, err := kubevirt.RunCommand(vmi, "podman logs tcpserver", 10*time.Second) Expect(err).NotTo(HaveOccurred()) fmt.Printf("%s tcpserver logs: %s", vmi.Name, output) }() @@ -1132,15 +1132,11 @@ passwd: g.Expect(pod.Status.PodIP).NotTo(BeEmpty(), "pod %s has no valid IP address yet", pod.Name) } - sanitizeNodeName = func(nodeName string) string { - return strings.ReplaceAll(nodeName, ".", "-") - } - createHTTPServerPods = func(annotations map[string]string) []*corev1.Pod { var pods []*corev1.Pod for _, selectedNode := range selectedNodes { pod := composeAgnhostPod( - "testpod-"+sanitizeNodeName(selectedNode.Name), + "testpod-"+selectedNode.Name, namespace, selectedNode.Name, "netexec", "--http-port", "8000") @@ -1210,7 +1206,7 @@ fi IPRequest: staticIPs, } } - pod, err := createPod(fr, "testpod-"+sanitizeNodeName(node.Name), node.Name, namespace, []string{"bash", "-c"}, 
map[string]string{}, func(pod *corev1.Pod) { + pod, err := createPod(fr, "testpod-"+node.Name, node.Name, namespace, []string{"bash", "-c"}, map[string]string{}, func(pod *corev1.Pod) { if nse != nil { pod.Annotations = networkSelectionElements(*nse) } @@ -1338,9 +1334,6 @@ fi var err error crClient, err = newControllerRuntimeClient() Expect(err).NotTo(HaveOccurred()) - - virtClient, err = kubevirt.NewClient("/tmp") - Expect(err).NotTo(HaveOccurred()) }) Context("with default pod network", Ordered, func() { @@ -1422,7 +1415,7 @@ fi Expect(err).NotTo(HaveOccurred()) d.ConntrackDumpingDaemonSet() - d.OVSFlowsDumpingDaemonSet(deploymentconfig.Get().ExternalBridgeName()) + d.OVSFlowsDumpingDaemonSet("breth0") d.IPTablesDumpingDaemonSet() bandwidthPerMigration := resource.MustParse("40Mi") @@ -1540,7 +1533,7 @@ fi description: "restart", cmd: func() { By("Restarting vm") - output, err := virtClient.RestartVirtualMachine(vmi) + output, err := exec.Command("virtctl", "restart", "-n", namespace, vmi.Name).CombinedOutput() Expect(err).NotTo(HaveOccurred(), output) By("Wait some time to vmi conditions to catch up after restart") @@ -1671,11 +1664,11 @@ write_files: ingress string } var ( - containerNetwork = func(td testData) (infraapi.Network, error) { + containerNetwork = func(td testData) string { if td.ingress == "routed" { - return infraprovider.Get().GetNetwork("bgpnet") + return "bgpnet" } - return infraprovider.Get().PrimaryNetwork() + return "kind" } exposeVMIperfServer = func(td testData, vmi *kubevirtv1.VirtualMachineInstance, vmiAddresses []string) ([]string, int32) { GinkgoHelper() @@ -1721,7 +1714,17 @@ write_files: if td.topology == udnv1.NetworkTopologyLocalnet { By("setting up the localnet underlay") - Expect(providerCtx.SetupUnderlay(fr, infraapi.Underlay{LogicalNetworkName: networkName})).To(Succeed()) + nodes := ovsPods(clientSet) + Expect(nodes).NotTo(BeEmpty()) + DeferCleanup(func() { + if e2eframework.TestContext.DeleteNamespace && 
(e2eframework.TestContext.DeleteNamespaceOnFailure || !CurrentSpecReport().Failed()) { + By("tearing down the localnet underlay") + Expect(teardownUnderlay(nodes, secondaryBridge)).To(Succeed()) + } + }) + + const secondaryInterfaceName = "eth1" + Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, networkName, 0 /*vlanID*/)).To(Succeed()) } createCUDN(cudn) @@ -1752,22 +1755,23 @@ write_files: iperfServerTestPods, err = createIperfServerPods(selectedNodes, cudn.Name, td.role, []string{}) Expect(err).NotTo(HaveOccurred()) - var externalContainer infraapi.ExternalContainer - if td.role == udnv1.NetworkRolePrimary { - providerNetwork, err := containerNetwork(td) - Expect(err).ShouldNot(HaveOccurred(), "primary network must be available to attach containers") - externalContainerPort := infraprovider.Get().GetExternalContainerPort() - externalContainerName := namespace + "-iperf" - externalContainerSpec := infraapi.ExternalContainer{ - Name: externalContainerName, - Image: images.IPerf3(), - Network: providerNetwork, - CmdArgs: []string{"sleep infinity"}, - ExtPort: externalContainerPort, - } - externalContainer, err = providerCtx.CreateExternalContainer(externalContainerSpec) - Expect(err).ShouldNot(HaveOccurred(), "creation of external container is test dependency") + network, err := infraprovider.Get().PrimaryNetwork() + Expect(err).ShouldNot(HaveOccurred(), "primary network must be available to attach containers") + if containerNetwork := containerNetwork(td); containerNetwork != network.Name() { + network, err = infraprovider.Get().GetNetwork(containerNetwork) + Expect(err).ShouldNot(HaveOccurred(), "must to get alternative network") + } + externalContainerPort := infraprovider.Get().GetExternalContainerPort() + externalContainerName := namespace + "-iperf" + externalContainerSpec := infraapi.ExternalContainer{ + Name: externalContainerName, + Image: images.IPerf3(), + Network: network, + Args: []string{"sleep infinity"}, + ExtPort: 
externalContainerPort, } + externalContainer, err := providerCtx.CreateExternalContainer(externalContainerSpec) + Expect(err).ShouldNot(HaveOccurred(), "creation of external container is test dependency") var externalContainerIPs []string if externalContainer.IsIPv4() { @@ -1780,8 +1784,8 @@ write_files: if td.ingress == "routed" { // pre=created test dependency and therefore we dont delete frrExternalContainer := infraapi.ExternalContainer{Name: "frr"} - frrNetwork, err := containerNetwork(td) - Expect(err).NotTo(HaveOccurred()) + frrNetwork, err := infraprovider.Get().GetNetwork(containerNetwork(td)) + Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed to fetch network %q: %v", containerNetwork(td), err)) frrExternalContainerInterface, err := infraprovider.Get().GetExternalContainerNetworkInterface(frrExternalContainer, frrNetwork) Expect(err).NotTo(HaveOccurred(), "must fetch FRR container network interface attached to secondary network") @@ -1807,7 +1811,7 @@ ip route add %[3]s via %[4]s step := by(vmi.Name, "Login to virtual machine for the first time") Eventually(func() error { - return virtClient.LoginToFedora(vmi, "fedora", "fedora") + return kubevirt.LoginToFedora(vmi, "fedora", "fedora") }). WithTimeout(5*time.Second). WithPolling(time.Second). @@ -1832,7 +1836,7 @@ ip route add %[3]s via %[4]s Expect(testPodsIPs).NotTo(BeEmpty()) Eventually(kubevirt.RetrieveAllGlobalAddressesFromGuest). - WithArguments(virtClient, vmi). + WithArguments(vmi). WithTimeout(5*time.Second). WithPolling(time.Second). Should(ConsistOf(expectedAddresesAtGuest), step) @@ -1851,16 +1855,16 @@ ip route add %[3]s via %[4]s expectedIPv6GatewayPath, err := kubevirt.GenerateGatewayIPv6RouterLLA(nodeRunningVMI, networkName) Expect(err).NotTo(HaveOccurred()) Eventually(kubevirt.RetrieveIPv6Gateways). - WithArguments(virtClient, vmi). + WithArguments(vmi). WithTimeout(5*time.Second). WithPolling(time.Second). 
Should(Equal([]string{expectedIPv6GatewayPath}), "should filter remote ipv6 gateway nexthop") } step = by(vmi.Name, fmt.Sprintf("Check north/south traffic before %s %s", td.resource.description, td.test.description)) - output, err := virtClient.RunCommand(vmi, "/tmp/iperf-server.sh", time.Minute) + output, err := kubevirt.RunCommand(vmi, "/tmp/iperf-server.sh", time.Minute) Expect(err).NotTo(HaveOccurred(), step+": "+output) - Expect(startNorthSouthIngressIperfTraffic(externalContainer, serverIPs, serverPort, step)).To(Succeed()) - checkNorthSouthIngressIperfTraffic(externalContainer, serverIPs, serverPort, step) + Expect(startNorthSouthIngressIperfTraffic(externalContainerName, serverIPs, serverPort, step)).To(Succeed()) + checkNorthSouthIngressIperfTraffic(externalContainerName, serverIPs, serverPort, step) checkNorthSouthEgressICMPTraffic(vmi, externalContainerIPs, step) if td.ingress == "routed" { _, err := infraprovider.Get().ExecExternalContainerCommand(externalContainer, []string{"bash", "-c", iperfServerScript}) @@ -1880,13 +1884,13 @@ ip route add %[3]s via %[4]s td.test.cmd() step = by(vmi.Name, fmt.Sprintf("Login to virtual machine after %s %s", td.resource.description, td.test.description)) - Expect(virtClient.LoginToFedora(vmi, "fedora", "fedora")).To(Succeed(), step) + Expect(kubevirt.LoginToFedora(vmi, "fedora", "fedora")).To(Succeed(), step) obtainedAddresses := virtualMachineAddressesFromStatus(vmi, expectedNumberOfAddresses) Expect(obtainedAddresses).To(Equal(expectedAddreses)) Eventually(kubevirt.RetrieveAllGlobalAddressesFromGuest). - WithArguments(virtClient, vmi). + WithArguments(vmi). WithTimeout(5*time.Second). WithPolling(time.Second). 
Should(ConsistOf(expectedAddresesAtGuest), step) @@ -1896,15 +1900,15 @@ ip route add %[3]s via %[4]s // At restart we need re-connect Expect(startEastWestIperfTraffic(vmi, testPodsIPs, step)).To(Succeed(), step) if td.role == udnv1.NetworkRolePrimary { - output, err := virtClient.RunCommand(vmi, "/tmp/iperf-server.sh &", time.Minute) + output, err := kubevirt.RunCommand(vmi, "/tmp/iperf-server.sh &", time.Minute) Expect(err).NotTo(HaveOccurred(), step+": "+output) - Expect(startNorthSouthIngressIperfTraffic(externalContainer, serverIPs, serverPort, step)).To(Succeed()) + Expect(startNorthSouthIngressIperfTraffic(externalContainerName, serverIPs, serverPort, step)).To(Succeed()) } } checkEastWestIperfTraffic(vmi, testPodsIPs, step) if td.role == udnv1.NetworkRolePrimary { step = by(vmi.Name, fmt.Sprintf("Check north/south traffic after %s %s", td.resource.description, td.test.description)) - checkNorthSouthIngressIperfTraffic(externalContainer, serverIPs, serverPort, step) + checkNorthSouthIngressIperfTraffic(externalContainerName, serverIPs, serverPort, step) checkNorthSouthEgressICMPTraffic(vmi, externalContainerIPs, step) if td.ingress == "routed" { checkNorthSouthEgressIperfTraffic(vmi, externalContainerIPs, iperf3DefaultPort, step) @@ -1924,7 +1928,7 @@ ip route add %[3]s via %[4]s Expect(err).NotTo(HaveOccurred(), step) Eventually(kubevirt.RetrieveCachedGatewayMAC). - WithArguments(virtClient, vmi, "enp1s0", cidrIPv4). + WithArguments(vmi, "enp1s0", cidrIPv4). WithTimeout(10*time.Second). WithPolling(time.Second). Should(Equal(expectedGatewayMAC), step) @@ -1938,7 +1942,7 @@ ip route add %[3]s via %[4]s targetNodeIPv6GatewayPath, err := kubevirt.GenerateGatewayIPv6RouterLLA(targetNode, networkName) Expect(err).NotTo(HaveOccurred()) Eventually(kubevirt.RetrieveIPv6Gateways). - WithArguments(virtClient, vmi). + WithArguments(vmi). WithTimeout(5*time.Second). WithPolling(time.Second). 
Should(Equal([]string{targetNodeIPv6GatewayPath}), "should reconcile ipv6 gateway nexthop after live migration") @@ -2199,10 +2203,20 @@ chpasswd: { expire: False } ) DescribeTable("should maintain tcp connection with minimal downtime", func(td func(vmi *kubevirtv1.VirtualMachineInstance)) { By("setting up the localnet underlay") + nodes := ovsPods(clientSet) + Expect(nodes).NotTo(BeEmpty()) + DeferCleanup(func() { + if e2eframework.TestContext.DeleteNamespace && (e2eframework.TestContext.DeleteNamespaceOnFailure || !CurrentSpecReport().Failed()) { + By("tearing down the localnet underlay") + Expect(teardownUnderlay(nodes, secondaryBridge)).To(Succeed()) + } + }) + cudn, networkName := kubevirt.GenerateCUDN(namespace, "net1", udnv1.NetworkTopologyLocalnet, udnv1.NetworkRoleSecondary, udnv1.DualStackCIDRs{}) createCUDN(cudn) - Expect(providerCtx.SetupUnderlay(fr, infraapi.Underlay{LogicalNetworkName: networkName})).To(Succeed()) + const secondaryInterfaceName = "eth1" + Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, networkName, 0 /*vlanID*/)).To(Succeed()) workerNodeList, err := fr.ClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{LabelSelector: labels.FormatLabels(map[string]string{"node-role.kubernetes.io/worker": ""})}) Expect(err).NotTo(HaveOccurred()) @@ -2234,14 +2248,14 @@ chpasswd: { expire: False } step := by(vmi.Name, "Login to virtual machine for the first time") Eventually(func() error { - return virtClient.LoginToFedora(vmi, "fedora", "fedora") + return kubevirt.LoginToFedora(vmi, "fedora", "fedora") }). WithTimeout(5*time.Second). WithPolling(time.Second). 
Should(Succeed(), step) step = by(vmi.Name, "Wait for cloud init to finish at first boot") - output, err := virtClient.RunCommand(vmi, "cloud-init status --wait", time.Minute) + output, err := kubevirt.RunCommand(vmi, "cloud-init status --wait", time.Minute) Expect(err).NotTo(HaveOccurred(), step+": "+output) testPodsIPs := podsMultusNetworkIPs(iperfServerTestPods, podNetworkStatusByNetConfigPredicate(namespace, cudn.Name, strings.ToLower(string(cudn.Spec.Network.Localnet.Role)))) @@ -2258,13 +2272,13 @@ chpasswd: { expire: False } Expect(crClient.Get(context.Background(), crclient.ObjectKeyFromObject(vmi), vmi)).To(Succeed()) step = by(vmi.Name, "Login to virtual machine after virtual machine instance live migration") - Expect(virtClient.LoginToFedora(vmi, "fedora", "fedora")).To(Succeed(), step) + Expect(kubevirt.LoginToFedora(vmi, "fedora", "fedora")).To(Succeed(), step) step = by(vmi.Name, "Check east/west traffic after virtual machine instance live migration") checkEastWestIperfTraffic(vmi, testPodsIPs, step) By("Stop iperf3 traffic before force killing vm, so iperf3 server do not get stuck") - output, err = virtClient.RunCommand(vmi, "killall iperf3", 5*time.Second) + output, err = kubevirt.RunCommand(vmi, "killall iperf3", 5*time.Second) Expect(err).ToNot(HaveOccurred(), output) step = by(vmi.Name, fmt.Sprintf("Force kill qemu at node %q where VM is running on", vmi.Status.NodeName)) @@ -2276,7 +2290,7 @@ chpasswd: { expire: False } Expect(crClient.Get(context.TODO(), crclient.ObjectKeyFromObject(vmi), vmi)).To(Succeed()) step = by(vmi.Name, "Login to virtual machine after virtual machine instance force killed") - Expect(virtClient.LoginToFedora(vmi, "fedora", "fedora")).To(Succeed(), step) + Expect(kubevirt.LoginToFedora(vmi, "fedora", "fedora")).To(Succeed(), step) step = by(vmi.Name, "Restart iperf traffic after forcing a vm failure") Expect(startEastWestIperfTraffic(vmi, testPodsIPs, step)).To(Succeed(), step) diff --git a/test/e2e/kubevirt/client.go 
b/test/e2e/kubevirt/client.go deleted file mode 100644 index 60c2cbcc2f..0000000000 --- a/test/e2e/kubevirt/client.go +++ /dev/null @@ -1,128 +0,0 @@ -package kubevirt - -import ( - "fmt" - "io" - "net/http" - "os" - "os/exec" - "path/filepath" - "strings" - - retry "k8s.io/client-go/util/retry" - - kubevirtv1 "kubevirt.io/api/core/v1" -) - -type Client struct { - path string -} - -func NewClient(cliDir string) (*Client, error) { - // Ensure the virtctl directory exists. - if err := os.MkdirAll(cliDir, 0755); err != nil { - return nil, fmt.Errorf("failed to create virtctl directory %q: %w", cliDir, err) - } - - // Ensure the virtctl executable is present. - if err := ensureVirtctl(cliDir); err != nil { - return nil, fmt.Errorf("failed to ensure virtctl: %w", err) - } - - return &Client{path: filepath.Join(cliDir, "virtctl")}, nil -} - -func (virtctl *Client) RestartVirtualMachine(vmi *kubevirtv1.VirtualMachineInstance) (string, error) { - output, err := exec.Command(virtctl.path, "restart", "-n", vmi.Namespace, vmi.Name).CombinedOutput() - if err != nil { - return "", fmt.Errorf("failed to restart VM: %w", err) - } - return string(output), nil -} - -func ensureVirtctl(cliDir string) error { - // Check if the "virtctl" executable exists in the specified path. - // If it does not exist, call the installVirtctl function. - if _, err := os.Stat(filepath.Join(cliDir, "virtctl")); os.IsNotExist(err) { - return installVirtctl(cliDir) - } else if err != nil { - return fmt.Errorf("error checking virtctl executable: %w", err) - } - return nil -} - -func downloadVirtctlBinary() (io.ReadCloser, error) { - // Fetch the latest stable version of KubeVirt from the stable.txt file. - stableResp, err := http.Get("https://storage.googleapis.com/kubevirt-prow/release/kubevirt/kubevirt/stable.txt") - if err != nil { - return nil, fmt.Errorf("failed to fetch stable version: %w", err) - } - defer stableResp.Body.Close() - - // Check if the HTTP response status is OK. 
- if stableResp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("failed to fetch stable version: received status code %d", stableResp.StatusCode) - } - - // Read the version from the response body. - versionBytes, err := io.ReadAll(stableResp.Body) - if err != nil { - return nil, fmt.Errorf("failed to read stable version: %w", err) - } - version := strings.TrimSpace(string(versionBytes)) - - // Construct the download URL for the virtctl binary. - virtctlURL := fmt.Sprintf("https://github.com/kubevirt/kubevirt/releases/download/%s/virtctl-%s-linux-amd64", version, version) - - // Download the virtctl binary. - virtctlResp, err := http.Get(virtctlURL) - if err != nil { - return nil, fmt.Errorf("failed to download virtctl: %w", err) - } - - // Check if the HTTP response status is OK. - if virtctlResp.StatusCode != http.StatusOK { - // Close the body on error to prevent resource leaks - virtctlResp.Body.Close() - return nil, fmt.Errorf("failed to download virtctl: received status code %d", virtctlResp.StatusCode) - } - - return virtctlResp.Body, nil -} - -func installVirtctl(cliDir string) error { - var virtctlBody io.ReadCloser - allErrors := func(err error) bool { - return true - } - err := retry.OnError(retry.DefaultRetry, allErrors, func() error { - var downloadErr error - virtctlBody, downloadErr = downloadVirtctlBinary() - return downloadErr // Return the error if download failed, nil otherwise. - }) - if err != nil { - // If err is not nil here, it means all retries failed. - return err - } - defer virtctlBody.Close() // Ensure the body is closed - - // Save the binary to the specified directory. 
- cliPath := filepath.Join(cliDir, "virtctl") - outFile, err := os.Create(cliPath) - if err != nil { - return fmt.Errorf("failed to create virtctl file at %s: %w", cliPath, err) - } - defer outFile.Close() - - _, err = io.Copy(outFile, virtctlBody) - if err != nil { - return fmt.Errorf("failed to save virtctl binary to %s: %w", cliPath, err) - } - - // Make the binary executable. - if err := os.Chmod(cliPath, 0755); err != nil { - return fmt.Errorf("failed to make virtctl executable at %s: %w", cliPath, err) - } - - return nil -} diff --git a/test/e2e/kubevirt/console.go b/test/e2e/kubevirt/console.go index 4ca7533be8..822bd04162 100644 --- a/test/e2e/kubevirt/console.go +++ b/test/e2e/kubevirt/console.go @@ -54,12 +54,21 @@ var ( shellFailRegexp = regexp.MustCompile(shellFail) ) +// SafeExpectBatch runs the batch from `expected`, connecting to a VMI's console and +// waiting `wait` seconds for the batch to return. +// It validates that the commands arrive to the console. +// NOTE: This functions heritage limitations from `expectBatchWithValidatedSend` refer to it to check them. +func safeExpectBatch(vmi *v1.VirtualMachineInstance, expected []expect.Batcher, timeout time.Duration) error { + _, err := safeExpectBatchWithResponse(vmi, expected, timeout) + return err +} + // safeExpectBatchWithResponse runs the batch from `expected`, connecting to a VMI's console and // waiting `wait` seconds for the batch to return with a response. // It validates that the commands arrive to the console. // NOTE: This functions inherits limitations from `expectBatchWithValidatedSend`, refer to it for more information. 
-func safeExpectBatchWithResponse(virtctlPath string, vmi *v1.VirtualMachineInstance, expected []expect.Batcher, timeout time.Duration) ([]expect.BatchRes, error) { - expecter, _, err := newExpecter(virtctlPath, vmi, consoleConnectionTimeout, expect.Verbose(true), expect.VerboseWriter(GinkgoWriter)) +func safeExpectBatchWithResponse(vmi *v1.VirtualMachineInstance, expected []expect.Batcher, timeout time.Duration) ([]expect.BatchRes, error) { + expecter, _, err := newExpecter(vmi, consoleConnectionTimeout, expect.Verbose(true), expect.VerboseWriter(GinkgoWriter)) if err != nil { return nil, err } @@ -72,12 +81,8 @@ func safeExpectBatchWithResponse(virtctlPath string, vmi *v1.VirtualMachineInsta return resp, err } -func (virtctl *Client) RunCommand(vmi *v1.VirtualMachineInstance, command string, timeout time.Duration) (string, error) { - return runCommand(virtctl.path, vmi, command, timeout) -} - -func runCommand(virtctlPath string, vmi *v1.VirtualMachineInstance, command string, timeout time.Duration) (string, error) { - results, err := safeExpectBatchWithResponse(virtctlPath, vmi, []expect.Batcher{ +func RunCommand(vmi *v1.VirtualMachineInstance, command string, timeout time.Duration) (string, error) { + results, err := safeExpectBatchWithResponse(vmi, []expect.Batcher{ &expect.BSnd{S: "\n"}, &expect.BExp{R: PromptExpression}, &expect.BSnd{S: command + "\n"}, @@ -109,11 +114,10 @@ func skipInput(scanner *bufio.Scanner) bool { // newExpecter will connect to an already logged in VMI console and return the generated expecter it will wait `timeout` for the connection. 
func newExpecter( - virtctlPath string, vmi *v1.VirtualMachineInstance, timeout time.Duration, opts ...expect.Option) (expect.Expecter, <-chan error, error) { - virtctlCmd := []string{virtctlPath, "console", "-n", vmi.Namespace, vmi.Name} + virtctlCmd := []string{"virtctl", "console", "-n", vmi.Namespace, vmi.Name} return expect.SpawnWithArgs(virtctlCmd, timeout, expect.SendTimeout(timeout), expect.Verbose(true), expect.VerboseWriter(GinkgoWriter)) } @@ -178,13 +182,13 @@ func expectBatchWithValidatedSend(expecter expect.Expecter, batch []expect.Batch return res, err } -func (virtctl *Client) LoginToFedora(vmi *kubevirtv1.VirtualMachineInstance, user, password string) error { - return loginToFedoraWithHostname(virtctl.path, vmi, user, password, vmi.Name) +func LoginToFedora(vmi *kubevirtv1.VirtualMachineInstance, user, password string) error { + return LoginToFedoraWithHostname(vmi, user, password, vmi.Name) } // LoginToFedora performs a console login to a Fedora base VM -func loginToFedoraWithHostname(virtctlPath string, vmi *kubevirtv1.VirtualMachineInstance, user, password, hostname string) error { - expecter, _, err := newExpecter(virtctlPath, vmi, consoleConnectionTimeout, expect.Verbose(true), expect.VerboseWriter(GinkgoWriter)) +func LoginToFedoraWithHostname(vmi *kubevirtv1.VirtualMachineInstance, user, password, hostname string) error { + expecter, _, err := newExpecter(vmi, consoleConnectionTimeout, expect.Verbose(true), expect.VerboseWriter(GinkgoWriter)) if err != nil { return err } diff --git a/test/e2e/kubevirt/ip.go b/test/e2e/kubevirt/ip.go index 3e11bd9b92..180c7d252a 100644 --- a/test/e2e/kubevirt/ip.go +++ b/test/e2e/kubevirt/ip.go @@ -8,7 +8,7 @@ import ( v1 "kubevirt.io/api/core/v1" ) -func RetrieveAllGlobalAddressesFromGuest(cli *Client, vmi *v1.VirtualMachineInstance) ([]string, error) { +func RetrieveAllGlobalAddressesFromGuest(vmi *v1.VirtualMachineInstance) ([]string, error) { ifaces := []struct { Name string `json:"ifname"` Addresses 
[]struct { @@ -19,7 +19,7 @@ func RetrieveAllGlobalAddressesFromGuest(cli *Client, vmi *v1.VirtualMachineInst } `json:"addr_info"` }{} - output, err := cli.RunCommand(vmi, "ip -j a show", 2*time.Second) + output, err := RunCommand(vmi, "ip -j a show", 2*time.Second) if err != nil { return nil, fmt.Errorf("failed retrieving adresses with ip command: %s: %w", output, err) } diff --git a/test/e2e/kubevirt/net.go b/test/e2e/kubevirt/net.go index 03b7e819ff..8c65118ae1 100644 --- a/test/e2e/kubevirt/net.go +++ b/test/e2e/kubevirt/net.go @@ -27,7 +27,7 @@ nmcli c mod %[1]s ipv4.addresses "" ipv6.addresses "" ipv4.gateway "" ipv6.gatew nmcli d reapply %[1]s`, iface) } -func RetrieveCachedGatewayMAC(cli *Client, vmi *kubevirtv1.VirtualMachineInstance, dev, cidr string) (string, error) { +func RetrieveCachedGatewayMAC(vmi *kubevirtv1.VirtualMachineInstance, dev, cidr string) (string, error) { _, ipNet, err := net.ParseCIDR(cidr) if err != nil { return "", err @@ -35,7 +35,7 @@ func RetrieveCachedGatewayMAC(cli *Client, vmi *kubevirtv1.VirtualMachineInstanc gatewayIP := util.GetNodeGatewayIfAddr(ipNet).IP.String() - output, err := cli.RunCommand(vmi, fmt.Sprintf("ip neigh get %s dev %s", gatewayIP, dev), 2*time.Second) + output, err := RunCommand(vmi, fmt.Sprintf("ip neigh get %s dev %s", gatewayIP, dev), 2*time.Second) if err != nil { return "", fmt.Errorf("%s: %v", output, err) } @@ -46,12 +46,12 @@ func RetrieveCachedGatewayMAC(cli *Client, vmi *kubevirtv1.VirtualMachineInstanc return outputSplit[4], nil } -func RetrieveIPv6Gateways(cli *Client, vmi *v1.VirtualMachineInstance) ([]string, error) { +func RetrieveIPv6Gateways(vmi *v1.VirtualMachineInstance) ([]string, error) { routes := []struct { Gateway string `json:"gateway"` }{} - output, err := cli.RunCommand(vmi, "ip -6 -j route list default", 2*time.Second) + output, err := RunCommand(vmi, "ip -6 -j route list default", 2*time.Second) if err != nil { return nil, fmt.Errorf("%s: %v", output, err) } diff --git 
a/test/e2e/kubevirt/nmstate.go b/test/e2e/kubevirt/nmstate.go index bd852ca794..10e8e34108 100644 --- a/test/e2e/kubevirt/nmstate.go +++ b/test/e2e/kubevirt/nmstate.go @@ -27,8 +27,8 @@ type NetworkState struct { Interfaces []Interface `json:"interfaces"` } -func RetrieveNetworkState(cli *Client, vmi *v1.VirtualMachineInstance) (*NetworkState, error) { - output, err := cli.RunCommand(vmi, "nmstatectl show --json", 2*time.Second) +func RetrieveNetworkState(vmi *v1.VirtualMachineInstance) (*NetworkState, error) { + output, err := RunCommand(vmi, "nmstatectl show --json", 2*time.Second) if err != nil { return nil, fmt.Errorf("%s: %v", output, err) } diff --git a/test/e2e/label/label.go b/test/e2e/label/label.go index 61448bf930..6f81c9ceb1 100644 --- a/test/e2e/label/label.go +++ b/test/e2e/label/label.go @@ -40,11 +40,3 @@ func processOverrides(s string) string { } return overRide } - -// Extended returns a label used to label extended feature tests. This label -// might be used to label feature tests that are considered not to be testing -// the core functionality of a feature and that might be filtered out for -// various reasons like for example to keep selected job run times down. 
-func Extended() ginkgo.Labels { - return ginkgo.Label("EXTENDED") -} diff --git a/test/e2e/localnet-underlay.go b/test/e2e/localnet-underlay.go index df8caf702f..03649143dd 100644 --- a/test/e2e/localnet-underlay.go +++ b/test/e2e/localnet-underlay.go @@ -1 +1,237 @@ package e2e + +import ( + "context" + "fmt" + "os" + "os/exec" + "strings" + "time" + + "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" +) + +const ( + defaultOvsBridge = "breth0" + secondaryBridge = "ovsbr1" + add = "add-br" + del = "del-br" +) + +func setupUnderlay(ovsPods []v1.Pod, bridgeName, portName, networkName string, vlanID int) error { + for _, ovsPod := range ovsPods { + if bridgeName != defaultOvsBridge { + if err := addOVSBridge(ovsPod.Namespace, ovsPod.Name, bridgeName); err != nil { + return err + } + + if vlanID > 0 { + if err := ovsEnableVLANAccessPort(ovsPod.Namespace, ovsPod.Name, bridgeName, portName, vlanID); err != nil { + return err + } + } else { + if err := ovsAttachPortToBridge(ovsPod.Namespace, ovsPod.Name, bridgeName, portName); err != nil { + return err + } + } + } + if err := configureBridgeMappings( + ovsPod.Namespace, + ovsPod.Name, + defaultNetworkBridgeMapping(), + bridgeMapping(networkName, bridgeName), + ); err != nil { + return err + } + } + return nil +} + +func ovsRemoveSwitchPort(ovsPods []v1.Pod, portName string, newVLANID int) error { + for _, ovsPod := range ovsPods { + if err := ovsRemoveVLANAccessPort(ovsPod.Namespace, ovsPod.Name, secondaryBridge, portName); err != nil { + return fmt.Errorf("failed to remove old VLAN port: %v", err) + } + + if err := ovsEnableVLANAccessPort(ovsPod.Namespace, ovsPod.Name, secondaryBridge, portName, newVLANID); err != nil { + return fmt.Errorf("failed to add new VLAN port: %v", err) + } + } + + return nil +} + +func 
teardownUnderlay(ovsPods []v1.Pod, bridgeName string) error { + for _, ovsPod := range ovsPods { + if bridgeName != defaultOvsBridge { + if err := removeOVSBridge(ovsPod.Namespace, ovsPod.Name, bridgeName); err != nil { + return err + } + } + // restore default bridge mapping + if err := configureBridgeMappings( + ovsPod.Namespace, + ovsPod.Name, + defaultNetworkBridgeMapping(), + ); err != nil { + return err + } + } + return nil +} + +func ovsPods(clientSet clientset.Interface) []v1.Pod { + const ( + ovsNodeLabel = "app=ovs-node" + ) + pods, err := clientSet.CoreV1().Pods(deploymentconfig.Get().OVNKubernetesNamespace()).List( + context.Background(), + metav1.ListOptions{LabelSelector: ovsNodeLabel}, + ) + if err != nil { + return nil + } + return pods.Items +} + +func addOVSBridge(podNamespace, podName string, bridgeName string) error { + cmd := strings.Join([]string{"ovs-vsctl", add, bridgeName}, " ") + if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { + return fmt.Errorf("failed to add ovs bridge %q: %v", bridgeName, err) + } + return nil +} + +func removeOVSBridge(podNamespace, podName string, bridgeName string) error { + cmd := strings.Join([]string{"ovs-vsctl", del, bridgeName}, " ") + if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { + return fmt.Errorf("failed to add ovs bridge %q: %v", bridgeName, err) + } + return nil +} + +func ovsAttachPortToBridge(podNamespace, podName string, bridgeName string, portName string) error { + cmd := strings.Join([]string{ + "ovs-vsctl", "add-port", bridgeName, portName, + }, " ") + if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { + return fmt.Errorf("failed to remove port %s from OVS bridge %s: %v", portName, bridgeName, err) + } + return nil +} + +func ovsEnableVLANAccessPort(podNamespace, podName string, bridgeName 
string, portName string, vlanID int) error { + cmd := strings.Join([]string{ + "ovs-vsctl", "add-port", bridgeName, portName, fmt.Sprintf("tag=%d", vlanID), "vlan_mode=access", + }, " ") + if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { + return fmt.Errorf("failed to remove port %s from OVS bridge %s: %v", portName, bridgeName, err) + } + return nil +} + +func ovsRemoveVLANAccessPort(podNamespace, podName string, bridgeName string, portName string) error { + cmd := strings.Join([]string{ + "ovs-vsctl", "del-port", bridgeName, portName, + }, " ") + if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { + return fmt.Errorf("failed to remove port %s from OVS bridge %s: %v", portName, bridgeName, err) + } + return nil +} + +type BridgeMapping struct { + physnet string + ovsBridge string +} + +func (bm BridgeMapping) String() string { + return fmt.Sprintf("%s:%s", bm.physnet, bm.ovsBridge) +} + +type BridgeMappings []BridgeMapping + +func (bms BridgeMappings) String() string { + return strings.Join(Map(bms, func(bm BridgeMapping) string { return bm.String() }), ",") +} + +func Map[T, V any](items []T, fn func(T) V) []V { + result := make([]V, len(items)) + for i, t := range items { + result[i] = fn(t) + } + return result +} + +func configureBridgeMappings(podNamespace, podName string, mappings ...BridgeMapping) error { + mappingsString := fmt.Sprintf("external_ids:ovn-bridge-mappings=%s", BridgeMappings(mappings).String()) + cmd := strings.Join([]string{"ovs-vsctl", "set", "open", ".", mappingsString}, " ") + if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { + return fmt.Errorf("failed to configure bridge mappings %q: %v", mappingsString, err) + } + return nil +} + +func defaultNetworkBridgeMapping() BridgeMapping { + return BridgeMapping{ + physnet: "physnet", + 
ovsBridge: "breth0", + } +} + +func bridgeMapping(physnet, ovsBridge string) BridgeMapping { + return BridgeMapping{ + physnet: physnet, + ovsBridge: ovsBridge, + } +} + +// TODO: make this function idempotent; use golang netlink instead +func createVLANInterface(deviceName string, vlanID string, ipAddress *string) error { + vlan := vlanName(deviceName, vlanID) + cmd := exec.Command("sudo", "ip", "link", "add", "link", deviceName, "name", vlan, "type", "vlan", "id", vlanID) + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to create vlan interface %s: %v", vlan, err) + } + + cmd = exec.Command("sudo", "ip", "link", "set", "dev", vlan, "up") + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to enable vlan interface %s: %v", vlan, err) + } + + if ipAddress != nil { + cmd = exec.Command("sudo", "ip", "addr", "add", *ipAddress, "dev", vlan) + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to define the vlan interface %q IP Address %s: %v", vlan, *ipAddress, err) + } + } + return nil +} + +// TODO: make this function idempotent; use golang netlink instead +func deleteVLANInterface(deviceName string, vlanID string) error { + vlan := vlanName(deviceName, vlanID) + cmd := exec.Command("sudo", "ip", "link", "del", vlan) + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to delete vlan interface %s: %v", vlan, err) + } + return nil +} + +func vlanName(deviceName string, vlanID string) string { + // MAX IFSIZE 16; got to truncate it to add the vlan suffix + if len(deviceName)+len(vlanID)+1 > 16 { + deviceName = deviceName[:len(deviceName)-len(vlanID)-1] + } + return fmt.Sprintf("%s.%s", deviceName, vlanID) +} diff --git a/test/e2e/multihoming.go b/test/e2e/multihoming.go index e82255fc57..46ad7eedc5 100644 --- a/test/e2e/multihoming.go +++ b/test/e2e/multihoming.go @@ -5,6 +5,9 @@ import ( "errors" "fmt" "net/netip" + 
"os" + "os/exec" + "strconv" "strings" "time" @@ -13,6 +16,7 @@ import ( . "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" + "github.com/docker/docker/client" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -27,10 +31,6 @@ import ( ipgenerator "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/generator/ip" util "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" - "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" - "github.com/ovn-org/ovn-kubernetes/test/e2e/images" - "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" - infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" ) const ( @@ -55,10 +55,9 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { f := wrappedTestFramework("multi-homing") var ( - cs clientset.Interface - nadClient nadclient.K8sCniCncfIoV1Interface - mnpClient mnpclient.K8sCniCncfIoV1beta1Interface - providerCtx infraapi.Context + cs clientset.Interface + nadClient nadclient.K8sCniCncfIoV1Interface + mnpClient mnpclient.K8sCniCncfIoV1beta1Interface ) BeforeEach(func() { @@ -69,7 +68,6 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { Expect(err).NotTo(HaveOccurred()) mnpClient, err = mnpclient.NewForConfig(f.ClientConfig()) Expect(err).NotTo(HaveOccurred()) - providerCtx = infraprovider.Get().NewTestContext() }) Context("A single pod with an OVN-K secondary network", func() { @@ -81,10 +79,8 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { if netConfig.topology == "localnet" { By("applying ovs bridge mapping") - Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ - LogicalNetworkName: netConfig.networkName, - VlanID: netConfig.vlanID, - })).To(Succeed()) + Expect(setBridgeMappings(cs, defaultNetworkBridgeMapping(), bridgeMapping(netConfig.networkName, secondaryBridge))).NotTo(HaveOccurred()) + ginkgo.DeferCleanup(setBridgeMappings, cs, 
defaultNetworkBridgeMapping()) } By("creating the attachment configuration") @@ -282,7 +278,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { port = 9000 ) - ginkgo.DescribeTable("attached to a localnet network mapped to external primary interface bridge", //nolint:lll + ginkgo.DescribeTable("attached to a localnet network mapped to breth0", func(netConfigParams networkAttachmentConfigParams, clientPodConfig, serverPodConfig podConfiguration, isCollocatedPods bool) { By("Get two scheduable nodes and ensure client and server are located on distinct Nodes") @@ -309,10 +305,13 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { } By("setting up the localnet underlay") - Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ - BridgeName: deploymentconfig.Get().ExternalBridgeName(), - LogicalNetworkName: netConfig.networkName, - })).To(Succeed()) + pods := ovsPods(cs) + Expect(pods).NotTo(BeEmpty()) + defer func() { + By("tearing down the localnet underlay") + Expect(teardownUnderlay(pods, defaultOvsBridge)).To(Succeed()) + }() + Expect(setupUnderlay(pods, defaultOvsBridge, "", netConfig.networkName, netConfig.vlanID)).To(Succeed()) nad := generateNAD(netConfig) By(fmt.Sprintf("creating the attachment configuration: %v\n", nad)) @@ -547,10 +546,16 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { serverPodConfig.namespace = f.Namespace.Name if netConfig.topology == "localnet" { - Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ - LogicalNetworkName: netConfig.networkName, - VlanID: netConfig.vlanID, - })).To(Succeed()) + By("setting up the localnet underlay") + nodes := ovsPods(cs) + Expect(nodes).NotTo(BeEmpty()) + defer func() { + By("tearing down the localnet underlay") + Expect(teardownUnderlay(nodes, secondaryBridge)).To(Succeed()) + }() + + const secondaryInterfaceName = "eth1" + Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, netConfig.networkName, netConfig.vlanID)).To(Succeed()) } 
By("creating the attachment configuration") @@ -896,17 +901,19 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { Context("localnet OVN-K secondary network", func() { const ( - clientPodName = "client-pod" - nodeHostnameKey = "kubernetes.io/hostname" - servicePort uint16 = 9000 - dockerNetworkName = "underlay" - underlayServiceIP = "60.128.0.1" - expectedOriginalMTU = 1200 + clientPodName = "client-pod" + nodeHostnameKey = "kubernetes.io/hostname" + servicePort uint16 = 9000 + dockerNetworkName = "underlay" + underlayServiceIP = "60.128.0.1" + secondaryInterfaceName = "eth1" + expectedOriginalMTU = 1200 ) - var ( - netConfig networkAttachmentConfig - ) + var netConfig networkAttachmentConfig + var nodes []v1.Pod + var underlayBridgeName string + var cmdWebServer *exec.Cmd underlayIP := underlayServiceIP + "/24" Context("with a service running on the underlay", func() { @@ -923,29 +930,33 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { }) By("setting up the localnet underlay") - Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ - LogicalNetworkName: netConfig.networkName, - VlanID: netConfig.vlanID, - })).To(Succeed()) + nodes = ovsPods(cs) + Expect(nodes).NotTo(BeEmpty()) + Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, netConfig.networkName, netConfig.vlanID)).To(Succeed()) + }) + + BeforeEach(func() { + By("adding IP to the underlay docker bridge") + cli, err := client.NewClientWithOpts(client.FromEnv) + Expect(err).NotTo(HaveOccurred()) + + gatewayIP, err := getNetworkGateway(cli, dockerNetworkName) + Expect(err).NotTo(HaveOccurred()) + + underlayBridgeName, err = findInterfaceByIP(gatewayIP) + Expect(err).NotTo(HaveOccurred()) + + cmd := exec.Command("sudo", "ip", "addr", "add", underlayIP, "dev", underlayBridgeName) + cmd.Stderr = os.Stderr + err = cmd.Run() + Expect(err).NotTo(HaveOccurred()) }) BeforeEach(func() { By("starting a service, connected to the underlay") - providerCtx = 
infraprovider.Get().NewTestContext() - - underlayNetwork, err := infraprovider.Get().GetNetwork(dockerNetworkName) - Expect(err).NotTo(HaveOccurred(), "must get underlay network") - externalContainerName := f.Namespace.Name + "-web-server" - serviceContainerSpec := infraapi.ExternalContainer{ - Name: externalContainerName, - Image: images.AgnHost(), - Network: underlayNetwork, - Entrypoint: "bash", - CmdArgs: []string{"-c", fmt.Sprintf("ip a add %s/24 dev eth0 && ./agnhost netexec --http-port=%d", underlayServiceIP, servicePort)}, - ExtPort: servicePort, - } - _, err = providerCtx.CreateExternalContainer(serviceContainerSpec) - Expect(err).NotTo(HaveOccurred(), "must create external container 1") + cmdWebServer = exec.Command("python3", "-m", "http.server", "--bind", underlayServiceIP, strconv.Itoa(int(servicePort))) + cmdWebServer.Stderr = os.Stderr + Expect(cmdWebServer.Start()).NotTo(HaveOccurred(), "failed to create web server, port might be busy") }) BeforeEach(func() { @@ -958,6 +969,23 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { Expect(err).NotTo(HaveOccurred()) }) + AfterEach(func() { + err := cmdWebServer.Process.Kill() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + cmd := exec.Command("sudo", "ip", "addr", "del", underlayIP, "dev", underlayBridgeName) + cmd.Stderr = os.Stderr + err := cmd.Run() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + By("tearing down the localnet underlay") + Expect(teardownUnderlay(nodes, secondaryBridge)).To(Succeed()) + }) + It("correctly sets the MTU on the pod", func() { Eventually(func() error { clientPodConfig := podConfiguration{ @@ -988,7 +1016,6 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { By("asserting the *client* pod can contact the underlay service") Expect(connectToServer(clientPodConfig, underlayServiceIP, servicePort)).To(Succeed()) - }) Context("and networkAttachmentDefinition is modified", func() { @@ -1086,10 +1113,7 @@ var _ = 
Describe("Multi Homing", feature.MultiHoming, func() { Context("and the service connected to the underlay is reconfigured to connect to the new VLAN-ID", func() { BeforeEach(func() { - Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ - LogicalNetworkName: netConfig.networkName, - VlanID: newLocalnetVLANID, - })).To(Succeed(), "configuring the OVS bridge with new localnet vlan id") + Expect(ovsRemoveSwitchPort(nodes, secondaryInterfaceName, newLocalnetVLANID)).To(Succeed()) }) It("can now communicate over a localnet secondary network from pod to the underlay service", func() { @@ -1279,6 +1303,9 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { Context("with a trunked configuration", func() { const vlanID = 20 BeforeEach(func() { + nodes = ovsPods(cs) + Expect(nodes).NotTo(BeEmpty()) + // we are setting up the bridge in trunked mode by not // specifying a particular VLAN ID on the network conf netConfig = newNetworkAttachmentConfig( @@ -1291,35 +1318,32 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { }) By("setting up the localnet underlay with a trunked configuration") - Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ - LogicalNetworkName: netConfig.networkName, - VlanID: netConfig.vlanID, - })).To(Succeed(), "configuring the OVS bridge") - - By("starting a service, connected to the underlay over a VLAN") - providerCtx = infraprovider.Get().NewTestContext() - - ifName := "eth0" - vlanName := fmt.Sprintf("%s.%d", ifName, vlanID) - underlayNetwork, err := infraprovider.Get().GetNetwork(dockerNetworkName) - Expect(err).NotTo(HaveOccurred(), "must get underlay network") - externalContainerName := f.Namespace.Name + "-web-server" - serviceContainerSpec := infraapi.ExternalContainer{ - Name: externalContainerName, - Image: images.AgnHost(), - Network: underlayNetwork, - Entrypoint: "bash", - ExtPort: servicePort, - CmdArgs: []string{"-c", fmt.Sprintf(` -ip link add link %[1]s name %[2]s type vlan id %[3]d -ip link set dev 
%[2]s up -ip a add %[4]s/24 dev %[2]s -./agnhost netexec --http-port=%[5]d -`, ifName, vlanName, vlanID, underlayServiceIP, servicePort)}, - } - _, err = providerCtx.CreateExternalContainer(serviceContainerSpec) - Expect(err).NotTo(HaveOccurred(), "must create external container 1") + Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, netConfig.networkName, netConfig.vlanID)).To(Succeed(), "configuring the OVS bridge") + + By(fmt.Sprintf("creating a VLAN interface on top of the bridge connecting the cluster nodes with IP: %s", underlayIP)) + cli, err := client.NewClientWithOpts(client.FromEnv) + Expect(err).NotTo(HaveOccurred()) + + gatewayIP, err := getNetworkGateway(cli, dockerNetworkName) + Expect(err).NotTo(HaveOccurred()) + + underlayBridgeName, err = findInterfaceByIP(gatewayIP) + Expect(err).NotTo(HaveOccurred()) + Expect(createVLANInterface(underlayBridgeName, strconv.Itoa(vlanID), &underlayIP)).To( + Succeed(), + "create a VLAN interface on the bridge interconnecting the cluster nodes", + ) + + By("starting a service, connected to the underlay") + cmdWebServer = exec.Command("python3", "-m", "http.server", "--bind", underlayServiceIP, strconv.Itoa(port)) + cmdWebServer.Stderr = os.Stderr + Expect(cmdWebServer.Start()).NotTo(HaveOccurred(), "failed to create web server, port might be busy") + }) + AfterEach(func() { + Expect(cmdWebServer.Process.Kill()).NotTo(HaveOccurred(), "kill the python webserver") + Expect(deleteVLANInterface(underlayBridgeName, strconv.Itoa(vlanID))).NotTo(HaveOccurred(), "remove the underlay physical configuration") + Expect(teardownUnderlay(nodes, secondaryBridge)).To(Succeed(), "tear down the localnet underlay") }) It("the same bridge mapping can be shared by a separate VLAN by using the physical network name attribute", func() { @@ -1352,7 +1376,6 @@ ip a add %[4]s/24 dev %[2]s By(fmt.Sprintf("asserting the *client* pod can contact the underlay service with IP %q on the separate vlan", underlayIP)) 
Expect(connectToServer(clientPodConfig, underlayServiceIP, servicePort)).To(Succeed()) - }) }) }) @@ -1400,10 +1423,15 @@ ip a add %[4]s/24 dev %[2]s if netConfig.topology == "localnet" { By("setting up the localnet underlay") - Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ - LogicalNetworkName: netConfig.networkName, - VlanID: netConfig.vlanID, - })).To(Succeed()) + nodes := ovsPods(cs) + Expect(nodes).NotTo(BeEmpty()) + defer func() { + By("tearing down the localnet underlay") + Expect(teardownUnderlay(nodes, secondaryBridge)).To(Succeed()) + }() + + const secondaryInterfaceName = "eth1" + Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, netConfig.networkName, netConfig.vlanID)).To(Succeed()) } Expect(createNads(f, nadClient, extraNamespace, netConfig)).NotTo(HaveOccurred()) @@ -1822,10 +1850,14 @@ ip a add %[4]s/24 dev %[2]s netConfig := newNetworkAttachmentConfig(netConfigParams) By("setting up the localnet underlay") - Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ - LogicalNetworkName: netConfig.networkName, - VlanID: netConfig.vlanID, - })).To(Succeed()) + nodes := ovsPods(cs) + Expect(nodes).NotTo(BeEmpty()) + defer func() { + By("tearing down the localnet underlay") + Expect(teardownUnderlay(nodes, secondaryBridge)).To(Succeed()) + }() + const secondaryInterfaceName = "eth1" + Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, netConfig.networkName, netConfig.vlanID)).To(Succeed()) Expect(createNads(f, nadClient, extraNamespace, netConfig)).NotTo(HaveOccurred()) @@ -1950,10 +1982,14 @@ ip a add %[4]s/24 dev %[2]s netConfig := newNetworkAttachmentConfig(netConfigParams) By("setting up the localnet underlay") - Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ - LogicalNetworkName: netConfig.networkName, - VlanID: netConfig.vlanID, - })).To(Succeed()) + nodes := ovsPods(cs) + Expect(nodes).NotTo(BeEmpty()) + defer func() { + By("tearing down the localnet underlay") + Expect(teardownUnderlay(nodes, 
secondaryBridge)).To(Succeed()) + }() + const secondaryInterfaceName = "eth1" + Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, netConfig.networkName, netConfig.vlanID)).To(Succeed()) Expect(createNads(f, nadClient, extraNamespace, netConfig)).NotTo(HaveOccurred()) @@ -2248,3 +2284,18 @@ func addIPRequestToPodConfig(cs clientset.Interface, podConfig *podConfiguration } return nil } + +func setBridgeMappings(cs clientset.Interface, mappings ...BridgeMapping) error { + pods := ovsPods(cs) + if len(pods) == 0 { + return fmt.Errorf("pods list is empty") + } + + for _, pods := range pods { + if err := configureBridgeMappings(pods.Namespace, pods.Name, mappings...); err != nil { + return err + } + } + + return nil +} diff --git a/test/e2e/multihoming_utils.go b/test/e2e/multihoming_utils.go index d7921cef00..2fb10354d4 100644 --- a/test/e2e/multihoming_utils.go +++ b/test/e2e/multihoming_utils.go @@ -704,39 +704,3 @@ func getNetworkGateway(cli *client.Client, networkName string) (string, error) { return "", fmt.Errorf("Gateway not found for network %q", networkName) } - -func getPodAnnotationForAttachment(pod *v1.Pod, attachmentName string) (PodAnnotation, error) { - podAnnotation, err := unmarshalPodAnnotation(pod.Annotations, attachmentName) - if err != nil { - return PodAnnotation{}, fmt.Errorf("failed to unmarshall annotations for pod %q: %v", pod.Name, err) - } - - return *podAnnotation, nil -} - -func getPodAnnotationIPsForAttachment(k8sClient clientset.Interface, podNamespace string, podName string, attachmentName string) ([]*net.IPNet, error) { - pod, err := k8sClient.CoreV1().Pods(podNamespace).Get(context.Background(), podName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - podAnnotation, err := getPodAnnotationForAttachment(pod, attachmentName) - if err != nil { - return nil, err - } - return podAnnotation.IPs, nil -} - -// podIPsForNetworkByIndex returns the v4 or v6 IPs for a pod on the UDN -func 
getPodAnnotationIPsForAttachmentByIndex(k8sClient clientset.Interface, podNamespace string, podName string, attachmentName string, index int) (string, error) { - ipnets, err := getPodAnnotationIPsForAttachment(k8sClient, podNamespace, podName, attachmentName) - if err != nil { - return "", err - } - if index >= len(ipnets) { - return "", fmt.Errorf("no IP at index %d for attachment %s on pod %s", index, attachmentName, namespacedName(podNamespace, podName)) - } - if len(ipnets) > 2 { - return "", fmt.Errorf("attachment for network %q with more than two IPs", attachmentName) - } - return ipnets[index].IP.String(), nil -} diff --git a/test/e2e/network_segmentation.go b/test/e2e/network_segmentation.go index bb667b2d94..659b18acc7 100644 --- a/test/e2e/network_segmentation.go +++ b/test/e2e/network_segmentation.go @@ -175,7 +175,7 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { for i, cidr := range strings.Split(netConfig.cidr, ",") { if cidr != "" { By("asserting the server pod has an IP from the configured range") - serverIP, err = getPodAnnotationIPsForAttachmentByIndex( + serverIP, err = podIPsForUserDefinedPrimaryNetwork( cs, f.Namespace.Name, serverPodConfig.name, @@ -610,7 +610,7 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { By("creating pod " + podConfig.name + " in " + podConfig.namespace) pod := runUDNPod(cs, podConfig.namespace, podConfig, nil) pods = append(pods, pod) - podIP, err := getPodAnnotationIPsForAttachmentByIndex( + podIP, err := podIPsForUserDefinedPrimaryNetwork( cs, pod.Namespace, pod.Name, @@ -792,7 +792,7 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { By(fmt.Sprintf("asserting network works in namespace %s", config.namespace)) for i, cidr := range strings.Split(config.cidr, ",") { if cidr != "" { - serverIP, err = getPodAnnotationIPsForAttachmentByIndex( + serverIP, err = podIPsForUserDefinedPrimaryNetwork( cs, config.namespace, 
serverPodConfig.name, @@ -1478,7 +1478,7 @@ spec: Name: externalContainerName, Image: images.AgnHost(), Network: providerPrimaryNetwork, - CmdArgs: httpServerContainerCmd(uint16(externalContainerPort)), + Args: httpServerContainerCmd(uint16(externalContainerPort)), ExtPort: externalContainerPort, } externalContainer, err = providerCtx.CreateExternalContainer(externalContainerSpec) @@ -1756,7 +1756,7 @@ spec: clientPodConfig.nodeSelector = map[string]string{nodeHostnameKey: node2Name} runUDNPod(cs, f.Namespace.Name, serverPodConfig, nil) runUDNPod(cs, f.Namespace.Name, clientPodConfig, nil) - serverIP, err := getPodAnnotationIPsForAttachmentByIndex(cs, f.Namespace.Name, serverPodConfig.name, namespacedName(f.Namespace.Name, netConfig.name), 0) + serverIP, err := podIPsForUserDefinedPrimaryNetwork(cs, f.Namespace.Name, serverPodConfig.name, namespacedName(f.Namespace.Name, netConfig.name), 0) Expect(err).ShouldNot(HaveOccurred(), "UDN pod IP must be retrieved") By("restart OVNKube node pods on client and server Nodes and ensure connectivity") serverPod := getPod(f, serverPodConfig.name) @@ -1887,17 +1887,31 @@ func generateLayer3Subnets(cidrs string) []string { // userDefinedNetworkReadyFunc returns a function that checks for the NetworkCreated condition in the provided udn func userDefinedNetworkReadyFunc(client dynamic.Interface, namespace, name string) func() error { - return networkReadyFunc(client.Resource(udnGVR).Namespace(namespace), name) + return func() error { + udn, err := client.Resource(udnGVR).Namespace(namespace).Get(context.Background(), name, metav1.GetOptions{}, "status") + if err != nil { + return err + } + conditions, err := getConditions(udn) + if err != nil { + return err + } + if len(conditions) == 0 { + return fmt.Errorf("no conditions found in: %v", udn) + } + for _, condition := range conditions { + if condition.Type == "NetworkCreated" && condition.Status == metav1.ConditionTrue { + return nil + } + } + return fmt.Errorf("no NetworkCreated 
condition found in: %v", udn) + } } // userDefinedNetworkReadyFunc returns a function that checks for the NetworkCreated condition in the provided cluster udn func clusterUserDefinedNetworkReadyFunc(client dynamic.Interface, name string) func() error { - return networkReadyFunc(client.Resource(clusterUDNGVR), name) -} - -func networkReadyFunc(client dynamic.ResourceInterface, name string) func() error { return func() error { - cUDN, err := client.Get(context.Background(), name, metav1.GetOptions{}, "status") + cUDN, err := client.Resource(clusterUDNGVR).Get(context.Background(), name, metav1.GetOptions{}, "status") if err != nil { return err } @@ -2275,6 +2289,26 @@ func withNetworkAttachment(networks []nadapi.NetworkSelectionElement) podOption } } +// podIPsForUserDefinedPrimaryNetwork returns the v4 or v6 IPs for a pod on the UDN +func podIPsForUserDefinedPrimaryNetwork(k8sClient clientset.Interface, podNamespace string, podName string, attachmentName string, index int) (string, error) { + pod, err := k8sClient.CoreV1().Pods(podNamespace).Get(context.Background(), podName, metav1.GetOptions{}) + if err != nil { + return "", err + } + netStatus, err := userDefinedNetworkStatus(pod, attachmentName) + if err != nil { + return "", err + } + + if len(netStatus.IPs) == 0 { + return "", fmt.Errorf("attachment for network %q without IPs", attachmentName) + } + if len(netStatus.IPs) > 2 { + return "", fmt.Errorf("attachment for network %q with more than two IPs", attachmentName) + } + return netStatus.IPs[index].IP.String(), nil +} + func podIPsForDefaultNetwork(k8sClient clientset.Interface, podNamespace string, podName string) (string, string, error) { pod, err := k8sClient.CoreV1().Pods(podNamespace).Get(context.Background(), podName, metav1.GetOptions{}) if err != nil { @@ -2284,6 +2318,15 @@ func podIPsForDefaultNetwork(k8sClient clientset.Interface, podNamespace string, return ipv4, ipv6, nil } +func userDefinedNetworkStatus(pod *v1.Pod, networkName string) 
(PodAnnotation, error) { + netStatus, err := unmarshalPodAnnotation(pod.Annotations, networkName) + if err != nil { + return PodAnnotation{}, fmt.Errorf("failed to unmarshall annotations for pod %q: %v", pod.Name, err) + } + + return *netStatus, nil +} + func runUDNPod(cs clientset.Interface, namespace string, serverPodConfig podConfiguration, podSpecTweak func(*v1.Pod)) *v1.Pod { By(fmt.Sprintf("instantiating the UDN pod %s", serverPodConfig.name)) podSpec := generatePodSpec(serverPodConfig) diff --git a/test/e2e/network_segmentation_api_validations.go b/test/e2e/network_segmentation_api_validations.go index b3b29191fb..0608485b3d 100644 --- a/test/e2e/network_segmentation_api_validations.go +++ b/test/e2e/network_segmentation_api_validations.go @@ -6,13 +6,13 @@ import ( e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" - "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" - testscenariocudn "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario/cudn" + "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" + testdatacudn "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata/cudn" ) var _ = Describe("Network Segmentation: API validations", func() { DescribeTable("api-server should reject invalid CRs", - func(scenarios []testscenario.ValidateCRScenario) { + func(scenarios []testdata.ValidateCRScenario) { DeferCleanup(func() { cleanupValidateCRsTest(scenarios) }) @@ -23,16 +23,16 @@ var _ = Describe("Network Segmentation: API validations", func() { Expect(stderr).To(ContainSubstring(s.ExpectedErr)) } }, - Entry("ClusterUserDefinedNetwork, mismatch topology and config", testscenariocudn.MismatchTopologyConfig), - Entry("ClusterUserDefinedNetwork, localnet, invalid role", testscenariocudn.LocalnetInvalidRole), - Entry("ClusterUserDefinedNetwork, localnet, invalid physicalNetworkName", testscenariocudn.LocalnetInvalidPhyNetName), - Entry("ClusterUserDefinedNetwork, localnet, invalid subnets", testscenariocudn.LocalnetInvalidSubnets), - 
Entry("ClusterUserDefinedNetwork, localnet, invalid mtu", testscenariocudn.LocalnetInvalidMTU), - Entry("ClusterUserDefinedNetwork, localnet, invalid vlan", testscenariocudn.LocalnetInvalidVLAN), + Entry("ClusterUserDefinedNetwork, mismatch topology and config", testdatacudn.MismatchTopologyConfig), + Entry("ClusterUserDefinedNetwork, localnet, invalid role", testdatacudn.LocalnetInvalidRole), + Entry("ClusterUserDefinedNetwork, localnet, invalid physicalNetworkName", testdatacudn.LocalnetInvalidPhyNetName), + Entry("ClusterUserDefinedNetwork, localnet, invalid subnets", testdatacudn.LocalnetInvalidSubnets), + Entry("ClusterUserDefinedNetwork, localnet, invalid mtu", testdatacudn.LocalnetInvalidMTU), + Entry("ClusterUserDefinedNetwork, localnet, invalid vlan", testdatacudn.LocalnetInvalidVLAN), ) DescribeTable("api-server should accept valid CRs", - func(scenarios []testscenario.ValidateCRScenario) { + func(scenarios []testdata.ValidateCRScenario) { DeferCleanup(func() { cleanupValidateCRsTest(scenarios) }) @@ -42,7 +42,7 @@ var _ = Describe("Network Segmentation: API validations", func() { Expect(err).NotTo(HaveOccurred(), "should create valid CR successfully") } }, - Entry("ClusterUserDefinedNetwork, localnet", testscenariocudn.LocalnetValid), + Entry("ClusterUserDefinedNetwork, localnet", testdatacudn.LocalnetValid), ) }) @@ -52,7 +52,7 @@ func runKubectlInputWithFullOutput(namespace string, data string, args ...string return e2ekubectl.NewKubectlCommand(namespace, args...).WithStdinData(data).ExecWithFullOutput() } -func cleanupValidateCRsTest(scenarios []testscenario.ValidateCRScenario) { +func cleanupValidateCRsTest(scenarios []testdata.ValidateCRScenario) { for _, s := range scenarios { e2ekubectl.RunKubectlInput("", s.Manifest, "delete", "-f", "-") } diff --git a/test/e2e/network_segmentation_localnet.go b/test/e2e/network_segmentation_localnet.go index 3acd6b1c20..1647baa9fa 100644 --- a/test/e2e/network_segmentation_localnet.go +++ 
b/test/e2e/network_segmentation_localnet.go @@ -9,8 +9,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" - infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -23,26 +21,20 @@ import ( ) var _ = Describe("Network Segmentation: Localnet", func() { - var ( - f = wrappedTestFramework("network-segmentation-localnet") - providerCtx infraapi.Context - ) + f := wrappedTestFramework("network-segmentation-localnet") f.SkipNamespaceCreation = true - BeforeEach(func() { - providerCtx = infraprovider.Get().NewTestContext() - }) - It("using ClusterUserDefinedNetwork CR, pods in different namespaces, should communicate over localnet topology", func() { const ( - vlan = 200 - testPort = 9000 - subnetIPv4 = "192.168.100.0/24" - subnetIPv6 = "2001:dbb::/64" - excludeSubnetIPv4 = "192.168.100.0/29" - excludeSubnetIPv6 = "2001:dbb::/120" + vlan = 200 + testPort = 9000 + subnetIPv4 = "192.168.100.0/24" + subnetIPv6 = "2001:dbb::/64" + excludeSubnetIPv4 = "192.168.100.0/29" + excludeSubnetIPv6 = "2001:dbb::/120" + secondaryIfaceName = "eth1" + ovsBrName = "ovsbr-eth1" ) - ovsBrName := "ovsbr-udn" // use unique names to avoid conflicts with tests running in parallel nsBlue := uniqueMetaName("blue") nsRed := uniqueMetaName("red") @@ -50,12 +42,14 @@ var _ = Describe("Network Segmentation: Localnet", func() { physicalNetworkName := uniqueMetaName("localnet1") By("setup the localnet underlay") + ovsPods := ovsPods(f.ClientSet) + Expect(ovsPods).NotTo(BeEmpty()) + DeferCleanup(func() { + By("teardown the localnet underlay") + Expect(teardownUnderlay(ovsPods, ovsBrName)).To(Succeed()) + }) c := networkAttachmentConfig{networkAttachmentConfigParams: networkAttachmentConfigParams{networkName: physicalNetworkName, vlanID: vlan}} - Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ - BridgeName: ovsBrName, - 
LogicalNetworkName: c.networkName, - VlanID: c.vlanID, - })).To(Succeed()) + Expect(setupUnderlay(ovsPods, ovsBrName, secondaryIfaceName, c.networkName, c.vlanID)).To(Succeed()) By("create test namespaces") _, err := f.ClientSet.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: nsRed}}, metav1.CreateOptions{}) diff --git a/test/e2e/network_segmentation_policy.go b/test/e2e/network_segmentation_policy.go index 2b71ebea5c..ffcf5f728a 100644 --- a/test/e2e/network_segmentation_policy.go +++ b/test/e2e/network_segmentation_policy.go @@ -103,7 +103,7 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.Networ for i, cidr := range strings.Split(netConfig.cidr, ",") { if cidr != "" { ginkgo.By("asserting the server pod has an IP from the configured range") - serverIP, err = getPodAnnotationIPsForAttachmentByIndex( + serverIP, err = podIPsForUserDefinedPrimaryNetwork( cs, f.Namespace.Name, serverPodConfig.name, @@ -231,12 +231,12 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.Networ } subnet, err := getNetCIDRSubnet(cidr) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - allowServerPodIP, err = getPodAnnotationIPsForAttachmentByIndex(cs, namespaceYellow, allowServerPodConfig.name, + allowServerPodIP, err = podIPsForUserDefinedPrimaryNetwork(cs, namespaceYellow, allowServerPodConfig.name, namespacedName(namespaceYellow, netConfName), i) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("asserting the allow server pod IP %v is from the configured range %v", allowServerPodIP, cidr)) gomega.Expect(inRange(subnet, allowServerPodIP)).To(gomega.Succeed()) - denyServerPodIP, err = getPodAnnotationIPsForAttachmentByIndex(cs, namespaceYellow, denyServerPodConfig.name, + denyServerPodIP, err = podIPsForUserDefinedPrimaryNetwork(cs, namespaceYellow, denyServerPodConfig.name, namespacedName(namespaceYellow, netConfName), i) 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("asserting the deny server pod IP %v is from the configured range %v", denyServerPodIP, cidr)) diff --git a/test/e2e/network_segmentation_utils.go b/test/e2e/network_segmentation_utils.go deleted file mode 100644 index 960b6889c7..0000000000 --- a/test/e2e/network_segmentation_utils.go +++ /dev/null @@ -1,22 +0,0 @@ -package e2e - -import ( - "k8s.io/client-go/kubernetes" - "k8s.io/utils/net" -) - -// podIPsForUserDefinedPrimaryNetwork returns the v4 or v6 IPs for a pod on the UDN -func getPodAnnotationIPsForPrimaryNetworkByIPFamily(k8sClient kubernetes.Interface, podNamespace string, podName string, networkName string, family net.IPFamily) (string, error) { - if networkName != "default" { - networkName = namespacedName(podNamespace, networkName) - } - ipnets, err := getPodAnnotationIPsForAttachment(k8sClient, podNamespace, podName, networkName) - if err != nil { - return "", err - } - ipnet := getFirstCIDROfFamily(family, ipnets) - if ipnet == nil { - return "", nil - } - return ipnet.IP.String(), nil -} diff --git a/test/e2e/node_ip_mac_migration.go b/test/e2e/node_ip_mac_migration.go index 19626e50e6..0326c2c7b7 100644 --- a/test/e2e/node_ip_mac_migration.go +++ b/test/e2e/node_ip_mac_migration.go @@ -132,7 +132,7 @@ spec: framework.ExpectNoError(err, "failed to get primary network") externalContainerPort := infraprovider.Get().GetExternalContainerPort() externalContainer = infraapi.ExternalContainer{Name: externalContainerName, Image: images.AgnHost(), Network: primaryProviderNetwork, - CmdArgs: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} + Args: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "failed to create external container") externalContainerIPs[4], externalContainerIPs[6] = 
externalContainer.GetIPv4(), externalContainer.GetIPv6() @@ -454,7 +454,7 @@ spec: Expect(pods.Items).To(HaveLen(1)) ovnkPod = pods.Items[0] - cmd := fmt.Sprintf("ovs-ofctl dump-flows %s table=0", deploymentconfig.Get().ExternalBridgeName()) + cmd := "ovs-ofctl dump-flows breth0 table=0" err = wait.PollImmediate(framework.Poll, 30*time.Second, func() (bool, error) { stdout, err := e2epodoutput.RunHostCmdWithRetries(ovnkPod.Namespace, ovnkPod.Name, cmd, framework.Poll, 30*time.Second) if err != nil { @@ -515,7 +515,7 @@ spec: time.Sleep(time.Duration(settleTimeout) * time.Second) By(fmt.Sprintf("Checking nodeport flows have been updated to use new IP: %s", migrationWorkerNodeIP)) - cmd := fmt.Sprintf("ovs-ofctl dump-flows %s table=0", deploymentconfig.Get().ExternalBridgeName()) + cmd := "ovs-ofctl dump-flows breth0 table=0" err = wait.PollImmediate(framework.Poll, 30*time.Second, func() (bool, error) { stdout, err := e2epodoutput.RunHostCmdWithRetries(ovnkPod.Namespace, ovnkPod.Name, cmd, framework.Poll, 30*time.Second) if err != nil { @@ -628,7 +628,7 @@ func checkFlowsForMACPeriodically(ovnkPod v1.Pod, addr net.HardwareAddr, duratio } func checkFlowsForMAC(ovnkPod v1.Pod, mac net.HardwareAddr) error { - cmd := fmt.Sprintf("ovs-ofctl dump-flows %s", deploymentconfig.Get().ExternalBridgeName()) + cmd := "ovs-ofctl dump-flows breth0" flowOutput := e2epodoutput.RunHostCmdOrDie(ovnkPod.Namespace, ovnkPod.Name, cmd) lines := strings.Split(flowOutput, "\n") for _, line := range lines { diff --git a/test/e2e/pod.go b/test/e2e/pod.go index c9a5e5efb7..e43ecee03a 100644 --- a/test/e2e/pod.go +++ b/test/e2e/pod.go @@ -105,7 +105,7 @@ var _ = ginkgo.Describe("Pod to external server PMTUD", func() { providerPrimaryNetwork, err := infraprovider.Get().PrimaryNetwork() framework.ExpectNoError(err, "failed to get provider primary network") externalContainer = infraapi.ExternalContainer{Name: externalContainerName, Image: images.AgnHost(), Network: providerPrimaryNetwork, - 
CmdArgs: []string{"netexec", "--http-port", fmt.Sprintf("%d", externalContainerPort), "--udp-port", fmt.Sprintf("%d", externalContainerPort)}, + Args: []string{"netexec", "--http-port", fmt.Sprintf("%d", externalContainerPort), "--udp-port", fmt.Sprintf("%d", externalContainerPort)}, ExtPort: externalContainerPort} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "failed to create external container (%s)", externalContainer) diff --git a/test/e2e/route_advertisements.go b/test/e2e/route_advertisements.go index f65dd60631..a08b80c6b0 100644 --- a/test/e2e/route_advertisements.go +++ b/test/e2e/route_advertisements.go @@ -2,14 +2,10 @@ package e2e import ( "context" - "embed" "fmt" "math/rand" "net" - "os" - "path/filepath" "strings" - "text/template" "time" @@ -20,24 +16,14 @@ import ( apitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/types" udnv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" udnclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned" - "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" - "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" - "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" - "github.com/ovn-org/ovn-kubernetes/test/e2e/label" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/dynamic" "k8s.io/kubernetes/test/e2e/framework" e2ekubectl 
"k8s.io/kubernetes/test/e2e/framework/kubectl" e2enode "k8s.io/kubernetes/test/e2e/framework/node" @@ -48,14 +34,13 @@ import ( utilnet "k8s.io/utils/net" ) -const ( - serverContainerName = "bgpserver" - routerContainerName = "frr" - echoClientPodName = "echo-client-pod" - bgpExternalNetworkName = "bgpnet" -) - -var _ = ginkgo.Describe("BGP: Pod to external server when default podNetwork is advertised", feature.RouteAdvertisements, func() { +var _ = ginkgo.Describe("BGP: Pod to external server when default podNetwork is advertised", func() { + const ( + serverContainerName = "bgpserver" + routerContainerName = "frr" + echoClientPodName = "echo-client-pod" + bgpExternalNetworkName = "bgpnet" + ) var serverContainerIPs []string var frrContainerIPv4, frrContainerIPv6 string var nodes *corev1.NodeList @@ -250,7 +235,14 @@ var _ = ginkgo.Describe("BGP: Pod to external server when default podNetwork is }) }) -var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advertised", feature.RouteAdvertisements, func() { +var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advertised", func() { + const ( + serverContainerName = "bgpserver" + routerContainerName = "frr" + echoClientPodName = "echo-client-pod" + bgpExternalNetworkName = "bgpnet" + placeholder = "PLACEHOLDER_NAMESPACE" + ) var serverContainerIPs []string var frrContainerIPv4, frrContainerIPv6 string var nodes *corev1.NodeList @@ -409,7 +401,7 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert ginkgo.By("queries to the external server are not SNATed (uses podIP)") for _, serverContainerIP := range serverContainerIPs { - podIP, err := getPodAnnotationIPsForAttachmentByIndex(f.ClientSet, f.Namespace.Name, clientPod.Name, namespacedName(f.Namespace.Name, cUDN.Name), 0) + podIP, err := podIPsForUserDefinedPrimaryNetwork(f.ClientSet, f.Namespace.Name, clientPod.Name, namespacedName(f.Namespace.Name, cUDN.Name), 0) 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.ExpectNoError(err, fmt.Sprintf("Getting podIPs for pod %s failed: %v", clientPod.Name, err)) framework.Logf("Client pod IP address=%s", podIP) @@ -428,7 +420,7 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert 60*time.Second) framework.ExpectNoError(err, fmt.Sprintf("Testing pod to external traffic failed: %v", err)) if isIPv6Supported(f.ClientSet) && utilnet.IsIPv6String(serverContainerIP) { - podIP, err = getPodAnnotationIPsForAttachmentByIndex(f.ClientSet, f.Namespace.Name, clientPod.Name, namespacedName(f.Namespace.Name, cUDN.Name), 1) + podIP, err = podIPsForUserDefinedPrimaryNetwork(f.ClientSet, f.Namespace.Name, clientPod.Name, namespacedName(f.Namespace.Name, cUDN.Name), 1) // For IPv6 addresses, need to handle the brackets in the output outputIP := strings.TrimPrefix(strings.Split(stdout, "]:")[0], "[") gomega.Expect(outputIP).To(gomega.Equal(podIP), @@ -527,7 +519,7 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert ) }) -var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks", feature.RouteAdvertisements, +var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks", func(cudnATemplate, cudnBTemplate *udnv1.ClusterUserDefinedNetwork) { const curlConnectionTimeoutCode = "28" const ( @@ -840,9 +832,9 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientPod := podsNetA[0] srvPod := podsNetA[1] - clientPodStatus, err := getPodAnnotationForAttachment(clientPod, namespacedName(clientPod.Namespace, cudnATemplate.Name)) + clientPodStatus, err := userDefinedNetworkStatus(clientPod, namespacedName(clientPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) - srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) + srvPodStatus, err := userDefinedNetworkStatus(srvPod, 
namespacedName(srvPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", clientPodStatus.IPs[ipFamilyIndex].IP.String(), false }), @@ -852,9 +844,9 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientPod := podsNetA[0] srvPod := podsNetA[2] - clientPodStatus, err := getPodAnnotationForAttachment(clientPod, namespacedName(clientPod.Namespace, cudnATemplate.Name)) + clientPodStatus, err := userDefinedNetworkStatus(clientPod, namespacedName(clientPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) - srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) + srvPodStatus, err := userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", clientPodStatus.IPs[ipFamilyIndex].IP.String(), false }), @@ -864,7 +856,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientPod := podsNetA[2] srvPod := podNetB - srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnBTemplate.Name)) + srvPodStatus, err := userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnBTemplate.Name)) framework.ExpectNoError(err) return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true }), @@ -875,7 +867,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientPod := podsNetA[0] srvPod := podNetB - srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnBTemplate.Name)) + srvPodStatus, err := 
userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnBTemplate.Name)) framework.ExpectNoError(err) return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true }), @@ -885,7 +877,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientPod := podNetDefault srvPod := podNetB - srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnBTemplate.Name)) + srvPodStatus, err := userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnBTemplate.Name)) framework.ExpectNoError(err) return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true }), @@ -895,7 +887,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientPod := podNetDefault srvPod := podsNetA[0] - srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) + srvPodStatus, err := userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true }), @@ -924,7 +916,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientNode := podsNetA[0].Spec.NodeName srvPod := podsNetA[0] - srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) + srvPodStatus, err := userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) return clientNode, "", net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true }), @@ -934,7 +926,7 @@ var 
_ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientNode := podsNetA[2].Spec.NodeName srvPod := podsNetA[0] - srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) + srvPodStatus, err := userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) return clientNode, "", net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true }), @@ -1069,1149 +1061,3 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" }, ), ) - -var _ = ginkgo.Describe("BGP: For a VRF-Lite configured network", feature.RouteAdvertisements, func() { - - // testing helpers used throughout this testing node - const ( - // FIXME: each test brings its own topology up, and sometimes zebra on - // external FRR container fails to start on the first attempt for - // unknown reasons delaying the overall availability, so we need to use - // long timeouts - timeout = 240 * time.Second - timeoutNOK = 10 * time.Second - pollingNOK = 1 * time.Second - netexecPort = 8080 - ) - var netexecPortStr = fmt.Sprintf("%d", netexecPort) - testPodToHostnameAndExpect := func(src *corev1.Pod, dstIP, expect string) { - ginkgo.GinkgoHelper() - hostname, err := e2epodoutput.RunHostCmdWithRetries( - src.Namespace, - src.Name, - fmt.Sprintf("curl --max-time 2 -g -q -s http://%s/hostname", net.JoinHostPort(dstIP, netexecPortStr)), - framework.Poll, - timeout, - ) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(hostname).To(gomega.Equal(expect)) - } - testPodToClientIP := func(src *corev1.Pod, dstIP string) { - ginkgo.GinkgoHelper() - _, err := e2epodoutput.RunHostCmdWithRetries( - src.Namespace, - src.Name, - fmt.Sprintf("curl --max-time 2 -g -q -s http://%s/clientip", net.JoinHostPort(dstIP, netexecPortStr)), - framework.Poll, - timeout, - ) - 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - testPodToClientIPAndExpect := func(src *corev1.Pod, dstIP, expect string) { - ginkgo.GinkgoHelper() - ip, err := e2epodoutput.RunHostCmdWithRetries( - src.Namespace, - src.Name, - fmt.Sprintf("curl --max-time 2 -g -q -s http://%s/clientip", net.JoinHostPort(dstIP, netexecPortStr)), - framework.Poll, - timeout, - ) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ip, _, err = net.SplitHostPort(ip) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(ip).To(gomega.Equal(expect)) - } - testContainerToClientIPAndExpect := func(src, dstIP, expect string) { - ginkgo.GinkgoHelper() - gomega.Eventually(func(g gomega.Gomega) { - // FIXME: using ExecK8NodeCommand instead of - // ExecExternalContainerCommand, they arent any - // different but ExecK8NodeCommand is more convinient - ip, err := infraprovider.Get().ExecK8NodeCommand( - src, - []string{"curl", "--max-time", "2", "-g", "-q", "-s", fmt.Sprintf("http://%s/clientip", net.JoinHostPort(dstIP, netexecPortStr))}, - ) - g.Expect(err).NotTo(gomega.HaveOccurred()) - ip, _, err = net.SplitHostPort(ip) - g.Expect(err).NotTo(gomega.HaveOccurred()) - g.Expect(ip).To(gomega.Equal(expect)) - }).WithTimeout(timeout).WithPolling(pollingNOK).Should(gomega.Succeed()) - } - testPodToClientIPNOK := func(src *corev1.Pod, dstIP string) { - gomega.Consistently(func(g gomega.Gomega) { - _, err := e2epodoutput.RunHostCmd( - src.Namespace, - src.Name, - fmt.Sprintf("curl --max-time 2 -g -q -s http://%s/clientip", net.JoinHostPort(dstIP, netexecPortStr)), - ) - g.Expect(err).To(gomega.HaveOccurred()) - }).WithTimeout(timeoutNOK).WithPolling(pollingNOK).Should(gomega.Succeed()) - } - testContainerToClientIPNOK := func(src, dstIP string) { - gomega.Consistently(func(g gomega.Gomega) { - _, err := infraprovider.Get().ExecK8NodeCommand( - src, - []string{"curl", "--max-time", "2", "-g", "-q", "-s", fmt.Sprintf("http://%s/clientip", net.JoinHostPort(dstIP, netexecPortStr))}, - ) 
- g.Expect(err).To(gomega.HaveOccurred()) - }).WithTimeout(timeoutNOK).WithPolling(pollingNOK).Should(gomega.Succeed()) - } - - const ( - baseName = "vrflite" - bgpPeerSubnetIPv4 = "172.36.0.0/16" - bgpPeerSubnetIPv6 = "fc00:f853:ccd:36::/64" - // TODO: test with overlaps but we need better isolation from the infra - // provider, docker `--internal` bridge networks with iptables based - // isolation doesn't cut it. macvlan driver might be a better option. - bgpServerSubnetIPv4 = "172.38.0.0/16" - bgpServerSubnetIPv6 = "fc00:f853:ccd:38::/64" - ) - - f := wrappedTestFramework(baseName) - f.SkipNamespaceCreation = true - var ipFamilySet sets.Set[utilnet.IPFamily] - var ictx infraapi.Context - var testBaseName, testSuffix, testNetworkName, bgpServerName string - - ginkgo.BeforeEach(func() { - if !isLocalGWModeEnabled() { - e2eskipper.Skipf("VRF-Lite test cases only supported in Local Gateway mode") - } - ipFamilySet = sets.New(getSupportedIPFamiliesSlice(f.ClientSet)...) - ictx = infraprovider.Get().NewTestContext() - testSuffix = framework.RandomSuffix() - testBaseName = baseName + testSuffix - testNetworkName = testBaseName - bgpServerName = testNetworkName + "-bgpserver" - - // we will create a agnhost server on an extra network peered with BGP - ginkgo.By("Running a BGP network with an agnhost server") - bgpPeerCIDRs := []string{bgpPeerSubnetIPv4, bgpPeerSubnetIPv6} - bgpServerCIDRs := []string{bgpServerSubnetIPv4, bgpServerSubnetIPv6} - gomega.Expect(runBGPNetworkAndServer(f, ictx, testNetworkName, bgpServerName, bgpPeerCIDRs, bgpServerCIDRs)).To(gomega.Succeed()) - }) - - // define networks to test with - const ( - cudnCIDRv4 = "103.103.0.0/16" - cudnCIDRv6 = "2014:100:200::0/60" - ) - var ( - layer3NetworkSpec = &udnv1.NetworkSpec{ - Topology: udnv1.NetworkTopologyLayer3, - Layer3: &udnv1.Layer3Config{ - Role: "Primary", - Subnets: []udnv1.Layer3Subnet{{CIDR: cudnCIDRv4, HostSubnet: 24}, {CIDR: cudnCIDRv6, HostSubnet: 64}}, - }, - } - layer2NetworkSpec = 
&udnv1.NetworkSpec{ - Topology: udnv1.NetworkTopologyLayer2, - Layer2: &udnv1.Layer2Config{ - Role: "Primary", - Subnets: udnv1.DualStackCIDRs{cudnCIDRv4, cudnCIDRv6}, - }, - } - ) - - matchL3SubnetsByIPFamilies := func(families sets.Set[utilnet.IPFamily], in ...udnv1.Layer3Subnet) (out []udnv1.Layer3Subnet) { - for _, subnet := range in { - if families.Has(utilnet.IPFamilyOfCIDRString(string(subnet.CIDR))) { - out = append(out, subnet) - } - } - return - } - matchL2SubnetsByIPFamilies := func(families sets.Set[utilnet.IPFamily], in ...udnv1.CIDR) (out []udnv1.CIDR) { - for _, subnet := range in { - if families.Has(utilnet.IPFamilyOfCIDRString(string(subnet))) { - out = append(out, subnet) - } - } - return - } - - networksToTest := []ginkgo.TableEntry{ - ginkgo.Entry("Layer 3", layer3NetworkSpec), - ginkgo.Entry("Layer 2", layer2NetworkSpec), - } - - ginkgo.DescribeTableSubtree("When the tested network is of type", - func(networkSpec *udnv1.NetworkSpec) { - var testNamespace *corev1.Namespace - var testPod *corev1.Pod - - getSameNode := func() string { - return testPod.Spec.NodeName - } - getDifferentNode := func() string { - ginkgo.GinkgoHelper() - nodes, err := e2enode.GetReadySchedulableNodes(context.Background(), f.ClientSet) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get ready schedulable nodes") - for _, node := range nodes.Items { - if node.Name != testPod.Spec.NodeName { - return node.Name - } - } - ginkgo.Fail(fmt.Sprintf("Failed to find a different ready schedulable node than %s", testPod.Spec.NodeName)) - return "" - } - - ginkgo.BeforeEach(func() { - var err error - - switch { - case networkSpec.Layer3 != nil: - networkSpec.Layer3.Subnets = matchL3SubnetsByIPFamilies(ipFamilySet, networkSpec.Layer3.Subnets...) - case networkSpec.Layer2 != nil: - networkSpec.Layer2.Subnets = matchL2SubnetsByIPFamilies(ipFamilySet, networkSpec.Layer2.Subnets...) 
- } - - ginkgo.By("Configuring the namespace and network") - testNamespace, err = createNamespaceWithPrimaryNetworkOfType(f, ictx, testBaseName, testNetworkName, cudnAdvertisedVRFLite, networkSpec) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - f.Namespace = testNamespace - - // attach network to the VRF on all nodes - ginkgo.By("Attaching the BGP peer network to the CUDN VRF") - nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - network, err := infraprovider.Get().GetNetwork(testNetworkName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - for _, node := range nodeList.Items { - iface, err := infraprovider.Get().GetK8NodeNetworkInterface(node.Name, network) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - _, err = infraprovider.Get().ExecK8NodeCommand(node.Name, []string{"ip", "link", "set", "dev", iface.InfName, "master", testNetworkName}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - // quirk: need to reset IPv6 address - _, err = infraprovider.Get().ExecK8NodeCommand(node.Name, []string{"ip", "address", "add", iface.IPv6 + "/" + iface.IPv6Prefix, "dev", iface.InfName}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - }) - - ginkgo.Describe("When a pod runs on the tested network", func() { - ginkgo.BeforeEach(func() { - ginkgo.By("Running a pod on the tested network namespace") - testPod = e2epod.CreateExecPodOrFail( - context.Background(), - f.ClientSet, - testNamespace.Name, - testNamespace.Name+"-netexec-pod", - func(p *corev1.Pod) { - p.Spec.Containers[0].Args = []string{"netexec"} - }, - ) - }) - - ginkgo.DescribeTable("It can reach an external server on the same network", - func(family utilnet.IPFamily) { - if !ipFamilySet.Has(family) { - e2eskipper.Skipf("IP family %v not supported", family) - } - ginkgo.By("Ensuring a request from the pod can reach the external server") - bgpServerNetwork, err := 
infraprovider.Get().GetNetwork(bgpServerName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - iface, err := infraprovider.Get().GetK8NodeNetworkInterface(bgpServerName, bgpServerNetwork) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - serverIP := getFirstIPStringOfFamily(family, []string{iface.IPv4, iface.IPv6}) - gomega.Expect(serverIP).NotTo(gomega.BeEmpty()) - testPodToHostnameAndExpect(testPod, serverIP, bgpServerName) - - ginkgo.By("Ensuring a request from the pod is not SNATed") - testPodIP, err := getPodAnnotationIPsForPrimaryNetworkByIPFamily( - f.ClientSet, - testPod.Namespace, - testPod.Name, - testNetworkName, - family, - ) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(testPodIP).ToNot(gomega.BeEmpty()) - testPodToClientIPAndExpect(testPod, serverIP, testPodIP) - }, - ginkgo.Entry("When the network is IPv4", utilnet.IPv4), - ginkgo.Entry("When the network is IPv6", utilnet.IPv6), - ) - - ginkgo.DescribeTable("It can be reached by an external server on the same network", - func(family utilnet.IPFamily) { - if !ipFamilySet.Has(family) { - e2eskipper.Skipf("IP family %v not supported", family) - } - ginkgo.By("Ensuring a request from the external server can reach the pod") - bgpServerNetwork, err := infraprovider.Get().GetNetwork(bgpServerName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - iface, err := infraprovider.Get().GetK8NodeNetworkInterface(bgpServerName, bgpServerNetwork) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - serverIP := getFirstIPStringOfFamily(family, []string{iface.IPv4, iface.IPv6}) - gomega.Expect(serverIP).NotTo(gomega.BeEmpty()) - podIP, err := getPodAnnotationIPsForPrimaryNetworkByIPFamily( - f.ClientSet, - testPod.Namespace, - testPod.Name, - testNetworkName, - family, - ) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(podIP).ToNot(gomega.BeEmpty()) - testContainerToClientIPAndExpect(bgpServerName, podIP, serverIP) - }, - ginkgo.Entry("When the network is IPv4", 
utilnet.IPv4), - ginkgo.Entry("When the network is IPv6", utilnet.IPv6), - ) - - ginkgo.It("Can reach KAPI service", func() { - ginkgo.By("Ensuring a request from the pod can reach KAPI service") - output, err := e2epodoutput.RunHostCmdWithRetries( - testPod.Namespace, - testPod.Name, - "curl --max-time 2 -g -q -s -k https://kubernetes.default/healthz", - framework.Poll, - timeout, - ) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(output).To(gomega.Equal("ok")) - }) - - ginkgo.DescribeTable("It cannot reach an external server on a different network", - func(family utilnet.IPFamily) { - if !ipFamilySet.Has(family) { - e2eskipper.Skipf("IP family %v not supported", family) - } - ginkgo.By("Ensuring a request from the pod cannot reach the external server") - // using the external server setup for the default network - bgpServerNetwork, err := infraprovider.Get().GetNetwork(bgpExternalNetworkName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - iface, err := infraprovider.Get().GetK8NodeNetworkInterface(serverContainerName, bgpServerNetwork) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - serverIP := getFirstIPStringOfFamily(family, []string{iface.IPv4, iface.IPv6}) - gomega.Expect(serverIP).NotTo(gomega.BeEmpty()) - testPodToClientIPNOK(testPod, serverIP) - }, - ginkgo.Entry("When the network is IPv4", utilnet.IPv4), - ginkgo.Entry("When the network is IPv6", utilnet.IPv6), - ) - - ginkgo.DescribeTable("It cannot be reached by an external server on a different network", - func(family utilnet.IPFamily) { - if !ipFamilySet.Has(family) { - e2eskipper.Skipf("IP family %v not supported", family) - } - ginkgo.By("Ensuring a request from the external server cannot reach the pod") - podIP, err := getPodAnnotationIPsForPrimaryNetworkByIPFamily( - f.ClientSet, - testPod.Namespace, - testPod.Name, - testNetworkName, - family, - ) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(podIP).ToNot(gomega.BeEmpty()) - // using the external 
server setup for the default network - testContainerToClientIPNOK(serverContainerName, podIP) - }, - ginkgo.Entry("When the network is IPv4", utilnet.IPv4), - ginkgo.Entry("When the network is IPv6", utilnet.IPv6), - ) - - ginkgo.DescribeTableSubtree("It cannot be reached by a cluster node", - func(getNode func() string) { - ginkgo.DescribeTable("", - func(family utilnet.IPFamily) { - if !ipFamilySet.Has(family) { - e2eskipper.Skipf("IP family %v not supported", family) - } - ginkgo.By("Ensuring a request from the node cannot reach the tested network pod") - podIP, err := getPodAnnotationIPsForPrimaryNetworkByIPFamily( - f.ClientSet, - testPod.Namespace, - testPod.Name, - testNetworkName, - family, - ) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(podIP).ToNot(gomega.BeEmpty()) - testContainerToClientIPNOK(getNode(), podIP) - }, - ginkgo.Entry("When the network is IPv4", utilnet.IPv4), - ginkgo.Entry("When the network is IPv6", utilnet.IPv6), - ) - }, - ginkgo.Entry("When it is the same node", getSameNode), - ginkgo.Entry("When it is a different node", getDifferentNode), - ) - - ginkgo.DescribeTableSubtree("When other pod runs on the tested network", - func(getNode func() string) { - var otherPod *corev1.Pod - - ginkgo.BeforeEach(func() { - ginkgo.By("Running other pod on the tested network namespace") - otherPod = e2epod.CreateExecPodOrFail( - context.Background(), - f.ClientSet, - testNamespace.Name, - testNamespace.Name+"-netexec-pod", - func(p *corev1.Pod) { - p.Spec.Containers[0].Args = []string{"netexec"} - p.Labels = map[string]string{"app": "netexec-pod"} - }, - ) - }) - - ginkgo.DescribeTable("The pods on the tested network can reach each other", - func(family utilnet.IPFamily) { - if !ipFamilySet.Has(family) { - e2eskipper.Skipf("IP family %v not supported", family) - } - ginkgo.By("Ensuring a request from the first pod can reach the second pod") - otherPodIP, err := getPodAnnotationIPsForPrimaryNetworkByIPFamily( - f.ClientSet, - 
otherPod.Namespace, - otherPod.Name, - testNetworkName, - family, - ) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(otherPodIP).ToNot(gomega.BeEmpty()) - testPodToClientIP(testPod, otherPodIP) - }, - ginkgo.Entry("When the networks are IPv4", utilnet.IPv4), - ginkgo.Entry("When the networks are IPv6", utilnet.IPv6), - ) - - ginkgo.Describe("Backing a ClusterIP service", func() { - var service *corev1.Service - - ginkgo.BeforeEach(func() { - ginkgo.By("Creating a service backed by the other network pod") - service = e2eservice.CreateServiceSpec( - "service-for-netexec", - "", - false, - otherPod.Labels, - ) - service.Spec.Ports = []corev1.ServicePort{{Port: netexecPort}} - familyPolicy := corev1.IPFamilyPolicyPreferDualStack - service.Spec.IPFamilyPolicy = &familyPolicy - var err error - service, err = f.ClientSet.CoreV1().Services(otherPod.Namespace).Create(context.Background(), service, metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - - ginkgo.DescribeTable("The first pod can reach the ClusterIP service on the same network", - func(family utilnet.IPFamily) { - if !ipFamilySet.Has(family) { - e2eskipper.Skipf("IP family %v not supported", family) - } - ginkgo.By("Ensuring a request from the first pod can reach the ClusterIP service") - clusterIP := getFirstIPStringOfFamily(family, service.Spec.ClusterIPs) - gomega.Expect(clusterIP).ToNot(gomega.BeEmpty()) - testPodToClientIP(testPod, clusterIP) - }, - ginkgo.Entry("When the networks are IPv4", utilnet.IPv4), - ginkgo.Entry("When the networks are IPv6", utilnet.IPv6), - ) - }) - }, - ginkgo.Entry("On the same node", getSameNode), - ginkgo.Entry("On a different node", getDifferentNode), - ) - - ginkgo.Describe("When there is other network", func() { - const ( - otherBGPPeerSubnetIPv4 = "172.136.0.0/16" - otherBGPPeerSubnetIPv6 = "fc00:f853:ccd:136::/64" - otherBGPServerSubnetIPv4 = "172.138.0.0/16" - otherBGPServerSubnetIPv6 = "fc00:f853:ccd:138::/64" - 
otherUDNCIDRv4 = "103.203.0.0/16" - otherUDNCIDRv6 = "2014:200:200::0/60" - ) - - var ( - otherLayer3NetworkSpec = &udnv1.NetworkSpec{ - Topology: udnv1.NetworkTopologyLayer3, - Layer3: &udnv1.Layer3Config{ - Role: "Primary", - Subnets: []udnv1.Layer3Subnet{{CIDR: otherUDNCIDRv4, HostSubnet: 24}, {CIDR: otherUDNCIDRv6, HostSubnet: 64}}, - }, - } - otherLayer2NetworkSpec = &udnv1.NetworkSpec{ - Topology: udnv1.NetworkTopologyLayer2, - Layer2: &udnv1.Layer2Config{ - Role: "Primary", - Subnets: udnv1.DualStackCIDRs{otherUDNCIDRv4, otherUDNCIDRv6}, - }, - } - ) - - otherNetworksToTest := []ginkgo.TableEntry{ - ginkgo.Entry("Default", defaultNetwork, nil), - ginkgo.Entry("Layer 3 CUDN advertised VRF-Lite", cudnAdvertisedVRFLite, otherLayer3NetworkSpec), - ginkgo.Entry("Layer 2 CUDN advertised VRF-Lite", cudnAdvertisedVRFLite, otherLayer2NetworkSpec), - // The following testcases are labeled as extended, - // might not be run on all jobs - ginkgo.Entry("Layer 3 UDN non advertised", udn, otherLayer3NetworkSpec, label.Extended()), - ginkgo.Entry("Layer 3 CUDN advertised", cudnAdvertised, otherLayer3NetworkSpec, label.Extended()), - ginkgo.Entry("Layer 2 UDN non advertised", udn, otherLayer2NetworkSpec, label.Extended()), - ginkgo.Entry("Layer 2 CUDN advertised", cudnAdvertised, otherLayer2NetworkSpec, label.Extended()), - } - - ginkgo.DescribeTableSubtree("Of type", - func(networkType networkType, networkSpec *udnv1.NetworkSpec) { - var otherNamespace *corev1.Namespace - var otherNetworkName string - - ginkgo.BeforeEach(func() { - otherNetworkName = testBaseName + "-other" - otherNamespaceName := otherNetworkName - - switch { - case networkSpec == nil: - // noop - case networkSpec.Layer3 != nil: - networkSpec.Layer3.Subnets = matchL3SubnetsByIPFamilies(ipFamilySet, networkSpec.Layer3.Subnets...) - case networkSpec.Layer2 != nil: - networkSpec.Layer2.Subnets = matchL2SubnetsByIPFamilies(ipFamilySet, networkSpec.Layer2.Subnets...) 
- } - - // we will create a agnhost server on an extra network peered with BGP - switch networkType { - case cudnAdvertisedVRFLite: - ginkgo.By("Running other BGP network with an agnhost server") - otherBGPServerName := otherNetworkName + "-bgpserver" - bgpPeerCIDRs := []string{otherBGPPeerSubnetIPv4, otherBGPPeerSubnetIPv6} - bgpServerCIDRs := []string{otherBGPServerSubnetIPv4, otherBGPServerSubnetIPv6} - gomega.Expect(runBGPNetworkAndServer(f, ictx, otherNetworkName, otherBGPServerName, bgpPeerCIDRs, bgpServerCIDRs)).To(gomega.Succeed()) - case defaultNetwork: - otherNetworkName = "default" - } - - ginkgo.By("Creating the other namespace and network") - var err error - otherNamespace, err = createNamespaceWithPrimaryNetworkOfType(f, ictx, testBaseName, otherNamespaceName, networkType, networkSpec) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - - ginkgo.DescribeTableSubtree("And a pod runs on the other network", - func(getNode func() string) { - var otherPod *corev1.Pod - - ginkgo.BeforeEach(func() { - ginkgo.By("Running a pod on the other network namespace") - otherPod = e2epod.CreateExecPodOrFail( - context.Background(), - f.ClientSet, - otherNamespace.Name, - otherNamespace.Name+"-netexec-pod", - func(p *corev1.Pod) { - p.Spec.Containers[0].Args = []string{"netexec"} - p.Spec.NodeName = getNode() - p.Labels = map[string]string{"app": "netexec-pod"} - }, - ) - }) - - ginkgo.DescribeTable("The pod on the tested network cannot reach the pod on the other network", - func(family utilnet.IPFamily) { - if !ipFamilySet.Has(family) { - e2eskipper.Skipf("IP family %v not supported", family) - } - ginkgo.By("Ensuring a request from the tested network pod cannot reach the other network pod") - otherPodIP, err := getPodAnnotationIPsForPrimaryNetworkByIPFamily( - f.ClientSet, - otherPod.Namespace, - otherPod.Name, - otherNetworkName, - family, - ) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(otherPodIP).ToNot(gomega.BeEmpty()) - 
testPodToClientIPNOK(testPod, otherPodIP) - }, - ginkgo.Entry("When the networks are IPv4", utilnet.IPv4), - ginkgo.Entry("When the networks are IPv6", utilnet.IPv6), - ) - - ginkgo.DescribeTable("The pod on the other network cannot reach the pod on the tested network", - func(family utilnet.IPFamily) { - if !ipFamilySet.Has(family) { - e2eskipper.Skipf("IP family %v not supported", family) - } - ginkgo.By("Ensuring a request from the other network pod cannot reach the tested network pod") - testPodIP, err := getPodAnnotationIPsForPrimaryNetworkByIPFamily( - f.ClientSet, - testPod.Namespace, - testPod.Name, - testNetworkName, - family, - ) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(testPodIP).ToNot(gomega.BeEmpty()) - testPodToClientIPNOK(otherPod, testPodIP) - }, - ginkgo.Entry("When the networks are IPv4", utilnet.IPv4), - ginkgo.Entry("When the networks are IPv6", utilnet.IPv6), - ) - - ginkgo.Describe("Backing a ClusterIP service", func() { - var service *corev1.Service - - ginkgo.BeforeEach(func() { - ginkgo.By("Creating a service backed by the other network pod") - service = e2eservice.CreateServiceSpec( - "service-for-netexec", - "", - false, - otherPod.Labels, - ) - service.Spec.Ports = []corev1.ServicePort{{Port: netexecPort}} - familyPolicy := corev1.IPFamilyPolicyPreferDualStack - service.Spec.IPFamilyPolicy = &familyPolicy - var err error - service, err = f.ClientSet.CoreV1().Services(otherPod.Namespace).Create(context.Background(), service, metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - - ginkgo.DescribeTable("The pod on the tested network cannot reach the service on the other network", - func(family utilnet.IPFamily) { - if !ipFamilySet.Has(family) { - e2eskipper.Skipf("IP family %v not supported", family) - } - ginkgo.By("Ensuring a request from the tested network pod cannot reach the other network pod") - clusterIP := getFirstIPStringOfFamily(family, service.Spec.ClusterIPs) - 
gomega.Expect(clusterIP).ToNot(gomega.BeEmpty()) - testPodToClientIPNOK(testPod, clusterIP) - }, - ginkgo.Entry("When the networks are IPv4", utilnet.IPv4), - ginkgo.Entry("When the networks are IPv6", utilnet.IPv6), - ) - }) - }, - ginkgo.Entry("On the same node", getSameNode), - ginkgo.Entry("On a different node", getDifferentNode), - ) - }, - otherNetworksToTest, - ) - }) - }) - }, - networksToTest, - ) -}) - -// routeAdvertisementsReadyFunc returns a function that checks for the -// Accepted condition in the provided RouteAdvertisements -func routeAdvertisementsReadyFunc(c raclientset.Clientset, name string) func() error { - return func() error { - ra, err := c.K8sV1().RouteAdvertisements().Get(context.Background(), name, metav1.GetOptions{}) - if err != nil { - return err - } - conditionType := "Accepted" - condition := meta.FindStatusCondition(ra.Status.Conditions, conditionType) - if condition == nil { - return fmt.Errorf("no %q condition found in: %v", conditionType, ra) - } - if condition.Status != metav1.ConditionTrue { - return fmt.Errorf("condition %v has unexpected status %v", condition, condition.Status) - } - return nil - } -} - -// templateInputRouter data -type templateInputRouter struct { - VRF string - NeighborsIPv4 []string - NeighborsIPv6 []string - NetworksIPv4 []string - NetworksIPv6 []string -} - -// templateInputFRR data -type templateInputFRR struct { - // Name and Label are used for FRRConfiguration metadata - Name string - Labels map[string]string - Routers []templateInputRouter -} - -// for routeadvertisements test cases we generate configuration from templates embed in the program -// -//go:embed testdata/routeadvertisements -var ratestdata embed.FS -var tmplDir = filepath.Join("testdata", "routeadvertisements") - -const frrImage = "quay.io/frrouting/frr:9.1.3" - -// generateFRRConfiguration to establish a BGP session towards the provided -// neighbors in the network's VRF configured to advertised the provided -// networks. 
Returns a temporary directory where the configuration is generated. -func generateFRRConfiguration(neighborIPs, advertiseNetworks []string) (directory string, err error) { - // parse configuration templates - var templates *template.Template - templates, err = template.ParseFS(ratestdata, filepath.Join(tmplDir, "frr", "*.tmpl")) - if err != nil { - return "", fmt.Errorf("failed to parse templates: %w", err) - } - - // create the directory that will hold the configuration files - directory, err = os.MkdirTemp("", "frrconf-") - if err != nil { - return "", fmt.Errorf("failed to make temp directory: %w", err) - } - defer func() { - if err != nil { - os.RemoveAll(directory) - } - }() - - // generate external frr configuration executing the templates - networksIPv4, networksIPv6 := splitCIDRStringsByIPFamily(advertiseNetworks) - neighborsIPv4, neighborsIPv6 := splitIPStringsByIPFamily(neighborIPs) - conf := templateInputFRR{ - Routers: []templateInputRouter{ - { - NeighborsIPv4: neighborsIPv4, - NetworksIPv4: networksIPv4, - NeighborsIPv6: neighborsIPv6, - NetworksIPv6: networksIPv6, - }, - }, - } - - err = executeFileTemplate(templates, directory, "frr.conf", conf) - if err != nil { - return "", fmt.Errorf("failed to execute template %q: %w", "frr.conf", err) - } - err = executeFileTemplate(templates, directory, "daemons", nil) - if err != nil { - return "", fmt.Errorf("failed to execute template %q: %w", "daemons", err) - } - - return directory, nil -} - -// generateFRRk8sConfiguration for the provided network (which doubles up as the -// FRRConfiguration instance name, VRF name and used as value of `network` -// label) to establish a BGP session towards the provided neighbors in the -// network's VRF, configured to receive advertisements for the provided -// networks. Returns a temporary directory where the configuration is generated. 
-func generateFRRk8sConfiguration(networkName string, neighborIPs, receiveNetworks []string) (directory string, err error) { - // parse configuration templates - var templates *template.Template - templates, err = template.ParseFS(ratestdata, filepath.Join(tmplDir, "frr-k8s", "*.tmpl")) - if err != nil { - return "", fmt.Errorf("failed to parse templates: %w", err) - } - - // create the directory that will hold the configuration files - directory, err = os.MkdirTemp("", "frrk8sconf-") - if err != nil { - return "", fmt.Errorf("failed to make temp directory: %w", err) - } - defer func() { - if err != nil { - os.RemoveAll(directory) - } - }() - - receivesIPv4, receivesIPv6 := splitCIDRStringsByIPFamily(receiveNetworks) - neighborsIPv4, neighborsIPv6 := splitIPStringsByIPFamily(neighborIPs) - conf := templateInputFRR{ - Name: networkName, - Labels: map[string]string{"network": networkName}, - Routers: []templateInputRouter{ - { - VRF: networkName, - NeighborsIPv4: neighborsIPv4, - NeighborsIPv6: neighborsIPv6, - NetworksIPv4: receivesIPv4, - NetworksIPv6: receivesIPv6, - }, - }, - } - err = executeFileTemplate(templates, directory, "frrconf.yaml", conf) - if err != nil { - return "", fmt.Errorf("failed to execute template %q: %w", "frrconf.yaml", err) - } - - return directory, nil -} - -// runBGPNetworkAndServer configures a topology appropriate to be used with -// route advertisement test cases. For VRF-Lite test cases, the caller is -// resposible to attach the peer network interface to the CUDN VRF on the nodes. 
-// -// ----------------- ------------------ --------------- -// | | serverNetwork | | peerNetwork | | -// | external |<--------------- | FRR router |<--( Default / CUDN VRF )-- | cluster | -// | server | | | | | -// ----------------- ------------------ --------------- -func runBGPNetworkAndServer( - f *framework.Framework, - ictx infraapi.Context, - networkName, serverName string, - peerNetworks, - serverNetworks []string, -) error { - // filter networks by supported IP families - families := getSupportedIPFamiliesSlice(f.ClientSet) - peerNetworks = matchCIDRStringsByIPFamily(peerNetworks, families...) - serverNetworks = matchCIDRStringsByIPFamily(serverNetworks, families...) - - // create BGP peer network - bgpPeerNetwork, err := ictx.CreateNetwork(networkName, peerNetworks...) - if err != nil { - return fmt.Errorf("failed to create peer network %v: %w", peerNetworks, err) - } - - // create the server network - serverNetwork, err := ictx.CreateNetwork(serverName, serverNetworks...) - if err != nil { - return fmt.Errorf("failed to create server network %v: %w", serverNetworks, err) - } - - // attach BGP peer network to all nodes - var nodeIPs []string - nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - if err != nil { - return fmt.Errorf("failed to list nodes: %w", err) - } - for _, node := range nodeList.Items { - iface, err := ictx.AttachNetwork(bgpPeerNetwork, node.Name) - if err != nil { - return fmt.Errorf("failed to attach node %q to network: %w", node.Name, err) - } - nodeIPs = append(nodeIPs, iface.IPv4, iface.IPv6) - } - - // run frr container - advertiseNetworks := serverNetworks - frrConfig, err := generateFRRConfiguration(nodeIPs, advertiseNetworks) - if err != nil { - return fmt.Errorf("failed to generate FRR configuration: %w", err) - } - ictx.AddCleanUpFn(func() error { return os.RemoveAll(frrConfig) }) - frr := infraapi.ExternalContainer{ - Name: networkName + "-frr", - Image: frrImage, - Network: 
bgpPeerNetwork, - RuntimeArgs: []string{"--volume", frrConfig + ":" + filepath.Join(filepath.FromSlash("/"), "etc", "frr")}, - } - frr, err = ictx.CreateExternalContainer(frr) - if err != nil { - return fmt.Errorf("failed to create frr container: %w", err) - } - // enable IPv6 forwarding if required - if frr.IPv6 != "" { - _, err = infraprovider.Get().ExecExternalContainerCommand(frr, []string{"sysctl", "-w", "net.ipv6.conf.all.forwarding=1"}) - if err != nil { - return fmt.Errorf("failed to set enable IPv6 forwading on frr container: %w", err) - } - } - - // connect frr to server network - frrServerNetworkInterface, err := ictx.AttachNetwork(serverNetwork, frr.Name) - if err != nil { - return fmt.Errorf("failed to connect frr to server network: %w", err) - } - - // run server container - server := infraapi.ExternalContainer{ - Name: serverName, - Image: images.AgnHost(), - CmdArgs: []string{"netexec"}, - Network: serverNetwork, - } - _, err = ictx.CreateExternalContainer(server) - if err != nil { - return fmt.Errorf("failed to create BGP server container: %w", err) - } - - // set frr as default gateway for the server - if frrServerNetworkInterface.IPv4 != "" { - _, err = infraprovider.Get().ExecExternalContainerCommand(server, []string{"ip", "route", "add", "default", "via", frrServerNetworkInterface.IPv4}) - if err != nil { - return fmt.Errorf("failed to set default IPv4 gateway on BGP server container: %w", err) - } - } - if frrServerNetworkInterface.IPv6 != "" { - _, err = infraprovider.Get().ExecExternalContainerCommand(server, []string{"ip", "-6", "route", "add", "default", "via", frrServerNetworkInterface.IPv6}) - if err != nil { - return fmt.Errorf("failed to set default IPv6 gateway on BGP server container: %w", err) - } - - } - - // apply FRR-K8s Configuration - receiveNetworks := serverNetworks - frrK8sConfig, err := generateFRRk8sConfiguration(networkName, []string{frr.IPv4, frr.IPv6}, receiveNetworks) - if err != nil { - return fmt.Errorf("failed to 
generate FRR-k8s configuration: %w", err) - } - ictx.AddCleanUpFn(func() error { return os.RemoveAll(frrK8sConfig) }) - _, err = e2ekubectl.RunKubectl(deploymentconfig.Get().FRRK8sNamespace(), "create", "-f", frrK8sConfig) - if err != nil { - return fmt.Errorf("failed to apply FRRConfiguration: %w", err) - } - ictx.AddCleanUpFn(func() error { - _, err = e2ekubectl.RunKubectl(deploymentconfig.Get().FRRK8sNamespace(), "delete", "-f", frrK8sConfig) - if err != nil { - return fmt.Errorf("failed to delete FRRConfiguration: %w", err) - } - return nil - }) - - return nil -} - -type networkType string - -const ( - defaultNetwork networkType = "DEFAULT" - udn networkType = "UDN" - cudn networkType = "CUDN" - cudnAdvertised networkType = "CUDN_ADVERTISED" - cudnAdvertisedVRFLite networkType = "CUDN_ADVERTISED_VRFLITE" -) - -// createNamespaceWithPrimaryNetworkOfType helper function configures a -// namespace, a optional(C)UDN and an optional RouteAdvertisements as determined -// by `networkType` argument. The RouteAdvertisements is aligned with the -// configuration done with `runBGPNetworkAndServer` for VRF-Lite scenarios. 
-func createNamespaceWithPrimaryNetworkOfType( - f *framework.Framework, - ictx infraapi.Context, - test, name string, - networkType networkType, - networkSpec *udnv1.NetworkSpec, -) (*corev1.Namespace, error) { - // define some configuration based on the type of namespace/network/advertisement - var targetVRF string - var networkLabels map[string]string - var frrConfigurationLabels map[string]string - switch networkType { - case cudnAdvertised: - networkLabels = map[string]string{"advertise": name} - frrConfigurationLabels = map[string]string{"name": "receive-all"} - case cudnAdvertisedVRFLite: - targetVRF = name - networkLabels = map[string]string{"advertise": name} - frrConfigurationLabels = map[string]string{"network": name} - } - - namespace := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{ - "e2e-framework": test, - }, - }, - } - if networkType != defaultNetwork { - namespace.Labels[RequiredUDNNamespaceLabel] = "" - } - namespace, err := f.ClientSet.CoreV1().Namespaces().Create( - context.Background(), - namespace, - metav1.CreateOptions{}, - ) - if err != nil { - return nil, fmt.Errorf("failed to create namespace: %w", err) - } - ictx.AddCleanUpFn(func() error { - return f.ClientSet.CoreV1().Namespaces().Delete(context.Background(), namespace.Name, metav1.DeleteOptions{}) - }) - - // just creating a namespace with default network, return - if networkType == defaultNetwork { - return namespace, nil - } - - err = createUserDefinedNetwork( - f, - ictx, - namespace, - name, - networkType != udn, - networkSpec, - networkLabels, - ) - if err != nil { - return nil, fmt.Errorf("failed to create primary network: %w", err) - } - - // not advertised, return - if networkType == udn || networkType == cudn { - return namespace, nil - } - - err = createRouteAdvertisements( - f, - ictx, - name, - targetVRF, - networkLabels, - frrConfigurationLabels, - ) - if err != nil { - return nil, fmt.Errorf("failed to create primary 
network: %w", err) - } - - return namespace, nil -} - -func createUserDefinedNetwork( - f *framework.Framework, - ictx infraapi.Context, - namespace *corev1.Namespace, - name string, - cudnType bool, - networkSpec *udnv1.NetworkSpec, - networkLabels map[string]string, -) error { - var gvr schema.GroupVersionResource - var gvk schema.GroupVersionKind - var obj runtime.Object - var client dynamic.ResourceInterface - switch { - case cudnType: - gvr = clusterUDNGVR - gvk = schema.GroupVersionKind{ - Group: gvr.Group, - Version: gvr.Version, - Kind: "ClusterUserDefinedNetwork", - } - client = f.DynamicClient.Resource(gvr) - obj = &udnv1.ClusterUserDefinedNetwork{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: networkLabels, - }, - Spec: udnv1.ClusterUserDefinedNetworkSpec{ - NamespaceSelector: metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{{ - Key: "kubernetes.io/metadata.name", - Operator: metav1.LabelSelectorOpIn, - Values: []string{namespace.Name}, - }}}, - Network: *networkSpec, - }, - } - default: - gvr = udnGVR - gvk = schema.GroupVersionKind{ - Group: gvr.Group, - Version: gvr.Version, - Kind: "UserDefinedNetwork", - } - client = f.DynamicClient.Resource(gvr).Namespace(namespace.Name) - obj = &udnv1.UserDefinedNetwork{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace.Name, - Labels: networkLabels, - }, - Spec: udnv1.UserDefinedNetworkSpec{ - Topology: networkSpec.Topology, - Layer3: networkSpec.Layer3, - Layer2: networkSpec.Layer2, - }, - } - } - - unstructuredMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) - if err != nil { - return fmt.Errorf("failed to convert network to unstructured: %w", err) - } - unstructuredObj := &unstructured.Unstructured{Object: unstructuredMap} - ok := unstructuredObj.GetObjectKind() - ok.SetGroupVersionKind(gvk) - - _, err = client.Create(context.Background(), unstructuredObj, metav1.CreateOptions{}) - if err != nil { - return fmt.Errorf("failed to convert 
network to unstructured: %w", err) - } - ictx.AddCleanUpFn(func() error { - return client.Delete(context.Background(), name, metav1.DeleteOptions{}) - }) - wait.PollUntilContextTimeout( - context.Background(), - time.Second, - 5*time.Second, - true, - func(ctx context.Context) (bool, error) { - err = networkReadyFunc(client, name)() - return err == nil, nil - }, - ) - if err != nil { - return fmt.Errorf("failed to wait for the network to be ready: %w", err) - } - - return nil -} - -func createRouteAdvertisements( - f *framework.Framework, - ictx infraapi.Context, - name string, - targetVRF string, - networkMatchLabels map[string]string, - frrconfigurationMatchLabels map[string]string, -) error { - ra := &rav1.RouteAdvertisements{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: rav1.RouteAdvertisementsSpec{ - NetworkSelectors: apitypes.NetworkSelectors{ - apitypes.NetworkSelector{ - NetworkSelectionType: apitypes.ClusterUserDefinedNetworks, - ClusterUserDefinedNetworkSelector: &apitypes.ClusterUserDefinedNetworkSelector{ - NetworkSelector: metav1.LabelSelector{ - MatchLabels: networkMatchLabels, - }, - }, - }, - }, - FRRConfigurationSelector: metav1.LabelSelector{ - MatchLabels: frrconfigurationMatchLabels, - }, - NodeSelector: metav1.LabelSelector{}, - Advertisements: []rav1.AdvertisementType{ - rav1.PodNetwork, - }, - TargetVRF: targetVRF, - }, - } - - raClient, err := raclientset.NewForConfig(f.ClientConfig()) - if err != nil { - return fmt.Errorf("failed to create RouteAdvertisements client: %w", err) - } - _, err = raClient.K8sV1().RouteAdvertisements().Create(context.TODO(), ra, metav1.CreateOptions{}) - if err != nil { - return fmt.Errorf("failed to create RouteAdvertisements: %w", err) - } - ictx.AddCleanUpFn(func() error { - return raClient.K8sV1().RouteAdvertisements().Delete(context.Background(), name, metav1.DeleteOptions{}) - }) - wait.PollUntilContextTimeout( - context.Background(), - time.Second, - 5*time.Second, - true, - func(ctx 
context.Context) (bool, error) { - err = routeAdvertisementsReadyFunc(*raClient, name)() - return err == nil, nil - }, - ) - if err != nil { - return fmt.Errorf("failed to wait for the RouteAdvertisements to be ready: %w", err) - } - - return nil -} diff --git a/test/e2e/service.go b/test/e2e/service.go index 6e3ff61c27..0df017d523 100644 --- a/test/e2e/service.go +++ b/test/e2e/service.go @@ -812,7 +812,7 @@ var _ = ginkgo.Describe("Services", feature.Service, func() { framework.ExpectNoError(err, "failed to get primary network") externalContainerPort := infraprovider.Get().GetExternalContainerPort() externalContainer := infraapi.ExternalContainer{Name: clientContainerName, Image: images.AgnHost(), Network: primaryProviderNetwork, - CmdArgs: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} + Args: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "external container %s must be created", externalContainer.Name) @@ -1011,7 +1011,7 @@ var _ = ginkgo.Describe("Services", feature.Service, func() { Name: targetSecondaryContainerName, Image: images.AgnHost(), Network: secondaryProviderNetwork, - CmdArgs: getAgnHostHTTPPortBindCMDArgs(serverExternalContainerPort), + Args: getAgnHostHTTPPortBindCMDArgs(serverExternalContainerPort), ExtPort: serverExternalContainerPort, } serverExternalContainer, err := providerCtx.CreateExternalContainer(serverExternalContainerSpec) @@ -1315,7 +1315,7 @@ spec: ginkgo.By("Creating an external client") externalContainer := infraapi.ExternalContainer{Name: clientContainerName, Image: images.AgnHost(), Network: primaryProviderNetwork, - CmdArgs: []string{"pause"}, ExtPort: infraprovider.Get().GetExternalContainerPort()} + Args: []string{"pause"}, ExtPort: infraprovider.Get().GetExternalContainerPort()} externalContainer, err = 
providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "failed to create external container", externalContainer) diff --git a/test/e2e/testscenario/cudn/invalid-scenarios-localnet-mtu.go b/test/e2e/testdata/cudn/invalid-scenarios-localnet-mtu.go similarity index 94% rename from test/e2e/testscenario/cudn/invalid-scenarios-localnet-mtu.go rename to test/e2e/testdata/cudn/invalid-scenarios-localnet-mtu.go index d7e3590ffd..e1ce9e8c70 100644 --- a/test/e2e/testscenario/cudn/invalid-scenarios-localnet-mtu.go +++ b/test/e2e/testdata/cudn/invalid-scenarios-localnet-mtu.go @@ -1,8 +1,8 @@ package cudn -import "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" +import "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" -var LocalnetInvalidMTU = []testscenario.ValidateCRScenario{ +var LocalnetInvalidMTU = []testdata.ValidateCRScenario{ { Description: "invalid MTU - higher than 65536", ExpectedErr: `spec.network.localnet.mtu in body should be less than or equal to 65536`, diff --git a/test/e2e/testscenario/cudn/invalid-scenarios-localnet-phynetname.go b/test/e2e/testdata/cudn/invalid-scenarios-localnet-phynetname.go similarity index 97% rename from test/e2e/testscenario/cudn/invalid-scenarios-localnet-phynetname.go rename to test/e2e/testdata/cudn/invalid-scenarios-localnet-phynetname.go index 171678c9ca..83c6664804 100644 --- a/test/e2e/testscenario/cudn/invalid-scenarios-localnet-phynetname.go +++ b/test/e2e/testdata/cudn/invalid-scenarios-localnet-phynetname.go @@ -1,8 +1,8 @@ package cudn -import "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" +import "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" -var LocalnetInvalidPhyNetName = []testscenario.ValidateCRScenario{ +var LocalnetInvalidPhyNetName = []testdata.ValidateCRScenario{ { Description: "unset PhysicalNetworkName", ExpectedErr: `spec.network.localnet.physicalNetworkName: Required value`, diff --git a/test/e2e/testscenario/cudn/invalid-scenarios-localnet-role.go 
b/test/e2e/testdata/cudn/invalid-scenarios-localnet-role.go similarity index 87% rename from test/e2e/testscenario/cudn/invalid-scenarios-localnet-role.go rename to test/e2e/testdata/cudn/invalid-scenarios-localnet-role.go index 443f78970a..fad452da04 100644 --- a/test/e2e/testscenario/cudn/invalid-scenarios-localnet-role.go +++ b/test/e2e/testdata/cudn/invalid-scenarios-localnet-role.go @@ -1,8 +1,8 @@ package cudn -import "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" +import "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" -var LocalnetInvalidRole = []testscenario.ValidateCRScenario{ +var LocalnetInvalidRole = []testdata.ValidateCRScenario{ { Description: "role unset", ExpectedErr: `spec.network.localnet.role: Required value`, diff --git a/test/e2e/testscenario/cudn/invalid-scenarios-localnet-subnets.go b/test/e2e/testdata/cudn/invalid-scenarios-localnet-subnets.go similarity index 98% rename from test/e2e/testscenario/cudn/invalid-scenarios-localnet-subnets.go rename to test/e2e/testdata/cudn/invalid-scenarios-localnet-subnets.go index bd854acdb2..d62a216d48 100644 --- a/test/e2e/testscenario/cudn/invalid-scenarios-localnet-subnets.go +++ b/test/e2e/testdata/cudn/invalid-scenarios-localnet-subnets.go @@ -1,8 +1,8 @@ package cudn -import "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" +import "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" -var LocalnetInvalidSubnets = []testscenario.ValidateCRScenario{ +var LocalnetInvalidSubnets = []testdata.ValidateCRScenario{ { Description: "unset subnets, and ipam.mode is unset", ExpectedErr: `Subnets is required with ipam.mode is Enabled or unset, and forbidden otherwise`, diff --git a/test/e2e/testscenario/cudn/invalid-scenarios-localnet-vlan.go b/test/e2e/testdata/cudn/invalid-scenarios-localnet-vlan.go similarity index 95% rename from test/e2e/testscenario/cudn/invalid-scenarios-localnet-vlan.go rename to test/e2e/testdata/cudn/invalid-scenarios-localnet-vlan.go index 
8ab71ca8dc..daa393acdb 100644 --- a/test/e2e/testscenario/cudn/invalid-scenarios-localnet-vlan.go +++ b/test/e2e/testdata/cudn/invalid-scenarios-localnet-vlan.go @@ -1,8 +1,8 @@ package cudn -import "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" +import "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" -var LocalnetInvalidVLAN = []testscenario.ValidateCRScenario{ +var LocalnetInvalidVLAN = []testdata.ValidateCRScenario{ { Description: "invalid VLAN - invalid mode", ExpectedErr: `spec.network.localnet.vlan.mode: Unsupported value: "Disabled": supported values: "Access`, diff --git a/test/e2e/testscenario/cudn/invalid-scenarios-mismatch-topo-conf.go b/test/e2e/testdata/cudn/invalid-scenarios-mismatch-topo-conf.go similarity index 95% rename from test/e2e/testscenario/cudn/invalid-scenarios-mismatch-topo-conf.go rename to test/e2e/testdata/cudn/invalid-scenarios-mismatch-topo-conf.go index ddad69d54e..80551a94cd 100644 --- a/test/e2e/testscenario/cudn/invalid-scenarios-mismatch-topo-conf.go +++ b/test/e2e/testdata/cudn/invalid-scenarios-mismatch-topo-conf.go @@ -1,8 +1,8 @@ package cudn -import "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" +import "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" -var MismatchTopologyConfig = []testscenario.ValidateCRScenario{ +var MismatchTopologyConfig = []testdata.ValidateCRScenario{ { Description: "topology is localnet but topology config is layer2", ExpectedErr: `spec.localnet is required when topology is Localnet and forbidden otherwise`, diff --git a/test/e2e/testscenario/cudn/valid-scenarios-localnet.go b/test/e2e/testdata/cudn/valid-scenarios-localnet.go similarity index 93% rename from test/e2e/testscenario/cudn/valid-scenarios-localnet.go rename to test/e2e/testdata/cudn/valid-scenarios-localnet.go index d2c7b24d78..a5b188bbfd 100644 --- a/test/e2e/testscenario/cudn/valid-scenarios-localnet.go +++ b/test/e2e/testdata/cudn/valid-scenarios-localnet.go @@ -1,8 +1,8 @@ package cudn -import 
"github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" +import "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" -var LocalnetValid = []testscenario.ValidateCRScenario{ +var LocalnetValid = []testdata.ValidateCRScenario{ { Description: "should create localnet topology successfully - minimal", Manifest: ` diff --git a/test/e2e/testdata/routeadvertisements/frr-k8s/frrconf.yaml.tmpl b/test/e2e/testdata/routeadvertisements/frr-k8s/frrconf.yaml.tmpl deleted file mode 100644 index ba4b4605ad..0000000000 --- a/test/e2e/testdata/routeadvertisements/frr-k8s/frrconf.yaml.tmpl +++ /dev/null @@ -1,46 +0,0 @@ -{{- define "frrconf.yaml" -}} -apiVersion: frrk8s.metallb.io/v1beta1 -kind: FRRConfiguration -metadata: - name: {{ .Name }} -{{- if .Labels }} - labels: -{{- range $k, $v := .Labels }} - {{ $k }}: {{ $v }} -{{- end }} -{{- end }} -spec: - bgp: - routers: -{{- range $v := .Routers }} - - asn: 64512 -{{- if .VRF }} - vrf: {{ .VRF }} -{{- end }} - neighbors: -{{- range .NeighborsIPv4 }} - - address: {{ . }} - asn: 64512 - disableMP: true - toReceive: - allowed: - mode: filtered - prefixes: -{{- range $v.NetworksIPv4 }} - - prefix: {{ . }} -{{- end }} -{{- end }} -{{- range .NeighborsIPv6 }} - - address: {{ . }} - asn: 64512 - disableMP: true - toReceive: - allowed: - mode: filtered - prefixes: -{{- range $v.NetworksIPv6 }} - - prefix: {{ . }} -{{- end }} -{{- end }} -{{- end }} -{{ end }} diff --git a/test/e2e/testdata/routeadvertisements/frr/daemons.tmpl b/test/e2e/testdata/routeadvertisements/frr/daemons.tmpl deleted file mode 100644 index 5434bdf418..0000000000 --- a/test/e2e/testdata/routeadvertisements/frr/daemons.tmpl +++ /dev/null @@ -1,82 +0,0 @@ -{{- define "daemons" -}} -# This file tells the frr package which daemons to start. -# -# Sample configurations for these daemons can be found in -# /usr/share/doc/frr/examples/. 
-# -# ATTENTION: -# -# When activating a daemon for the first time, a config file, even if it is -# empty, has to be present *and* be owned by the user and group "frr", else -# the daemon will not be started by /etc/init.d/frr. The permissions should -# be u=rw,g=r,o=. -# When using "vtysh" such a config file is also needed. It should be owned by -# group "frrvty" and set to ug=rw,o= though. Check /etc/pam.d/frr, too. -# -# The watchfrr and zebra daemons are always started. -# -bgpd=yes -ospfd=no -ospf6d=no -ripd=no -ripngd=no -isisd=no -pimd=no -ldpd=no -nhrpd=no -eigrpd=no -babeld=no -sharpd=no -pbrd=no -bfdd=yes -fabricd=no -vrrpd=no - -# -# If this option is set the /etc/init.d/frr script automatically loads -# the config via "vtysh -b" when the servers are started. -# Check /etc/pam.d/frr if you intend to use "vtysh"! -# -vtysh_enable=yes -zebra_options=" -A 127.0.0.1 -s 90000000" -bgpd_options=" -A 127.0.0.1" -ospfd_options=" -A 127.0.0.1" -ospf6d_options=" -A ::1" -ripd_options=" -A 127.0.0.1" -ripngd_options=" -A ::1" -isisd_options=" -A 127.0.0.1" -pimd_options=" -A 127.0.0.1" -ldpd_options=" -A 127.0.0.1" -nhrpd_options=" -A 127.0.0.1" -eigrpd_options=" -A 127.0.0.1" -babeld_options=" -A 127.0.0.1" -sharpd_options=" -A 127.0.0.1" -pbrd_options=" -A 127.0.0.1" -staticd_options="-A 127.0.0.1" -bfdd_options=" -A 127.0.0.1" -fabricd_options="-A 127.0.0.1" -vrrpd_options=" -A 127.0.0.1" - -# configuration profile -# -#frr_profile="traditional" -#frr_profile="datacenter" - -# -# This is the maximum number of FD's that will be available. -# Upon startup this is read by the control files and ulimit -# is called. Uncomment and use a reasonable value for your -# setup if you are expecting a large number of peers in -# say BGP. -#MAX_FDS=1024 - -# The list of daemons to watch is automatically generated by the init script. -#watchfrr_options="" - -# for debugging purposes, you can specify a "wrap" command to start instead -# of starting the daemon directly, e.g. 
to use valgrind on ospfd: -# ospfd_wrap="/usr/bin/valgrind" -# or you can use "all_wrap" for all daemons, e.g. to use perf record: -# all_wrap="/usr/bin/perf record --call-graph -" -# the normal daemon command is added to this at the end. -{{ end }} diff --git a/test/e2e/testdata/routeadvertisements/frr/frr.conf.tmpl b/test/e2e/testdata/routeadvertisements/frr/frr.conf.tmpl deleted file mode 100644 index a1beeab410..0000000000 --- a/test/e2e/testdata/routeadvertisements/frr/frr.conf.tmpl +++ /dev/null @@ -1,57 +0,0 @@ -{{- define "frr.conf" -}} -debug zebra events -debug zebra nht detailed -debug zebra kernel -debug zebra rib detail -debug zebra nexthop detail -debug bgp keepalives -debug bgp neighbor-events -debug bgp nht -debug bgp updates -debug bgp zebra -log stdout debugging -log syslog debugging -log file /etc/frr/frr.log debugging -{{ range .Routers -}} -router bgp 64512 {{ if .VRF }}vrf {{ .VRF }}{{ end }} - no bgp default ipv4-unicast - no bgp default ipv6-unicast - no bgp network import-check -{{- range .NeighborsIPv4 }} - neighbor {{ . }} remote-as 64512 - # zebra has been observed to fail to start for unknown reasons, - # reduce timers to try to minimize delay impact on tests - neighbor {{ . }} timers connect 10 - neighbor {{ . }} timers 15 5 -{{- end }} -{{- range .NeighborsIPv6 }} - neighbor {{ . }} remote-as 64512 - neighbor {{ . }} timers connect 10 - neighbor {{ . }} timers 15 5 -{{- end }} -{{- if .NeighborsIPv4 }} - address-family ipv4 unicast -{{- range .NeighborsIPv4 }} - neighbor {{ . }} route-reflector-client - neighbor {{ . }} activate - neighbor {{ . }} next-hop-self -{{- end }} -{{- range .NetworksIPv4 }} - network {{ . }} -{{- end }} - exit-address-family -{{- end }} -{{- if .NeighborsIPv6 }} - address-family ipv6 unicast -{{- range .NeighborsIPv6 }} - neighbor {{ . }} route-reflector-client - neighbor {{ . }} activate - neighbor {{ . }} next-hop-self -{{- end }} -{{- range .NetworksIPv6 }} - network {{ . 
}} -{{- end }} - exit-address-family -{{- end }} -{{ end }} -{{ end }} diff --git a/test/e2e/testscenario/scenario.go b/test/e2e/testdata/scenario.go similarity index 90% rename from test/e2e/testscenario/scenario.go rename to test/e2e/testdata/scenario.go index 4ee247fd98..db96d3b50b 100644 --- a/test/e2e/testscenario/scenario.go +++ b/test/e2e/testdata/scenario.go @@ -1,4 +1,4 @@ -package testscenario +package testdata // ValidateCRScenario represent test scenario where a manifest is applied and failed with the expected error type ValidateCRScenario struct { diff --git a/test/e2e/util.go b/test/e2e/util.go index d03559e79e..89ab1e12c9 100644 --- a/test/e2e/util.go +++ b/test/e2e/util.go @@ -7,11 +7,9 @@ import ( "math/rand" "net" "os" - "path/filepath" "regexp" "strconv" "strings" - "text/template" "time" "github.com/onsi/ginkgo/v2" @@ -32,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" + clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/debug" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" @@ -168,7 +167,7 @@ func newAgnhostPodOnNode(name, nodeName string, labels map[string]string, comman } // IsIPv6Cluster returns true if the kubernetes default service is IPv6 -func IsIPv6Cluster(c kubernetes.Interface) bool { +func IsIPv6Cluster(c clientset.Interface) bool { // Get the ClusterIP of the kubernetes service created in the default namespace svc, err := c.CoreV1().Services(metav1.NamespaceDefault).Get(context.Background(), "kubernetes", metav1.GetOptions{}) if err != nil { @@ -657,7 +656,7 @@ func waitClusterHealthy(f *framework.Framework, numControlPlanePods int, control // successfully rolled out following an update. // // If allowedNotReadyNodes is -1, this method returns immediately without waiting. 
-func waitForRollout(c kubernetes.Interface, ns string, resource string, allowedNotReadyNodes int32, timeout time.Duration) error { +func waitForRollout(c clientset.Interface, ns string, resource string, allowedNotReadyNodes int32, timeout time.Duration) error { if allowedNotReadyNodes == -1 { return nil } @@ -1130,24 +1129,24 @@ func randStr(n int) string { return string(b) } -func isCIDRIPFamilySupported(cs kubernetes.Interface, cidr string) bool { +func isCIDRIPFamilySupported(cs clientset.Interface, cidr string) bool { ginkgo.GinkgoHelper() gomega.Expect(cidr).To(gomega.ContainSubstring("/")) isIPv6 := utilnet.IsIPv6CIDRString(cidr) return (isIPv4Supported(cs) && !isIPv6) || (isIPv6Supported(cs) && isIPv6) } -func isIPv4Supported(cs kubernetes.Interface) bool { +func isIPv4Supported(cs clientset.Interface) bool { v4, _ := getSupportedIPFamilies(cs) return v4 } -func isIPv6Supported(cs kubernetes.Interface) bool { +func isIPv6Supported(cs clientset.Interface) bool { _, v6 := getSupportedIPFamilies(cs) return v6 } -func getSupportedIPFamilies(cs kubernetes.Interface) (bool, bool) { +func getSupportedIPFamilies(cs clientset.Interface) (bool, bool) { n, err := e2enode.GetRandomReadySchedulableNode(context.TODO(), cs) framework.ExpectNoError(err, "must fetch a Ready Node") v4NodeAddrs := e2enode.GetAddressesByTypeAndFamily(n, v1.NodeInternalIP, v1.IPv4Protocol) @@ -1155,19 +1154,6 @@ func getSupportedIPFamilies(cs kubernetes.Interface) (bool, bool) { return len(v4NodeAddrs) > 0, len(v6NodeAddrs) > 0 } -func getSupportedIPFamiliesSlice(cs kubernetes.Interface) []utilnet.IPFamily { - v4, v6 := getSupportedIPFamilies(cs) - switch { - case v4 && v6: - return []utilnet.IPFamily{utilnet.IPv4, utilnet.IPv6} - case v4: - return []utilnet.IPFamily{utilnet.IPv4} - case v6: - return []utilnet.IPFamily{utilnet.IPv6} - } - return nil -} - func isInterconnectEnabled() bool { val, present := os.LookupEnv("OVN_ENABLE_INTERCONNECT") return present && val == "true" @@ -1241,7 +1227,7 
@@ func routeToNode(nodeName string, ips []string, mtu int, add bool) error { cmd = []string{"ip", "-6"} } var err error - cmd = append(cmd, "route", ipOp, fmt.Sprintf("%s/%d", ip, mask), "dev", deploymentconfig.Get().ExternalBridgeName()) + cmd = append(cmd, "route", ipOp, fmt.Sprintf("%s/%d", ip, mask), "dev", "breth0") if mtu != 0 { cmd = append(cmd, "mtu", strconv.Itoa(mtu)) } @@ -1295,7 +1281,7 @@ func GetNodeIPv6LinkLocalAddressForEth0(nodeName string) (string, error) { // right-most match of the provided regex. Returns a map of subexpression name // to subexpression capture. A zero string name `""` maps to the full expression // capture. -func CaptureContainerOutput(ctx context.Context, c kubernetes.Interface, namespace, pod, container, regexpr string) (map[string]string, error) { +func CaptureContainerOutput(ctx context.Context, c clientset.Interface, namespace, pod, container, regexpr string) (map[string]string, error) { regex, err := regexp.Compile(regexpr) if err != nil { return nil, fmt.Errorf("failed to compile regexp %q: %w", regexpr, err) @@ -1366,62 +1352,9 @@ func matchIPv6StringFamily(ipStrings []string) (string, error) { return util.MatchIPStringFamily(true /*ipv6*/, ipStrings) } -func matchCIDRStringsByIPFamily(cidrs []string, families ...utilnet.IPFamily) []string { - var r []string - familySet := sets.New(families...) 
- for _, cidr := range cidrs { - if familySet.Has(utilnet.IPFamilyOfCIDRString(cidr)) { - r = append(r, cidr) - } - } - return r -} - -func splitCIDRStringsByIPFamily(cidrs []string) (ipv4 []string, ipv6 []string) { - for _, cidr := range cidrs { - switch { - case utilnet.IsIPv4CIDRString(cidr): - ipv4 = append(ipv4, cidr) - case utilnet.IsIPv6CIDRString(cidr): - ipv6 = append(ipv6, cidr) - } - } - return -} - -func splitIPStringsByIPFamily(ips []string) (ipv4 []string, ipv6 []string) { - for _, ip := range ips { - switch { - case utilnet.IsIPv4String(ip): - ipv4 = append(ipv4, ip) - case utilnet.IsIPv6String(ip): - ipv6 = append(ipv6, ip) - } - } - return -} - -func getFirstCIDROfFamily(family utilnet.IPFamily, ipnets []*net.IPNet) *net.IPNet { - for _, ipnet := range ipnets { - if utilnet.IPFamilyOfCIDR(ipnet) == family { - return ipnet - } - } - return nil -} - -func getFirstIPStringOfFamily(family utilnet.IPFamily, ips []string) string { - for _, ip := range ips { - if utilnet.IPFamilyOfString(ip) == family { - return ip - } - } - return "" -} - // This is a replacement for e2epod.DeletePodWithWait(), which does not handle pods that // may be automatically restarted (https://issues.k8s.io/126785) -func deletePodWithWait(ctx context.Context, c kubernetes.Interface, pod *v1.Pod) error { +func deletePodWithWait(ctx context.Context, c clientset.Interface, pod *v1.Pod) error { if pod == nil { return nil } @@ -1449,7 +1382,7 @@ func deletePodWithWait(ctx context.Context, c kubernetes.Interface, pod *v1.Pod) // This is a replacement for e2epod.DeletePodWithWaitByName(), which does not handle pods // that may be automatically restarted (https://issues.k8s.io/126785) -func deletePodWithWaitByName(ctx context.Context, c kubernetes.Interface, podName, podNamespace string) error { +func deletePodWithWaitByName(ctx context.Context, c clientset.Interface, podName, podNamespace string) error { pod, err := c.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{}) if 
err != nil { if apierrors.IsNotFound(err) { @@ -1467,7 +1400,7 @@ func deletePodWithWaitByName(ctx context.Context, c kubernetes.Interface, podNam // This is an alternative version of e2epod.WaitForPodNotFoundInNamespace(), which takes // a UID as well. -func waitForPodNotFoundInNamespace(ctx context.Context, c kubernetes.Interface, podName, ns string, uid types.UID, timeout time.Duration) error { +func waitForPodNotFoundInNamespace(ctx context.Context, c clientset.Interface, podName, ns string, uid types.UID, timeout time.Duration) error { err := framework.Gomega().Eventually(ctx, framework.HandleRetry(func(ctx context.Context) (*v1.Pod, error) { pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{}) if apierrors.IsNotFound(err) { @@ -1501,18 +1434,3 @@ func getAgnHostHTTPPortBindFullCMD(port uint16) []string { func getAgnHostHTTPPortBindCMDArgs(port uint16) []string { return []string{"netexec", fmt.Sprintf("--http-port=%d", port)} } - -// executeFileTemplate executes `name` template from the provided `templates` -// using `data`as input and writes the results to `directory/name` -func executeFileTemplate(templates *template.Template, directory, name string, data any) error { - f, err := os.OpenFile(filepath.Join(directory, name), os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - return err - } - defer f.Close() - err = templates.ExecuteTemplate(f, name, data) - if err != nil { - return err - } - return nil -} diff --git a/test/scripts/e2e-cp.sh b/test/scripts/e2e-cp.sh index 096debe8a6..59fc1cd01a 100755 --- a/test/scripts/e2e-cp.sh +++ b/test/scripts/e2e-cp.sh @@ -40,14 +40,6 @@ skip() { SKIPPED_TESTS+=$* } -SKIPPED_LABELED_TESTS="" -skip_label() { - if [ "$SKIPPED_LABELED_TESTS" != "" ]; then - SKIPPED_LABELED_TESTS+=" && " - fi - SKIPPED_LABELED_TESTS+="!($*)" -} - if [ "$PLATFORM_IPV4_SUPPORT" == true ]; then if [ "$PLATFORM_IPV6_SUPPORT" == true ]; then # No support for these features in dual-stack yet @@ -146,11 +138,6 @@ if [ 
"$ENABLE_ROUTE_ADVERTISEMENTS" != true ]; then skip $BGP_TESTS else if [ "$ADVERTISE_DEFAULT_NETWORK" = true ]; then - # Filter out extended RouteAdvertisements tests to keep job run time down - if [ "$ENABLE_NETWORK_SEGMENTATION" = true ]; then - skip_label "Feature:RouteAdvertisements && EXTENDED" - fi - # Some test don't work when the default network is advertised, either because # the configuration that the test excercises does not make sense for an advertised network, or # there is some bug or functional gap @@ -158,9 +145,9 @@ else # pod reached from default network through secondary interface, asymetric, configuration does not make sense # TODO: perhaps the secondary network attached pods should not be attached to default network - skip "Multi Homing A single pod with an OVN-K secondary network attached to a localnet network mapped to external primary interface bridge can be reached by a client pod in the default network on the same node" - skip "Multi Homing A single pod with an OVN-K secondary network attached to a localnet network mapped to external primary interface bridge can be reached by a client pod in the default network on a different node" - + skip "Multi Homing A single pod with an OVN-K secondary network attached to a localnet network mapped to breth0 can be reached by a client pod in the default network on the same node" + skip "Multi Homing A single pod with an OVN-K secondary network attached to a localnet network mapped to breth0 can be reached by a client pod in the default network on a different node" + # these tests require metallb but the configuration we do for it is not compatible with the configuration we do to advertise the default network # TODO: consolidate configuration skip "Load Balancer Service Tests with MetalLB" @@ -216,7 +203,6 @@ go test -test.timeout ${GO_TEST_TIMEOUT}m -v . 
\ -ginkgo.timeout ${TEST_TIMEOUT}m \ -ginkgo.flake-attempts ${FLAKE_ATTEMPTS:-2} \ -ginkgo.skip="${SKIPPED_TESTS}" \ - ${SKIPPED_LABELED_TESTS:+-ginkgo.label-filter="${SKIPPED_LABELED_TESTS}"} \ -ginkgo.junit-report=${E2E_REPORT_DIR}/junit_${E2E_REPORT_PREFIX}report.xml \ -provider skeleton \ -kubeconfig ${KUBECONFIG} \ diff --git a/test/scripts/install-kind.sh b/test/scripts/install-kind.sh index 1b41646c7e..d7674159e1 100755 --- a/test/scripts/install-kind.sh +++ b/test/scripts/install-kind.sh @@ -78,5 +78,8 @@ else ./kind.sh fi +if [ "$KIND_INSTALL_KUBEVIRT" == true ]; then + sudo mv ./bin/virtctl /usr/local/bin/virtctl +fi popd # go our of $SCRIPT_DIR/../../contrib