From dccddff2f619094791a30f202b22e577f30a617b Mon Sep 17 00:00:00 2001 From: Riccardo Ravaioli Date: Wed, 21 Jan 2026 12:28:58 +0100 Subject: [PATCH 1/3] Remove IC zone migration HACK code Remove the temporary migration code that was added in 2023 to support the transition to OVN Interconnect (IC) architecture. This HACK code tracked whether remote zone nodes had completed migration using the "k8s.ovn.org/remote-zone-migrated" annotation. This code is no longer needed. Changes: - Remove OvnNodeMigratedZoneName constant and helper functions (SetNodeZoneMigrated, HasNodeMigratedZone, NodeMigratedZoneAnnotationChanged) - Remove migrated field from nodeInfo struct in node_tracker.go - Simplify isLocalZoneNode() in base_network_controller.go and egressip.go - Remove HACK helper functions (checkOVNSBNodeLRSR, fetchLBNames, lbExists, portExists) and migration sync flow from default_node_network_controller.go - Remove remote-zone-migrated annotation from webhook allowed annotations - Update tests to remove references to the migration annotation Assisted by Claude Opus 4.5 Signed-off-by: Riccardo Ravaioli (cherry picked from commit 7d408c1ca8bebc510cef32d3011634970c4122ed) (cherry picked from commit 83de58c1abc3e41e11913039ad2a78da519d2815) Signed-off-by: Riccardo Ravaioli (cherry picked from commit e61ba4baba83bd7b4f6facf5a8dc28780205c1c0) (cherry picked from commit ab1f57893768ef6d6cedaded87c7b164b82b1e8d) --- .../cmd/ovnkube-identity/ovnkubeidentity.go | 2 +- .../node/default_node_network_controller.go | 188 +----------------- .../pkg/ovn/base_network_controller.go | 14 -- .../ovn/controller/services/node_tracker.go | 27 +-- go-controller/pkg/ovn/egressip.go | 14 -- go-controller/pkg/ovn/egressip_test.go | 45 +---- go-controller/pkg/ovn/egressip_udn_l2_test.go | 12 -- go-controller/pkg/ovn/egressip_udn_l3_test.go | 10 - go-controller/pkg/ovn/multipolicy_test.go | 1 - go-controller/pkg/ovnwebhook/nodeadmission.go | 17 +- .../pkg/ovnwebhook/nodeadmission_test.go | 119 +---------- go-controller/pkg/util/node_annotations.go | 36 ---- 12 files changed, 12 insertions(+), 473 deletions(-) diff --git a/go-controller/cmd/ovnkube-identity/ovnkubeidentity.go b/go-controller/cmd/ovnkube-identity/ovnkubeidentity.go index c0fb146292..0e5f50b020 100644 --- a/go-controller/cmd/ovnkube-identity/ovnkubeidentity.go +++ b/go-controller/cmd/ovnkube-identity/ovnkubeidentity.go @@ -329,7 +329,7 @@ func runWebhook(ctx context.Context, restCfg *rest.Config) error { nodeWebhook := admission.WithCustomValidator( scheme.Scheme, &corev1.Node{}, - ovnwebhook.NewNodeAdmissionWebhook(cliCfg.enableInterconnect, cliCfg.enableHybridOverlay, cliCfg.extraAllowedUsers.Value()...), + ovnwebhook.NewNodeAdmissionWebhook(cliCfg.enableHybridOverlay, cliCfg.extraAllowedUsers.Value()...), ).WithRecoverPanic(true) nodeHandler, err := admission.StandaloneWebhook( diff --git a/go-controller/pkg/node/default_node_network_controller.go b/go-controller/pkg/node/default_node_network_controller.go index 66722d9694..361e122430 100644 --- a/go-controller/pkg/node/default_node_network_controller.go +++ b/go-controller/pkg/node/default_node_network_controller.go @@ -799,60 +799,6 @@ func getOVNSBZone() (string, error) { return dbZone, nil } -/** HACK BEGIN **/ -// TODO(tssurya): Remove this HACK a few months from now. 
-// checkOVNSBNodeLRSR returns true if the logical router static route for the -// the given nodeSubnet is present in the SBDB -func checkOVNSBNodeLRSR(nodeSubnet *net.IPNet) bool { - var matchv4, matchv6 string - v6 := true - v4 := true - if config.IPv6Mode && utilnet.IsIPv6CIDR(nodeSubnet) { - matchv6 = fmt.Sprintf("match=\"reg7 == 0 && ip6.dst == %s\"", nodeSubnet) - stdout, stderr, err := util.RunOVNSbctl("--bare", "--columns", "_uuid", "find", "logical_flow", matchv6) - klog.Infof("Upgrade Hack: checkOVNSBNodeLRSR for node - %s : match %s : stdout - %s : stderr - %s : err %v", - nodeSubnet, matchv6, stdout, stderr, err) - v6 = (err == nil && stderr == "" && stdout != "") - } - if config.IPv4Mode && !utilnet.IsIPv6CIDR(nodeSubnet) { - matchv4 = fmt.Sprintf("match=\"reg7 == 0 && ip4.dst == %s\"", nodeSubnet) - stdout, stderr, err := util.RunOVNSbctl("--bare", "--columns", "_uuid", "find", "logical_flow", matchv4) - klog.Infof("Upgrade Hack: checkOVNSBNodeLRSR for node - %s : match %s : stdout - %s : stderr - %s : err %v", - nodeSubnet, matchv4, stdout, stderr, err) - v4 = (err == nil && stderr == "" && stdout != "") - } - return v6 && v4 -} - -func fetchLBNames() string { - stdout, stderr, err := util.RunOVNSbctl("--bare", "--columns", "name", "find", "Load_Balancer") - if err != nil || stderr != "" { - klog.Errorf("Upgrade hack: fetchLBNames could not fetch services %v/%v", err, stderr) - return stdout // will be empty and we will retry - } - klog.Infof("Upgrade Hack: fetchLBNames: stdout - %s : stderr - %s : err %v", stdout, stderr, err) - return stdout -} - -// lbExists returns true if the OVN load balancer for the corresponding namespace/name -// was created -func lbExists(lbNames, namespace, name string) bool { - stitchedServiceName := "Service_" + namespace + "/" + name - match := strings.Contains(lbNames, stitchedServiceName) - klog.Infof("Upgrade Hack: lbExists for service - %s/%s/%s : match - %v", - namespace, name, stitchedServiceName, match) - return match -} - -func portExists(namespace, name string) bool { - lspName := fmt.Sprintf("logical_port=%s", util.GetLogicalPortName(namespace, name)) - stdout, stderr, err := util.RunOVNSbctl("--bare", "--columns", "_uuid", "find", "Port_Binding", lspName) - klog.Infof("Upgrade Hack: portExists for pod - %s/%s : stdout - %s : stderr - %s", namespace, name, stdout, stderr) - return err == nil && stderr == "" && stdout != "" -} - -/** HACK END **/ - // Init executes the first steps to start the DefaultNodeNetworkController. // It is split from Start() and executed before UserDefinedNodeNetworkController (UDNNC) // to allow UDNNC to reference the openflow manager created in Init. 
@@ -923,12 +869,9 @@ func (nc *DefaultNodeNetworkController) Init(ctx context.Context) error { return fmt.Errorf("timed out waiting for the node zone %s to match the OVN Southbound db zone, err: %v, err1: %v", config.Default.Zone, err, err1) } - // if its nonIC OR IC=true and if its phase1 OR if its IC to IC upgrades - if !config.OVNKubernetesFeature.EnableInterconnect || sbZone == types.OvnDefaultZone || util.HasNodeMigratedZone(node) { // if its nonIC or if its phase1 - for _, auth := range []config.OvnAuthConfig{config.OvnNorth, config.OvnSouth} { - if err := auth.SetDBAuth(); err != nil { - return err - } + for _, auth := range []config.OvnAuthConfig{config.OvnNorth, config.OvnSouth} { + if err := auth.SetDBAuth(); err != nil { + return err } } @@ -1069,7 +1012,6 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { klog.Infof("Starting the default node network controller") var err error - var node *corev1.Node if nc.mgmtPortController == nil { return fmt.Errorf("default node network controller hasn't been pre-started") @@ -1082,11 +1024,6 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { klog.Errorf("Setting klog \"loglevel\" to 5 failed, err: %v", err) } - if node, err = nc.watchFactory.GetNode(nc.name); err != nil { - return fmt.Errorf("error retrieving node %s: %v", nc.name, err) - } - - nodeAnnotator := kube.NewNodeAnnotator(nc.Kube, node.Name) waiter := newStartupWaiter() // Complete gateway initialization @@ -1114,125 +1051,6 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { } } - /** HACK BEGIN **/ - // TODO(tssurya): Remove this HACK a few months from now. This has been added only to - // minimize disruption for upgrades when moving to interconnect=true. - // We want the legacy ovnkube-master to wait for remote ovnkube-node to - // signal it using "k8s.ovn.org/remote-zone-migrated" annotation before - // considering a node as remote when we upgrade from "global" (1 zone IC) - // zone to multi-zone. This is so that network disruption for the existing workloads - // is negligible and until the point where ovnkube-node flips the switch to connect - // to the new SBDB, it would continue talking to the legacy RAFT ovnkube-sbdb to ensure - // OVN/OVS flows are intact. - // STEP1: ovnkube-node start's up in remote zone and sets the "k8s.ovn.org/zone-name" above. - // STEP2: We delay the flip of connection for ovnkube-node(ovn-controller) to the new remote SBDB - // until the new remote ovnkube-controller has finished programming all the K8s core objects - // like routes, services and pods. Until then the ovnkube-node will talk to legacy SBDB. - // STEP3: Once we get the signal that the new SBDB is ready, we set the "k8s.ovn.org/remote-zone-migrated" annotation - // STEP4: We call setDBAuth to now point to new SBDB - // STEP5: Legacy ovnkube-master sees "k8s.ovn.org/remote-zone-migrated" annotation on this node and now knows that - // this node has remote-zone-migrated successfully and tears down old setup and creates new IC resource - // plumbing (takes 80ms based on what we saw in CI runs so we might still have that small window of disruption). - // NOTE: ovnkube-node in DPU host mode doesn't go through upgrades for OVN-IC and has no SBDB to connect to. Thus this part shall be skipped. 
- var syncNodes, syncServices, syncPods bool - if config.OvnKubeNode.Mode != types.NodeModeDPUHost && config.OVNKubernetesFeature.EnableInterconnect && nc.sbZone != types.OvnDefaultZone && !util.HasNodeMigratedZone(node) { - klog.Info("Upgrade Hack: Interconnect is enabled") - var err1 error - start := time.Now() - err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 300*time.Second, true, func(_ context.Context) (bool, error) { - // we loop through all the nodes in the cluster and ensure ovnkube-controller has finished creating the LRSR required for pod2pod overlay communication - if !syncNodes { - nodes, err := nc.watchFactory.GetNodes() - if err != nil { - err1 = fmt.Errorf("upgrade hack: error retrieving node %s: %v", nc.name, err) - return false, nil - } - for _, node := range nodes { - node := *node - if nc.name != node.Name && util.GetNodeZone(&node) != config.Default.Zone && !util.NoHostSubnet(&node) { - nodeSubnets, err := util.ParseNodeHostSubnetAnnotation(&node, types.DefaultNetworkName) - if err != nil { - if util.IsAnnotationNotSetError(err) { - klog.Infof("Skipping node %q. k8s.ovn.org/node-subnets annotation was not found", node.Name) - continue - } - err1 = fmt.Errorf("unable to fetch node-subnet annotation for node %s: err, %v", node.Name, err) - return false, nil - } - for _, nodeSubnet := range nodeSubnets { - klog.Infof("Upgrade Hack: node %s, subnet %s", node.Name, nodeSubnet) - if !checkOVNSBNodeLRSR(nodeSubnet) { - err1 = fmt.Errorf("upgrade hack: unable to find LRSR for node %s", node.Name) - return false, nil - } - } - } - } - klog.Infof("Upgrade Hack: Syncing nodes took %v", time.Since(start)) - syncNodes = true - } - // we loop through all existing services in the cluster and ensure ovnkube-controller has finished creating LoadBalancers required for services to work - if !syncServices { - services, err := nc.watchFactory.GetServices() - if err != nil { - err1 = fmt.Errorf("upgrade hack: error retrieving the services %v", err) - return false, nil - } - lbNames := fetchLBNames() - for _, s := range services { - // don't process headless service - if !util.ServiceTypeHasClusterIP(s) || !util.IsClusterIPSet(s) { - continue - } - if !lbExists(lbNames, s.Namespace, s.Name) { - return false, nil - } - } - klog.Infof("Upgrade Hack: Syncing services took %v", time.Since(start)) - syncServices = true - } - if !syncPods { - pods, err := nc.watchFactory.GetAllPods() - if err != nil { - err1 = fmt.Errorf("upgrade hack: error retrieving the services %v", err) - return false, nil - } - for _, p := range pods { - if !util.PodScheduled(p) || util.PodCompleted(p) || util.PodWantsHostNetwork(p) { - continue - } - if p.Spec.NodeName != nc.name { - // remote pod - continue - } - if !portExists(p.Namespace, p.Name) { - return false, nil - } - } - klog.Infof("Upgrade Hack: Syncing pods took %v", time.Since(start)) - syncPods = true - } - return true, nil - }) - if err != nil { - return fmt.Errorf("upgrade hack: failed while waiting for the remote ovnkube-controller to be ready: %v, %v", err, err1) - } - if err := util.SetNodeZoneMigrated(nodeAnnotator, nc.sbZone); err != nil { - return fmt.Errorf("upgrade hack: failed to set node zone annotation for node %s: %w", nc.name, err) - } - if err := nodeAnnotator.Run(); err != nil { - return fmt.Errorf("upgrade hack: failed to set node %s annotations: %w", nc.name, err) - } - klog.Infof("ovnkube-node %s finished annotating node with remote-zone-migrated; took: %v", nc.name, time.Since(start)) - for _, auth := range 
[]config.OvnAuthConfig{config.OvnNorth, config.OvnSouth} { - if err := auth.SetDBAuth(); err != nil { - return fmt.Errorf("upgrade hack: Unable to set the authentication towards OVN local dbs") - } - } - klog.Infof("Upgrade hack: ovnkube-node %s finished setting DB Auth; took: %v", nc.name, time.Since(start)) - } - /** HACK END **/ - // Wait for management port and gateway resources to be created by the master klog.Infof("Waiting for gateway and management port readiness...") start := time.Now() diff --git a/go-controller/pkg/ovn/base_network_controller.go b/go-controller/pkg/ovn/base_network_controller.go index ea72526b10..c6a3408e27 100644 --- a/go-controller/pkg/ovn/base_network_controller.go +++ b/go-controller/pkg/ovn/base_network_controller.go @@ -950,20 +950,6 @@ func (bnc *BaseNetworkController) GetLocalZoneNodes() ([]*corev1.Node, error) { // isLocalZoneNode returns true if the node is part of the local zone. func (bnc *BaseNetworkController) isLocalZoneNode(node *corev1.Node) bool { - /** HACK BEGIN **/ - // TODO(tssurya): Remove this HACK a few months from now. This has been added only to - // minimize disruption for upgrades when moving to interconnect=true. - // We want the legacy ovnkube-master to wait for remote ovnkube-node to - // signal it using "k8s.ovn.org/remote-zone-migrated" annotation before - // considering a node as remote when we upgrade from "global" (1 zone IC) - // zone to multi-zone. This is so that network disruption for the existing workloads - // is negligible and until the point where ovnkube-node flips the switch to connect - // to the new SBDB, it would continue talking to the legacy RAFT ovnkube-sbdb to ensure - // OVN/OVS flows are intact. - if bnc.zone == types.OvnDefaultZone { - return !util.HasNodeMigratedZone(node) - } - /** HACK END **/ return util.GetNodeZone(node) == bnc.zone } diff --git a/go-controller/pkg/ovn/controller/services/node_tracker.go b/go-controller/pkg/ovn/controller/services/node_tracker.go index 341764904a..7079ba2081 100644 --- a/go-controller/pkg/ovn/controller/services/node_tracker.go +++ b/go-controller/pkg/ovn/controller/services/node_tracker.go @@ -56,10 +56,6 @@ type nodeInfo struct { // The node's zone zone string - /** HACK BEGIN **/ - // has the node migrated to remote? - migrated bool - /** HACK END **/ // The list of node's management IPs mgmtIPs []net.IP @@ -127,7 +123,6 @@ func (nt *nodeTracker) Start(nodeInformer coreinformers.NodeInformer) (cache.Res oldObj.Name != newObj.Name || util.NodeHostCIDRsAnnotationChanged(oldObj, newObj) || util.NodeZoneAnnotationChanged(oldObj, newObj) || - util.NodeMigratedZoneAnnotationChanged(oldObj, newObj) || util.NoHostSubnet(oldObj) != util.NoHostSubnet(newObj) { nt.updateNode(newObj) } @@ -154,7 +149,7 @@ func (nt *nodeTracker) Start(nodeInformer coreinformers.NodeInformer) (cache.Res // updateNodeInfo updates the node info cache, and syncs all services // if it changed. 
-func (nt *nodeTracker) updateNodeInfo(nodeName, switchName, routerName, chassisID string, l3gatewayAddresses, hostAddresses []net.IP, podSubnets []*net.IPNet, mgmtIPs []net.IP, zone string, nodePortDisabled, migrated bool) { +func (nt *nodeTracker) updateNodeInfo(nodeName, switchName, routerName, chassisID string, l3gatewayAddresses, hostAddresses []net.IP, podSubnets []*net.IPNet, mgmtIPs []net.IP, zone string, nodePortDisabled bool) { ni := nodeInfo{ name: nodeName, l3gatewayAddresses: l3gatewayAddresses, @@ -166,7 +161,6 @@ func (nt *nodeTracker) updateNodeInfo(nodeName, switchName, routerName, chassisI chassisID: chassisID, nodePortDisabled: nodePortDisabled, zone: zone, - migrated: migrated, } for i := range podSubnets { ni.podSubnets = append(ni.podSubnets, *podSubnets[i]) // de-pointer @@ -275,7 +269,6 @@ func (nt *nodeTracker) updateNode(node *corev1.Node) { mgmtIPs, util.GetNodeZone(node), !nodePortEnabled, - util.HasNodeMigratedZone(node), ) } @@ -285,24 +278,6 @@ func (nt *nodeTracker) updateNode(node *corev1.Node) { func (nt *nodeTracker) getZoneNodes() []nodeInfo { out := make([]nodeInfo, 0, len(nt.nodes)) for _, node := range nt.nodes { - /** HACK BEGIN **/ - // TODO(tssurya): Remove this HACK a few months from now. This has been added only to - // minimize disruption for upgrades when moving to interconnect=true. - // We want the legacy ovnkube-master to wait for remote ovnkube-node to - // signal it using "k8s.ovn.org/remote-zone-migrated" annotation before - // considering a node as remote when we upgrade from "global" (1 zone IC) - // zone to multi-zone. This is so that network disruption for the existing workloads - // is negligible and until the point where ovnkube-node flips the switch to connect - // to the new SBDB, it would continue talking to the legacy RAFT ovnkube-sbdb to ensure - // OVN/OVS flows are intact. Legacy ovnkube-master must not delete the service load - // balancers for this node till it has finished migration - if nt.zone == types.OvnDefaultZone { - if !node.migrated { - out = append(out, node) - } - continue - } - /** HACK END **/ if node.zone == nt.zone { out = append(out, node) } diff --git a/go-controller/pkg/ovn/egressip.go b/go-controller/pkg/ovn/egressip.go index 194bb9989a..259df69dd9 100644 --- a/go-controller/pkg/ovn/egressip.go +++ b/go-controller/pkg/ovn/egressip.go @@ -1145,20 +1145,6 @@ func (e *EgressIPController) isPodScheduledinLocalZone(pod *corev1.Pod) bool { // isLocalZoneNode returns true if the node is part of the local zone. func (e *EgressIPController) isLocalZoneNode(node *corev1.Node) bool { - /** HACK BEGIN **/ - // TODO(tssurya): Remove this HACK a few months from now. This has been added only to - // minimize disruption for upgrades when moving to interconnect=true. - // We want the legacy ovnkube-master to wait for remote ovnkube-node to - // signal it using "k8s.ovn.org/remote-zone-migrated" annotation before - // considering a node as remote when we upgrade from "global" (1 zone IC) - // zone to multi-zone. This is so that network disruption for the existing workloads - // is negligible and until the point where ovnkube-node flips the switch to connect - // to the new SBDB, it would continue talking to the legacy RAFT ovnkube-sbdb to ensure - // OVN/OVS flows are intact. 
- if e.zone == types.OvnDefaultZone { - return !util.HasNodeMigratedZone(node) - } - /** HACK END **/ return util.GetNodeZone(node) == e.zone } diff --git a/go-controller/pkg/ovn/egressip_test.go b/go-controller/pkg/ovn/egressip_test.go index cb15a3de1d..d788141a01 100644 --- a/go-controller/pkg/ovn/egressip_test.go +++ b/go-controller/pkg/ovn/egressip_test.go @@ -163,9 +163,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s\"}", ni.transitPortIP), // used only for ic=true test "k8s.ovn.org/zone-name": ni.zone, } - if ni.zone != "global" { - annotations["k8s.ovn.org/remote-zone-migrated"] = "" - } nodes = append(nodes, getNodeObj(fmt.Sprintf("node%d", nodeSuffix), annotations, map[string]string{})) nodeSuffix = nodeSuffix + 1 } @@ -195,9 +192,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv6\":\"%s\"}", ni.transitPortIP), // used only for ic=true test "k8s.ovn.org/zone-name": ni.zone, } - if ni.zone != "global" { - annotations["k8s.ovn.org/remote-zone-migrated"] = "" - } nodes = append(nodes, getNodeObj(fmt.Sprintf("node%d", nodeSuffix), annotations, map[string]string{})) nodeSuffix = nodeSuffix + 1 } @@ -1816,12 +1810,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" "k8s.ovn.org/egress-assignable": "", } node2 := nodes[1] - if node1Zone != "global" { - node1.Annotations["k8s.ovn.org/remote-zone-migrated"] = node1Zone // used only for ic=true test - } - if node2Zone != "global" { - node2.Annotations["k8s.ovn.org/remote-zone-migrated"] = node2Zone // used only for ic=true test - } + egressPod := *newPodWithLabels(eipNamespace, podName, node1Name, podV4IP, egressPodLabel) egressNamespace := newNamespace(eipNamespace) @@ -2623,9 +2612,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" "k8s.ovn.org/zone-name": node1Zone, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4), } - if node1Zone != "global" { - annotations["k8s.ovn.org/remote-zone-migrated"] = node1Zone // used only for ic=true test - } labels := map[string]string{ "k8s.ovn.org/egress-assignable": "", } @@ -2637,9 +2623,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" "k8s.ovn.org/zone-name": node2Zone, // used only for ic=true test util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4), } - if node2Zone != "global" { - annotations["k8s.ovn.org/remote-zone-migrated"] = node2Zone // used only for ic=true test - } labels = map[string]string{} node2 := getNodeObj(node2Name, annotations, labels) @@ -3430,7 +3413,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\"}", v4Node1Subnet, v6Node1Subnet), "k8s.ovn.org/node-transit-switch-port-ifaddr": "{\"ipv4\":\"100.88.0.2/16\", \"ipv6\": \"fd97::2/64\"}", util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", nodeIPv4), - "k8s.ovn.org/zone-name": node1Name, + "k8s.ovn.org/zone-name": "global", } node := getNodeObj(node1Name, annotations, map[string]string{}) // add node to avoid errori-ing out on transit switch IP fetch fakeOvn.startWithDBSetup( @@ -3619,7 +3602,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" } if !isnode1Local { annotations["k8s.ovn.org/zone-name"] = "remote" - annotations["k8s.ovn.org/remote-zone-migrated"] = "remote" 
// used only for ic=true test } node1 := getNodeObj(node1Name, annotations, map[string]string{}) // add node to avoid errori-ing out on transit switch IP fetch @@ -3636,7 +3618,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" if !isnode2Local { annotations["k8s.ovn.org/zone-name"] = "remote" - annotations["k8s.ovn.org/remote-zone-migrated"] = "remote" // used only for ic=true test } node2 := getNodeObj(node2Name, annotations, map[string]string{}) // add node to avoid errori-ing out on transit switch IP fetch dynamicNeighRouters := "true" @@ -4777,9 +4758,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" // pod lives on node 1, therefore set the zone node1 := newNodeGlobalZoneNotEgressableV6Only(node1Name, "0:0:0:0:0:feff:c0a8:8e0c/64") node1.Annotations["k8s.ovn.org/zone-name"] = podZone - if podZone != "global" { - node1.Annotations["k8s.ovn.org/remote-zone-migrated"] = podZone // used only for ic=true test - } _, node1Subnet, _ := net.ParseCIDR(v6Node1Subnet) _, node2Subnet, _ := net.ParseCIDR(v6Node2Subnet) dynamicNeighRouters := "true" @@ -5015,9 +4993,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" // pod is host by node 1 therefore we set its zone node1 := newNodeGlobalZoneNotEgressableV6Only(node1Name, "0:0:0:0:0:fedf:c0a8:8e0c/64") node1.Annotations["k8s.ovn.org/zone-name"] = podZone - if podZone != "global" { - node1.Annotations["k8s.ovn.org/remote-zone-migrated"] = podZone // used only for ic=true test - } _, node1Subnet, _ := net.ParseCIDR(v6Node1Subnet) _, node2Subnet, _ := net.ParseCIDR(v6Node2Subnet) dynamicNeighRouters := "true" @@ -5272,9 +5247,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" // pod is hosted by node 1 therefore we set its zone node1 := newNodeGlobalZoneNotEgressableV6Only(node1Name, "0:0:0:0:0:feff:c0a8:8e0c/64") node1.Annotations["k8s.ovn.org/zone-name"] = podZone - if podZone != "global" { - node1.Annotations["k8s.ovn.org/remote-zone-migrated"] = podZone // used only for ic=true test - } _, node1Subnet, _ := net.ParseCIDR(v6Node1Subnet) _, node2Subnet, _ := net.ParseCIDR(v6Node2Subnet) egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets(nil, types.DefaultNetworkName, DefaultNetworkControllerName) @@ -5602,9 +5574,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), util.OvnNodeID: "2", } - if node1Zone != "global" { - annotations["k8s.ovn.org/remote-zone-migrated"] = node1Zone // used only for ic=true test - } labels := map[string]string{ "k8s.ovn.org/egress-assignable": "", } @@ -5617,9 +5586,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), util.OvnNodeID: "3", } - if node2Zone != "global" { - annotations["k8s.ovn.org/remote-zone-migrated"] = node2Zone // used only for ic=true test - } node2 := getNodeObj(node2Name, annotations, labels) _, node2Subnet, _ := net.ParseCIDR(v4Node2Subnet) @@ -7534,9 +7500,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" "k8s.ovn.org/zone-name": node1Zone, // used only for ic=true test util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), } - if node1Zone != "global" { - annotations["k8s.ovn.org/remote-zone-migrated"] = node1Zone // used only for ic=true test - } labels := map[string]string{ "k8s.ovn.org/egress-assignable": "", } 
@@ -7550,9 +7513,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" "k8s.ovn.org/zone-name": node2Zone, // used only for ic=true test util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), } - if node2Zone != "global" { - annotations["k8s.ovn.org/remote-zone-migrated"] = node2Zone // used only for ic=true test - } node2 := getNodeObj(node2Name, annotations, map[string]string{}) eIP1 := egressipv1.EgressIP{ ObjectMeta: newEgressIPMeta(egressIPName), @@ -11676,7 +11636,6 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" } if isPodRemote { annotations["k8s.ovn.org/zone-name"] = "remote" - annotations["k8s.ovn.org/remote-zone-migrated"] = "remote" } node2 := getNodeObj(node2Name, annotations, map[string]string{}) diff --git a/go-controller/pkg/ovn/egressip_udn_l2_test.go b/go-controller/pkg/ovn/egressip_udn_l2_test.go index 672fb2143b..e878593da6 100644 --- a/go-controller/pkg/ovn/egressip_udn_l2_test.go +++ b/go-controller/pkg/ovn/egressip_udn_l2_test.go @@ -534,7 +534,6 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node1Tsp), "k8s.ovn.org/zone-name": node1Name, "k8s.ovn.org/node-chassis-id": "473ca66d-d800-472f-b289-1ab81ae7f21c", - "k8s.ovn.org/remote-zone-migrated": node1Name, util.OvnNodeID: "1", util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), util.OvnNodeL3GatewayConfig: fmt.Sprintf(`{"%s":{"mode":"local","mac-address":"7e:57:f8:f0:3c:49", "ip-address":"%s", "next-hop":"%s", "next-hops": ["%s"]}, @@ -550,7 +549,6 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node2Tsp), "k8s.ovn.org/zone-name": node2Name, "k8s.ovn.org/node-chassis-id": "473ca66d-d800-472f-b289-1ab81ae7f21c", - "k8s.ovn.org/remote-zone-migrated": node2Name, util.OvnNodeID: "2", util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), util.OvnNodeL3GatewayConfig: fmt.Sprintf(`{"%s":{"mode":"local","mac-address":"7e:57:f8:f0:3c:49", "ip-address":"%s", "next-hop":"%s", "next-hops": ["%s"]}, @@ -1040,7 +1038,6 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node1Tsp), "k8s.ovn.org/zone-name": node1Name, "k8s.ovn.org/node-chassis-id": "473ca66d-d800-472f-b289-1ab81ae7f21c", - "k8s.ovn.org/remote-zone-migrated": node1Name, util.OvnNodeID: "1", util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), util.OvnNodeL3GatewayConfig: fmt.Sprintf(`{"%s":{"mode":"local","mac-address":"7e:57:f8:f0:3c:49", "ip-address":"%s", "next-hop":"%s", "next-hops": ["%s"]}, @@ -1056,7 +1053,6 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node2Tsp), "k8s.ovn.org/zone-name": node2Name, "k8s.ovn.org/node-chassis-id": "473ca66d-d800-472f-b289-1ab81ae7f21c", - "k8s.ovn.org/remote-zone-migrated": node2Name, util.OvnNodeID: "2", util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), util.OvnNodeL3GatewayConfig: fmt.Sprintf(`{"%s":{"mode":"local","mac-address":"7e:57:f8:f0:3c:49", "ip-address":"%s", "next-hop":"%s", "next-hops": ["%s"]}, @@ -1522,7 +1518,6 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol 
"k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node1Tsp), "k8s.ovn.org/zone-name": node1Name, "k8s.ovn.org/node-chassis-id": "473ca66d-d800-472f-b289-1ab81ae7f21c", - "k8s.ovn.org/remote-zone-migrated": node1Name, util.OvnNodeID: "1", util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), util.OvnNodeL3GatewayConfig: fmt.Sprintf(`{"%s":{"mode":"local","mac-address":"7e:57:f8:f0:3c:49", "ip-address":"%s", "next-hop":"%s", "next-hops": ["%s"]}, @@ -1538,7 +1533,6 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node2Tsp), "k8s.ovn.org/zone-name": node2Name, "k8s.ovn.org/node-chassis-id": "473ca66d-d800-472f-b289-1ab81ae7f21c", - "k8s.ovn.org/remote-zone-migrated": node2Name, util.OvnNodeID: "2", util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), util.OvnNodeL3GatewayConfig: fmt.Sprintf(`{"%s":{"mode":"local","mac-address":"7e:57:f8:f0:3c:49", "ip-address":"%s", "next-hop":"%s", "next-hops": ["%s"]}, @@ -1886,7 +1880,6 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node1Tsp), "k8s.ovn.org/zone-name": node1Name, "k8s.ovn.org/node-chassis-id": "473ca66d-d800-472f-b289-1ab81ae7f21c", - "k8s.ovn.org/remote-zone-migrated": node1Name, util.OvnNodeID: "1", util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), util.OvnNodeL3GatewayConfig: fmt.Sprintf(`{"%s":{"mode":"local","mac-address":"7e:57:f8:f0:3c:49", "ip-address":"%s", "next-hop":"%s", "next-hops": ["%s"]}, @@ -1902,7 +1895,6 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node2Tsp), "k8s.ovn.org/zone-name": node2Name, "k8s.ovn.org/node-chassis-id": "473ca66d-d800-472f-b289-1ab81ae7f21c", - "k8s.ovn.org/remote-zone-migrated": node2Name, util.OvnNodeID: "2", util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), util.OvnNodeL3GatewayConfig: fmt.Sprintf(`{"%s":{"mode":"local","mac-address":"7e:57:f8:f0:3c:49", "ip-address":"%s", "next-hop":"%s", "next-hops": ["%s"]}, @@ -2239,7 +2231,6 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node1Tsp), "k8s.ovn.org/zone-name": node1Name, "k8s.ovn.org/node-chassis-id": "473ca66d-d800-472f-b289-1ab81ae7f21c", - "k8s.ovn.org/remote-zone-migrated": node1Name, util.OvnNodeID: "1", util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), util.OvnNodeL3GatewayConfig: fmt.Sprintf(`{"%s":{"mode":"local","mac-address":"7e:57:f8:f0:3c:49", "ip-address":"%s", "next-hop":"%s", "next-hops": ["%s"]}, @@ -2255,7 +2246,6 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node2Tsp), "k8s.ovn.org/zone-name": node2Name, "k8s.ovn.org/node-chassis-id": "473ca66d-d800-472f-b289-1ab81ae7f21c", - "k8s.ovn.org/remote-zone-migrated": node2Name, util.OvnNodeID: "2", util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), util.OvnNodeL3GatewayConfig: fmt.Sprintf(`{"%s":{"mode":"local","mac-address":"7e:57:f8:f0:3c:49", "ip-address":"%s", "next-hop":"%s", "next-hops": ["%s"]}, @@ -2607,7 +2597,6 @@ var _ = ginkgo.Describe("EgressIP Operations for user 
defined network with topol "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node1Tsp), "k8s.ovn.org/zone-name": node1Name, "k8s.ovn.org/node-chassis-id": "473ca66d-d800-472f-b289-1ab81ae7f21c", - "k8s.ovn.org/remote-zone-migrated": node1Name, util.OvnNodeID: "1", util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), util.OvnNodeL3GatewayConfig: fmt.Sprintf(`{"%s":{"mode":"local","mac-address":"7e:57:f8:f0:3c:49", "ip-address":"%s", "next-hop":"%s", "next-hops": ["%s"]}, @@ -2623,7 +2612,6 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node2Tsp), "k8s.ovn.org/zone-name": node2Name, "k8s.ovn.org/node-chassis-id": "473ca66d-d800-472f-b289-1ab81ae7f21c", - "k8s.ovn.org/remote-zone-migrated": node2Name, util.OvnNodeID: "2", util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), util.OvnNodeL3GatewayConfig: fmt.Sprintf(`{"%s":{"mode":"local","mac-address":"7e:57:f8:f0:3c:49", "ip-address":"%s", "next-hop":"%s", "next-hops": ["%s"]}, diff --git a/go-controller/pkg/ovn/egressip_udn_l3_test.go b/go-controller/pkg/ovn/egressip_udn_l3_test.go index c5698b8d08..2b23fc88d0 100644 --- a/go-controller/pkg/ovn/egressip_udn_l3_test.go +++ b/go-controller/pkg/ovn/egressip_udn_l3_test.go @@ -543,7 +543,6 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node1Subnet, networkName1, v4Node1Net1), "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node1Tsp), "k8s.ovn.org/zone-name": node1Name, - "k8s.ovn.org/remote-zone-migrated": node1Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), util.OvnNodeID: "2", } @@ -556,7 +555,6 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node2Subnet, networkName1, v4Node2Net1), "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node2Tsp), "k8s.ovn.org/zone-name": node2Name, - "k8s.ovn.org/remote-zone-migrated": node2Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), util.OvnNodeID: "3", } @@ -1064,7 +1062,6 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node1Subnet, networkName1, v4Node1Net1), "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node1Tsp), "k8s.ovn.org/zone-name": node1Name, - "k8s.ovn.org/remote-zone-migrated": node1Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), util.OvnNodeID: "2", } @@ -1077,7 +1074,6 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node2Subnet, networkName1, v4Node2Net1), "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node2Tsp), "k8s.ovn.org/zone-name": node2Name, - "k8s.ovn.org/remote-zone-migrated": node2Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), util.OvnNodeID: "3", } @@ -1800,7 +1796,6 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node1Subnet, networkName1, v4Node1Net1), 
"k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node1Tsp), "k8s.ovn.org/zone-name": node1Name, - "k8s.ovn.org/remote-zone-migrated": node1Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), util.OvnNodeID: "2", } @@ -1813,7 +1808,6 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node2Subnet, networkName1, v4Node2Net1), "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node2Tsp), "k8s.ovn.org/zone-name": node2Name, - "k8s.ovn.org/remote-zone-migrated": node2Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), util.OvnNodeID: "3", } @@ -2170,7 +2164,6 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node1Subnet, networkName1, v4Node1Net1), "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node1Tsp), "k8s.ovn.org/zone-name": node1Name, - "k8s.ovn.org/remote-zone-migrated": node1Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), util.OvnNodeID: "2", } @@ -2183,7 +2176,6 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node2Subnet, networkName1, v4Node2Net1), "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node2Tsp), "k8s.ovn.org/zone-name": node2Name, - "k8s.ovn.org/remote-zone-migrated": node2Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), util.OvnNodeID: "3", } @@ -2531,7 +2523,6 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node1Subnet, networkName1, v4Node1Net1), "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node1Tsp), "k8s.ovn.org/zone-name": node1Name, - "k8s.ovn.org/remote-zone-migrated": node1Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), util.OvnNodeID: "2", } @@ -2544,7 +2535,6 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node2Subnet, networkName1, v4Node2Net1), "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node2Tsp), "k8s.ovn.org/zone-name": node2Name, - "k8s.ovn.org/remote-zone-migrated": node2Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), util.OvnNodeID: "3", } diff --git a/go-controller/pkg/ovn/multipolicy_test.go b/go-controller/pkg/ovn/multipolicy_test.go index d36206b2f2..64df9d5a04 100644 --- a/go-controller/pkg/ovn/multipolicy_test.go +++ b/go-controller/pkg/ovn/multipolicy_test.go @@ -571,7 +571,6 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { if remote { config.OVNKubernetesFeature.EnableInterconnect = true node.Annotations["k8s.ovn.org/zone-name"] = "remote" - node.Annotations["k8s.ovn.org/remote-zone-migrated"] = "remote" node.Annotations, err = util.UpdateNetworkIDAnnotation(node.Annotations, ovntypes.DefaultNetworkName, 0) gomega.Expect(err).NotTo(gomega.HaveOccurred()) if topology != ovntypes.LocalnetTopology { diff --git a/go-controller/pkg/ovnwebhook/nodeadmission.go b/go-controller/pkg/ovnwebhook/nodeadmission.go index 
15b98db2fc..4312131c49 100644 --- a/go-controller/pkg/ovnwebhook/nodeadmission.go +++ b/go-controller/pkg/ovnwebhook/nodeadmission.go @@ -57,18 +57,6 @@ var commonNodeAnnotationChecks = map[string]checkNodeAnnot{ util.OVNNodeEncapIPs: nil, } -// interconnectNodeAnnotationChecks holds annotations allowed for ovnkube-node: users in IC environments -var interconnectNodeAnnotationChecks = map[string]checkNodeAnnot{ - util.OvnNodeMigratedZoneName: func(v annotationChange, nodeName string) error { - // it is allowed for the annotation to be set to - if (v.action == added || v.action == changed) && v.value == nodeName { - return nil - } - - return fmt.Errorf("%s can only be set to %s, it cannot be removed", util.OvnNodeMigratedZoneName, nodeName) - }, -} - // hybridOverlayNodeAnnotationChecks holds annotations allowed for ovnkube-node: users hybrid overlay environments var hybridOverlayNodeAnnotationChecks = map[string]checkNodeAnnot{ hotypes.HybridOverlayDRMAC: nil, @@ -81,12 +69,9 @@ type NodeAdmission struct { extraAllowedUsers sets.Set[string] } -func NewNodeAdmissionWebhook(enableInterconnect, enableHybridOverlay bool, extraAllowedUsers ...string) *NodeAdmission { +func NewNodeAdmissionWebhook(enableHybridOverlay bool, extraAllowedUsers ...string) *NodeAdmission { checks := make(map[string]checkNodeAnnot) maps.Copy(checks, commonNodeAnnotationChecks) - if enableInterconnect { - maps.Copy(checks, interconnectNodeAnnotationChecks) - } if enableHybridOverlay { maps.Copy(checks, hybridOverlayNodeAnnotationChecks) } diff --git a/go-controller/pkg/ovnwebhook/nodeadmission_test.go b/go-controller/pkg/ovnwebhook/nodeadmission_test.go index 53975940b3..3f9c227d0b 100644 --- a/go-controller/pkg/ovnwebhook/nodeadmission_test.go +++ b/go-controller/pkg/ovnwebhook/nodeadmission_test.go @@ -21,28 +21,19 @@ import ( ) func TestNewNodeAdmissionWebhook(t *testing.T) { - icAnnotations := make(map[string]checkNodeAnnot) - maps.Copy(icAnnotations, commonNodeAnnotationChecks) - maps.Copy(icAnnotations, interconnectNodeAnnotationChecks) hoAnnotations := make(map[string]checkNodeAnnot) maps.Copy(hoAnnotations, commonNodeAnnotationChecks) maps.Copy(hoAnnotations, hybridOverlayNodeAnnotationChecks) tests := []struct { name string - enableInterconnect bool enableHybridOverlay bool expectedKeys []string }{ { - name: "should only contain common annotation in non-IC", + name: "should only contain common annotations", expectedKeys: maps.Keys(commonNodeAnnotationChecks), }, - { - name: "should contain common and IC annotations in IC", - enableInterconnect: true, - expectedKeys: maps.Keys(icAnnotations), - }, { name: "should contain common and hybrid overlay annotations in hybrid overlay ", enableHybridOverlay: true, @@ -51,7 +42,7 @@ func TestNewNodeAdmissionWebhook(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := NewNodeAdmissionWebhook(tt.enableInterconnect, tt.enableHybridOverlay); !got.annotationKeys.HasAll(tt.expectedKeys...) { + if got := NewNodeAdmissionWebhook(tt.enableHybridOverlay); !got.annotationKeys.HasAll(tt.expectedKeys...) 
{ t.Errorf("NewNodeAdmissionWebhook() = %v, want %v", got.annotationKeys, tt.expectedKeys) } }) @@ -64,7 +55,7 @@ var additionalNamePrefix = "system:foobar" var additionalUserName = fmt.Sprintf("%s:%s", additionalNamePrefix, nodeName) func TestNodeAdmission_ValidateUpdate(t *testing.T) { - adm := NewNodeAdmissionWebhook(false, false) + adm := NewNodeAdmissionWebhook(false) tests := []struct { name string ctx context.Context @@ -403,49 +394,8 @@ func TestNodeAdmission_ValidateUpdate(t *testing.T) { }) } } -func TestNodeAdmission_ValidateUpdateIC(t *testing.T) { - adm := NewNodeAdmissionWebhook(true, false) - tests := []struct { - name string - ctx context.Context - oldObj runtime.Object - newObj runtime.Object - expectedErr error - }{ - { - name: "ovnkube-node cannot set util.OvnNodeMigratedZoneName to anything else than ", - ctx: admission.NewContextWithRequest(context.TODO(), admission.Request{ - AdmissionRequest: v1.AdmissionRequest{UserInfo: authenticationv1.UserInfo{ - Username: userName, - }}, - }), - oldObj: &corev1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: nodeName, - }, - }, - newObj: &corev1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: nodeName, - Annotations: map[string]string{util.OvnNodeMigratedZoneName: "global"}, - }, - }, - expectedErr: fmt.Errorf("user: %q is not allowed to set %s on node %q: %s can only be set to %s, it cannot be removed", userName, util.OvnNodeMigratedZoneName, nodeName, util.OvnNodeMigratedZoneName, nodeName), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, err := adm.ValidateUpdate(tt.ctx, tt.oldObj, tt.newObj) - if err != tt.expectedErr && err.Error() != tt.expectedErr.Error() { - t.Errorf("ValidateUpdateIC() error = %v, wantErr %v", err, tt.expectedErr) - return - } - }) - } -} - func TestNodeAdmission_ValidateUpdateHybridOverlay(t *testing.T) { - adm := NewNodeAdmissionWebhook(false, true) + adm := NewNodeAdmissionWebhook(true) tests := []struct { name string ctx context.Context @@ -502,64 +452,3 @@ func TestNodeAdmission_ValidateUpdateHybridOverlay(t *testing.T) { }) } } - -func TestNodeAdmission_ValidateUpdateExtraUsers(t *testing.T) { - extraUser := "system:serviceaccount:ovnkube-cluster-manager" - adm := NewNodeAdmissionWebhook(true, false, extraUser) - tests := []struct { - name string - ctx context.Context - oldObj runtime.Object - newObj runtime.Object - expectedErr error - }{ - { - name: "extra user cannot set util.OvnNodeMigratedZoneName to anything else than ", - ctx: admission.NewContextWithRequest(context.TODO(), admission.Request{ - AdmissionRequest: v1.AdmissionRequest{UserInfo: authenticationv1.UserInfo{ - Username: extraUser, - }}, - }), - oldObj: &corev1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: nodeName, - }, - }, - newObj: &corev1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: nodeName, - Annotations: map[string]string{util.OvnNodeMigratedZoneName: "global"}, - }, - }, - expectedErr: fmt.Errorf("user: %q is not allowed to set %s on node %q: %s can only be set to %s, it cannot be removed", extraUser, util.OvnNodeMigratedZoneName, nodeName, util.OvnNodeMigratedZoneName, nodeName), - }, - { - name: "extra user can set util.OvnNodeMigratedZoneName to ", - ctx: admission.NewContextWithRequest(context.TODO(), admission.Request{ - AdmissionRequest: v1.AdmissionRequest{UserInfo: authenticationv1.UserInfo{ - Username: extraUser, - }}, - }), - oldObj: &corev1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: nodeName, - }, - }, - newObj: &corev1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: 
nodeName, - Annotations: map[string]string{util.OvnNodeMigratedZoneName: nodeName}, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, err := adm.ValidateUpdate(tt.ctx, tt.oldObj, tt.newObj) - if err != tt.expectedErr && err.Error() != tt.expectedErr.Error() { - t.Errorf("ValidateUpdateIC() error = %v, wantErr %v", err, tt.expectedErr) - return - } - }) - } -} diff --git a/go-controller/pkg/util/node_annotations.go b/go-controller/pkg/util/node_annotations.go index 5e77a26acf..890019f78b 100644 --- a/go-controller/pkg/util/node_annotations.go +++ b/go-controller/pkg/util/node_annotations.go @@ -116,22 +116,6 @@ const ( // ovnkube-node gets the node's zone from the OVN Southbound database. OvnNodeZoneName = "k8s.ovn.org/zone-name" - /** HACK BEGIN **/ - // TODO(tssurya): Remove this annotation a few months from now (when one or two release jump - // upgrades are done). This has been added only to minimize disruption for upgrades when - // moving to interconnect=true. - // We want the legacy ovnkube-master to wait for remote ovnkube-node to - // signal it using "k8s.ovn.org/remote-zone-migrated" annotation before - // considering a node as remote when we upgrade from "global" (1 zone IC) - // zone to multi-zone. This is so that network disruption for the existing workloads - // is negligible and until the point where ovnkube-node flips the switch to connect - // to the new SBDB, it would continue talking to the legacy RAFT ovnkube-sbdb to ensure - // OVN/OVS flows are intact. - // OvnNodeMigratedZoneName is the zone to which the node belongs to. It is set by ovnkube-node. - // ovnkube-node gets the node's zone from the OVN Southbound database. - OvnNodeMigratedZoneName = "k8s.ovn.org/remote-zone-migrated" - /** HACK END **/ - // OvnTransitSwitchPortAddr is the annotation to store the node Transit switch port ips. // It is set by cluster manager. OvnTransitSwitchPortAddr = "k8s.ovn.org/node-transit-switch-port-ifaddr" @@ -1105,26 +1089,6 @@ func SetNodeZone(nodeAnnotator kube.Annotator, zoneName string) error { return nodeAnnotator.Set(OvnNodeZoneName, zoneName) } -/** HACK BEGIN **/ -// TODO(tssurya): Remove this a few months from now -// SetNodeZoneMigrated sets the node's zone in the 'ovnNodeMigratedZoneName' node annotation. -func SetNodeZoneMigrated(nodeAnnotator kube.Annotator, zoneName string) error { - return nodeAnnotator.Set(OvnNodeMigratedZoneName, zoneName) -} - -// HasNodeMigratedZone returns true if node has its ovnNodeMigratedZoneName set already -func HasNodeMigratedZone(node *corev1.Node) bool { - _, ok := node.Annotations[OvnNodeMigratedZoneName] - return ok -} - -// NodeMigratedZoneAnnotationChanged returns true if the ovnNodeMigratedZoneName annotation changed for the node -func NodeMigratedZoneAnnotationChanged(oldNode, newNode *corev1.Node) bool { - return oldNode.Annotations[OvnNodeMigratedZoneName] != newNode.Annotations[OvnNodeMigratedZoneName] -} - -/** HACK END **/ - // GetNodeZone returns the zone of the node set in the 'ovnNodeZoneName' node annotation. // If the annotation is not set, it returns the 'default' zone name. 
func GetNodeZone(node *corev1.Node) string { From 21444cc390404e8bc89ae9f5ae4158fcfe98afc6 Mon Sep 17 00:00:00 2001 From: Riccardo Ravaioli Date: Wed, 28 Jan 2026 17:07:02 +0100 Subject: [PATCH 2/3] Fix IC cluster cleanup tests zone configuration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The layer2 UDN cleanup tests for IC clusters were failing because of a zone mismatch between the controller and the test node: - Controller zone: read from NBGlobal.Name ("global") - Node zone: set via annotation ("test" when IC enabled) This mismatch was previously masked in two spots: 1. The HACK in isLocalZoneNode() (removed by commit 7d408c1ca): When the controller's zone was "global" (the default), the HACK bypassed the zone comparison entirely and instead checked whether the node had a migration annotation. Since the test node had no migration annotation, it was treated as local despite the zone mismatch. 2. Unconditional gateway cleanup in deleteNodeEvent (changed by commit 8725a93dc to only cleanup nodes tracked in localZoneNodes) With both items above removed/changed, the test correctly fails because the node is treated as remote (zones don't match), so it's not added to localZoneNodes, and cleanup is skipped. Fix the test by: - using setupConfig() to set config.Default.Zone to testICZone when IC is enabled - setting NBGlobal.Name to config.Default.Zone (which setupConfig() already configured correctly) This ensures the controller and node are in the same zone, so the node is correctly treated as local and its gateway entities are cleaned up. 🤖 Assisted by [Claude Code](https://claude.com/claude-code) Signed-off-by: Riccardo Ravaioli (cherry picked from commit acb088cc3d82adcd80bcd2d5a4c1209c5c44871b) (cherry picked from commit e950ad5cdca6258b927daf5270423d3d775d4715) (cherry picked from commit c8ada1f6f993e9fc0ff04b1883c6189c2085d2d1) (cherry picked from commit d1bc30cbc549118a86f39fa78b4c3989ddc9563c) --- .../layer2_user_defined_network_controller_test.go | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/go-controller/pkg/ovn/layer2_user_defined_network_controller_test.go b/go-controller/pkg/ovn/layer2_user_defined_network_controller_test.go index 352ef1497f..ba06284c1d 100644 --- a/go-controller/pkg/ovn/layer2_user_defined_network_controller_test.go +++ b/go-controller/pkg/ovn/layer2_user_defined_network_controller_test.go @@ -348,13 +348,8 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 2 network", func() { "user-defined network controller DB entities are properly cleaned up", func(netInfo userDefinedNetInfo, testConfig testConfiguration) { podInfo := dummyTestPod(ns, netInfo) - if testConfig.configToOverride != nil { - config.OVNKubernetesFeature = *testConfig.configToOverride - if testConfig.gatewayConfig != nil { - config.Gateway.DisableSNATMultipleGWs = testConfig.gatewayConfig.DisableSNATMultipleGWs - } - config.OVNKubernetesFeature.EnableMultiNetwork = true - } + setupConfig(netInfo, testConfig, config.GatewayModeShared) + config.OVNKubernetesFeature.EnableMultiNetwork = true app.Action = func(*cli.Context) error { netConf := netInfo.netconf() networkConfig, err := util.NewNetInfo(netConf) @@ -379,7 +374,7 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 2 network", func() { gwConfig, err := util.ParseNodeL3GatewayAnnotation(testNode) Expect(err).NotTo(HaveOccurred()) Expect(gwConfig.NextHops).NotTo(BeEmpty()) - nbZone := &nbdb.NBGlobal{Name: ovntypes.OvnDefaultZone, UUID: 
ovntypes.OvnDefaultZone} + nbZone := &nbdb.NBGlobal{Name: config.Default.Zone, UUID: config.Default.Zone} n := newNamespace(ns) if netInfo.isPrimary { From 6a945d5a5a6040600c9e3567381fe5bd1a2d484b Mon Sep 17 00:00:00 2001 From: Aswin Suryanarayanan Date: Thu, 11 Dec 2025 13:50:33 -0500 Subject: [PATCH 3/3] nodeallocator: fix subnet leak when hybrid overlay is enabled When the hybrid overlay feature is enabled (specifically when hybrid overlay cluster subnets are configured), the HandleDeleteNode function would return early after releasing the hybrid overlay subnet. This caused the regular cluster subnets allocated to the node to never be released, leading to a subnet leak that eventually exhausts the cluster CIDR pool. This commit fixes the issue by removing the early return, ensuring that both the hybrid overlay subnets and the standard node subnets are properly released upon node deletion. A new test case TestNodeAllocator_HandleDeleteNode is added to verify that both types of subnets are correctly released. Signed-off-by: Aswin Suryanarayanan (cherry picked from commit c44cbbfbfc1581081387a11d9f3fd17206cdf14d) (cherry picked from commit 782634450ca5ec785c404dd11cb27a3b5391f834) (cherry picked from commit 26f2508437b0667a8b9f724a7fda885b2098c44a) (cherry picked from commit 28f5871b0d0d912f4cad5a7242e8c5ee8ed9c296) --- .../pkg/clustermanager/node/node_allocator.go | 1 - .../node/node_allocator_test.go | 108 +++++++++++++++++- 2 files changed, 105 insertions(+), 4 deletions(-) diff --git a/go-controller/pkg/clustermanager/node/node_allocator.go b/go-controller/pkg/clustermanager/node/node_allocator.go index e31625b725..5f1ba6ccc5 100644 --- a/go-controller/pkg/clustermanager/node/node_allocator.go +++ b/go-controller/pkg/clustermanager/node/node_allocator.go @@ -343,7 +343,6 @@ func (na *NodeAllocator) syncNodeNetworkAnnotations(node *corev1.Node) error { func (na *NodeAllocator) HandleDeleteNode(node *corev1.Node) error { if na.hasHybridOverlayAllocation() { na.releaseHybridOverlayNodeSubnet(node.Name) - return nil } if na.hasNodeSubnetAllocation() || na.hasHybridOverlayAllocationUnmanaged() { diff --git a/go-controller/pkg/clustermanager/node/node_allocator_test.go b/go-controller/pkg/clustermanager/node/node_allocator_test.go index 37fee60d64..acdbc137bb 100644 --- a/go-controller/pkg/clustermanager/node/node_allocator_test.go +++ b/go-controller/pkg/clustermanager/node/node_allocator_test.go @@ -12,7 +12,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/listers/core/v1" + listersv1 "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" @@ -400,12 +400,12 @@ func TestController_allocateNodeSubnets_ReleaseOnError(t *testing.T) { } } -func newFakeNodeLister(nodes []*corev1.Node) v1.NodeLister { +func newFakeNodeLister(nodes []*corev1.Node) listersv1.NodeLister { indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) for _, node := range nodes { _ = indexer.Add(node) } - return v1.NewNodeLister(indexer) + return listersv1.NewNodeLister(indexer) } func TestController_CleanupStaleAnnotation(t *testing.T) { @@ -448,3 +448,105 @@ func TestController_CleanupStaleAnnotation(t *testing.T) { t.Fatalf("Expected annotation %s to be cleaned up, got %v", util.OVNNodeGRLRPAddrs, nodes.Items[0].Annotations) } } + +// TestNodeAllocator_HandleDeleteNode verifies that HandleDeleteNode correctly releases +// 
both standard cluster subnets and hybrid overlay subnets (if enabled) when a node is deleted. +func TestNodeAllocator_HandleDeleteNode(t *testing.T) { + origHybridEnabled := config.HybridOverlay.Enabled + origHybridSubnets := config.HybridOverlay.ClusterSubnets + origClusterSubnets := config.Default.ClusterSubnets + origNoHostSubnetNodes := config.Kubernetes.NoHostSubnetNodes + t.Cleanup(func() { + config.HybridOverlay.Enabled = origHybridEnabled + config.HybridOverlay.ClusterSubnets = origHybridSubnets + config.Default.ClusterSubnets = origClusterSubnets + config.Kubernetes.NoHostSubnetNodes = origNoHostSubnetNodes + }) + + config.HybridOverlay.Enabled = true + config.HybridOverlay.ClusterSubnets = []config.CIDRNetworkEntry{ + {CIDR: ovntest.MustParseIPNet("10.0.0.0/16"), HostSubnetLength: 24}, + } + + ranges, err := rangesFromStrings([]string{"172.16.0.0/16"}, []int{24}) + if err != nil { + t.Fatal(err) + } + config.Default.ClusterSubnets = ranges + + netInfo, err := util.NewNetInfo( + &ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{Name: types.DefaultNetworkName}, + }, + ) + if err != nil { + t.Fatal(err) + } + + na := &NodeAllocator{ + netInfo: netInfo, + clusterSubnetAllocator: NewSubnetAllocator(), + nodeLister: newFakeNodeLister([]*corev1.Node{}), + } + if na.hasHybridOverlayAllocation() { + na.hybridOverlaySubnetAllocator = NewSubnetAllocator() + } + + if !na.hasHybridOverlayAllocation() { + t.Fatal("Hybrid overlay allocation should be enabled given the test configuration") + } + + if err := na.Init(); err != nil { + t.Fatalf("Failed to initialize node allocator: %v", err) + } + + nodeName := "node-delete-test" + if !na.hasNodeSubnetAllocation() { + t.Fatal("Node subnet allocation should be enabled") + } + + allocated, _, err := na.allocateNodeSubnets(na.clusterSubnetAllocator, nodeName, nil, true, false) + if err != nil { + t.Fatalf("Failed to allocate subnet: %v", err) + } + if len(allocated) == 0 { + t.Fatal("No subnet allocated") + } + + v4used, _ := na.clusterSubnetAllocator.Usage() + if v4used != 1 { + t.Fatalf("Expected 1 allocated subnet, got %d", v4used) + } + + if na.hasHybridOverlayAllocation() { + if _, _, err := na.allocateNodeSubnets(na.hybridOverlaySubnetAllocator, nodeName, nil, true, false); err != nil { + t.Fatalf("Failed to allocate hybrid overlay subnet: %v", err) + } + hoUsed, _ := na.hybridOverlaySubnetAllocator.Usage() + if hoUsed != 1 { + t.Fatalf("Expected 1 allocated hybrid overlay subnet, got %d", hoUsed) + } + } + + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + }, + } + + if err := na.HandleDeleteNode(node); err != nil { + t.Fatalf("HandleDeleteNode failed: %v", err) + } + + v4usedAfter, _ := na.clusterSubnetAllocator.Usage() + if v4usedAfter != 0 { + t.Errorf("Subnet leak detected! Expected 0 allocated subnets, got %d", v4usedAfter) + } + + if na.hasHybridOverlayAllocation() { + hoUsedAfter, _ := na.hybridOverlaySubnetAllocator.Usage() + if hoUsedAfter != 0 { + t.Errorf("Hybrid overlay subnet leak detected! Expected 0 allocated subnets, got %d", hoUsedAfter) + } + } +}
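
To make the behavioral change in PATCH 3/3 concrete, here is a minimal standalone Go sketch of the early-return bug and its fix. The `allocator` type and map-based "subnet" bookkeeping are simplified stand-ins, not the real `NodeAllocator` API; only the control-flow structure (the removed `return nil` after the hybrid overlay release) mirrors the actual diff.

```go
// Minimal standalone sketch of the control-flow bug fixed in PATCH 3/3.
// The types and method bodies are simplified stand-ins for NodeAllocator;
// only the early-return structure mirrors the real change.
package main

import "fmt"

type allocator struct {
	hybridOverlayEnabled bool
	hybridOverlaySubnets map[string]bool // node name -> allocated
	clusterSubnets       map[string]bool // node name -> allocated
}

// handleDeleteNodeBuggy models the old behavior: when hybrid overlay is
// enabled, the early "return nil" skips the cluster-subnet release below,
// leaking one cluster subnet per deleted node.
func (a *allocator) handleDeleteNodeBuggy(node string) error {
	if a.hybridOverlayEnabled {
		delete(a.hybridOverlaySubnets, node)
		return nil // BUG: cluster subnets are never released
	}
	delete(a.clusterSubnets, node)
	return nil
}

// handleDeleteNodeFixed models the patched behavior: both release paths run.
func (a *allocator) handleDeleteNodeFixed(node string) error {
	if a.hybridOverlayEnabled {
		delete(a.hybridOverlaySubnets, node)
	}
	delete(a.clusterSubnets, node)
	return nil
}

func main() {
	a := &allocator{
		hybridOverlayEnabled: true,
		hybridOverlaySubnets: map[string]bool{"node1": true},
		clusterSubnets:       map[string]bool{"node1": true},
	}
	_ = a.handleDeleteNodeBuggy("node1")
	fmt.Println("after buggy delete, cluster subnets still held:", len(a.clusterSubnets)) // 1 (leak)
	a.clusterSubnets["node1"] = true // reset for the fixed path
	_ = a.handleDeleteNodeFixed("node1")
	fmt.Println("after fixed delete, cluster subnets still held:", len(a.clusterSubnets)) // 0
}
```

This is why the new `TestNodeAllocator_HandleDeleteNode` asserts `Usage()` returns zero for both allocators after deletion: with the early return in place, only the hybrid overlay count would drop.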
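
Similarly, the zone logic that PATCH 1/3 simplifies and PATCH 2/3 realigns in the tests can be sketched in isolation. This is a hedged illustration with stand-in types, assuming only what the diffs show: `util.GetNodeZone` falls back to the default ("global") zone when the `k8s.ovn.org/zone-name` annotation is absent, and the post-HACK `isLocalZoneNode()` is a plain zone equality check with no `remote-zone-migrated` special case.

```go
// Standalone sketch of the zone comparison that PATCH 1/3 leaves as the only
// criterion in isLocalZoneNode(), and that PATCH 2/3 aligns in the tests.
// node and the helpers are simplified stand-ins for the real util package.
package main

import "fmt"

const (
	zoneAnnotation = "k8s.ovn.org/zone-name"
	defaultZone    = "global" // nodes without the annotation fall back to this
)

type node struct {
	name        string
	annotations map[string]string
}

// getNodeZone mirrors util.GetNodeZone: annotation value if set, else default.
func getNodeZone(n *node) string {
	if z, ok := n.annotations[zoneAnnotation]; ok {
		return z
	}
	return defaultZone
}

// isLocalZoneNode is the post-HACK form: a plain zone equality check, with no
// special-casing of the default zone via the removed migration annotation.
func isLocalZoneNode(controllerZone string, n *node) bool {
	return getNodeZone(n) == controllerZone
}

func main() {
	controllerZone := "test" // e.g. what PATCH 2/3 sets via config.Default.Zone
	local := &node{name: "node1", annotations: map[string]string{zoneAnnotation: "test"}}
	remote := &node{name: "node2", annotations: map[string]string{zoneAnnotation: "remote"}}
	fmt.Println(isLocalZoneNode(controllerZone, local))  // true: node is local, gets cleanup
	fmt.Println(isLocalZoneNode(controllerZone, remote)) // false: treated as remote
}
```

Under this check, the layer2 cleanup test only passes when the controller zone (read from `NBGlobal.Name`) and the node's zone annotation agree, which is exactly what PATCH 2/3 arranges via `setupConfig()`.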