From f559df117041c2df701113c37deb03cf37509753 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Thu, 17 Apr 2025 12:50:54 +0200 Subject: [PATCH 001/181] [docs] update ovn-observability with some more details Signed-off-by: Nadia Pinaeva --- docs/observability/ovn-observability.md | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/docs/observability/ovn-observability.md b/docs/observability/ovn-observability.md index 0507b9e725..d9e735a1b4 100644 --- a/docs/observability/ovn-observability.md +++ b/docs/observability/ovn-observability.md @@ -68,9 +68,11 @@ No API changes were done. ### OVN sampling details OVN has 3 main db tables that are used for sampling: -- `Sample_collector`: This table is used to define the sampling collector. It defines the sampling rate and collectorID, -which is used to set up collectors in the OVS. +- `Sample_collector`: This table is used to define the sampling collector. It defines the sampling rate via `Probability` field +and collectorID via `SetID` field, which is used to set up collectors in the OVS. - `Sampling_app`: This table is used to set `ID`s for existing OVN sampling applications, that are sent together with the samples. +There is a supported set of `Sampling_app` types, for example `acl-new` app is used to sample new connections matched by an ACL. +`Sampling_app.ID` is a way to identify the application that generated the sample. - `Sample`: This table is used to define required samples and point to the collectors. Every sample has `Metadata` that is sent together with the sample. @@ -84,15 +86,21 @@ that is decoded by `go-controller/observability-lib`. When one of the supported objects (for example, network policy) is created, ovn-kuberentes generates an nbdb `Sample` for it. To decode the samples into human-readable information, `go-controller/observability-lib` is used. 
It finds `Sample` -by the attached `Sample.Metadata` and then gets corresponding db object based on `Sampling_add.ID` and `Sample.UUID`. -The message is then constructed using db object `external_ids`. - -### Full stack architecture +by the attached `Sample.Metadata` and then gets corresponding db object (e.g. ACL) based on `Sampling_app.ID` and `Sample.UUID`. +The message is then constructed using db object (e.g. ACL) `external_ids`. ![ovnkube-observ](../images/ovnkube-observ.png) The diagram shows how all involved components (kernel, OVS, OVN, ovn-kubernetes) are connected. +#### Enabling collectors + +Currently, we have only 1 default collector with hard-coded ID, which is set via the `Sample_collector.SetID` field. +To make OVS start sending samples for an existing `Sample_collector`, a new OVSDB `Flow_Sample_Collector_Set` entry +needs to be created with `Flow_Sample_Collector_Set.ID` value of `Sample_collector.SetID`. +This is done by the `go-controller/observability-lib` and it is important to note that only one `Flow_Sample_Collector_Set` +should be created for a given `Sample_collector.SetID` value at a time. But if such entry already exists, it can be reused. + ## Best Practices TBD @@ -126,6 +134,9 @@ This applies to in both cases ANP will have only first-packet sample. +Use caution when running the `ovnkube-observe` tool. Currently it has poor resource management and consumes a lot of +CPU when many packets are sent. Tracked here https://github.com/ovn-kubernetes/ovn-kubernetes/issues/5203 + ## References NONE From 3d104e66ddc897df115ce57a68aa240bfd03c53e Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Tue, 20 May 2025 15:55:08 -0400 Subject: [PATCH 002/181] Retry all pods smarter Related to investigating the root cause for: #5260. This commit removes adding pods that are not scheduled to the retry framework. When the pod is scheduled the controller will receive an event. 
Additionally these functions that add pods were using the kubeclient instead of informer cache. That means everytime a UDN was added we would issue kubeclient command to get all pods, which is really bad for performance. Signed-off-by: Tim Rozet --- .../network_cluster_controller.go | 2 +- .../pkg/ovn/base_network_controller.go | 13 ++++--- go-controller/pkg/retry/obj_retry.go | 36 ++++++++----------- 3 files changed, 22 insertions(+), 29 deletions(-) diff --git a/go-controller/pkg/clustermanager/network_cluster_controller.go b/go-controller/pkg/clustermanager/network_cluster_controller.go index fde745ac00..9d9abb77d3 100644 --- a/go-controller/pkg/clustermanager/network_cluster_controller.go +++ b/go-controller/pkg/clustermanager/network_cluster_controller.go @@ -475,7 +475,7 @@ func (ncc *networkClusterController) Reconcile(netInfo util.NetInfo) error { klog.Errorf("Failed to reconcile network %s: %v", ncc.GetNetworkName(), err) } if reconcilePendingPods && ncc.retryPods != nil { - if err := objretry.RequeuePendingPods(ncc.kube, ncc.GetNetInfo(), ncc.retryPods); err != nil { + if err := objretry.RequeuePendingPods(ncc.watchFactory, ncc.GetNetInfo(), ncc.retryPods); err != nil { klog.Errorf("Failed to requeue pending pods for network %s: %v", ncc.GetNetworkName(), err) } } diff --git a/go-controller/pkg/ovn/base_network_controller.go b/go-controller/pkg/ovn/base_network_controller.go index 1a3f8685e4..ae38418d70 100644 --- a/go-controller/pkg/ovn/base_network_controller.go +++ b/go-controller/pkg/ovn/base_network_controller.go @@ -14,8 +14,6 @@ import ( corev1 "k8s.io/api/core/v1" knet "k8s.io/api/networking/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" @@ -241,7 +239,7 @@ func (oc *BaseNetworkController) reconcile(netInfo util.NetInfo, setNodeFailed f } if 
reconcilePendingPods { - if err := ovnretry.RequeuePendingPods(oc.kube, oc.GetNetInfo(), oc.retryPods); err != nil { + if err := ovnretry.RequeuePendingPods(oc.watchFactory, oc.GetNetInfo(), oc.retryPods); err != nil { klog.Errorf("Failed to requeue pending pods for network %s: %v", oc.GetNetworkName(), err) } } @@ -579,12 +577,10 @@ func (bnc *BaseNetworkController) deleteNodeLogicalNetwork(nodeName string) erro func (bnc *BaseNetworkController) addAllPodsOnNode(nodeName string) []error { errs := []error{} - pods, err := bnc.kube.GetPods(metav1.NamespaceAll, metav1.ListOptions{ - FieldSelector: fields.OneTermEqualSelector("spec.nodeName", nodeName).String(), - }) + pods, err := bnc.watchFactory.GetAllPods() if err != nil { errs = append(errs, err) - klog.Errorf("Unable to list existing pods on node: %s, existing pods on this node may not function", + klog.Errorf("Unable to list existing pods for synchronizing node: %s, existing pods on this node may not function", nodeName) } else { klog.V(5).Infof("When adding node %s for network %s, found %d pods to add to retryPods", nodeName, bnc.GetNetworkName(), len(pods)) @@ -593,6 +589,9 @@ func (bnc *BaseNetworkController) addAllPodsOnNode(nodeName string) []error { if util.PodCompleted(&pod) { continue } + if pod.Spec.NodeName != nodeName { + continue + } klog.V(5).Infof("Adding pod %s/%s to retryPods for network %s", pod.Namespace, pod.Name, bnc.GetNetworkName()) err = bnc.retryPods.AddRetryObjWithAddNoBackoff(&pod) if err != nil { diff --git a/go-controller/pkg/retry/obj_retry.go b/go-controller/pkg/retry/obj_retry.go index 5f9dfffb16..b6dd8ffa5e 100644 --- a/go-controller/pkg/retry/obj_retry.go +++ b/go-controller/pkg/retry/obj_retry.go @@ -9,14 +9,11 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/syncmap" ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" @@ -771,38 +768,35 @@ func (r *RetryFramework) WatchResourceFiltered(namespaceForFilteredHandler strin return handler, nil } -// getPendingPods returns all pods that are in the Pending state -func getPendingPods(kubeClient kube.InterfaceOVN) ([]*corev1.Pod, error) { - var allPods []*corev1.Pod - - pods, err := kubeClient.GetPods(corev1.NamespaceAll, metav1.ListOptions{ - FieldSelector: fields.OneTermEqualSelector("status.phase", string(corev1.PodPending)).String(), - }) - if err != nil { - return nil, err - } - allPods = append(allPods, pods...) - return allPods, nil -} - // RequeuePendingPods enqueues all Pending pods into the retryPods associated with netInfo. -func RequeuePendingPods(kubeClient kube.InterfaceOVN, netInfo util.NetInfo, retryPods *RetryFramework) error { +func RequeuePendingPods(wf *factory.WatchFactory, netInfo util.NetInfo, retryPods *RetryFramework) error { var errs []error // NOTE: A pod may reference a NAD from a different namespace, so check all pending pods. 
- allPods, err := getPendingPods(kubeClient) + allPods, err := wf.GetAllPods() if err != nil { - return err + return fmt.Errorf("failed to get all pods: %w", err) } + podsAdded := false for _, pod := range allPods { pod := *pod + if !util.PodScheduled(&pod) { + continue + } + if pod.Status.Phase != corev1.PodPending { + continue + } klog.V(5).Infof("Adding pending pod %s/%s to retryPods for network %s", pod.Namespace, pod.Name, netInfo.GetNetworkName()) err := retryPods.AddRetryObjWithAddNoBackoff(&pod) if err != nil { errs = append(errs, err) + continue } + podsAdded = true + } + if podsAdded { + retryPods.RequestRetryObjs() } - retryPods.RequestRetryObjs() return utilerrors.Join(errs...) } From d31d1717da84a98b8feb08c261ec36c10cb416c5 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Tue, 20 May 2025 13:32:07 -0400 Subject: [PATCH 003/181] Configures ephemeral port range for OVN SNAT'ing There was a previous bug where when an egress packet would be SNAT'ed to the node IP, using a nodeport source port, it would cause reply traffic to get DNAT'ed to the nodeport load balancer. This happened because the egress connections were not conntracked correctly. This was fixed via: https://issues.redhat.com/browse/OCPBUGS-25889 https://issues.redhat.com/browse/FDP-291 However, that fix was not hardware offloadable. The ideal fix here would be to always commit to conntrack and have it be HW offloadable. Until we have a better solution, we can configure the port range for OVN to use on its SNAT. This applies to all SNATs for traffic that enters the local host or leaves the host. The new config option --ephemeral-port-range "-" can be used to specify the port range to use with OVN. If not provided, this value will be automatically derived from the ephemeral port range in /proc/sys/net/ipv4/ip_local_port_range, which is typically set already to avoid nodeport range conflicts. 
Signed-off-by: Tim Rozet --- go-controller/pkg/config/config.go | 34 +++++++++++++++++ go-controller/pkg/config/utils.go | 48 ++++++++++++++++++++++++ go-controller/pkg/libovsdb/ops/router.go | 6 ++- go-controller/pkg/ovn/gateway_test.go | 18 +++++++-- go-controller/pkg/ovn/namespace_test.go | 1 + 5 files changed, 102 insertions(+), 5 deletions(-) diff --git a/go-controller/pkg/config/config.go b/go-controller/pkg/config/config.go index c7df666cbc..7cd97479c4 100644 --- a/go-controller/pkg/config/config.go +++ b/go-controller/pkg/config/config.go @@ -38,6 +38,9 @@ const DefaultVXLANPort = 4789 const DefaultDBTxnTimeout = time.Second * 100 +// DefaultEphemeralPortRange is used for unit testing only +const DefaultEphemeralPortRange = "32768-60999" + // The following are global config parameters that other modules may access directly var ( // Build information. Populated at build-time. @@ -494,6 +497,10 @@ type GatewayConfig struct { DisableForwarding bool `gcfg:"disable-forwarding"` // AllowNoUplink (disabled by default) controls if the external gateway bridge without an uplink port is allowed in local gateway mode. AllowNoUplink bool `gcfg:"allow-no-uplink"` + // EphemeralPortRange is the range of ports used by egress SNAT operations in OVN. Specifically for NAT where + // the source IP of the NAT will be a shared Node IP address. If unset, the value will be determined by sysctl lookup + // for the kernel's ephemeral range: net.ipv4.ip_local_port_range. Format is "<min>-<max>". 
+ EphemeralPortRange string `gcfg:"ephemeral-port-range"` } // OvnAuthConfig holds client authentication and location details for @@ -664,6 +671,9 @@ func PrepareTestConfig() error { Kubernetes.DisableRequestedChassis = false EnableMulticast = false Default.OVSDBTxnTimeout = 5 * time.Second + if Gateway.Mode != GatewayModeDisabled { + Gateway.EphemeralPortRange = DefaultEphemeralPortRange + } if err := completeConfig(); err != nil { return err @@ -1509,6 +1519,14 @@ var OVNGatewayFlags = []cli.Flag{ Usage: "Allow the external gateway bridge without an uplink port in local gateway mode", Destination: &cliConfig.Gateway.AllowNoUplink, }, + &cli.StringFlag{ Name: "ephemeral-port-range", Usage: "The port range in '<min>-<max>' format for OVN to use when SNAT'ing to a node IP. " + "This range should not collide with the node port range being used in Kubernetes. If not provided, " + "the default value will be derived from checking the sysctl value of net.ipv4.ip_local_port_range on the node.", Destination: &cliConfig.Gateway.EphemeralPortRange, Value: Gateway.EphemeralPortRange, }, // Deprecated CLI options &cli.BoolFlag{ Name: "init-gateways", @@ -1917,6 +1935,19 @@ func buildGatewayConfig(ctx *cli.Context, cli, file *config) error { if !found { return fmt.Errorf("invalid gateway mode %q: expect one of %s", string(Gateway.Mode), strings.Join(validModes, ",")) } + + if len(Gateway.EphemeralPortRange) > 0 { + if !isValidEphemeralPortRange(Gateway.EphemeralPortRange) { + return fmt.Errorf("invalid ephemeral-port-range, should be in the format <min>-<max>") + } + } else { + // auto-detect ephemeral range + portRange, err := getKernelEphemeralPortRange() + if err != nil { + return fmt.Errorf("unable to auto-detect ephemeral port range to use with OVN") + } + Gateway.EphemeralPortRange = portRange + } } // Options are only valid if Mode is not disabled @@ -1927,6 +1958,9 @@ func buildGatewayConfig(ctx *cli.Context, cli, file *config) error { if Gateway.NextHop != "" { return 
fmt.Errorf("gateway next-hop option %q not allowed when gateway is disabled", Gateway.NextHop) } + if len(Gateway.EphemeralPortRange) > 0 { + return fmt.Errorf("gateway ephemeral port range option not allowed when gateway is disabled") + } } if Gateway.Mode != GatewayModeShared && Gateway.VLANID != 0 { diff --git a/go-controller/pkg/config/utils.go b/go-controller/pkg/config/utils.go index 7ff8eff484..f0f0ff1a6b 100644 --- a/go-controller/pkg/config/utils.go +++ b/go-controller/pkg/config/utils.go @@ -3,7 +3,9 @@ package config import ( "fmt" "net" + "os" "reflect" + "regexp" "strconv" "strings" @@ -328,3 +330,49 @@ func AllocateV6MasqueradeIPs(masqueradeSubnetNetworkAddress net.IP, masqueradeIP } return nil } + +func isValidEphemeralPortRange(s string) bool { + // Regex to match "-" with no extra characters + re := regexp.MustCompile(`^(\d{1,5})-(\d{1,5})$`) + matches := re.FindStringSubmatch(s) + if matches == nil { + return false + } + + minPort, err1 := strconv.Atoi(matches[1]) + maxPort, err2 := strconv.Atoi(matches[2]) + if err1 != nil || err2 != nil { + return false + } + + // Port numbers must be in the 1-65535 range + if minPort < 1 || minPort > 65535 || maxPort < 0 || maxPort > 65535 { + return false + } + + return maxPort > minPort +} + +func getKernelEphemeralPortRange() (string, error) { + data, err := os.ReadFile("/proc/sys/net/ipv4/ip_local_port_range") + if err != nil { + return "", fmt.Errorf("failed to read port range: %w", err) + } + + parts := strings.Fields(string(data)) + if len(parts) != 2 { + return "", fmt.Errorf("unexpected format: %q", string(data)) + } + + minPort, err := strconv.Atoi(parts[0]) + if err != nil { + return "", fmt.Errorf("invalid min port: %w", err) + } + + maxPort, err := strconv.Atoi(parts[1]) + if err != nil { + return "", fmt.Errorf("invalid max port: %w", err) + } + + return fmt.Sprintf("%d-%d", minPort, maxPort), nil +} diff --git a/go-controller/pkg/libovsdb/ops/router.go b/go-controller/pkg/libovsdb/ops/router.go 
index da518f7cb3..3d5a6fc255 100644 --- a/go-controller/pkg/libovsdb/ops/router.go +++ b/go-controller/pkg/libovsdb/ops/router.go @@ -961,6 +961,10 @@ func buildNAT( Match: match, } + if config.Gateway.Mode != config.GatewayModeDisabled { + nat.ExternalPortRange = config.Gateway.EphemeralPortRange + } + if logicalPort != "" { nat.LogicalPort = &logicalPort } @@ -1061,7 +1065,7 @@ func isEquivalentNAT(existing *nbdb.NAT, searched *nbdb.NAT) bool { return false } - // Compre externalIP if its not empty. + // Compare externalIP if it's not empty. if searched.ExternalIP != "" && searched.ExternalIP != existing.ExternalIP { return false } diff --git a/go-controller/pkg/ovn/gateway_test.go b/go-controller/pkg/ovn/gateway_test.go index da48869991..57f5fb4be2 100644 --- a/go-controller/pkg/ovn/gateway_test.go +++ b/go-controller/pkg/ovn/gateway_test.go @@ -220,13 +220,17 @@ func generateGatewayInitExpectedNB(testData []libovsdbtest.TestData, expectedOVN natUUID := fmt.Sprintf("nat-%d-UUID", i) natUUIDs = append(natUUIDs, natUUID) physicalIP, _ := util.MatchFirstIPNetFamily(utilnet.IsIPv6CIDR(subnet), l3GatewayConfig.IPAddresses) - testData = append(testData, &nbdb.NAT{ + nat := nbdb.NAT{ UUID: natUUID, ExternalIP: physicalIP.IP.String(), LogicalIP: subnet.String(), Options: map[string]string{"stateless": "false"}, Type: nbdb.NATTypeSNAT, - }) + } + if config.Gateway.Mode != config.GatewayModeDisabled { + nat.ExternalPortRange = config.DefaultEphemeralPortRange + } + testData = append(testData, &nat) } } @@ -234,13 +238,17 @@ func generateGatewayInitExpectedNB(testData []libovsdbtest.TestData, expectedOVN natUUID := fmt.Sprintf("nat-join-%d-UUID", i) natUUIDs = append(natUUIDs, natUUID) joinLRPIP, _ := util.MatchFirstIPNetFamily(utilnet.IsIPv6CIDR(physicalIP), joinLRPIPs) - testData = append(testData, &nbdb.NAT{ + nat := nbdb.NAT{ UUID: natUUID, ExternalIP: physicalIP.IP.String(), LogicalIP: joinLRPIP.IP.String(), Options: map[string]string{"stateless": "false"}, Type: 
nbdb.NATTypeSNAT, - }) + } + if config.Gateway.Mode != config.GatewayModeDisabled { + nat.ExternalPortRange = config.DefaultEphemeralPortRange + } + testData = append(testData, &nat) } testData = append(testData, &nbdb.MeterBand{ @@ -394,6 +402,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { ginkgo.Context("Gateway Creation Operations Shared Gateway Mode", func() { ginkgo.BeforeEach(func() { config.Gateway.Mode = config.GatewayModeShared + config.Gateway.EphemeralPortRange = config.DefaultEphemeralPortRange }) ginkgo.It("creates an IPv4 gateway in OVN", func() { @@ -1441,6 +1450,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { ginkgo.BeforeEach(func() { config.Gateway.Mode = config.GatewayModeLocal config.IPv6Mode = false + config.Gateway.EphemeralPortRange = config.DefaultEphemeralPortRange }) ginkgo.It("creates a dual-stack gateway in OVN", func() { diff --git a/go-controller/pkg/ovn/namespace_test.go b/go-controller/pkg/ovn/namespace_test.go index c067098709..3e8c556b8a 100644 --- a/go-controller/pkg/ovn/namespace_test.go +++ b/go-controller/pkg/ovn/namespace_test.go @@ -238,6 +238,7 @@ var _ = ginkgo.Describe("OVN Namespace Operations", func() { ginkgo.It("creates an address set for existing nodes when the host network traffic namespace is created", func() { config.Gateway.Mode = config.GatewayModeShared config.Gateway.NodeportEnable = true + config.Gateway.EphemeralPortRange = config.DefaultEphemeralPortRange var err error config.Default.ClusterSubnets, err = config.ParseClusterSubnetEntries(clusterCIDR) gomega.Expect(err).NotTo(gomega.HaveOccurred()) From 7a30735fc45cde1ae1fbea6012ca5c8f17cdb598 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Wed, 28 May 2025 12:30:20 -0400 Subject: [PATCH 004/181] Use watchFactory instead of kclient for gateway snat cleanup Signed-off-by: Tim Rozet --- go-controller/pkg/ovn/gateway.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/go-controller/pkg/ovn/gateway.go 
b/go-controller/pkg/ovn/gateway.go index 9ecbb512fd..7c38289737 100644 --- a/go-controller/pkg/ovn/gateway.go +++ b/go-controller/pkg/ovn/gateway.go @@ -11,8 +11,6 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" utilnet "k8s.io/utils/net" @@ -161,9 +159,7 @@ func (gw *GatewayManager) cleanupStalePodSNATs(nodeName string, nodeIPs []*net.I // the SNATs stale podIPsWithSNAT := sets.New[string]() if !gw.isRoutingAdvertised(nodeName) && config.Gateway.DisableSNATMultipleGWs { - pods, err := gw.kube.GetPods(metav1.NamespaceAll, metav1.ListOptions{ - FieldSelector: fields.OneTermEqualSelector("spec.nodeName", nodeName).String(), - }) + pods, err := gw.watchFactory.GetAllPods() if err != nil { return fmt.Errorf("unable to list existing pods on node: %s, %w", nodeName, err) @@ -173,6 +169,9 @@ func (gw *GatewayManager) cleanupStalePodSNATs(nodeName string, nodeIPs []*net.I if !util.PodScheduled(&pod) { //if the pod is not scheduled we should not remove the nat continue } + if pod.Spec.NodeName != nodeName { + continue + } if util.PodCompleted(&pod) { collidingPod, err := findPodWithIPAddresses(gw.watchFactory, gw.netInfo, []net.IP{utilnet.ParseIPSloppy(pod.Status.PodIP)}, "") //even if a pod is completed we should still delete the nat if the ip is not in use anymore if err != nil { From 8cf444c9e4bdd77795954e49c8cfc3dc45ffb3b8 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Wed, 28 May 2025 16:07:24 -0400 Subject: [PATCH 005/181] Removes as much kubeclient Get methods as possible Kubeclient get for nodes and pods were being used in other places in the code. Removed all of their uses except for specific cases like the ovn db manager and windows, where we do not have full informer setups. 
While transitioning to use the factory, it created a cylical dependency between metrics and factory libraries, due to the configuration duration recorder. Split the configuration duration recorder into its own sub-package under metrics/recorders. Signed-off-by: Tim Rozet --- .../pkg/controller/ho_node_windows.go | 6 +- .../pkg/controller/ovn_node_linux.go | 2 +- .../controllermanager/controller_manager.go | 3 +- .../node_controller_manager_test.go | 2 +- go-controller/pkg/kube/annotator_test.go | 2 +- go-controller/pkg/kube/kube.go | 57 +- go-controller/pkg/kube/kube_test.go | 2 +- go-controller/pkg/kube/mocks/Interface.go | 104 +-- go-controller/pkg/kube/mocks/InterfaceOVN.go | 102 +-- go-controller/pkg/metrics/cluster_manager.go | 41 +- go-controller/pkg/metrics/metrics.go | 15 +- go-controller/pkg/metrics/node.go | 23 +- go-controller/pkg/metrics/ovn.go | 59 +- go-controller/pkg/metrics/ovn_db.go | 77 +- go-controller/pkg/metrics/ovn_northd.go | 21 +- .../pkg/metrics/ovnkube_controller.go | 697 ++---------------- go-controller/pkg/metrics/ovs.go | 137 ++-- .../pkg/metrics/recorders/duration.go | 565 ++++++++++++++ .../duration_test.go} | 64 +- .../node/default_node_network_controller.go | 8 +- .../pkg/node/gateway_egressip_test.go | 24 +- go-controller/pkg/node/gateway_udn_test.go | 8 +- go-controller/pkg/ovn/base_event_handler.go | 18 +- .../pkg/ovn/base_network_controller.go | 3 +- .../admin_network_policy/metrics.go | 6 +- .../pkg/ovn/controller/network_qos/metrics.go | 26 +- .../ovn/controller/services/loadbalancer.go | 4 +- .../services/services_controller.go | 11 +- .../pkg/ovn/default_network_controller.go | 19 +- go-controller/pkg/ovn/hybrid.go | 4 +- go-controller/pkg/ovn/master.go | 2 +- go-controller/pkg/ovn/master_test.go | 2 +- go-controller/pkg/ovn/ovn_test.go | 2 +- .../secondary_localnet_network_controller.go | 4 +- .../pkg/ovndbmanager/ovndbmanager.go | 2 +- go-controller/pkg/types/const.go | 13 + 36 files changed, 989 insertions(+), 1146 
deletions(-) create mode 100644 go-controller/pkg/metrics/recorders/duration.go rename go-controller/pkg/metrics/{ovnkube_controller_test.go => recorders/duration_test.go} (86%) diff --git a/go-controller/hybrid-overlay/pkg/controller/ho_node_windows.go b/go-controller/hybrid-overlay/pkg/controller/ho_node_windows.go index 5de9b75391..339bc289f7 100644 --- a/go-controller/hybrid-overlay/pkg/controller/ho_node_windows.go +++ b/go-controller/hybrid-overlay/pkg/controller/ho_node_windows.go @@ -57,7 +57,7 @@ func newNodeController(kube kube.Interface, "UDP port. Please make sure you install all the KB updates on your system.") } - node, err := kube.GetNode(nodeName) + node, err := kube.GetNodeForWindows(nodeName) if err != nil { return nil, err } @@ -345,7 +345,7 @@ func (n *NodeController) initSelf(node *corev1.Node, nodeSubnet *net.IPNet) erro } // Add existing nodes - nodes, err := n.kube.GetNodes() + nodes, err := n.kube.GetNodesForWindows() if err != nil { return fmt.Errorf("error in initializing/fetching nodes: %v", err) } @@ -370,7 +370,7 @@ func (n *NodeController) uninitSelf(node *corev1.Node) error { networkName, n.networkID, node.Name) // Remove existing nodes - nodes, err := n.kube.GetNodes() + nodes, err := n.kube.GetNodesForWindows() if err != nil { return fmt.Errorf("failed to get nodes: %v", err) } diff --git a/go-controller/hybrid-overlay/pkg/controller/ovn_node_linux.go b/go-controller/hybrid-overlay/pkg/controller/ovn_node_linux.go index 3c74239db0..df8a9559c8 100644 --- a/go-controller/hybrid-overlay/pkg/controller/ovn_node_linux.go +++ b/go-controller/hybrid-overlay/pkg/controller/ovn_node_linux.go @@ -261,7 +261,7 @@ func (n *NodeController) AddNode(node *corev1.Node) error { } else { // Make sure the local node has been initialized before adding a hybridOverlay remote node if atomic.LoadUint32(n.initState) < hotypes.DistributedRouterInitialized { - localNode, err := n.kube.GetNode(n.nodeName) + localNode, err := n.nodeLister.Get(n.nodeName) if 
err != nil { return fmt.Errorf("cannot get local node: %s: %w", n.nodeName, err) } diff --git a/go-controller/pkg/controllermanager/controller_manager.go b/go-controller/pkg/controllermanager/controller_manager.go index 06d88c4ce4..6b5f1e9f89 100644 --- a/go-controller/pkg/controllermanager/controller_manager.go +++ b/go-controller/pkg/controllermanager/controller_manager.go @@ -23,6 +23,7 @@ import ( libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics/recorders" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability" @@ -438,7 +439,7 @@ func (cm *ControllerManager) Start(ctx context.Context) error { // with k=10, // for a cluster with 10 nodes, measurement of 1 in every 100 requests // for a cluster with 100 nodes, measurement of 1 in every 1000 requests - metrics.GetConfigDurationRecorder().Run(cm.nbClient, cm.kube, 10, time.Second*5, cm.stopChan) + recorders.GetConfigDurationRecorder().Run(cm.nbClient, cm.watchFactory, 10, time.Second*5, cm.stopChan) } cm.podRecorder.Run(cm.sbClient, cm.stopChan) diff --git a/go-controller/pkg/controllermanager/node_controller_manager_test.go b/go-controller/pkg/controllermanager/node_controller_manager_test.go index cf96448fe9..92d51a25d8 100644 --- a/go-controller/pkg/controllermanager/node_controller_manager_test.go +++ b/go-controller/pkg/controllermanager/node_controller_manager_test.go @@ -228,7 +228,7 @@ var _ = Describe("Healthcheck tests", func() { }, } nodeList := []*corev1.Node{node} - factoryMock.On("GetNode", nodeName).Return(nodeList[0], nil) + factoryMock.On("GetNodeForWindows", nodeName).Return(nodeList[0], nil) 
factoryMock.On("GetNodes").Return(nodeList, nil) factoryMock.On("UserDefinedNetworkInformer").Return(nil) factoryMock.On("ClusterUserDefinedNetworkInformer").Return(nil) diff --git a/go-controller/pkg/kube/annotator_test.go b/go-controller/pkg/kube/annotator_test.go index 0caa0956a0..4c66adb81c 100644 --- a/go-controller/pkg/kube/annotator_test.go +++ b/go-controller/pkg/kube/annotator_test.go @@ -79,7 +79,7 @@ var _ = Describe("Annotator", func() { err := nodeAnnot.Run() Expect(err).ToNot(HaveOccurred()) - node, err := kube.GetNode(nodeName) + node, err := kube.GetNodeForWindows(nodeName) Expect(err).ToNot(HaveOccurred()) // should contain initial annotations diff --git a/go-controller/pkg/kube/kube.go b/go-controller/pkg/kube/kube.go index 4171e398e2..7eccec3d7f 100644 --- a/go-controller/pkg/kube/kube.go +++ b/go-controller/pkg/kube/kube.go @@ -12,7 +12,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" @@ -62,12 +61,11 @@ type Interface interface { PatchNode(old, new *corev1.Node) error UpdateNodeStatus(node *corev1.Node) error UpdatePodStatus(pod *corev1.Pod) error - GetAnnotationsOnPod(namespace, name string) (map[string]string, error) - GetNodes() ([]*corev1.Node, error) - GetNamespaces(labelSelector metav1.LabelSelector) ([]*corev1.Namespace, error) - GetPods(namespace string, opts metav1.ListOptions) ([]*corev1.Pod, error) - GetPod(namespace, name string) (*corev1.Pod, error) - GetNode(name string) (*corev1.Node, error) + // GetPodsForDBChecker should only be used by legacy DB checker. Use watchFactory instead to get pods. 
+ GetPodsForDBChecker(namespace string, opts metav1.ListOptions) ([]*corev1.Pod, error) + // GetNodeForWindows should only be used for windows hybrid overlay binary and never in linux code + GetNodeForWindows(name string) (*corev1.Node, error) + GetNodesForWindows() ([]*corev1.Node, error) Events() kv1core.EventInterface } @@ -201,7 +199,7 @@ func (k *Kube) SetAnnotationsOnService(namespace, name string, annotations map[s // SetTaintOnNode tries to add a new taint to the node. If the taint already exists, it doesn't do anything. func (k *Kube) SetTaintOnNode(nodeName string, taint *corev1.Taint) error { - node, err := k.GetNode(nodeName) + node, err := k.GetNodeForWindows(nodeName) if err != nil { klog.Errorf("Unable to retrieve node %s for tainting %s: %v", nodeName, taint.ToString(), err) return err @@ -234,7 +232,7 @@ func (k *Kube) SetTaintOnNode(nodeName string, taint *corev1.Taint) error { // RemoveTaintFromNode removes all the taints that have the same key and effect from the node. // If the taint doesn't exist, it doesn't do anything. 
func (k *Kube) RemoveTaintFromNode(nodeName string, taint *corev1.Taint) error { - node, err := k.GetNode(nodeName) + node, err := k.GetNodeForWindows(nodeName) if err != nil { klog.Errorf("Unable to retrieve node %s for tainting %s: %v", nodeName, taint.ToString(), err) return err @@ -324,32 +322,8 @@ func (k *Kube) UpdatePodStatus(pod *corev1.Pod) error { return err } -// GetAnnotationsOnPod obtains the pod annotations from kubernetes apiserver, given the name and namespace -func (k *Kube) GetAnnotationsOnPod(namespace, name string) (map[string]string, error) { - pod, err := k.KClient.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - return pod.ObjectMeta.Annotations, nil -} - -// GetNamespaces returns the list of all Namespace objects matching the labelSelector -func (k *Kube) GetNamespaces(labelSelector metav1.LabelSelector) ([]*corev1.Namespace, error) { - list := []*corev1.Namespace{} - err := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) { - return k.KClient.CoreV1().Namespaces().List(ctx, opts) - }).EachListItem(context.TODO(), metav1.ListOptions{ - LabelSelector: labels.Set(labelSelector.MatchLabels).String(), - ResourceVersion: "0", - }, func(obj runtime.Object) error { - list = append(list, obj.(*corev1.Namespace)) - return nil - }) - return list, err -} - -// GetPods returns the list of all Pod objects in a namespace matching the options -func (k *Kube) GetPods(namespace string, opts metav1.ListOptions) ([]*corev1.Pod, error) { +// GetPodsForDBChecker returns the list of all Pod objects in a namespace matching the options. Only used by the legacy db checker. 
+func (k *Kube) GetPodsForDBChecker(namespace string, opts metav1.ListOptions) ([]*corev1.Pod, error) { list := []*corev1.Pod{} opts.ResourceVersion = "0" err := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) { @@ -361,13 +335,8 @@ func (k *Kube) GetPods(namespace string, opts metav1.ListOptions) ([]*corev1.Pod return list, err } -// GetPod obtains the pod from kubernetes apiserver, given the name and namespace -func (k *Kube) GetPod(namespace, name string) (*corev1.Pod, error) { - return k.KClient.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) -} - -// GetNodes returns the list of all Node objects from kubernetes -func (k *Kube) GetNodes() ([]*corev1.Node, error) { +// GetNodesForWindows returns the list of all Node objects from kubernetes. Only used by windows binary. +func (k *Kube) GetNodesForWindows() ([]*corev1.Node, error) { list := []*corev1.Node{} err := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) { return k.KClient.CoreV1().Nodes().List(ctx, opts) @@ -380,8 +349,8 @@ func (k *Kube) GetNodes() ([]*corev1.Node, error) { return list, err } -// GetNode returns the Node resource from kubernetes apiserver, given its name -func (k *Kube) GetNode(name string) (*corev1.Node, error) { +// GetNodeForWindows returns the Node resource from kubernetes apiserver, given its name. Only used by windows binary. 
+func (k *Kube) GetNodeForWindows(name string) (*corev1.Node, error) { return k.KClient.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{}) } diff --git a/go-controller/pkg/kube/kube_test.go b/go-controller/pkg/kube/kube_test.go index 4741cef0ed..d93119ff3a 100644 --- a/go-controller/pkg/kube/kube_test.go +++ b/go-controller/pkg/kube/kube_test.go @@ -96,7 +96,7 @@ var _ = Describe("Kube", func() { err := kube.SetTaintOnNode(node.Name, &taint) Expect(err).ToNot(HaveOccurred()) - updatedNode, err := kube.GetNode(node.Name) + updatedNode, err := kube.GetNodeForWindows(node.Name) Expect(err).ToNot(HaveOccurred()) Expect(updatedNode.Spec.Taints).To(Equal([]corev1.Taint{taint})) }) diff --git a/go-controller/pkg/kube/mocks/Interface.go b/go-controller/pkg/kube/mocks/Interface.go index 6631ca44c0..594d33d699 100644 --- a/go-controller/pkg/kube/mocks/Interface.go +++ b/go-controller/pkg/kube/mocks/Interface.go @@ -37,72 +37,12 @@ func (_m *Interface) Events() v1.EventInterface { return r0 } -// GetAnnotationsOnPod provides a mock function with given fields: namespace, name -func (_m *Interface) GetAnnotationsOnPod(namespace string, name string) (map[string]string, error) { - ret := _m.Called(namespace, name) - - if len(ret) == 0 { - panic("no return value specified for GetAnnotationsOnPod") - } - - var r0 map[string]string - var r1 error - if rf, ok := ret.Get(0).(func(string, string) (map[string]string, error)); ok { - return rf(namespace, name) - } - if rf, ok := ret.Get(0).(func(string, string) map[string]string); ok { - r0 = rf(namespace, name) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]string) - } - } - - if rf, ok := ret.Get(1).(func(string, string) error); ok { - r1 = rf(namespace, name) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetNamespaces provides a mock function with given fields: labelSelector -func (_m *Interface) GetNamespaces(labelSelector metav1.LabelSelector) ([]*corev1.Namespace, error) { - ret := 
_m.Called(labelSelector) - - if len(ret) == 0 { - panic("no return value specified for GetNamespaces") - } - - var r0 []*corev1.Namespace - var r1 error - if rf, ok := ret.Get(0).(func(metav1.LabelSelector) ([]*corev1.Namespace, error)); ok { - return rf(labelSelector) - } - if rf, ok := ret.Get(0).(func(metav1.LabelSelector) []*corev1.Namespace); ok { - r0 = rf(labelSelector) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*corev1.Namespace) - } - } - - if rf, ok := ret.Get(1).(func(metav1.LabelSelector) error); ok { - r1 = rf(labelSelector) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // GetNode provides a mock function with given fields: name -func (_m *Interface) GetNode(name string) (*corev1.Node, error) { +func (_m *Interface) GetNodeForWindows(name string) (*corev1.Node, error) { ret := _m.Called(name) if len(ret) == 0 { - panic("no return value specified for GetNode") + panic("no return value specified for GetNodeForWindows") } var r0 *corev1.Node @@ -127,12 +67,12 @@ func (_m *Interface) GetNode(name string) (*corev1.Node, error) { return r0, r1 } -// GetNodes provides a mock function with given fields: -func (_m *Interface) GetNodes() ([]*corev1.Node, error) { +// GetNodesForWindows provides a mock function with given fields: +func (_m *Interface) GetNodesForWindows() ([]*corev1.Node, error) { ret := _m.Called() if len(ret) == 0 { - panic("no return value specified for GetNodes") + panic("no return value specified for GetNodesForWindows") } var r0 []*corev1.Node @@ -157,42 +97,12 @@ func (_m *Interface) GetNodes() ([]*corev1.Node, error) { return r0, r1 } -// GetPod provides a mock function with given fields: namespace, name -func (_m *Interface) GetPod(namespace string, name string) (*corev1.Pod, error) { - ret := _m.Called(namespace, name) - - if len(ret) == 0 { - panic("no return value specified for GetPod") - } - - var r0 *corev1.Pod - var r1 error - if rf, ok := ret.Get(0).(func(string, string) (*corev1.Pod, error)); ok { - 
return rf(namespace, name) - } - if rf, ok := ret.Get(0).(func(string, string) *corev1.Pod); ok { - r0 = rf(namespace, name) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*corev1.Pod) - } - } - - if rf, ok := ret.Get(1).(func(string, string) error); ok { - r1 = rf(namespace, name) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // GetPods provides a mock function with given fields: namespace, opts -func (_m *Interface) GetPods(namespace string, opts metav1.ListOptions) ([]*corev1.Pod, error) { +func (_m *Interface) GetPodsForDBChecker(namespace string, opts metav1.ListOptions) ([]*corev1.Pod, error) { ret := _m.Called(namespace, opts) if len(ret) == 0 { - panic("no return value specified for GetPods") + panic("no return value specified for GetPodsForDBChecker") } var r0 []*corev1.Pod diff --git a/go-controller/pkg/kube/mocks/InterfaceOVN.go b/go-controller/pkg/kube/mocks/InterfaceOVN.go index 14a8a33af6..18e93ed800 100644 --- a/go-controller/pkg/kube/mocks/InterfaceOVN.go +++ b/go-controller/pkg/kube/mocks/InterfaceOVN.go @@ -91,36 +91,6 @@ func (_m *InterfaceOVN) Events() corev1.EventInterface { return r0 } -// GetAnnotationsOnPod provides a mock function with given fields: namespace, name -func (_m *InterfaceOVN) GetAnnotationsOnPod(namespace string, name string) (map[string]string, error) { - ret := _m.Called(namespace, name) - - if len(ret) == 0 { - panic("no return value specified for GetAnnotationsOnPod") - } - - var r0 map[string]string - var r1 error - if rf, ok := ret.Get(0).(func(string, string) (map[string]string, error)); ok { - return rf(namespace, name) - } - if rf, ok := ret.Get(0).(func(string, string) map[string]string); ok { - r0 = rf(namespace, name) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]string) - } - } - - if rf, ok := ret.Get(1).(func(string, string) error); ok { - r1 = rf(namespace, name) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // GetEgressFirewalls provides a mock function with 
given fields: func (_m *InterfaceOVN) GetEgressFirewalls() ([]*egressfirewallv1.EgressFirewall, error) { ret := _m.Called() @@ -211,42 +181,12 @@ func (_m *InterfaceOVN) GetEgressIPs() ([]*egressipv1.EgressIP, error) { return r0, r1 } -// GetNamespaces provides a mock function with given fields: labelSelector -func (_m *InterfaceOVN) GetNamespaces(labelSelector metav1.LabelSelector) ([]*apicorev1.Namespace, error) { - ret := _m.Called(labelSelector) - - if len(ret) == 0 { - panic("no return value specified for GetNamespaces") - } - - var r0 []*apicorev1.Namespace - var r1 error - if rf, ok := ret.Get(0).(func(metav1.LabelSelector) ([]*apicorev1.Namespace, error)); ok { - return rf(labelSelector) - } - if rf, ok := ret.Get(0).(func(metav1.LabelSelector) []*apicorev1.Namespace); ok { - r0 = rf(labelSelector) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*apicorev1.Namespace) - } - } - - if rf, ok := ret.Get(1).(func(metav1.LabelSelector) error); ok { - r1 = rf(labelSelector) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // GetNode provides a mock function with given fields: name -func (_m *InterfaceOVN) GetNode(name string) (*apicorev1.Node, error) { +func (_m *InterfaceOVN) GetNodeForWindows(name string) (*apicorev1.Node, error) { ret := _m.Called(name) if len(ret) == 0 { - panic("no return value specified for GetNode") + panic("no return value specified for GetNodeForWindows") } var r0 *apicorev1.Node @@ -271,8 +211,8 @@ func (_m *InterfaceOVN) GetNode(name string) (*apicorev1.Node, error) { return r0, r1 } -// GetNodes provides a mock function with given fields: -func (_m *InterfaceOVN) GetNodes() ([]*apicorev1.Node, error) { +// GetNodesForWindows provides a mock function with given fields: +func (_m *InterfaceOVN) GetNodesForWindows() ([]*apicorev1.Node, error) { ret := _m.Called() if len(ret) == 0 { @@ -301,42 +241,12 @@ func (_m *InterfaceOVN) GetNodes() ([]*apicorev1.Node, error) { return r0, r1 } -// GetPod provides a mock function 
with given fields: namespace, name -func (_m *InterfaceOVN) GetPod(namespace string, name string) (*apicorev1.Pod, error) { - ret := _m.Called(namespace, name) - - if len(ret) == 0 { - panic("no return value specified for GetPod") - } - - var r0 *apicorev1.Pod - var r1 error - if rf, ok := ret.Get(0).(func(string, string) (*apicorev1.Pod, error)); ok { - return rf(namespace, name) - } - if rf, ok := ret.Get(0).(func(string, string) *apicorev1.Pod); ok { - r0 = rf(namespace, name) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*apicorev1.Pod) - } - } - - if rf, ok := ret.Get(1).(func(string, string) error); ok { - r1 = rf(namespace, name) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // GetPods provides a mock function with given fields: namespace, opts -func (_m *InterfaceOVN) GetPods(namespace string, opts metav1.ListOptions) ([]*apicorev1.Pod, error) { +func (_m *InterfaceOVN) GetPodsForDBChecker(namespace string, opts metav1.ListOptions) ([]*apicorev1.Pod, error) { ret := _m.Called(namespace, opts) if len(ret) == 0 { - panic("no return value specified for GetPods") + panic("no return value specified for GetPodsForDBChecker") } var r0 []*apicorev1.Pod diff --git a/go-controller/pkg/metrics/cluster_manager.go b/go-controller/pkg/metrics/cluster_manager.go index 3acba72759..f97a338b89 100644 --- a/go-controller/pkg/metrics/cluster_manager.go +++ b/go-controller/pkg/metrics/cluster_manager.go @@ -7,28 +7,29 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" ) var registerClusterManagerBaseMetrics sync.Once // MetricClusterManagerLeader identifies whether this instance of ovnkube-cluster-manager is a leader or not var MetricClusterManagerLeader = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemClusterManager, + Namespace: types.MetricOvnkubeNamespace, 
+ Subsystem: types.MetricOvnkubeSubsystemClusterManager, Name: "leader", Help: "Identifies whether the instance of ovnkube-cluster-manager is a leader(1) or not(0).", }) var MetricClusterManagerReadyDuration = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemClusterManager, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, Name: "ready_duration_seconds", Help: "The duration for the cluster manager to get to ready state", }) var metricV4HostSubnetCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemClusterManager, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, Name: "num_v4_host_subnets", Help: "The total number of v4 host subnets possible per network"}, []string{ @@ -37,8 +38,8 @@ var metricV4HostSubnetCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricV6HostSubnetCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemClusterManager, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, Name: "num_v6_host_subnets", Help: "The total number of v6 host subnets possible per network"}, []string{ @@ -47,8 +48,8 @@ var metricV6HostSubnetCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricV4AllocatedHostSubnetCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemClusterManager, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, Name: "allocated_v4_host_subnets", Help: "The total number of v4 host subnets currently allocated per network"}, []string{ @@ -57,8 +58,8 @@ var metricV4AllocatedHostSubnetCount = prometheus.NewGaugeVec(prometheus.GaugeOp ) var 
metricV6AllocatedHostSubnetCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemClusterManager, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, Name: "allocated_v6_host_subnets", Help: "The total number of v6 host subnets currently allocated per network"}, []string{ @@ -68,22 +69,22 @@ var metricV6AllocatedHostSubnetCount = prometheus.NewGaugeVec(prometheus.GaugeOp /** EgressIP metrics recorded from cluster-manager begins**/ var metricEgressIPCount = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemClusterManager, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, Name: "num_egress_ips", Help: "The number of defined egress IP addresses", }) var metricEgressIPNodeUnreacheableCount = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemClusterManager, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, Name: "egress_ips_node_unreachable_total", Help: "The total number of times assigned egress IP(s) were unreachable"}, ) var metricEgressIPRebalanceCount = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemClusterManager, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, Name: "egress_ips_rebalance_total", Help: "The total number of times assigned egress IP(s) needed to be moved to a different node"}, ) @@ -98,8 +99,8 @@ func RegisterClusterManagerBase() { prometheus.MustRegister(MetricClusterManagerReadyDuration) prometheus.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemClusterManager, + Namespace: 
types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, Name: "build_info", Help: "A metric with a constant '1' value labeled by version, revision, branch, " + "and go version from which ovnkube was built and when and who built it", diff --git a/go-controller/pkg/metrics/metrics.go b/go-controller/pkg/metrics/metrics.go index 847619b8dd..ea86a65c0d 100644 --- a/go-controller/pkg/metrics/metrics.go +++ b/go-controller/pkg/metrics/metrics.go @@ -27,22 +27,11 @@ import ( libovsdbclient "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) const ( - MetricOvnkubeNamespace = "ovnkube" - MetricOvnkubeSubsystemController = "controller" - MetricOvnkubeSubsystemClusterManager = "clustermanager" - MetricOvnkubeSubsystemNode = "node" - MetricOvnNamespace = "ovn" - MetricOvnSubsystemDB = "db" - MetricOvnSubsystemNorthd = "northd" - MetricOvnSubsystemController = "controller" - MetricOvsNamespace = "ovs" - MetricOvsSubsystemVswitchd = "vswitchd" - MetricOvsSubsystemDB = "db" - ovnNorthd = "ovn-northd" ovnController = "ovn-controller" ovsVswitchd = "ovs-vswitchd" @@ -82,7 +71,7 @@ type stopwatchStatistics struct { // resource reached the maximum retry limit and will not be retried. This metric doesn't // need Subsystem string since it is applicable for both master and node. 
var MetricResourceRetryFailuresCount = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: MetricOvnkubeNamespace, + Namespace: types.MetricOvnkubeNamespace, Name: "resource_retry_failures_total", Help: "The total number of times processing a Kubernetes resource reached the maximum retry limit and was no longer processed", }) diff --git a/go-controller/pkg/metrics/node.go b/go-controller/pkg/metrics/node.go index 3b19c334d7..07e621fc97 100644 --- a/go-controller/pkg/metrics/node.go +++ b/go-controller/pkg/metrics/node.go @@ -7,13 +7,14 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" ) // MetricCNIRequestDuration is a prometheus metric that tracks the duration // of CNI requests var MetricCNIRequestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemNode, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemNode, Name: "cni_request_duration_seconds", Help: "The duration of CNI server requests.", Buckets: prometheus.ExponentialBuckets(.1, 2, 15)}, @@ -22,23 +23,23 @@ var MetricCNIRequestDuration = prometheus.NewHistogramVec(prometheus.HistogramOp ) var MetricNodeReadyDuration = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemNode, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemNode, Name: "ready_duration_seconds", Help: "The duration for the node to get to ready state.", }) var metricOvnNodePortEnabled = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemNode, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemNode, Name: "nodeport_enabled", Help: "Specifies if the node port is enabled on this node(1) or not(0).", }) 
// metric to get the size of ovnkube.log file var metricOvnKubeNodeLogFileSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemNode, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemNode, Name: "logfile_size_bytes", Help: "The size of ovnkube logfile on the node."}, []string{ @@ -56,8 +57,8 @@ func RegisterNodeMetrics(stopChan <-chan struct{}) { prometheus.MustRegister(metricOvnNodePortEnabled) prometheus.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemNode, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemNode, Name: "build_info", Help: "A metric with a constant '1' value labeled by version, revision, branch, " + "and go version from which ovnkube was built and when and who built it.", @@ -72,7 +73,7 @@ func RegisterNodeMetrics(stopChan <-chan struct{}) { }, func() float64 { return 1 }, )) - registerWorkqueueMetrics(MetricOvnkubeNamespace, MetricOvnkubeSubsystemNode) + registerWorkqueueMetrics(types.MetricOvnkubeNamespace, types.MetricOvnkubeSubsystemNode) if err := prometheus.Register(MetricResourceRetryFailuresCount); err != nil { if _, ok := err.(prometheus.AlreadyRegisteredError); !ok { panic(err) diff --git a/go-controller/pkg/metrics/ovn.go b/go-controller/pkg/metrics/ovn.go index 4cce457ea5..63f057e38f 100644 --- a/go-controller/pkg/metrics/ovn.go +++ b/go-controller/pkg/metrics/ovn.go @@ -12,29 +12,30 @@ import ( libovsdbclient "github.com/ovn-org/libovsdb/client" ovsops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/ovs" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/vswitchd" ) // ovnController Configuration metrics var metricRemoteProbeInterval = prometheus.NewGauge(prometheus.GaugeOpts{ - 
Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "remote_probe_interval_seconds", Help: "The inactivity probe interval of the connection to the OVN SB DB.", }) var metricOpenFlowProbeInterval = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "openflow_probe_interval_seconds", Help: "The inactivity probe interval of the OpenFlow connection to the " + "OpenvSwitch integration bridge.", }) var metricMonitorAll = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "monitor_all", Help: "Specifies if ovn-controller should monitor all records of tables in OVN SB DB. 
" + "If set to false, it will conditionally monitor the records that " + @@ -42,8 +43,8 @@ var metricMonitorAll = prometheus.NewGauge(prometheus.GaugeOpts{ }) var metricEncapIP = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "encap_ip", Help: "A metric with a constant '1' value labeled by ipadress that " + "specifies the encapsulation ip address configured on that node.", @@ -54,8 +55,8 @@ var metricEncapIP = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricSbConnectionMethod = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "sb_connection_method", Help: "A metric with a constant '1' value labeled by connection_method that " + "specifies the ovn-remote value configured on that node.", @@ -66,8 +67,8 @@ var metricSbConnectionMethod = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricEncapType = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "encap_type", Help: "A metric with a constant '1' value labeled by type that " + "specifies the encapsulation type a chassis should use to " + @@ -79,8 +80,8 @@ var metricEncapType = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricBridgeMappings = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "bridge_mappings", Help: "A metric with a constant '1' value labeled by mapping that " + "specifies list of key-value pairs that map a physical network name 
" + @@ -92,8 +93,8 @@ var metricBridgeMappings = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOVNControllerSBDBConnection = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "southbound_database_connected", Help: "Specifies if OVN controller is connected to OVN southbound database (1) or not (0)", }) @@ -261,11 +262,11 @@ func setOvnControllerConfigurationMetrics(ovsDBClient libovsdbclient.Client) (er } openflowProbeField := openvSwitch.ExternalIDs["ovn-bridge-remote-probe-interval"] - openflowProbeVal := parseMetricToFloat(MetricOvnSubsystemController, "ovn-bridge-remote-probe-interval", openflowProbeField) + openflowProbeVal := parseMetricToFloat(types.MetricOvnSubsystemController, "ovn-bridge-remote-probe-interval", openflowProbeField) metricOpenFlowProbeInterval.Set(openflowProbeVal) remoteProbeField := openvSwitch.ExternalIDs["ovn-remote-probe-interval"] - remoteProbeValue := parseMetricToFloat(MetricOvnSubsystemController, "ovn-remote-probe-interval", remoteProbeField) + remoteProbeValue := parseMetricToFloat(types.MetricOvnSubsystemController, "ovn-remote-probe-interval", remoteProbeField) metricRemoteProbeInterval.Set(remoteProbeValue / 1000) var ovnMonitorValue float64 @@ -406,8 +407,8 @@ func RegisterOvnControllerMetrics(ovsDBClient libovsdbclient.Client, getOvnControllerVersionInfo() ovnRegistry.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "build_info", Help: "A metric with a constant '1' value labeled by version and library " + "from which ovn binaries were built", @@ -423,8 +424,8 @@ func RegisterOvnControllerMetrics(ovsDBClient libovsdbclient.Client, 
ovnRegistry.MustRegister(metricOVNControllerSBDBConnection) ovnRegistry.MustRegister(prometheus.NewCounterFunc( prometheus.CounterOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "integration_bridge_openflow_total", Help: "The total number of OpenFlow flows in the integration bridge.", }, func() float64 { @@ -437,7 +438,7 @@ func RegisterOvnControllerMetrics(ovsDBClient libovsdbclient.Client, for _, kvPair := range strings.Fields(stdout) { if strings.HasPrefix(kvPair, "flow_count=") { value := strings.Split(kvPair, "=")[1] - return parseMetricToFloat(MetricOvnSubsystemController, "integration_bridge_openflow_total", + return parseMetricToFloat(types.MetricOvnSubsystemController, "integration_bridge_openflow_total", value) } } @@ -445,8 +446,8 @@ func RegisterOvnControllerMetrics(ovsDBClient libovsdbclient.Client, })) ovnRegistry.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "integration_bridge_patch_ports", Help: "Captures the number of patch ports that connect br-int OVS " + "bridge to physical OVS bridge and br-local OVS bridge.", @@ -456,8 +457,8 @@ func RegisterOvnControllerMetrics(ovsDBClient libovsdbclient.Client, })) ovnRegistry.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "integration_bridge_geneve_ports", Help: "Captures the number of geneve ports that are on br-int OVS bridge.", }, @@ -475,11 +476,11 @@ func RegisterOvnControllerMetrics(ovsDBClient libovsdbclient.Client, ovnRegistry.MustRegister(metricBridgeMappings) // Register the ovn-controller coverage/show metrics 
componentCoverageShowMetricsMap[ovnController] = ovnControllerCoverageShowMetricsMap - registerCoverageShowMetrics(ovnController, MetricOvnNamespace, MetricOvnSubsystemController) + registerCoverageShowMetrics(ovnController, types.MetricOvnNamespace, types.MetricOvnSubsystemController) // Register the ovn-controller coverage/show metrics componentStopwatchShowMetricsMap[ovnController] = ovnControllerStopwatchShowMetricsMap - registerStopwatchShowMetrics(ovnController, MetricOvnNamespace, MetricOvnSubsystemController) + registerStopwatchShowMetrics(ovnController, types.MetricOvnNamespace, types.MetricOvnSubsystemController) // ovn-controller configuration metrics updater go ovnControllerConfigurationMetricsUpdater(ovsDBClient, diff --git a/go-controller/pkg/metrics/ovn_db.go b/go-controller/pkg/metrics/ovn_db.go index 8116a68732..ea206adc3c 100644 --- a/go-controller/pkg/metrics/ovn_db.go +++ b/go-controller/pkg/metrics/ovn_db.go @@ -14,12 +14,13 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) var metricOVNDBSessions = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "jsonrpc_server_sessions", Help: "Active number of JSON RPC Server sessions to the DB"}, []string{ @@ -28,8 +29,8 @@ var metricOVNDBSessions = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOVNDBMonitor = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "ovsdb_monitors", Help: "Number of OVSDB Monitors on the server"}, []string{ @@ -38,8 +39,8 @@ var metricOVNDBMonitor = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBSize = 
prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "db_size_bytes", Help: "The size of the database file associated with the OVN DB component."}, []string{ @@ -49,8 +50,8 @@ var metricDBSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{ // ClusterStatus metrics var metricDBClusterCID = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_id", Help: "A metric with a constant '1' value labeled by database name and cluster uuid"}, []string{ @@ -60,8 +61,8 @@ var metricDBClusterCID = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterSID = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_server_id", Help: "A metric with a constant '1' value labeled by database name, cluster uuid " + "and server uuid"}, @@ -73,8 +74,8 @@ var metricDBClusterSID = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterServerStatus = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_server_status", Help: "A metric with a constant '1' value labeled by database name, cluster uuid, server uuid " + "server status"}, @@ -87,8 +88,8 @@ var metricDBClusterServerStatus = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterServerRole = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: 
"cluster_server_role", Help: "A metric with a constant '1' value labeled by database name, cluster uuid, server uuid " + "and server role"}, @@ -101,8 +102,8 @@ var metricDBClusterServerRole = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterTerm = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_term", Help: "A metric that returns the current election term value labeled by database name, cluster uuid, and " + "server uuid"}, @@ -114,8 +115,8 @@ var metricDBClusterTerm = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterServerVote = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_server_vote", Help: "A metric with a constant '1' value labeled by database name, cluster uuid, server uuid " + "and server vote"}, @@ -128,8 +129,8 @@ var metricDBClusterServerVote = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterElectionTimer = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_election_timer", Help: "A metric that returns the current election timer value labeled by database name, cluster uuid, " + "and server uuid"}, @@ -141,8 +142,8 @@ var metricDBClusterElectionTimer = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterLogIndexStart = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_log_index_start", Help: "A metric that returns the log entry index start value labeled by database 
name, cluster uuid, " + "and server uuid"}, @@ -154,8 +155,8 @@ var metricDBClusterLogIndexStart = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterLogIndexNext = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_log_index_next", Help: "A metric that returns the log entry index next value labeled by database name, cluster uuid, " + "and server uuid"}, @@ -167,8 +168,8 @@ var metricDBClusterLogIndexNext = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterLogNotCommitted = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_log_not_committed", Help: "A metric that returns the number of log entries not committed labeled by database name, cluster uuid, " + "and server uuid"}, @@ -180,8 +181,8 @@ var metricDBClusterLogNotCommitted = prometheus.NewGaugeVec(prometheus.GaugeOpts ) var metricDBClusterLogNotApplied = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_log_not_applied", Help: "A metric that returns the number of log entries not applied labeled by database name, cluster uuid, " + "and server uuid"}, @@ -193,8 +194,8 @@ var metricDBClusterLogNotApplied = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterConnIn = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_inbound_connections_total", Help: "A metric that returns the total number of inbound connections to the server labeled by " + "database 
name, cluster uuid, and server uuid"}, @@ -206,8 +207,8 @@ var metricDBClusterConnIn = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterConnOut = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_outbound_connections_total", Help: "A metric that returns the total number of outbound connections from the server labeled by " + "database name, cluster uuid, and server uuid"}, @@ -219,8 +220,8 @@ var metricDBClusterConnOut = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterConnInErr = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_inbound_connections_error_total", Help: "A metric that returns the total number of failed inbound connections to the server labeled by " + " database name, cluster uuid, and server uuid"}, @@ -232,8 +233,8 @@ var metricDBClusterConnInErr = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterConnOutErr = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_outbound_connections_error_total", Help: "A metric that returns the total number of failed outbound connections from the server labeled by " + "database name, cluster uuid, and server uuid"}, @@ -382,8 +383,8 @@ func RegisterOvnDBMetrics(clientset kubernetes.Interface, k8sNodeName string, st ovnRegistry.MustRegister(metricOVNDBSessions) ovnRegistry.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "build_info", Help: "A 
metric with a constant '1' value labeled by ovsdb-server version and " + "NB and SB schema version", diff --git a/go-controller/pkg/metrics/ovn_northd.go b/go-controller/pkg/metrics/ovn_northd.go index ae2afe45c8..e72c89fdd3 100644 --- a/go-controller/pkg/metrics/ovn_northd.go +++ b/go-controller/pkg/metrics/ovn_northd.go @@ -11,6 +11,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -128,8 +129,8 @@ func RegisterOvnNorthdMetrics(clientset kubernetes.Interface, k8sNodeName string getOvnNorthdVersionInfo() ovnRegistry.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemNorthd, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemNorthd, Name: "build_info", Help: "A metric with a constant '1' value labeled by version and library " + "from which ovn binaries were built", @@ -142,8 +143,8 @@ func RegisterOvnNorthdMetrics(clientset kubernetes.Interface, k8sNodeName string )) ovnRegistry.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemNorthd, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemNorthd, Name: "status", Help: "Specifies whether this instance of ovn-northd is standby(0) or active(1) or paused(2).", }, func() float64 { @@ -169,8 +170,8 @@ func RegisterOvnNorthdMetrics(clientset kubernetes.Interface, k8sNodeName string )) ovnRegistry.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemNorthd, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemNorthd, Name: "nb_connection_status", Help: "Specifies nb-connection-status of ovn-northd, not connected(0) or connected(1).", }, func() float64 { @@ -179,8 +180,8 @@ func 
RegisterOvnNorthdMetrics(clientset kubernetes.Interface, k8sNodeName string )) ovnRegistry.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemNorthd, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemNorthd, Name: "sb_connection_status", Help: "Specifies sb-connection-status of ovn-northd, not connected(0) or connected(1).", }, func() float64 { @@ -190,11 +191,11 @@ func RegisterOvnNorthdMetrics(clientset kubernetes.Interface, k8sNodeName string // Register the ovn-northd coverage/show metrics with prometheus componentCoverageShowMetricsMap[ovnNorthd] = ovnNorthdCoverageShowMetricsMap - registerCoverageShowMetrics(ovnNorthd, MetricOvnNamespace, MetricOvnSubsystemNorthd) + registerCoverageShowMetrics(ovnNorthd, types.MetricOvnNamespace, types.MetricOvnSubsystemNorthd) go coverageShowMetricsUpdater(ovnNorthd, stopChan) // Register the ovn-northd stopwatch/show metrics with prometheus componentStopwatchShowMetricsMap[ovnNorthd] = ovnNorthdStopwatchShowMetricsMap - registerStopwatchShowMetrics(ovnNorthd, MetricOvnNamespace, MetricOvnSubsystemNorthd) + registerStopwatchShowMetrics(ovnNorthd, types.MetricOvnNamespace, types.MetricOvnSubsystemNorthd) go stopwatchShowMetricsUpdater(ovnNorthd, stopChan) } diff --git a/go-controller/pkg/metrics/ovnkube_controller.go b/go-controller/pkg/metrics/ovnkube_controller.go index b73a0ad8de..a4cb9fd693 100644 --- a/go-controller/pkg/metrics/ovnkube_controller.go +++ b/go-controller/pkg/metrics/ovnkube_controller.go @@ -3,8 +3,6 @@ package metrics import ( "errors" "fmt" - "hash/fnv" - "math" "runtime" "strconv" "sync" @@ -21,14 +19,13 @@ import ( "github.com/ovn-org/libovsdb/cache" libovsdbclient "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" libovsdbops 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -37,16 +34,16 @@ import ( // read from SB DB. This is registered within func RunTimestamp in order to allow gathering this // metric on the fly when metrics are scraped. var metricNbE2eTimestamp = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "nb_e2e_timestamp", Help: "The current e2e-timestamp value as written to the northbound database"}, ) // metricDbTimestamp is the UNIX timestamp seen in NB and SB DBs. var metricDbTimestamp = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "e2e_timestamp", Help: "The current e2e-timestamp value as observed in this instance of the database"}, []string{ @@ -57,8 +54,8 @@ var metricDbTimestamp = prometheus.NewGaugeVec(prometheus.GaugeOpts{ // metricPodCreationLatency is the time between a pod being scheduled and // completing its logical switch port configuration. 
var metricPodCreationLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "pod_creation_latency_seconds", Help: "The duration between a pod being scheduled and completing its logical switch port configuration", Buckets: prometheus.ExponentialBuckets(.1, 2, 15), @@ -66,8 +63,8 @@ var metricPodCreationLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ // MetricResourceUpdateCount is the number of times a particular resource's UpdateFunc has been called. var MetricResourceUpdateCount = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "resource_update_total", Help: "The number of times a given resource event (add, update, or delete) has been handled"}, []string{ @@ -79,8 +76,8 @@ var MetricResourceUpdateCount = prometheus.NewCounterVec(prometheus.CounterOpts{ // MetricResourceAddLatency is the time taken to complete resource update by an handler. // This measures the latency for all of the handlers for a given resource. var MetricResourceAddLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "resource_add_latency_seconds", Help: "The duration to process all handlers for a given resource event - add.", Buckets: prometheus.ExponentialBuckets(.1, 2, 15)}, @@ -89,8 +86,8 @@ var MetricResourceAddLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ // MetricResourceUpdateLatency is the time taken to complete resource update by an handler. 
// This measures the latency for all of the handlers for a given resource. var MetricResourceUpdateLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "resource_update_latency_seconds", Help: "The duration to process all handlers for a given resource event - update.", Buckets: prometheus.ExponentialBuckets(.1, 2, 15)}, @@ -99,8 +96,8 @@ var MetricResourceUpdateLatency = prometheus.NewHistogram(prometheus.HistogramOp // MetricResourceDeleteLatency is the time taken to complete resource update by an handler. // This measures the latency for all of the handlers for a given resource. var MetricResourceDeleteLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "resource_delete_latency_seconds", Help: "The duration to process all handlers for a given resource event - delete.", Buckets: prometheus.ExponentialBuckets(.1, 2, 15)}, @@ -108,32 +105,32 @@ var MetricResourceDeleteLatency = prometheus.NewHistogram(prometheus.HistogramOp // MetricRequeueServiceCount is the number of times a particular service has been requeued. var MetricRequeueServiceCount = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "requeue_service_total", Help: "A metric that captures the number of times a service is requeued after failing to sync with OVN"}, ) // MetricSyncServiceCount is the number of times a particular service has been synced. 
var MetricSyncServiceCount = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "sync_service_total", Help: "A metric that captures the number of times a service is synced with OVN load balancers"}, ) // MetricSyncServiceLatency is the time taken to sync a service with the OVN load balancers. var MetricSyncServiceLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "sync_service_latency_seconds", Help: "The latency of syncing a service with the OVN load balancers", Buckets: prometheus.ExponentialBuckets(.1, 2, 15)}, ) var MetricOVNKubeControllerReadyDuration = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "ready_duration_seconds", Help: "The duration for the ovnkube-controller to get to ready state", }) @@ -141,8 +138,8 @@ var MetricOVNKubeControllerReadyDuration = prometheus.NewGauge(prometheus.GaugeO // MetricOVNKubeControllerSyncDuration is the time taken to complete initial Watch for different resource. // Resource name is in the label. 
var MetricOVNKubeControllerSyncDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "sync_duration_seconds", Help: "The duration to sync and setup all handlers for a given resource"}, []string{ @@ -151,15 +148,15 @@ var MetricOVNKubeControllerSyncDuration = prometheus.NewGaugeVec(prometheus.Gaug // MetricOVNKubeControllerLeader identifies whether this instance of ovnkube-controller is a leader or not var MetricOVNKubeControllerLeader = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "leader", Help: "Identifies whether the instance of ovnkube-controller is a leader(1) or not(0).", }) var metricOvnKubeControllerLogFileSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "logfile_size_bytes", Help: "The size of ovnkube-controller log file."}, []string{ @@ -168,24 +165,24 @@ var metricOvnKubeControllerLogFileSize = prometheus.NewGaugeVec(prometheus.Gauge ) var metricEgressIPAssignLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "egress_ips_assign_latency_seconds", Help: "The latency of egress IP assignment to ovn nb database", Buckets: prometheus.ExponentialBuckets(.001, 2, 15), }) var metricEgressIPUnassignLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: 
MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "egress_ips_unassign_latency_seconds", Help: "The latency of egress IP unassignment from ovn nb database", Buckets: prometheus.ExponentialBuckets(.001, 2, 15), }) var metricNetpolEventLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "network_policy_event_latency_seconds", Help: "The latency of full network policy event handling (create, delete)", Buckets: prometheus.ExponentialBuckets(.004, 2, 15)}, @@ -194,8 +191,8 @@ var metricNetpolEventLatency = prometheus.NewHistogramVec(prometheus.HistogramOp }) var metricNetpolLocalPodEventLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "network_policy_local_pod_event_latency_seconds", Help: "The latency of local pod events handling (add, delete)", Buckets: prometheus.ExponentialBuckets(.002, 2, 15)}, @@ -204,8 +201,8 @@ var metricNetpolLocalPodEventLatency = prometheus.NewHistogramVec(prometheus.His }) var metricNetpolPeerNamespaceEventLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "network_policy_peer_namespace_event_latency_seconds", Help: "The latency of peer namespace events handling (add, delete)", Buckets: prometheus.ExponentialBuckets(.002, 2, 15)}, @@ -214,8 +211,8 @@ var metricNetpolPeerNamespaceEventLatency = prometheus.NewHistogramVec(prometheu }) var 
metricPodSelectorAddrSetPodEventLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "pod_selector_address_set_pod_event_latency_seconds", Help: "The latency of peer pod events handling (add, delete)", Buckets: prometheus.ExponentialBuckets(.002, 2, 15)}, @@ -224,8 +221,8 @@ var metricPodSelectorAddrSetPodEventLatency = prometheus.NewHistogramVec(prometh }) var metricPodSelectorAddrSetNamespaceEventLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "pod_selector_address_set_namespace_event_latency_seconds", Help: "The latency of peer namespace events handling (add, delete)", Buckets: prometheus.ExponentialBuckets(.002, 2, 15)}, @@ -234,8 +231,8 @@ var metricPodSelectorAddrSetNamespaceEventLatency = prometheus.NewHistogramVec(p }) var metricPodEventLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "pod_event_latency_seconds", Help: "The latency of pod events handling (add, update, delete)", Buckets: prometheus.ExponentialBuckets(.002, 2, 15)}, @@ -244,51 +241,51 @@ var metricPodEventLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ }) var metricEgressFirewallRuleCount = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "num_egress_firewall_rules", Help: "The number of egress firewall rules 
defined"}, ) var metricIPsecEnabled = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "ipsec_enabled", Help: "Specifies whether IPSec is enabled for this cluster(1) or not enabled for this cluster(0)", }) var metricEgressRoutingViaHost = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "egress_routing_via_host", Help: "Specifies whether egress gateway mode is via host networking stack(1) or not(0)", }) var metricEgressFirewallCount = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "num_egress_firewalls", Help: "The number of egress firewall policies", }) /** AdminNetworkPolicyMetrics Begin**/ var metricANPCount = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "admin_network_policies", Help: "The total number of admin network policies in the cluster", }) var metricBANPCount = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "baseline_admin_network_policies", Help: "The total number of baseline admin network policies in the cluster", }) var metricANPDBObjects = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: 
MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "admin_network_policies_db_objects", Help: "The total number of OVN NBDB objects (table_name) owned by AdminNetworkPolicy controller in the cluster"}, []string{ @@ -297,8 +294,8 @@ var metricANPDBObjects = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricBANPDBObjects = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "baseline_admin_network_policies_db_objects", Help: "The total number of OVN NBDB objects (table_name) owned by BaselineAdminNetworkPolicy controller in the cluster"}, []string{ @@ -310,64 +307,37 @@ var metricBANPDBObjects = prometheus.NewGaugeVec(prometheus.GaugeOpts{ // metricFirstSeenLSPLatency is the time between a pod first seen in OVN-Kubernetes and its Logical Switch Port is created var metricFirstSeenLSPLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "pod_first_seen_lsp_created_duration_seconds", Help: "The duration between a pod first observed in OVN-Kubernetes and Logical Switch Port created", Buckets: prometheus.ExponentialBuckets(.01, 2, 15), }) var metricLSPPortBindingLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "pod_lsp_created_port_binding_duration_seconds", Help: "The duration between a pods Logical Switch Port created and port binding observed in cache", Buckets: prometheus.ExponentialBuckets(.01, 2, 15), }) var 
metricPortBindingChassisLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "pod_port_binding_port_binding_chassis_duration_seconds", Help: "The duration between a pods port binding observed and port binding chassis update observed in cache", Buckets: prometheus.ExponentialBuckets(.01, 2, 15), }) var metricPortBindingUpLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "pod_port_binding_chassis_port_binding_up_duration_seconds", Help: "The duration between a pods port binding chassis update and port binding up observed in cache", Buckets: prometheus.ExponentialBuckets(.01, 2, 15), }) -var metricNetworkProgramming prometheus.ObserverVec = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, - Name: "network_programming_duration_seconds", - Help: "The duration to apply network configuration for a kind (e.g. pod, service, networkpolicy). " + - "Configuration includes add, update and delete events for each kind.", - Buckets: merge( - prometheus.LinearBuckets(0.25, 0.25, 2), // 0.25s, 0.50s - prometheus.LinearBuckets(1, 1, 59), // 1s, 2s, 3s, ... 59s - prometheus.LinearBuckets(60, 5, 12), // 60s, 65s, 70s, ... 
115s - prometheus.LinearBuckets(120, 30, 11))}, // 2min, 2.5min, 3min, ..., 7min - []string{ - "kind", - }) - -var metricNetworkProgrammingOVN = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, - Name: "network_programming_ovn_duration_seconds", - Help: "The duration for OVN to apply network configuration", - Buckets: merge( - prometheus.LinearBuckets(0.25, 0.25, 2), // 0.25s, 0.50s - prometheus.LinearBuckets(1, 1, 59), // 1s, 2s, 3s, ... 59s - prometheus.LinearBuckets(60, 5, 12), // 60s, 65s, 70s, ... 115s - prometheus.LinearBuckets(120, 30, 11))}, // 2min, 2.5min, 3min, ..., 7min -) - const ( globalOptionsTimestampField = "e2e_timestamp" globalOptionsProbeIntervalField = "northd_probe_interval" @@ -381,8 +351,8 @@ func RegisterOVNKubeControllerBase() { prometheus.MustRegister(MetricOVNKubeControllerSyncDuration) prometheus.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "build_info", Help: "A metric with a constant '1' value labeled by version, revision, branch, " + "and go version from which ovnkube was built and when and who built it", @@ -410,11 +380,11 @@ func RegisterOVNKubeControllerPerformance(nbClient libovsdbclient.Client) { prometheus.MustRegister(MetricRequeueServiceCount) prometheus.MustRegister(MetricSyncServiceCount) prometheus.MustRegister(MetricSyncServiceLatency) - registerWorkqueueMetrics(MetricOvnkubeNamespace, MetricOvnkubeSubsystemController) + registerWorkqueueMetrics(types.MetricOvnkubeNamespace, types.MetricOvnkubeSubsystemController) prometheus.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemNorthd, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemNorthd, 
Name: "northd_probe_interval", Help: "The maximum number of milliseconds of idle time on connection to the OVN SB " + "and NB DB before sending an inactivity probe message", @@ -505,8 +475,8 @@ func RunTimestamp(stopChan <-chan struct{}, sbClient, nbClient libovsdbclient.Cl // cache when metrics HTTP endpoint is scraped. prometheus.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "sb_e2e_timestamp", Help: "The current e2e-timestamp value as observed in the southbound database", }, func() float64 { @@ -916,521 +886,6 @@ func getPodUIDFromPortBinding(row *sbdb.PortBinding) kapimtypes.UID { return kapimtypes.UID(podUID) } -const ( - updateOVNMeasurementChSize = 500 - deleteOVNMeasurementChSize = 50 - processChSize = 1000 - nbGlobalTable = "NB_Global" - //fixme: remove when bug is fixed in OVN (Red Hat bugzilla bug number 2074019). Also, handle overflow event. - maxNbCfg = math.MaxUint32 - 1000 - maxMeasurementLifetime = 20 * time.Minute -) - -type ovnMeasurement struct { - // time just before ovsdb tx is called - startTimestamp time.Time - // time when the nbCfg value and its associated configuration is applied to all nodes - endTimestamp time.Time - // OVN measurement complete - start and end timestamps are valid - complete bool - // nb_cfg value that started the measurement - nbCfg int -} - -// measurement stores a measurement attempt through OVN-Kubernetes controller and optionally OVN -type measurement struct { - // kubernetes kind e.g. pod or service - kind string - // time when Add is executed - startTimestamp time.Time - // time when End is executed - endTimestamp time.Time - // if true endTimestamp is valid - end bool - // time when this measurement expires. 
Set during Add - expiresAt time.Time - // OVN measurement(s) via AddOVN - ovnMeasurements []ovnMeasurement -} - -// hvCfgUpdate holds the information received from OVN Northbound event handler -type hvCfgUpdate struct { - // timestamp is in milliseconds - timestamp int - hvCfg int -} - -type ConfigDurationRecorder struct { - // rate at which measurements are allowed. Probabilistically, 1 in every measurementRate - measurementRate uint64 - measurements map[string]measurement - // controls RW access to measurements map - measurementsMu sync.RWMutex - // channel to trigger processing a measurement following call to End func. Channel string is kind/namespace/name - triggerProcessCh chan string - enabled bool -} - -// global variable is needed because this functionality is accessed in many functions -var cdr *ConfigDurationRecorder - -// lock for accessing the cdr global variable -var cdrMutex sync.Mutex - -func GetConfigDurationRecorder() *ConfigDurationRecorder { - cdrMutex.Lock() - defer cdrMutex.Unlock() - if cdr == nil { - cdr = &ConfigDurationRecorder{} - } - return cdr -} - -var configDurationRegOnce sync.Once - -// Run monitors the config duration for OVN-Kube master to configure k8 kinds. A measurement maybe allowed and this is -// related to the number of k8 nodes, N [1] and by argument k [2] where there is a probability that 1 out of N*k -// measurement attempts are allowed. If k=0, all measurements are allowed. mUpdatePeriod determines the period to -// process and publish metrics -// [1] 1 0. The measurement rate is proportional to - // the number of nodes, N and argument k. 1 out of every N*k attempted measurements will succeed. - - // For the optional OVN measurement by calling AddOVN, when the CMS is about to make a transaction to configure - // whatever kind, a call to AddOVN function allows the caller to measure OVN duration. 
- // An ovsdb operation is returned to the caller of AddOVN, which they can bundle with their existing transactions - // sent to OVN which will tell OVN to measure how long it takes to configure all nodes with the config in the transaction. - // Config duration then waits for OVN to configure all nodes and calculates the time delta. - - // ** configuration duration recorder - caveats ** - // For the optional OVN recording, it does not give you an exact time duration for how long it takes to configure your - // k8 kind. When you are recording how long it takes OVN to complete your configuration to all nodes, other - // transactions may have occurred which may increases the overall time. You may also get longer processing times if one - // or more nodes are unavailable because we are measuring how long the functionality takes to apply to ALL nodes. - - // ** configuration duration recorder - How the duration of the config is measured within OVN ** - // We increment the nb_cfg integer value in the NB_Global table. - // ovn-northd notices the nb_cfg change and copies the nb_cfg value to SB_Global table field nb_cfg along with any - // other configuration that is changed in OVN Northbound database. - // All ovn-controllers detect nb_cfg value change and generate a 'barrier' on the openflow connection to the - // nodes ovs-vswitchd. Once ovn-controllers receive the 'barrier processed' reply from ovs-vswitchd which - // indicates that all relevant openflow operations associated with NB_Globals nb_cfg value have been - // propagated to the nodes OVS, it copies the SB_Global nb_cfg value to its Chassis_Private table nb_cfg record. - // ovn-northd detects changes to the Chassis_Private startRecords and computes the minimum nb_cfg for all Chassis_Private - // nb_cfg and stores this in NB_Global hv_cfg field along with a timestamp to field hv_cfg_timestamp which - // reflects the time when the slowest chassis catches up with the northbound configuration. 
- configDurationRegOnce.Do(func() { - prometheus.MustRegister(metricNetworkProgramming) - prometheus.MustRegister(metricNetworkProgrammingOVN) - }) - - cr.measurements = make(map[string]measurement) - // watch node count and adjust measurement rate if node count changes - cr.runMeasurementRateAdjuster(kube, k, time.Hour, stop) - // we currently do not clean the following channels up upon exit - cr.triggerProcessCh = make(chan string, processChSize) - updateOVNMeasurementCh := make(chan hvCfgUpdate, updateOVNMeasurementChSize) - deleteOVNMeasurementCh := make(chan int, deleteOVNMeasurementChSize) - go cr.processMeasurements(workerLoopPeriod, updateOVNMeasurementCh, deleteOVNMeasurementCh, stop) - - nbClient.Cache().AddEventHandler(&cache.EventHandlerFuncs{ - UpdateFunc: func(table string, old model.Model, new model.Model) { - if table != nbGlobalTable { - return - } - oldRow := old.(*nbdb.NBGlobal) - newRow := new.(*nbdb.NBGlobal) - - if oldRow.HvCfg != newRow.HvCfg && oldRow.HvCfgTimestamp != newRow.HvCfgTimestamp && newRow.HvCfgTimestamp > 0 { - select { - case updateOVNMeasurementCh <- hvCfgUpdate{hvCfg: newRow.HvCfg, timestamp: newRow.HvCfgTimestamp}: - default: - klog.Warning("Config duration recorder: unable to update OVN measurement") - select { - case deleteOVNMeasurementCh <- newRow.HvCfg: - default: - } - } - } - }, - }) - cr.enabled = true -} - -// Start allows the caller to attempt measurement of a control plane configuration duration, as a metric, -// the duration between functions Start and End. Optionally, if you wish to record OVN config duration, -// call AddOVN which will add the duration for OVN to apply the configuration to all nodes. -// The caller must pass kind,namespace,name which will be used to determine if the object -// is allowed to record. To allow no locking, each go routine that calls this function, can determine itself -// if it is allowed to measure. -// There is a mandatory two-step process to complete a measurement. 
-// Step 1) Call Start when you wish to begin a measurement - ideally when processing for the object starts -// Step 2) Call End which will complete a measurement -// Optionally, call AddOVN when you are making a transaction to OVN in order to add on the OVN duration to an existing -// measurement. This must be called between Start and End. Not every call to Start will result in a measurement -// and the rate of measurements depends on the number of nodes and function Run arg k. -// Only one measurement for a kind/namespace/name is allowed until the current measurement is Ended (via End) and -// processed. This is guaranteed by workqueues (even with multiple workers) and informer event handlers. -func (cr *ConfigDurationRecorder) Start(kind, namespace, name string) (time.Time, bool) { - if !cr.enabled { - return time.Time{}, false - } - kindNamespaceName := fmt.Sprintf("%s/%s/%s", kind, namespace, name) - if !cr.allowedToMeasure(kindNamespaceName) { - return time.Time{}, false - } - measurementTimestamp := time.Now() - cr.measurementsMu.Lock() - _, found := cr.measurements[kindNamespaceName] - // we only record for measurements that aren't in-progress - if !found { - cr.measurements[kindNamespaceName] = measurement{kind: kind, startTimestamp: measurementTimestamp, - expiresAt: measurementTimestamp.Add(maxMeasurementLifetime)} - } - cr.measurementsMu.Unlock() - return measurementTimestamp, !found -} - -// allowedToMeasure determines if we are allowed to measure or not. To avoid the cost of synchronisation by using locks, -// we use probability. For a value of kindNamespaceName that returns true, it will always return true. 
-func (cr *ConfigDurationRecorder) allowedToMeasure(kindNamespaceName string) bool { - if cr.measurementRate == 0 { - return true - } - // 1 in measurementRate chance of true - if hashToNumber(kindNamespaceName)%cr.measurementRate == 0 { - return true - } - return false -} - -func (cr *ConfigDurationRecorder) End(kind, namespace, name string) time.Time { - if !cr.enabled { - return time.Time{} - } - kindNamespaceName := fmt.Sprintf("%s/%s/%s", kind, namespace, name) - if !cr.allowedToMeasure(kindNamespaceName) { - return time.Time{} - } - measurementTimestamp := time.Now() - cr.measurementsMu.Lock() - if m, ok := cr.measurements[kindNamespaceName]; ok { - if !m.end { - m.end = true - m.endTimestamp = measurementTimestamp - cr.measurements[kindNamespaceName] = m - // if there are no OVN measurements, trigger immediate processing - if len(m.ovnMeasurements) == 0 { - select { - case cr.triggerProcessCh <- kindNamespaceName: - default: - // doesn't matter if channel is full because the measurement will be processed later anyway - } - } - } - } else { - // This can happen if Start was rejected for a resource because a measurement was in-progress for this - // kind/namespace/name, but during execution of this resource, the measurement was completed and now no record - // is found. - measurementTimestamp = time.Time{} - } - cr.measurementsMu.Unlock() - return measurementTimestamp -} - -// AddOVN adds OVN config duration to an existing recording - previously started by calling function Start -// It will return ovsdb operations which a user can add to existing operations they wish to track. -// Upon successful transaction of the operations to the ovsdb server, the user of this function must call a call-back -// function to lock-in the request to measure and report. Failure to call the call-back function, will result in no OVN -// measurement and no metrics are reported. AddOVN will result in a no-op if Start isn't called previously for the same -// kind/namespace/name. 
-// If multiple AddOVN is called between Start and End for the same kind/namespace/name, then the -// OVN durations will be summed and added to the total. There is an assumption that processing of kind/namespace/name is -// sequential -func (cr *ConfigDurationRecorder) AddOVN(nbClient libovsdbclient.Client, kind, namespace, name string) ( - []ovsdb.Operation, func(), time.Time, error) { - if !cr.enabled { - return []ovsdb.Operation{}, func() {}, time.Time{}, nil - } - kindNamespaceName := fmt.Sprintf("%s/%s/%s", kind, namespace, name) - if !cr.allowedToMeasure(kindNamespaceName) { - return []ovsdb.Operation{}, func() {}, time.Time{}, nil - } - cr.measurementsMu.RLock() - m, ok := cr.measurements[kindNamespaceName] - cr.measurementsMu.RUnlock() - if !ok { - // no measurement found, therefore no-op - return []ovsdb.Operation{}, func() {}, time.Time{}, nil - } - if m.end { - // existing measurement in-progress and not processed yet, therefore no-op - return []ovsdb.Operation{}, func() {}, time.Time{}, nil - } - nbGlobal := &nbdb.NBGlobal{} - nbGlobal, err := libovsdbops.GetNBGlobal(nbClient, nbGlobal) - if err != nil { - return []ovsdb.Operation{}, func() {}, time.Time{}, fmt.Errorf("failed to find OVN Northbound NB_Global table"+ - " entry: %v", err) - } - if nbGlobal.NbCfg < 0 { - return []ovsdb.Operation{}, func() {}, time.Time{}, fmt.Errorf("nb_cfg is negative, failed to add OVN measurement") - } - //stop recording if we are close to overflow - if nbGlobal.NbCfg > maxNbCfg { - return []ovsdb.Operation{}, func() {}, time.Time{}, fmt.Errorf("unable to measure OVN due to nb_cfg being close to overflow") - } - ops, err := nbClient.Where(nbGlobal).Mutate(nbGlobal, model.Mutation{ - Field: &nbGlobal.NbCfg, - Mutator: ovsdb.MutateOperationAdd, - Value: 1, - }) - if err != nil { - return []ovsdb.Operation{}, func() {}, time.Time{}, fmt.Errorf("failed to create update operation: %v", err) - } - ovnStartTimestamp := time.Now() - - return ops, func() { - // there can be a 
race condition here where we queue the wrong nbCfg value, but it is ok as long as it is - // less than or equal the hv_cfg value we see and this is the case because of atomic increments for nb_cfg - cr.measurementsMu.Lock() - m, ok = cr.measurements[kindNamespaceName] - if !ok { - klog.Errorf("Config duration recorder: expected a measurement entry. Call Start before AddOVN"+ - " for %s", kindNamespaceName) - cr.measurementsMu.Unlock() - return - } - m.ovnMeasurements = append(m.ovnMeasurements, ovnMeasurement{startTimestamp: ovnStartTimestamp, - nbCfg: nbGlobal.NbCfg + 1}) - cr.measurements[kindNamespaceName] = m - cr.measurementsMu.Unlock() - }, ovnStartTimestamp, nil -} - -// runMeasurementRateAdjuster will adjust the rate of measurements based on the number of nodes in the cluster and arg k -func (cr *ConfigDurationRecorder) runMeasurementRateAdjuster(kube kube.Interface, k float64, nodeCheckPeriod time.Duration, - stop <-chan struct{}) { - var currentMeasurementRate, newMeasurementRate uint64 - - updateMeasurementRate := func() { - if nodeCount, err := getNodeCount(kube); err != nil { - klog.Errorf("Config duration recorder: failed to update ticker duration considering node count: %v", err) - } else { - newMeasurementRate = uint64(math.Round(k * float64(nodeCount))) - if newMeasurementRate != currentMeasurementRate { - if newMeasurementRate > 0 { - currentMeasurementRate = newMeasurementRate - cr.measurementRate = newMeasurementRate - } - klog.V(5).Infof("Config duration recorder: updated measurement rate to approx 1 in"+ - " every %d requests", newMeasurementRate) - } - } - } - - // initial measurement rate adjustment - updateMeasurementRate() - - go func() { - nodeCheckTicker := time.NewTicker(nodeCheckPeriod) - for { - select { - case <-nodeCheckTicker.C: - updateMeasurementRate() - case <-stop: - nodeCheckTicker.Stop() - return - } - } - }() -} - -// processMeasurements manages the measurements map. 
It calculates metrics and cleans up finished or stale measurements -func (cr *ConfigDurationRecorder) processMeasurements(period time.Duration, updateOVNMeasurementCh chan hvCfgUpdate, - deleteOVNMeasurementCh chan int, stop <-chan struct{}) { - ticker := time.NewTicker(period) - var ovnKDelta, ovnDelta float64 - - for { - select { - case <-stop: - ticker.Stop() - return - // remove measurements if channel updateOVNMeasurementCh overflows, therefore we cannot trust existing measurements - case hvCfg := <-deleteOVNMeasurementCh: - cr.measurementsMu.Lock() - removeOVNMeasurements(cr.measurements, hvCfg) - cr.measurementsMu.Unlock() - case h := <-updateOVNMeasurementCh: - cr.measurementsMu.Lock() - cr.addHvCfg(h.hvCfg, h.timestamp) - cr.measurementsMu.Unlock() - // used for processing measurements that didn't require OVN measurement. Helps to keep measurement map small - case kindNamespaceName := <-cr.triggerProcessCh: - cr.measurementsMu.Lock() - m, ok := cr.measurements[kindNamespaceName] - if !ok { - klog.Errorf("Config duration recorder: expected measurement, but not found") - cr.measurementsMu.Unlock() - continue - } - if !m.end { - cr.measurementsMu.Unlock() - continue - } - if len(m.ovnMeasurements) != 0 { - cr.measurementsMu.Unlock() - continue - } - ovnKDelta = m.endTimestamp.Sub(m.startTimestamp).Seconds() - metricNetworkProgramming.With(prometheus.Labels{"kind": m.kind}).Observe(ovnKDelta) - klog.V(5).Infof("Config duration recorder: kind/namespace/name %s. OVN-Kubernetes controller took %v"+ - " seconds. No OVN measurement.", kindNamespaceName, ovnKDelta) - delete(cr.measurements, kindNamespaceName) - cr.measurementsMu.Unlock() - // used for processing measurements that require OVN measurement or do not or are expired. 
- case <-ticker.C: - start := time.Now() - cr.measurementsMu.Lock() - // process and clean up measurements - for kindNamespaceName, m := range cr.measurements { - if start.After(m.expiresAt) { - // measurement may expire if OVN is degraded or End wasn't called - klog.Warningf("Config duration recorder: measurement expired for %s", kindNamespaceName) - delete(cr.measurements, kindNamespaceName) - continue - } - if !m.end { - // measurement didn't end yet, process later - continue - } - // for when no ovn measurements requested - if len(m.ovnMeasurements) == 0 { - ovnKDelta = m.endTimestamp.Sub(m.startTimestamp).Seconds() - metricNetworkProgramming.With(prometheus.Labels{"kind": m.kind}).Observe(ovnKDelta) - klog.V(5).Infof("Config duration recorder: kind/namespace/name %s. OVN-Kubernetes controller"+ - " took %v seconds. No OVN measurement.", kindNamespaceName, ovnKDelta) - delete(cr.measurements, kindNamespaceName) - continue - } - // for each kind/namespace/name, there can be multiple calls to AddOVN between start and end - // we sum all the OVN durations and add it to the start and end duration - // first lets make sure all OVN measurements are finished - if complete := allOVNMeasurementsComplete(m.ovnMeasurements); !complete { - continue - } - - ovnKDelta = m.endTimestamp.Sub(m.startTimestamp).Seconds() - ovnDelta = calculateOVNDuration(m.ovnMeasurements) - metricNetworkProgramming.With(prometheus.Labels{"kind": m.kind}).Observe(ovnKDelta + ovnDelta) - metricNetworkProgrammingOVN.Observe(ovnDelta) - klog.V(5).Infof("Config duration recorder: kind/namespace/name %s. OVN-Kubernetes controller took"+ - " %v seconds. OVN took %v seconds. 
Total took %v seconds", kindNamespaceName, ovnKDelta, - ovnDelta, ovnDelta+ovnKDelta) - delete(cr.measurements, kindNamespaceName) - } - cr.measurementsMu.Unlock() - } - } -} - -func (cr *ConfigDurationRecorder) addHvCfg(hvCfg, hvCfgTimestamp int) { - var altered bool - for i, m := range cr.measurements { - altered = false - for iOvnM, ovnM := range m.ovnMeasurements { - if ovnM.complete { - continue - } - if ovnM.nbCfg <= hvCfg { - ovnM.endTimestamp = time.UnixMilli(int64(hvCfgTimestamp)) - ovnM.complete = true - m.ovnMeasurements[iOvnM] = ovnM - altered = true - } - } - if altered { - cr.measurements[i] = m - } - } -} - -// removeOVNMeasurements remove any OVN measurements less than or equal argument hvCfg -func removeOVNMeasurements(measurements map[string]measurement, hvCfg int) { - for kindNamespaceName, m := range measurements { - var indexToDelete []int - for i, ovnM := range m.ovnMeasurements { - if ovnM.nbCfg <= hvCfg { - indexToDelete = append(indexToDelete, i) - } - } - if len(indexToDelete) == 0 { - continue - } - if len(indexToDelete) == len(m.ovnMeasurements) { - delete(measurements, kindNamespaceName) - } - for _, iDel := range indexToDelete { - m.ovnMeasurements = removeOVNMeasurement(m.ovnMeasurements, iDel) - } - measurements[kindNamespaceName] = m - } -} - -func removeOVNMeasurement(oM []ovnMeasurement, i int) []ovnMeasurement { - oM[i] = oM[len(oM)-1] - return oM[:len(oM)-1] -} -func hashToNumber(s string) uint64 { - h := fnv.New64() - h.Write([]byte(s)) - return h.Sum64() -} - -func calculateOVNDuration(ovnMeasurements []ovnMeasurement) float64 { - var totalDuration float64 - for _, oM := range ovnMeasurements { - if !oM.complete { - continue - } - totalDuration += oM.endTimestamp.Sub(oM.startTimestamp).Seconds() - } - return totalDuration -} - -func allOVNMeasurementsComplete(ovnMeasurements []ovnMeasurement) bool { - for _, oM := range ovnMeasurements { - if !oM.complete { - return false - } - } - return true -} - -// merge direct copy from 
k8 pkg/proxy/metrics/metrics.go -func merge(slices ...[]float64) []float64 { - result := make([]float64, 1) - for _, s := range slices { - result = append(result, s...) - } - return result -} - -func getNodeCount(kube kube.Interface) (int, error) { - nodes, err := kube.GetNodes() - if err != nil { - return 0, fmt.Errorf("unable to retrieve node list: %v", err) - } - return len(nodes), nil -} - // setNbE2eTimestamp return true if setting timestamp to NB global options is successful func setNbE2eTimestamp(ovnNBClient libovsdbclient.Client, timestamp int64) bool { // assumption that only first row is relevant in NB_Global table diff --git a/go-controller/pkg/metrics/ovs.go b/go-controller/pkg/metrics/ovs.go index b2cc1403a0..455142ae6b 100644 --- a/go-controller/pkg/metrics/ovs.go +++ b/go-controller/pkg/metrics/ovs.go @@ -18,6 +18,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" ovsops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/ovs" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -27,15 +28,15 @@ var ( // ovs datapath Metrics var metricOvsDpTotal = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "dp_total", Help: "Represents total number of datapaths on the system.", }) var metricOvsDp = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "dp", Help: "A metric with a constant '1' value labeled by datapath " + "name present on the instance."}, @@ -46,8 +47,8 @@ var metricOvsDp = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOvsDpIfTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - 
Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "dp_if_total", Help: "Represents the number of ports connected to the datapath."}, []string{ @@ -56,8 +57,8 @@ var metricOvsDpIfTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOvsDpFlowsTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "dp_flows_total", Help: "Represents the number of flows in datapath."}, []string{ @@ -66,8 +67,8 @@ var metricOvsDpFlowsTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOvsDpFlowsLookupHit = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "dp_flows_lookup_hit", Help: "Represents number of packets matching the existing flows " + "while processing incoming packets in the datapath."}, @@ -77,8 +78,8 @@ var metricOvsDpFlowsLookupHit = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOvsDpFlowsLookupMissed = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "dp_flows_lookup_missed", Help: "Represents the number of packets not matching any existing " + "flow and require user space processing."}, @@ -88,8 +89,8 @@ var metricOvsDpFlowsLookupMissed = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOvsDpFlowsLookupLost = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "dp_flows_lookup_lost", Help: "number of packets 
destined for user space process but " + "subsequently dropped before reaching userspace."}, @@ -99,8 +100,8 @@ var metricOvsDpFlowsLookupLost = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOvsDpPacketsTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "dp_packets_total", Help: "Represents the total number of packets datapath processed " + "which is the sum of hit and missed."}, @@ -110,8 +111,8 @@ var metricOvsDpPacketsTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOvsdpMasksHit = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "dp_masks_hit", Help: "Represents the total number of masks visited for matching incoming packets.", }, @@ -121,8 +122,8 @@ var metricOvsdpMasksHit = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOvsDpMasksTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "dp_masks_total", Help: "Represents the number of masks in a datapath."}, []string{ @@ -131,8 +132,8 @@ var metricOvsDpMasksTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOvsDpMasksHitRatio = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "dp_masks_hit_ratio", Help: "Represents the average number of masks visited per packet " + "the ratio between hit and total number of packets processed by the datapath."}, @@ -143,16 +144,16 @@ var metricOvsDpMasksHitRatio = 
prometheus.NewGaugeVec(prometheus.GaugeOpts{ // ovs bridge statistics & attributes metrics var metricOvsBridgeTotal = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "bridge_total", Help: "Represents total number of OVS bridges on the system.", }, ) var metricOvsBridge = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "bridge", Help: "A metric with a constant '1' value labeled by bridge name " + "present on the instance."}, @@ -162,8 +163,8 @@ var metricOvsBridge = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOvsBridgePortsTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "bridge_ports_total", Help: "Represents the number of OVS ports on the bridge."}, []string{ @@ -172,8 +173,8 @@ var metricOvsBridgePortsTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOvsBridgeFlowsTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "bridge_flows_total", Help: "Represents the number of OpenFlow flows on the OVS bridge."}, []string{ @@ -183,57 +184,57 @@ var metricOvsBridgeFlowsTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ // ovs interface metrics var metricOvsInterfaceResetsTotal = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: 
"interface_resets_total", Help: "The number of link state changes observed by Open vSwitch interface(s).", }) var metricOvsInterfaceRxDroppedTotal = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "interface_rx_dropped_total", Help: "The total number of received packets dropped by Open vSwitch interface(s).", }) var metricOvsInterfaceTxDroppedTotal = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "interface_tx_dropped_total", Help: "The total number of transmitted packets dropped by Open vSwitch interface(s).", }) var metricOvsInterfaceRxErrorsTotal = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "interface_rx_errors_total", Help: "The total number of received packets with errors by Open vSwitch interface(s).", }) var metricOvsInterfaceTxErrorsTotal = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "interface_tx_errors_total", Help: "The total number of transmitted packets with errors by Open vSwitch interface(s).", }) var metricOvsInterfaceCollisionsTotal = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "interface_collisions_total", Help: "The total number of packet collisions transmitted by Open vSwitch interface(s).", }) var metricOvsInterfaceTotal = 
prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "interfaces_total", Help: "The total number of Open vSwitch interface(s) created for pods", }) var MetricOvsInterfaceUpWait = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "interface_up_wait_seconds_total", Help: "The total number of seconds that is required to wait for pod " + "Open vSwitch interface until its available", @@ -241,16 +242,16 @@ var MetricOvsInterfaceUpWait = prometheus.NewCounter(prometheus.CounterOpts{ // ovs memory metrics var metricOvsHandlersTotal = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "handlers_total", Help: "Represents the number of handlers thread. This thread reads upcalls from dpif, " + "forwards each upcall's packet and possibly sets up a kernel flow as a cache.", }) var metricOvsRevalidatorsTotal = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "revalidators_total", Help: "Represents the number of revalidators thread. 
This thread processes datapath flows, " + "updates OpenFlow statistics, and updates or removes them if necessary.", @@ -258,16 +259,16 @@ var metricOvsRevalidatorsTotal = prometheus.NewGauge(prometheus.GaugeOpts{ // ovs Hw offload metrics var metricOvsHwOffload = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "hw_offload", Help: "Represents whether netdev flow offload to hardware is enabled " + "or not -- false(0) and true(1).", }) var metricOvsTcPolicy = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "tc_policy", Help: "Represents the policy used with HW offloading " + "-- none(0), skip_sw(1), and skip_hw(2).", @@ -310,15 +311,15 @@ func ovsDatapathLookupsMetrics(output, datapath string) { } switch elem[0] { case "hit": - value := parseMetricToFloat(MetricOvsSubsystemVswitchd, "dp_flows_lookup_hit", elem[1]) + value := parseMetricToFloat(types.MetricOvsSubsystemVswitchd, "dp_flows_lookup_hit", elem[1]) datapathPacketsTotal += value metricOvsDpFlowsLookupHit.WithLabelValues(datapath).Set(value) case "missed": - value := parseMetricToFloat(MetricOvsSubsystemVswitchd, "dp_flows_lookup_missed", elem[1]) + value := parseMetricToFloat(types.MetricOvsSubsystemVswitchd, "dp_flows_lookup_missed", elem[1]) datapathPacketsTotal += value metricOvsDpFlowsLookupMissed.WithLabelValues(datapath).Set(value) case "lost": - value := parseMetricToFloat(MetricOvsSubsystemVswitchd, "dp_flows_lookup_lost", elem[1]) + value := parseMetricToFloat(types.MetricOvsSubsystemVswitchd, "dp_flows_lookup_lost", elem[1]) metricOvsDpFlowsLookupLost.WithLabelValues(datapath).Set(value) } } @@ -335,13 +336,13 @@ func ovsDatapathMasksMetrics(output, datapath string) { } switch elem[0] 
{ case "hit": - value := parseMetricToFloat(MetricOvsSubsystemVswitchd, "dp_masks_hit", elem[1]) + value := parseMetricToFloat(types.MetricOvsSubsystemVswitchd, "dp_masks_hit", elem[1]) metricOvsdpMasksHit.WithLabelValues(datapath).Set(value) case "total": - value := parseMetricToFloat(MetricOvsSubsystemVswitchd, "dp_masks_total", elem[1]) + value := parseMetricToFloat(types.MetricOvsSubsystemVswitchd, "dp_masks_total", elem[1]) metricOvsDpMasksTotal.WithLabelValues(datapath).Set(value) case "hit/pkt": - value := parseMetricToFloat(MetricOvsSubsystemVswitchd, "dp_masks_hit_ratio", elem[1]) + value := parseMetricToFloat(types.MetricOvsSubsystemVswitchd, "dp_masks_hit_ratio", elem[1]) metricOvsDpMasksHitRatio.WithLabelValues(datapath).Set(value) } } @@ -419,7 +420,7 @@ func setOvsDatapathMetrics(ovsAppctl ovsClient, datapaths []string) (err error) datapathPortCount++ } else if strings.HasPrefix(output, "flows:") { flowFields := strings.Fields(output) - value := parseMetricToFloat(MetricOvsSubsystemVswitchd, "dp_flows_total", flowFields[1]) + value := parseMetricToFloat(types.MetricOvsSubsystemVswitchd, "dp_flows_total", flowFields[1]) metricOvsDpFlowsTotal.WithLabelValues(datapathName).Set(value) } } @@ -504,7 +505,7 @@ func getOvsBridgeOpenFlowsCount(ovsOfctl ovsClient, bridgeName string) (float64, if strings.HasPrefix(kvPair, "flow_count=") { value := strings.Split(kvPair, "=")[1] metricName := bridgeName + "flows_total" - return parseMetricToFloat(MetricOvsSubsystemVswitchd, metricName, value), nil + return parseMetricToFloat(types.MetricOvsSubsystemVswitchd, metricName, value), nil } } return 0, fmt.Errorf("ovs-ofctl dump-aggregate %s output didn't contain "+ @@ -595,11 +596,11 @@ func setOvsMemoryMetrics(ovsVswitchdAppctl ovsClient) (err error) { for _, kvPair := range strings.Fields(stdout) { if strings.HasPrefix(kvPair, "handlers:") { value := strings.Split(kvPair, ":")[1] - count := parseMetricToFloat(MetricOvsSubsystemVswitchd, "handlers_total", value) + 
count := parseMetricToFloat(types.MetricOvsSubsystemVswitchd, "handlers_total", value) metricOvsHandlersTotal.Set(count) } else if strings.HasPrefix(kvPair, "revalidators:") { value := strings.Split(kvPair, ":")[1] - count := parseMetricToFloat(MetricOvsSubsystemVswitchd, "revalidators_total", value) + count := parseMetricToFloat(types.MetricOvsSubsystemVswitchd, "revalidators_total", value) metricOvsRevalidatorsTotal.Set(count) } } @@ -846,7 +847,7 @@ func registerOvsMetrics(ovsDBClient libovsdbclient.Client, metricsScrapeInterval getOvsVersionInfo(ovsDBClient) registry.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, + Namespace: types.MetricOvsNamespace, Name: "build_info", Help: "A metric with a constant '1' value labeled by ovs version.", ConstLabels: prometheus.Labels{ @@ -890,18 +891,18 @@ func registerOvsMetrics(ovsDBClient libovsdbclient.Client, metricsScrapeInterval registry.MustRegister(MetricOvsInterfaceUpWait) // Register the OVS coverage/show metrics componentCoverageShowMetricsMap[ovsVswitchd] = ovsVswitchdCoverageShowMetricsMap - registerCoverageShowMetrics(ovsVswitchd, MetricOvsNamespace, MetricOvsSubsystemVswitchd) + registerCoverageShowMetrics(ovsVswitchd, types.MetricOvsNamespace, types.MetricOvsSubsystemVswitchd) // When ovnkube-node is running in privileged mode, the hostPID will be set to true, // and therefore it can monitor OVS running on the host using PID. 
if !config.UnprivilegedMode { registry.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{ PidFn: prometheus.NewPidFileFn("/var/run/openvswitch/ovs-vswitchd.pid"), - Namespace: fmt.Sprintf("%s_%s", MetricOvsNamespace, MetricOvsSubsystemVswitchd), + Namespace: fmt.Sprintf("%s_%s", types.MetricOvsNamespace, types.MetricOvsSubsystemVswitchd), })) registry.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{ PidFn: prometheus.NewPidFileFn("/var/run/openvswitch/ovsdb-server.pid"), - Namespace: fmt.Sprintf("%s_%s", MetricOvsNamespace, MetricOvsSubsystemDB), + Namespace: fmt.Sprintf("%s_%s", types.MetricOvsNamespace, types.MetricOvsSubsystemDB), })) } diff --git a/go-controller/pkg/metrics/recorders/duration.go b/go-controller/pkg/metrics/recorders/duration.go new file mode 100644 index 0000000000..c0ae704e8f --- /dev/null +++ b/go-controller/pkg/metrics/recorders/duration.go @@ -0,0 +1,565 @@ +package recorders + +import ( + "fmt" + "hash/fnv" + "math" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "k8s.io/klog/v2" + + "github.com/ovn-org/libovsdb/cache" + libovsdbclient "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" +) + +const ( + updateOVNMeasurementChSize = 500 + deleteOVNMeasurementChSize = 50 + processChSize = 1000 + nbGlobalTable = "NB_Global" + //fixme: remove when bug is fixed in OVN (Red Hat bugzilla bug number 2074019). Also, handle overflow event. + maxNbCfg = math.MaxUint32 - 1000 + maxMeasurementLifetime = 20 * time.Minute +) + +var configDurationRegOnce sync.Once + +type ConfigDurationRecorder struct { + // rate at which measurements are allowed. 
Probabilistically, 1 in every measurementRate + measurementRate uint64 + measurements map[string]measurement + // controls RW access to measurements map + measurementsMu sync.RWMutex + // channel to trigger processing a measurement following call to End func. Channel string is kind/namespace/name + triggerProcessCh chan string + enabled bool +} + +type ovnMeasurement struct { + // time just before ovsdb tx is called + startTimestamp time.Time + // time when the nbCfg value and its associated configuration is applied to all nodes + endTimestamp time.Time + // OVN measurement complete - start and end timestamps are valid + complete bool + // nb_cfg value that started the measurement + nbCfg int +} + +// measurement stores a measurement attempt through OVN-Kubernetes controller and optionally OVN +type measurement struct { + // kubernetes kind e.g. pod or service + kind string + // time when Add is executed + startTimestamp time.Time + // time when End is executed + endTimestamp time.Time + // if true endTimestamp is valid + end bool + // time when this measurement expires. 
Set during Add + expiresAt time.Time + // OVN measurement(s) via AddOVN + ovnMeasurements []ovnMeasurement +} + +// hvCfgUpdate holds the information received from OVN Northbound event handler +type hvCfgUpdate struct { + // timestamp is in milliseconds + timestamp int + hvCfg int +} + +// global variable is needed because this functionality is accessed in many functions +var cdr *ConfigDurationRecorder + +// lock for accessing the cdr global variable +var cdrMutex sync.Mutex + +func GetConfigDurationRecorder() *ConfigDurationRecorder { + cdrMutex.Lock() + defer cdrMutex.Unlock() + if cdr == nil { + cdr = &ConfigDurationRecorder{} + } + return cdr +} + +// removeOVNMeasurements remove any OVN measurements less than or equal argument hvCfg +func removeOVNMeasurements(measurements map[string]measurement, hvCfg int) { + for kindNamespaceName, m := range measurements { + var indexToDelete []int + for i, ovnM := range m.ovnMeasurements { + if ovnM.nbCfg <= hvCfg { + indexToDelete = append(indexToDelete, i) + } + } + if len(indexToDelete) == 0 { + continue + } + if len(indexToDelete) == len(m.ovnMeasurements) { + delete(measurements, kindNamespaceName) + } + for _, iDel := range indexToDelete { + m.ovnMeasurements = removeOVNMeasurement(m.ovnMeasurements, iDel) + } + measurements[kindNamespaceName] = m + } +} + +var metricNetworkProgramming prometheus.ObserverVec = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, + Name: "network_programming_duration_seconds", + Help: "The duration to apply network configuration for a kind (e.g. pod, service, networkpolicy). " + + "Configuration includes add, update and delete events for each kind.", + Buckets: merge( + prometheus.LinearBuckets(0.25, 0.25, 2), // 0.25s, 0.50s + prometheus.LinearBuckets(1, 1, 59), // 1s, 2s, 3s, ... 59s + prometheus.LinearBuckets(60, 5, 12), // 60s, 65s, 70s, ... 
115s + prometheus.LinearBuckets(120, 30, 11))}, // 2min, 2.5min, 3min, ..., 7min + []string{ + "kind", + }) + +var metricNetworkProgrammingOVN = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, + Name: "network_programming_ovn_duration_seconds", + Help: "The duration for OVN to apply network configuration", + Buckets: merge( + prometheus.LinearBuckets(0.25, 0.25, 2), // 0.25s, 0.50s + prometheus.LinearBuckets(1, 1, 59), // 1s, 2s, 3s, ... 59s + prometheus.LinearBuckets(60, 5, 12), // 60s, 65s, 70s, ... 115s + prometheus.LinearBuckets(120, 30, 11))}, // 2min, 2.5min, 3min, ..., 7min +) + +// Run monitors the config duration for OVN-Kube master to configure k8 kinds. A measurement may be allowed and this is +// related to the number of k8 nodes, N [1] and by argument k [2] where there is a probability that 1 out of N*k +// measurement attempts are allowed. If k=0, all measurements are allowed. mUpdatePeriod determines the period to +// process and publish metrics +// [1] 1 < N, [2] k > 0. The measurement rate is proportional to + // the number of nodes, N and argument k. 1 out of every N*k attempted measurements will succeed. + + // For the optional OVN measurement by calling AddOVN, when the CMS is about to make a transaction to configure + // whatever kind, a call to AddOVN function allows the caller to measure OVN duration. + // An ovsdb operation is returned to the caller of AddOVN, which they can bundle with their existing transactions + // sent to OVN which will tell OVN to measure how long it takes to configure all nodes with the config in the transaction. + // Config duration then waits for OVN to configure all nodes and calculates the time delta. + + // ** configuration duration recorder - caveats ** + // For the optional OVN recording, it does not give you an exact time duration for how long it takes to configure your + // k8 kind. 
When you are recording how long it takes OVN to complete your configuration to all nodes, other + // transactions may have occurred which may increase the overall time. You may also get longer processing times if one + // or more nodes are unavailable because we are measuring how long the functionality takes to apply to ALL nodes. + + // ** configuration duration recorder - How the duration of the config is measured within OVN ** + // We increment the nb_cfg integer value in the NB_Global table. + // ovn-northd notices the nb_cfg change and copies the nb_cfg value to SB_Global table field nb_cfg along with any + // other configuration that is changed in OVN Northbound database. + // All ovn-controllers detect nb_cfg value change and generate a 'barrier' on the openflow connection to the + // nodes ovs-vswitchd. Once ovn-controllers receive the 'barrier processed' reply from ovs-vswitchd which + // indicates that all relevant openflow operations associated with NB_Globals nb_cfg value have been + // propagated to the nodes OVS, it copies the SB_Global nb_cfg value to its Chassis_Private table nb_cfg record. + // ovn-northd detects changes to the Chassis_Private records and computes the minimum nb_cfg for all Chassis_Private + // nb_cfg and stores this in NB_Global hv_cfg field along with a timestamp to field hv_cfg_timestamp which + // reflects the time when the slowest chassis catches up with the northbound configuration. 
+ configDurationRegOnce.Do(func() { + prometheus.MustRegister(metricNetworkProgramming) + prometheus.MustRegister(metricNetworkProgrammingOVN) + }) + + cr.measurements = make(map[string]measurement) + // watch node count and adjust measurement rate if node count changes + cr.runMeasurementRateAdjuster(wf, k, time.Hour, stop) + // we currently do not clean the following channels up upon exit + cr.triggerProcessCh = make(chan string, processChSize) + updateOVNMeasurementCh := make(chan hvCfgUpdate, updateOVNMeasurementChSize) + deleteOVNMeasurementCh := make(chan int, deleteOVNMeasurementChSize) + go cr.processMeasurements(workerLoopPeriod, updateOVNMeasurementCh, deleteOVNMeasurementCh, stop) + + nbClient.Cache().AddEventHandler(&cache.EventHandlerFuncs{ + UpdateFunc: func(table string, old model.Model, new model.Model) { + if table != nbGlobalTable { + return + } + oldRow := old.(*nbdb.NBGlobal) + newRow := new.(*nbdb.NBGlobal) + + if oldRow.HvCfg != newRow.HvCfg && oldRow.HvCfgTimestamp != newRow.HvCfgTimestamp && newRow.HvCfgTimestamp > 0 { + select { + case updateOVNMeasurementCh <- hvCfgUpdate{hvCfg: newRow.HvCfg, timestamp: newRow.HvCfgTimestamp}: + default: + klog.Warning("Config duration recorder: unable to update OVN measurement") + select { + case deleteOVNMeasurementCh <- newRow.HvCfg: + default: + } + } + } + }, + }) + cr.enabled = true +} + +// Start allows the caller to attempt measurement of a control plane configuration duration, as a metric, +// the duration between functions Start and End. Optionally, if you wish to record OVN config duration, +// call AddOVN which will add the duration for OVN to apply the configuration to all nodes. +// The caller must pass kind,namespace,name which will be used to determine if the object +// is allowed to record. To allow no locking, each go routine that calls this function, can determine itself +// if it is allowed to measure. +// There is a mandatory two-step process to complete a measurement. 
+// Step 1) Call Start when you wish to begin a measurement - ideally when processing for the object starts +// Step 2) Call End which will complete a measurement +// Optionally, call AddOVN when you are making a transaction to OVN in order to add on the OVN duration to an existing +// measurement. This must be called between Start and End. Not every call to Start will result in a measurement +// and the rate of measurements depends on the number of nodes and function Run arg k. +// Only one measurement for a kind/namespace/name is allowed until the current measurement is Ended (via End) and +// processed. This is guaranteed by workqueues (even with multiple workers) and informer event handlers. +func (cr *ConfigDurationRecorder) Start(kind, namespace, name string) (time.Time, bool) { + if !cr.enabled { + return time.Time{}, false + } + kindNamespaceName := fmt.Sprintf("%s/%s/%s", kind, namespace, name) + if !cr.allowedToMeasure(kindNamespaceName) { + return time.Time{}, false + } + measurementTimestamp := time.Now() + cr.measurementsMu.Lock() + _, found := cr.measurements[kindNamespaceName] + // we only record for measurements that aren't in-progress + if !found { + cr.measurements[kindNamespaceName] = measurement{kind: kind, startTimestamp: measurementTimestamp, + expiresAt: measurementTimestamp.Add(maxMeasurementLifetime)} + } + cr.measurementsMu.Unlock() + return measurementTimestamp, !found +} + +// allowedToMeasure determines if we are allowed to measure or not. To avoid the cost of synchronisation by using locks, +// we use probability. For a value of kindNamespaceName that returns true, it will always return true. 
+func (cr *ConfigDurationRecorder) allowedToMeasure(kindNamespaceName string) bool { + if cr.measurementRate == 0 { + return true + } + // 1 in measurementRate chance of true + if hashToNumber(kindNamespaceName)%cr.measurementRate == 0 { + return true + } + return false +} + +func (cr *ConfigDurationRecorder) End(kind, namespace, name string) time.Time { + if !cr.enabled { + return time.Time{} + } + kindNamespaceName := fmt.Sprintf("%s/%s/%s", kind, namespace, name) + if !cr.allowedToMeasure(kindNamespaceName) { + return time.Time{} + } + measurementTimestamp := time.Now() + cr.measurementsMu.Lock() + if m, ok := cr.measurements[kindNamespaceName]; ok { + if !m.end { + m.end = true + m.endTimestamp = measurementTimestamp + cr.measurements[kindNamespaceName] = m + // if there are no OVN measurements, trigger immediate processing + if len(m.ovnMeasurements) == 0 { + select { + case cr.triggerProcessCh <- kindNamespaceName: + default: + // doesn't matter if channel is full because the measurement will be processed later anyway + } + } + } + } else { + // This can happen if Start was rejected for a resource because a measurement was in-progress for this + // kind/namespace/name, but during execution of this resource, the measurement was completed and now no record + // is found. + measurementTimestamp = time.Time{} + } + cr.measurementsMu.Unlock() + return measurementTimestamp +} + +// AddOVN adds OVN config duration to an existing recording - previously started by calling function Start +// It will return ovsdb operations which a user can add to existing operations they wish to track. +// Upon successful transaction of the operations to the ovsdb server, the user of this function must call a call-back +// function to lock-in the request to measure and report. Failure to call the call-back function, will result in no OVN +// measurement and no metrics are reported. AddOVN will result in a no-op if Start isn't called previously for the same +// kind/namespace/name. 
+// If multiple AddOVN is called between Start and End for the same kind/namespace/name, then the +// OVN durations will be summed and added to the total. There is an assumption that processing of kind/namespace/name is +// sequential +func (cr *ConfigDurationRecorder) AddOVN(nbClient libovsdbclient.Client, kind, namespace, name string) ( + []ovsdb.Operation, func(), time.Time, error) { + if !cr.enabled { + return []ovsdb.Operation{}, func() {}, time.Time{}, nil + } + kindNamespaceName := fmt.Sprintf("%s/%s/%s", kind, namespace, name) + if !cr.allowedToMeasure(kindNamespaceName) { + return []ovsdb.Operation{}, func() {}, time.Time{}, nil + } + cr.measurementsMu.RLock() + m, ok := cr.measurements[kindNamespaceName] + cr.measurementsMu.RUnlock() + if !ok { + // no measurement found, therefore no-op + return []ovsdb.Operation{}, func() {}, time.Time{}, nil + } + if m.end { + // existing measurement in-progress and not processed yet, therefore no-op + return []ovsdb.Operation{}, func() {}, time.Time{}, nil + } + nbGlobal := &nbdb.NBGlobal{} + nbGlobal, err := libovsdbops.GetNBGlobal(nbClient, nbGlobal) + if err != nil { + return []ovsdb.Operation{}, func() {}, time.Time{}, fmt.Errorf("failed to find OVN Northbound NB_Global table"+ + " entry: %v", err) + } + if nbGlobal.NbCfg < 0 { + return []ovsdb.Operation{}, func() {}, time.Time{}, fmt.Errorf("nb_cfg is negative, failed to add OVN measurement") + } + //stop recording if we are close to overflow + if nbGlobal.NbCfg > maxNbCfg { + return []ovsdb.Operation{}, func() {}, time.Time{}, fmt.Errorf("unable to measure OVN due to nb_cfg being close to overflow") + } + ops, err := nbClient.Where(nbGlobal).Mutate(nbGlobal, model.Mutation{ + Field: &nbGlobal.NbCfg, + Mutator: ovsdb.MutateOperationAdd, + Value: 1, + }) + if err != nil { + return []ovsdb.Operation{}, func() {}, time.Time{}, fmt.Errorf("failed to create update operation: %v", err) + } + ovnStartTimestamp := time.Now() + + return ops, func() { + // there can be a 
race condition here where we queue the wrong nbCfg value, but it is ok as long as it is + // less than or equal the hv_cfg value we see and this is the case because of atomic increments for nb_cfg + cr.measurementsMu.Lock() + m, ok = cr.measurements[kindNamespaceName] + if !ok { + klog.Errorf("Config duration recorder: expected a measurement entry. Call Start before AddOVN"+ + " for %s", kindNamespaceName) + cr.measurementsMu.Unlock() + return + } + m.ovnMeasurements = append(m.ovnMeasurements, ovnMeasurement{startTimestamp: ovnStartTimestamp, + nbCfg: nbGlobal.NbCfg + 1}) + cr.measurements[kindNamespaceName] = m + cr.measurementsMu.Unlock() + }, ovnStartTimestamp, nil +} + +// runMeasurementRateAdjuster will adjust the rate of measurements based on the number of nodes in the cluster and arg k +func (cr *ConfigDurationRecorder) runMeasurementRateAdjuster(wf *factory.WatchFactory, k float64, nodeCheckPeriod time.Duration, + stop <-chan struct{}) { + var currentMeasurementRate, newMeasurementRate uint64 + + updateMeasurementRate := func() { + if nodeCount, err := getNodeCount(wf); err != nil { + klog.Errorf("Config duration recorder: failed to update ticker duration considering node count: %v", err) + } else { + newMeasurementRate = uint64(math.Round(k * float64(nodeCount))) + if newMeasurementRate != currentMeasurementRate { + if newMeasurementRate > 0 { + currentMeasurementRate = newMeasurementRate + cr.measurementRate = newMeasurementRate + } + klog.V(5).Infof("Config duration recorder: updated measurement rate to approx 1 in"+ + " every %d requests", newMeasurementRate) + } + } + } + + // initial measurement rate adjustment + updateMeasurementRate() + + go func() { + nodeCheckTicker := time.NewTicker(nodeCheckPeriod) + for { + select { + case <-nodeCheckTicker.C: + updateMeasurementRate() + case <-stop: + nodeCheckTicker.Stop() + return + } + } + }() +} + +// processMeasurements manages the measurements map. 
It calculates metrics and cleans up finished or stale measurements +func (cr *ConfigDurationRecorder) processMeasurements(period time.Duration, updateOVNMeasurementCh chan hvCfgUpdate, + deleteOVNMeasurementCh chan int, stop <-chan struct{}) { + ticker := time.NewTicker(period) + var ovnKDelta, ovnDelta float64 + + for { + select { + case <-stop: + ticker.Stop() + return + // remove measurements if channel updateOVNMeasurementCh overflows, therefore we cannot trust existing measurements + case hvCfg := <-deleteOVNMeasurementCh: + cr.measurementsMu.Lock() + removeOVNMeasurements(cr.measurements, hvCfg) + cr.measurementsMu.Unlock() + case h := <-updateOVNMeasurementCh: + cr.measurementsMu.Lock() + cr.addHvCfg(h.hvCfg, h.timestamp) + cr.measurementsMu.Unlock() + // used for processing measurements that didn't require OVN measurement. Helps to keep measurement map small + case kindNamespaceName := <-cr.triggerProcessCh: + cr.measurementsMu.Lock() + m, ok := cr.measurements[kindNamespaceName] + if !ok { + klog.Errorf("Config duration recorder: expected measurement, but not found") + cr.measurementsMu.Unlock() + continue + } + if !m.end { + cr.measurementsMu.Unlock() + continue + } + if len(m.ovnMeasurements) != 0 { + cr.measurementsMu.Unlock() + continue + } + ovnKDelta = m.endTimestamp.Sub(m.startTimestamp).Seconds() + metricNetworkProgramming.With(prometheus.Labels{"kind": m.kind}).Observe(ovnKDelta) + klog.V(5).Infof("Config duration recorder: kind/namespace/name %s. OVN-Kubernetes controller took %v"+ + " seconds. No OVN measurement.", kindNamespaceName, ovnKDelta) + delete(cr.measurements, kindNamespaceName) + cr.measurementsMu.Unlock() + // used for processing measurements that require OVN measurement or do not or are expired. 
+ case <-ticker.C: + start := time.Now() + cr.measurementsMu.Lock() + // process and clean up measurements + for kindNamespaceName, m := range cr.measurements { + if start.After(m.expiresAt) { + // measurement may expire if OVN is degraded or End wasn't called + klog.Warningf("Config duration recorder: measurement expired for %s", kindNamespaceName) + delete(cr.measurements, kindNamespaceName) + continue + } + if !m.end { + // measurement didn't end yet, process later + continue + } + // for when no ovn measurements requested + if len(m.ovnMeasurements) == 0 { + ovnKDelta = m.endTimestamp.Sub(m.startTimestamp).Seconds() + metricNetworkProgramming.With(prometheus.Labels{"kind": m.kind}).Observe(ovnKDelta) + klog.V(5).Infof("Config duration recorder: kind/namespace/name %s. OVN-Kubernetes controller"+ + " took %v seconds. No OVN measurement.", kindNamespaceName, ovnKDelta) + delete(cr.measurements, kindNamespaceName) + continue + } + // for each kind/namespace/name, there can be multiple calls to AddOVN between start and end + // we sum all the OVN durations and add it to the start and end duration + // first lets make sure all OVN measurements are finished + if complete := allOVNMeasurementsComplete(m.ovnMeasurements); !complete { + continue + } + + ovnKDelta = m.endTimestamp.Sub(m.startTimestamp).Seconds() + ovnDelta = calculateOVNDuration(m.ovnMeasurements) + metricNetworkProgramming.With(prometheus.Labels{"kind": m.kind}).Observe(ovnKDelta + ovnDelta) + metricNetworkProgrammingOVN.Observe(ovnDelta) + klog.V(5).Infof("Config duration recorder: kind/namespace/name %s. OVN-Kubernetes controller took"+ + " %v seconds. OVN took %v seconds. 
Total took %v seconds", kindNamespaceName, ovnKDelta, + ovnDelta, ovnDelta+ovnKDelta) + delete(cr.measurements, kindNamespaceName) + } + cr.measurementsMu.Unlock() + } + } +} + +func (cr *ConfigDurationRecorder) addHvCfg(hvCfg, hvCfgTimestamp int) { + var altered bool + for i, m := range cr.measurements { + altered = false + for iOvnM, ovnM := range m.ovnMeasurements { + if ovnM.complete { + continue + } + if ovnM.nbCfg <= hvCfg { + ovnM.endTimestamp = time.UnixMilli(int64(hvCfgTimestamp)) + ovnM.complete = true + m.ovnMeasurements[iOvnM] = ovnM + altered = true + } + } + if altered { + cr.measurements[i] = m + } + } +} + +func getNodeCount(wf *factory.WatchFactory) (int, error) { + nodes, err := wf.GetNodes() + if err != nil { + return 0, fmt.Errorf("unable to retrieve node list: %v", err) + } + return len(nodes), nil +} + +func removeOVNMeasurement(oM []ovnMeasurement, i int) []ovnMeasurement { + oM[i] = oM[len(oM)-1] + return oM[:len(oM)-1] +} +func hashToNumber(s string) uint64 { + h := fnv.New64() + h.Write([]byte(s)) + return h.Sum64() +} + +func calculateOVNDuration(ovnMeasurements []ovnMeasurement) float64 { + var totalDuration float64 + for _, oM := range ovnMeasurements { + if !oM.complete { + continue + } + totalDuration += oM.endTimestamp.Sub(oM.startTimestamp).Seconds() + } + return totalDuration +} + +func allOVNMeasurementsComplete(ovnMeasurements []ovnMeasurement) bool { + for _, oM := range ovnMeasurements { + if !oM.complete { + return false + } + } + return true +} + +// merge direct copy from k8 pkg/proxy/metrics/metrics.go +func merge(slices ...[]float64) []float64 { + result := make([]float64, 1) + for _, s := range slices { + result = append(result, s...) 
+ } + return result +} diff --git a/go-controller/pkg/metrics/ovnkube_controller_test.go b/go-controller/pkg/metrics/recorders/duration_test.go similarity index 86% rename from go-controller/pkg/metrics/ovnkube_controller_test.go rename to go-controller/pkg/metrics/recorders/duration_test.go index 1ff008db59..ee436d8d1b 100644 --- a/go-controller/pkg/metrics/ovnkube_controller_test.go +++ b/go-controller/pkg/metrics/recorders/duration_test.go @@ -1,4 +1,4 @@ -package metrics +package recorders import ( "fmt" @@ -14,17 +14,29 @@ import ( "github.com/ovn-org/libovsdb/client" + egressfirewallfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/fake" + egressipfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/fake" + egressqosfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics/mocks" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) -func setupOvn(nbData libovsdbtest.TestSetup) (client.Client, client.Client, *libovsdbtest.Context) { - nbClient, sbClient, cleanup, err := libovsdbtest.NewNBSBTestHarness(nbData) +func setHvCfg(nbClient client.Client, hvCfg int, hvCfgTimestamp time.Time) { + nbGlobal := nbdb.NBGlobal{} + nbGlobalResp, err := libovsdbops.GetNBGlobal(nbClient, &nbGlobal) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + nbGlobalResp.HvCfg = hvCfg + nbGlobalResp.HvCfgTimestamp = int(hvCfgTimestamp.UnixMilli()) + ops, err := nbClient.Where(nbGlobalResp).Update(nbGlobalResp, &nbGlobalResp.HvCfg, 
&nbGlobalResp.HvCfgTimestamp) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(ops).To(gomega.HaveLen(1)) + _, err = libovsdbops.TransactAndCheck(nbClient, ops) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - return sbClient, nbClient, cleanup } func getKubeClient(nodeCount int) *kube.Kube { @@ -40,23 +52,16 @@ func getKubeClient(nodeCount int) *kube.Kube { return &kube.Kube{KClient: kubeFakeClient} } -func setHvCfg(nbClient client.Client, hvCfg int, hvCfgTimestamp time.Time) { - nbGlobal := nbdb.NBGlobal{} - nbGlobalResp, err := libovsdbops.GetNBGlobal(nbClient, &nbGlobal) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - nbGlobalResp.HvCfg = hvCfg - nbGlobalResp.HvCfgTimestamp = int(hvCfgTimestamp.UnixMilli()) - ops, err := nbClient.Where(nbGlobalResp).Update(nbGlobalResp, &nbGlobalResp.HvCfg, &nbGlobalResp.HvCfgTimestamp) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(ops).To(gomega.HaveLen(1)) - _, err = libovsdbops.TransactAndCheck(nbClient, ops) +func setupOvn(nbData libovsdbtest.TestSetup) (client.Client, client.Client, *libovsdbtest.Context) { + nbClient, sbClient, cleanup, err := libovsdbtest.NewNBSBTestHarness(nbData) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return sbClient, nbClient, cleanup } var _ = ginkgo.Describe("Config Duration Operations", func() { var ( instance *ConfigDurationRecorder - k *kube.Kube + wf *factory.WatchFactory nbClient client.Client cleanup *libovsdbtest.Context stop chan struct{} @@ -69,7 +74,23 @@ var _ = ginkgo.Describe("Config Duration Operations", func() { ginkgo.BeforeEach(func() { cdr = nil instance = GetConfigDurationRecorder() - k = getKubeClient(1) + k := getKubeClient(1) + egressFirewallFakeClient := &egressfirewallfake.Clientset{} + egressIPFakeClient := &egressipfake.Clientset{} + egressQoSFakeClient := &egressqosfake.Clientset{} + fakeClient := &util.OVNClientset{ + KubeClient: k.KClient, + EgressIPClient: egressIPFakeClient, + EgressFirewallClient: 
egressFirewallFakeClient, + EgressQoSClient: egressQoSFakeClient, + } + + var err error + wf, err = factory.NewMasterWatchFactory(fakeClient.GetMasterClientset()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = wf.Start() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + stop = make(chan struct{}) _, nbClient, cleanup = setupOvn(libovsdbtest.TestSetup{ NBData: []libovsdbtest.TestData{&nbdb.NBGlobal{UUID: "cd-op-uuid"}}}) @@ -78,11 +99,12 @@ var _ = ginkgo.Describe("Config Duration Operations", func() { ginkgo.AfterEach(func() { cleanup.Cleanup() close(stop) + wf.Stop() }) ginkgo.Context("Runtime", func() { ginkgo.It("records correctly", func() { - instance.Run(nbClient, k, 0, time.Millisecond, stop) + instance.Run(nbClient, wf, 0, time.Millisecond, stop) histoMock := mocks.NewHistogramVecMock() metricNetworkProgramming = histoMock startTimestamp, ok := instance.Start("pod", testNamespaceA, testPodNameA) @@ -104,7 +126,7 @@ var _ = ginkgo.Describe("Config Duration Operations", func() { }) ginkgo.It("records correctly with OVN latency", func() { - instance.Run(nbClient, k, 0, time.Millisecond, stop) + instance.Run(nbClient, wf, 0, time.Millisecond, stop) histoMock := mocks.NewHistogramVecMock() metricNetworkProgramming = histoMock startTimestamp, ok := instance.Start("pod", testNamespaceA, testPodNameA) @@ -134,7 +156,7 @@ var _ = ginkgo.Describe("Config Duration Operations", func() { }) ginkgo.It("records multiple different objs including adding OVN latency", func() { - instance.Run(nbClient, k, 0, time.Millisecond, stop) + instance.Run(nbClient, wf, 0, time.Millisecond, stop) histoMock := mocks.NewHistogramVecMock() metricNetworkProgramming = histoMock // recording 1 @@ -186,13 +208,13 @@ var _ = ginkgo.Describe("Config Duration Operations", func() { }) ginkgo.It("denies recording when no start called", func() { - instance.Run(nbClient, k, 0, time.Millisecond, stop) + instance.Run(nbClient, wf, 0, time.Millisecond, stop) ops, _, _, _ := 
instance.AddOVN(nbClient, "pod", testNamespaceA, testPodNameA) gomega.Expect(ops).Should(gomega.BeEmpty()) }) ginkgo.It("allows multiple addOVN records for the same obj", func() { - instance.Run(nbClient, k, 0, time.Millisecond, stop) + instance.Run(nbClient, wf, 0, time.Millisecond, stop) histoMock := mocks.NewHistogramVecMock() metricNetworkProgramming = histoMock // recording 1 diff --git a/go-controller/pkg/node/default_node_network_controller.go b/go-controller/pkg/node/default_node_network_controller.go index 02a110b5d7..f9f3b36ec5 100644 --- a/go-controller/pkg/node/default_node_network_controller.go +++ b/go-controller/pkg/node/default_node_network_controller.go @@ -830,7 +830,7 @@ func (nc *DefaultNodeNetworkController) Init(ctx context.Context) error { } } - if node, err = nc.Kube.GetNode(nc.name); err != nil { + if node, err = nc.watchFactory.GetNode(nc.name); err != nil { return fmt.Errorf("error retrieving node %s: %v", nc.name, err) } @@ -895,7 +895,7 @@ func (nc *DefaultNodeNetworkController) Init(ctx context.Context) error { // First wait for the node logical switch to be created by the Master, timeout is 300s. 
err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 300*time.Second, true, func(_ context.Context) (bool, error) { - if node, err = nc.Kube.GetNode(nc.name); err != nil { + if node, err = nc.watchFactory.GetNode(nc.name); err != nil { klog.Infof("Waiting to retrieve node %s: %v", nc.name, err) return false, nil } @@ -999,7 +999,7 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { klog.Errorf("Setting klog \"loglevel\" to 5 failed, err: %v", err) } - if node, err = nc.Kube.GetNode(nc.name); err != nil { + if node, err = nc.watchFactory.GetNode(nc.name); err != nil { return fmt.Errorf("error retrieving node %s: %v", nc.name, err) } @@ -1079,7 +1079,7 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 300*time.Second, true, func(_ context.Context) (bool, error) { // we loop through all the nodes in the cluster and ensure ovnkube-controller has finished creating the LRSR required for pod2pod overlay communication if !syncNodes { - nodes, err := nc.Kube.GetNodes() + nodes, err := nc.watchFactory.GetNodes() if err != nil { err1 = fmt.Errorf("upgrade hack: error retrieving node %s: %v", nc.name, err) return false, nil diff --git a/go-controller/pkg/node/gateway_egressip_test.go b/go-controller/pkg/node/gateway_egressip_test.go index bd09738200..db43f7450a 100644 --- a/go-controller/pkg/node/gateway_egressip_test.go +++ b/go-controller/pkg/node/gateway_egressip_test.go @@ -70,7 +70,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { isUpdated, err := addrMgr.addEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") 
gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.ConsistOf(ipV4Addr)) gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, @@ -85,7 +85,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { isUpdated, err := addrMgr.addEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeFalse()) - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") gomega.Expect(parseEIPsFromAnnotation(node)).ShouldNot(gomega.ConsistOf(ipV4Addr)) gomega.Expect(nlMock.AssertNotCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, @@ -100,7 +100,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { isUpdated, err := addrMgr.addEgressIP(eip) gomega.Expect(err).Should(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeFalse()) - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") gomega.Expect(parseEIPsFromAnnotation(node)).ShouldNot(gomega.ConsistOf(ipV4Addr)) gomega.Expect(nlMock.AssertNotCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, @@ -120,7 +120,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { isUpdated, err := addrMgr.addEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.ConsistOf(ipV4Addr, ipV4Addr2)) gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, @@ -143,7 +143,7 @@ var _ = 
ginkgo.Describe("Gateway EgressIP", func() { isUpdated, err := addrMgr.updateEgressIP(unassignedEIP, assignedEIP) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.ConsistOf(ipV4Addr)) gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, @@ -168,7 +168,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { isUpdated, err = addrMgr.updateEgressIP(assignedEIP, unassignedEIP) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") gomega.Expect(parseEIPsFromAnnotation(node)).ShouldNot(gomega.ConsistOf(ipV4Addr)) gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, @@ -197,7 +197,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { isUpdated, err = addrMgr.updateEgressIP(assignedEIP1, assignedEIP2) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.ConsistOf(ipV4Addr2)) gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, @@ -227,7 +227,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { isUpdated, err = addrMgr.deleteEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), 
"should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") gomega.Expect(parseEIPsFromAnnotation(node)).ShouldNot(gomega.ConsistOf(ipV4Addr)) gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, @@ -243,7 +243,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { isUpdated, err := addrMgr.deleteEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeFalse()) - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.ConsistOf(ipV4Addr2)) gomega.Expect(nlMock.AssertNotCalled(ginkgo.GinkgoT(), "AddrDel", nlLinkMock, @@ -267,7 +267,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { eipUnassigned3 := getEIPNotAssignedToNode(mark3, ipV4Addr3) err := addrMgr.syncEgressIP([]interface{}{eipAssigned1, eipAssigned2, eipUnassigned3}) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process valid EgressIPs") - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.ConsistOf(ipV4Addr, ipV4Addr2)) gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, @@ -291,7 +291,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { eipAssigned2 := getEIPAssignedToNode(nodeName, mark2, ipV4Addr2) err := addrMgr.syncEgressIP([]interface{}{eipAssigned1, eipAssigned2}) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process valid EgressIPs") - node, err := 
addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.ConsistOf(ipV4Addr, ipV4Addr2)) gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, @@ -308,7 +308,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { eipAssigned := getEIPAssignedToNode(nodeName, "", ipV4Addr) err := addrMgr.syncEgressIP([]interface{}{eipAssigned}) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process valid EgressIPs") - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.BeEmpty()) }) diff --git a/go-controller/pkg/node/gateway_udn_test.go b/go-controller/pkg/node/gateway_udn_test.go index 7312521ab7..f8bd7e94c6 100644 --- a/go-controller/pkg/node/gateway_udn_test.go +++ b/go-controller/pkg/node/gateway_udn_test.go @@ -458,7 +458,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { mgtPortMAC = util.IPAddrToHWAddr(util.GetNodeManagementIfAddr(ipNet).IP).String() getCreationFakeCommands(fexec, mgtPort, mgtPortMAC, netName, nodeName, netInfo.MTU()) nodeLister.On("Get", mock.AnythingOfType("string")).Return(node, nil) - factoryMock.On("GetNode", "worker1").Return(node, nil) + factoryMock.On("GetNodeForWindows", "worker1").Return(node, nil) err = testNS.Do(func(ns.NetNS) error { defer GinkgoRecover() @@ -501,7 +501,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { Expect(err).NotTo(HaveOccurred()) getDeletionFakeOVSCommands(fexec, mgtPort) nodeLister.On("Get", mock.AnythingOfType("string")).Return(node, nil) - factoryMock.On("GetNode", "worker1").Return(node, nil) + factoryMock.On("GetNodeForWindows", "worker1").Return(node, nil) cnode := node.DeepCopy() 
kubeMock.On("UpdateNodeStatus", cnode).Return(nil) // check if network key gets deleted from annotation err = testNS.Do(func(ns.NetNS) error { @@ -537,7 +537,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { mgtPortMAC = util.IPAddrToHWAddr(util.GetNodeManagementIfAddr(ipNet).IP).String() getCreationFakeCommands(fexec, mgtPort, mgtPortMAC, netName, nodeName, netInfo.MTU()) nodeLister.On("Get", mock.AnythingOfType("string")).Return(node, nil) - factoryMock.On("GetNode", "worker1").Return(node, nil) + factoryMock.On("GetNodeForWindows", "worker1").Return(node, nil) err = testNS.Do(func(ns.NetNS) error { defer GinkgoRecover() ofm := getDummyOpenflowManager() @@ -579,7 +579,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { Expect(err).NotTo(HaveOccurred()) getDeletionFakeOVSCommands(fexec, mgtPort) nodeLister.On("Get", mock.AnythingOfType("string")).Return(node, nil) - factoryMock.On("GetNode", "worker1").Return(node, nil) + factoryMock.On("GetNodeForWindows", "worker1").Return(node, nil) cnode := node.DeepCopy() kubeMock.On("UpdateNodeStatus", cnode).Return(nil) // check if network key gets deleted from annotation err = testNS.Do(func(ns.NetNS) error { diff --git a/go-controller/pkg/ovn/base_event_handler.go b/go-controller/pkg/ovn/base_event_handler.go index b08bc30b99..f54afc9925 100644 --- a/go-controller/pkg/ovn/base_event_handler.go +++ b/go-controller/pkg/ovn/base_event_handler.go @@ -14,7 +14,7 @@ import ( egressfirewall "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics/recorders" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -213,11 +213,11 @@ func (h *baseNetworkControllerEventHandler) recordAddEvent(objType reflect.Type, case factory.PolicyType: np := obj.(*knet.NetworkPolicy) klog.V(5).Infof("Recording add 
event on network policy %s/%s", np.Namespace, np.Name) - metrics.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) + recorders.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) case factory.MultiNetworkPolicyType: mnp := obj.(*mnpapi.MultiNetworkPolicy) klog.V(5).Infof("Recording add event on multinetwork policy %s/%s", mnp.Namespace, mnp.Name) - metrics.GetConfigDurationRecorder().Start("multinetworkpolicy", mnp.Namespace, mnp.Name) + recorders.GetConfigDurationRecorder().Start("multinetworkpolicy", mnp.Namespace, mnp.Name) } } @@ -227,11 +227,11 @@ func (h *baseNetworkControllerEventHandler) recordUpdateEvent(objType reflect.Ty case factory.PolicyType: np := obj.(*knet.NetworkPolicy) klog.V(5).Infof("Recording update event on network policy %s/%s", np.Namespace, np.Name) - metrics.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) + recorders.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) case factory.MultiNetworkPolicyType: mnp := obj.(*mnpapi.MultiNetworkPolicy) klog.V(5).Infof("Recording update event on multinetwork policy %s/%s", mnp.Namespace, mnp.Name) - metrics.GetConfigDurationRecorder().Start("multinetworkpolicy", mnp.Namespace, mnp.Name) + recorders.GetConfigDurationRecorder().Start("multinetworkpolicy", mnp.Namespace, mnp.Name) } } @@ -241,11 +241,11 @@ func (h *baseNetworkControllerEventHandler) recordDeleteEvent(objType reflect.Ty case factory.PolicyType: np := obj.(*knet.NetworkPolicy) klog.V(5).Infof("Recording delete event on network policy %s/%s", np.Namespace, np.Name) - metrics.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) + recorders.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) case factory.MultiNetworkPolicyType: mnp := obj.(*mnpapi.MultiNetworkPolicy) klog.V(5).Infof("Recording delete event on multinetwork policy %s/%s", mnp.Namespace, mnp.Name) - 
metrics.GetConfigDurationRecorder().Start("multinetworkpolicy", mnp.Namespace, mnp.Name) + recorders.GetConfigDurationRecorder().Start("multinetworkpolicy", mnp.Namespace, mnp.Name) } } @@ -255,10 +255,10 @@ func (h *baseNetworkControllerEventHandler) recordSuccessEvent(objType reflect.T case factory.PolicyType: np := obj.(*knet.NetworkPolicy) klog.V(5).Infof("Recording success event on network policy %s/%s", np.Namespace, np.Name) - metrics.GetConfigDurationRecorder().End("networkpolicy", np.Namespace, np.Name) + recorders.GetConfigDurationRecorder().End("networkpolicy", np.Namespace, np.Name) case factory.MultiNetworkPolicyType: mnp := obj.(*mnpapi.MultiNetworkPolicy) klog.V(5).Infof("Recording success event on multinetwork policy %s/%s", mnp.Namespace, mnp.Name) - metrics.GetConfigDurationRecorder().End("multinetworkpolicy", mnp.Namespace, mnp.Name) + recorders.GetConfigDurationRecorder().End("multinetworkpolicy", mnp.Namespace, mnp.Name) } } diff --git a/go-controller/pkg/ovn/base_network_controller.go b/go-controller/pkg/ovn/base_network_controller.go index ae38418d70..db56f42cb9 100644 --- a/go-controller/pkg/ovn/base_network_controller.go +++ b/go-controller/pkg/ovn/base_network_controller.go @@ -33,6 +33,7 @@ import ( libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics/recorders" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability" @@ -317,7 +318,7 @@ func (bnc *BaseNetworkController) GetLogicalPortName(pod *corev1.Pod, nadName st func (bnc *BaseNetworkController) AddConfigDurationRecord(kind, namespace, name string) ( []ovsdb.Operation, func(), time.Time, error) { if !bnc.IsSecondary() { 
- return metrics.GetConfigDurationRecorder().AddOVN(bnc.nbClient, kind, namespace, name) + return recorders.GetConfigDurationRecorder().AddOVN(bnc.nbClient, kind, namespace, name) } // TBD: no op for secondary network for now return []ovsdb.Operation{}, func() {}, time.Time{}, nil diff --git a/go-controller/pkg/ovn/controller/admin_network_policy/metrics.go b/go-controller/pkg/ovn/controller/admin_network_policy/metrics.go index cd2e636ea8..dcf1fb6aab 100644 --- a/go-controller/pkg/ovn/controller/admin_network_policy/metrics.go +++ b/go-controller/pkg/ovn/controller/admin_network_policy/metrics.go @@ -8,14 +8,14 @@ import ( anpapi "sigs.k8s.io/network-policy-api/apis/v1alpha1" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" ) // Descriptors used by the ANPControllerCollector below. 
var ( anpRuleCountDesc = prometheus.NewDesc( - prometheus.BuildFQName(metrics.MetricOvnkubeNamespace, metrics.MetricOvnkubeSubsystemController, "admin_network_policies_rules"), + prometheus.BuildFQName(types.MetricOvnkubeNamespace, types.MetricOvnkubeSubsystemController, "admin_network_policies_rules"), "The total number of rules across all admin network policies in the cluster", []string{ "direction", // direction is either "ingress" or "egress"; so cardinality is max 2 for this label @@ -23,7 +23,7 @@ var ( }, nil, ) banpRuleCountDesc = prometheus.NewDesc( - prometheus.BuildFQName(metrics.MetricOvnkubeNamespace, metrics.MetricOvnkubeSubsystemController, "baseline_admin_network_policies_rules"), + prometheus.BuildFQName(types.MetricOvnkubeNamespace, types.MetricOvnkubeSubsystemController, "baseline_admin_network_policies_rules"), "The total number of rules across all baseline admin network policies in the cluster", []string{ "direction", // direction is either "ingress" or "egress"; so cardinality is max 2 for this label diff --git a/go-controller/pkg/ovn/controller/network_qos/metrics.go b/go-controller/pkg/ovn/controller/network_qos/metrics.go index 96fa30834d..de5c6872ca 100644 --- a/go-controller/pkg/ovn/controller/network_qos/metrics.go +++ b/go-controller/pkg/ovn/controller/network_qos/metrics.go @@ -3,15 +3,15 @@ package networkqos import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" ) // Metrics to be exposed var ( nqosCount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: metrics.MetricOvnkubeNamespace, - Subsystem: metrics.MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "num_network_qoses", Help: "The total number of network qoses in the cluster", }, @@ -20,8 +20,8 @@ var ( nqosOvnOperationDuration = prometheus.NewHistogramVec( 
prometheus.HistogramOpts{ - Namespace: metrics.MetricOvnkubeNamespace, - Subsystem: metrics.MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "nqos_ovn_operation_duration_ms", Help: "Time spent on reconciling a NetworkQoS event", Buckets: prometheus.ExponentialBuckets(.1, 2, 15), @@ -31,8 +31,8 @@ var ( nqosReconcileDuration = prometheus.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: metrics.MetricOvnkubeNamespace, - Subsystem: metrics.MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "nqos_creation_duration_ms", Help: "Time spent on reconciling a NetworkQoS event", Buckets: prometheus.ExponentialBuckets(.1, 2, 15), @@ -42,8 +42,8 @@ var ( nqosPodReconcileDuration = prometheus.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: metrics.MetricOvnkubeNamespace, - Subsystem: metrics.MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "nqos_deletion_duration_ms", Help: "Time spent on reconciling a Pod event", Buckets: prometheus.ExponentialBuckets(.1, 2, 15), @@ -53,8 +53,8 @@ var ( nqosNamespaceReconcileDuration = prometheus.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: metrics.MetricOvnkubeNamespace, - Subsystem: metrics.MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "nqos_ns_reconcile_duration_ms", Help: "Time spent on reconciling Namespace change for all Pods related to NetworkQoSes", Buckets: prometheus.ExponentialBuckets(.1, 2, 15), @@ -64,8 +64,8 @@ var ( nqosStatusPatchDuration = prometheus.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: metrics.MetricOvnkubeNamespace, - Subsystem: metrics.MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: 
types.MetricOvnkubeSubsystemController, Name: "nqos_status_patch_duration_ms", Help: "Time spent on patching the status of a NetworkQoS", }, diff --git a/go-controller/pkg/ovn/controller/services/loadbalancer.go b/go-controller/pkg/ovn/controller/services/loadbalancer.go index ba4eebc43a..8c3d1c9114 100644 --- a/go-controller/pkg/ovn/controller/services/loadbalancer.go +++ b/go-controller/pkg/ovn/controller/services/loadbalancer.go @@ -14,7 +14,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics/recorders" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -283,7 +283,7 @@ func EnsureLBs(nbClient libovsdbclient.Client, service *corev1.Service, existing return err } - recordOps, txOkCallBack, _, err := metrics.GetConfigDurationRecorder().AddOVN(nbClient, "service", + recordOps, txOkCallBack, _, err := recorders.GetConfigDurationRecorder().AddOVN(nbClient, "service", service.Namespace, service.Name) if err != nil { klog.Errorf("Failed to record config duration: %v", err) diff --git a/go-controller/pkg/ovn/controller/services/services_controller.go b/go-controller/pkg/ovn/controller/services/services_controller.go index 031a38a0d6..e03ad40b5c 100644 --- a/go-controller/pkg/ovn/controller/services/services_controller.go +++ b/go-controller/pkg/ovn/controller/services/services_controller.go @@ -36,6 +36,7 @@ import ( libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics/recorders" 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" @@ -282,7 +283,7 @@ func (c *Controller) handleErr(err error, key string) { klog.ErrorS(err, "Failed to split meta namespace cache key", "key", key) } if err == nil { - metrics.GetConfigDurationRecorder().End("service", ns, name) + recorders.GetConfigDurationRecorder().End("service", ns, name) c.queue.Forget(key) return } @@ -296,7 +297,7 @@ func (c *Controller) handleErr(err error, key string) { } klog.Warningf("Dropping service %q out of the queue for network=%s: %v", key, c.netInfo.GetNetworkName(), err) - metrics.GetConfigDurationRecorder().End("service", ns, name) + recorders.GetConfigDurationRecorder().End("service", ns, name) c.queue.Forget(key) utilruntime.HandleError(err) } @@ -609,7 +610,7 @@ func (c *Controller) onServiceAdd(obj interface{}) { if c.skipService(service.Name, service.Namespace) { return } - metrics.GetConfigDurationRecorder().Start("service", service.Namespace, service.Name) + recorders.GetConfigDurationRecorder().Start("service", service.Namespace, service.Name) klog.V(5).Infof("Adding service %s for network=%s", key, c.netInfo.GetNetworkName()) c.queue.Add(key) } @@ -631,7 +632,7 @@ func (c *Controller) onServiceUpdate(oldObj, newObj interface{}) { return } - metrics.GetConfigDurationRecorder().Start("service", newService.Namespace, newService.Name) + recorders.GetConfigDurationRecorder().Start("service", newService.Namespace, newService.Name) c.queue.Add(key) } } @@ -651,7 +652,7 @@ func (c *Controller) onServiceDelete(obj interface{}) { klog.V(4).Infof("Deleting service %s for network=%s", key, c.netInfo.GetNetworkName()) - metrics.GetConfigDurationRecorder().Start("service", service.Namespace, service.Name) + recorders.GetConfigDurationRecorder().Start("service", service.Namespace, service.Name) c.queue.Add(key) } diff --git 
a/go-controller/pkg/ovn/default_network_controller.go b/go-controller/pkg/ovn/default_network_controller.go index cf6886e846..3ea00bcb17 100644 --- a/go-controller/pkg/ovn/default_network_controller.go +++ b/go-controller/pkg/ovn/default_network_controller.go @@ -22,6 +22,7 @@ import ( egressqoslisters "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/listers/egressqos/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics/recorders" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" @@ -356,7 +357,7 @@ func (oc *DefaultNetworkController) Stop() { // // If true, then either quit or perform a complete reconfiguration of the cluster (recreate switches/routers with new subnet values) func (oc *DefaultNetworkController) init() error { - existingNodes, err := oc.kube.GetNodes() + existingNodes, err := oc.watchFactory.GetNodes() if err != nil { klog.Errorf("Error in fetching nodes: %v", err) return err @@ -657,11 +658,11 @@ func (h *defaultNetworkControllerEventHandler) RecordAddEvent(obj interface{}) { pod := obj.(*corev1.Pod) klog.V(5).Infof("Recording add event on pod %s/%s", pod.Namespace, pod.Name) h.oc.podRecorder.AddPod(pod.UID) - metrics.GetConfigDurationRecorder().Start("pod", pod.Namespace, pod.Name) + recorders.GetConfigDurationRecorder().Start("pod", pod.Namespace, pod.Name) case factory.PolicyType: np := obj.(*knet.NetworkPolicy) klog.V(5).Infof("Recording add event on network policy %s/%s", np.Namespace, np.Name) - metrics.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) + recorders.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) } } @@ -671,11 +672,11 @@ func (h 
*defaultNetworkControllerEventHandler) RecordUpdateEvent(obj interface{} case factory.PodType: pod := obj.(*corev1.Pod) klog.V(5).Infof("Recording update event on pod %s/%s", pod.Namespace, pod.Name) - metrics.GetConfigDurationRecorder().Start("pod", pod.Namespace, pod.Name) + recorders.GetConfigDurationRecorder().Start("pod", pod.Namespace, pod.Name) case factory.PolicyType: np := obj.(*knet.NetworkPolicy) klog.V(5).Infof("Recording update event on network policy %s/%s", np.Namespace, np.Name) - metrics.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) + recorders.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) } } @@ -686,11 +687,11 @@ func (h *defaultNetworkControllerEventHandler) RecordDeleteEvent(obj interface{} pod := obj.(*corev1.Pod) klog.V(5).Infof("Recording delete event on pod %s/%s", pod.Namespace, pod.Name) h.oc.podRecorder.CleanPod(pod.UID) - metrics.GetConfigDurationRecorder().Start("pod", pod.Namespace, pod.Name) + recorders.GetConfigDurationRecorder().Start("pod", pod.Namespace, pod.Name) case factory.PolicyType: np := obj.(*knet.NetworkPolicy) klog.V(5).Infof("Recording delete event on network policy %s/%s", np.Namespace, np.Name) - metrics.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) + recorders.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) } } @@ -700,11 +701,11 @@ func (h *defaultNetworkControllerEventHandler) RecordSuccessEvent(obj interface{ case factory.PodType: pod := obj.(*corev1.Pod) klog.V(5).Infof("Recording success event on pod %s/%s", pod.Namespace, pod.Name) - metrics.GetConfigDurationRecorder().End("pod", pod.Namespace, pod.Name) + recorders.GetConfigDurationRecorder().End("pod", pod.Namespace, pod.Name) case factory.PolicyType: np := obj.(*knet.NetworkPolicy) klog.V(5).Infof("Recording success event on network policy %s/%s", np.Namespace, np.Name) - metrics.GetConfigDurationRecorder().End("networkpolicy", np.Namespace, 
np.Name) + recorders.GetConfigDurationRecorder().End("networkpolicy", np.Namespace, np.Name) } } diff --git a/go-controller/pkg/ovn/hybrid.go b/go-controller/pkg/ovn/hybrid.go index f7debe4ec8..6386d719e9 100644 --- a/go-controller/pkg/ovn/hybrid.go +++ b/go-controller/pkg/ovn/hybrid.go @@ -171,7 +171,7 @@ func (oc *DefaultNetworkController) setupHybridLRPolicySharedGw(nodeSubnets []*n // In cases of OpenShift SDN live migration, where config.HybridOverlay.ClusterSubnets is not provided, we // use the host subnets allocated by OpenShiftSDN as the hybrid-overlay-node-subnet and set up hybrid // overlay routes/policies to these subnets. - nodes, err := oc.kube.GetNodes() + nodes, err := oc.watchFactory.GetNodes() if err != nil { return err } @@ -407,7 +407,7 @@ func (oc *DefaultNetworkController) removeRoutesToHONodeSubnet(nodeName string, } // Delete routes to HO subnet from GRs - nodes, err := oc.kube.GetNodes() + nodes, err := oc.watchFactory.GetNodes() if err != nil { return err } diff --git a/go-controller/pkg/ovn/master.go b/go-controller/pkg/ovn/master.go index 26eb1277fe..78b5ce2402 100644 --- a/go-controller/pkg/ovn/master.go +++ b/go-controller/pkg/ovn/master.go @@ -730,7 +730,7 @@ func (oc *DefaultNetworkController) addUpdateHoNodeEvent(node *corev1.Node) erro return err } - nodes, err := oc.kube.GetNodes() + nodes, err := oc.watchFactory.GetNodes() if err != nil { return err } diff --git a/go-controller/pkg/ovn/master_test.go b/go-controller/pkg/ovn/master_test.go index ad545b94d1..914bf7f326 100644 --- a/go-controller/pkg/ovn/master_test.go +++ b/go-controller/pkg/ovn/master_test.go @@ -1167,7 +1167,7 @@ var _ = ginkgo.Describe("Default network controller operations", func() { types.OVNClusterRouter, badRoute, p) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Syncing node with OVNK") - node, err := oc.kube.GetNode(testNode.Name) + node, err := oc.kube.GetNodeForWindows(testNode.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = 
oc.syncNodeManagementPortDefault(node, node.Name, []*net.IPNet{subnet}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/go-controller/pkg/ovn/ovn_test.go b/go-controller/pkg/ovn/ovn_test.go index 0f2a9f1058..801777854a 100644 --- a/go-controller/pkg/ovn/ovn_test.go +++ b/go-controller/pkg/ovn/ovn_test.go @@ -276,7 +276,7 @@ func (o *FakeOVN) init(nadList []nettypes.NetworkAttachmentDefinition) { err = o.eIPController.SyncLocalNodeZonesCache() gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "syncing Nodes OVN zones status must succeed to support EgressIP") - existingNodes, err := o.controller.kube.GetNodes() + existingNodes, err := o.controller.watchFactory.GetNodes() if err == nil { for _, node := range existingNodes { o.controller.localZoneNodes.Store(node.Name, true) diff --git a/go-controller/pkg/ovn/secondary_localnet_network_controller.go b/go-controller/pkg/ovn/secondary_localnet_network_controller.go index 3c6fef1027..4046f819ce 100644 --- a/go-controller/pkg/ovn/secondary_localnet_network_controller.go +++ b/go-controller/pkg/ovn/secondary_localnet_network_controller.go @@ -15,7 +15,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/pod" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics/recorders" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" @@ -65,7 +65,7 @@ func (h *secondaryLocalnetNetworkControllerEventHandler) RecordAddEvent(obj inte case factory.MultiNetworkPolicyType: mnp := obj.(*mnpapi.MultiNetworkPolicy) klog.V(5).Infof("Recording add event on multinetwork policy %s/%s", mnp.Namespace, mnp.Name) - 
metrics.GetConfigDurationRecorder().Start("multinetworkpolicy", mnp.Namespace, mnp.Name) + recorders.GetConfigDurationRecorder().Start("multinetworkpolicy", mnp.Namespace, mnp.Name) } } diff --git a/go-controller/pkg/ovndbmanager/ovndbmanager.go b/go-controller/pkg/ovndbmanager/ovndbmanager.go index 2c5fac6ab9..7e26bb2b98 100644 --- a/go-controller/pkg/ovndbmanager/ovndbmanager.go +++ b/go-controller/pkg/ovndbmanager/ovndbmanager.go @@ -226,7 +226,7 @@ func ensureClusterRaftMembership(db *util.OvsDbProperties, kclient kube.Interfac r = regexp.MustCompile(`([a-z0-9]{4}) at ` + dbServerRegexp) members := r.FindAllStringSubmatch(out, -1) kickedMembersCount := 0 - dbPods, err := kclient.GetPods(config.Kubernetes.OVNConfigNamespace, metav1.ListOptions{ + dbPods, err := kclient.GetPodsForDBChecker(config.Kubernetes.OVNConfigNamespace, metav1.ListOptions{ LabelSelector: labels.Set(map[string]string{"ovn-db-pod": "true"}).String(), }) if err != nil { diff --git a/go-controller/pkg/types/const.go b/go-controller/pkg/types/const.go index 32c267b551..0577f54005 100644 --- a/go-controller/pkg/types/const.go +++ b/go-controller/pkg/types/const.go @@ -313,4 +313,17 @@ const ( // NFTNoPMTUDRemoteNodeIPsv6 is a set used to track remote node IPs that do not belong to // the local node's subnet. 
NFTNoPMTUDRemoteNodeIPsv6 = "no-pmtud-remote-node-ips-v6" + + // Metrics + MetricOvnkubeNamespace = "ovnkube" + MetricOvnkubeSubsystemController = "controller" + MetricOvnkubeSubsystemClusterManager = "clustermanager" + MetricOvnkubeSubsystemNode = "node" + MetricOvnNamespace = "ovn" + MetricOvnSubsystemDB = "db" + MetricOvnSubsystemNorthd = "northd" + MetricOvnSubsystemController = "controller" + MetricOvsNamespace = "ovs" + MetricOvsSubsystemVswitchd = "vswitchd" + MetricOvsSubsystemDB = "db" ) From 81ab59524a0ddc208b10643e640615f97e9eec6d Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Thu, 29 May 2025 18:32:42 -0400 Subject: [PATCH 006/181] Fix node update check for network cluster controller Introduced in 836ec3680aa33300d631b3e7f3bd3069bbfed2b9 This would just cause node updates to fire HandleAddUpdateNodeEvent everytime as the code prior to the aforementioned commit would have. Signed-off-by: Tim Rozet --- go-controller/pkg/clustermanager/network_cluster_controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-controller/pkg/clustermanager/network_cluster_controller.go b/go-controller/pkg/clustermanager/network_cluster_controller.go index fde745ac00..0f0f7358e8 100644 --- a/go-controller/pkg/clustermanager/network_cluster_controller.go +++ b/go-controller/pkg/clustermanager/network_cluster_controller.go @@ -576,7 +576,7 @@ func (h *networkClusterControllerEventHandler) UpdateResource(oldObj, newObj int // 1. we missed an add event (bug in kapi informer code) // 2. 
a user removed the annotation on the node // Either way to play it safe for now do a partial json unmarshal check - if !nodeFailed && util.NoHostSubnet(oldNode) != util.NoHostSubnet(newNode) && !h.ncc.nodeAllocator.NeedsNodeAllocation(newNode) { + if !nodeFailed && util.NoHostSubnet(oldNode) == util.NoHostSubnet(newNode) && !h.ncc.nodeAllocator.NeedsNodeAllocation(newNode) { // no other node updates would require us to reconcile again return nil } From 2b812dd76b7d2a3765c3ee194d4548d7a8422f73 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Thu, 29 May 2025 20:49:58 -0400 Subject: [PATCH 007/181] Change NeedsNodeAllocation to a positive check We have unit tests that check to see if only certain annotations were removed, rather than an all or nothing approach. Additionally this function was added as a failsafe in case a user did modify the annotations, or some other unforseen event where the annotations are now missing. Change the function to check each annotation (if it applies to the allocator). 
Signed-off-by: Tim Rozet --- .../pkg/clustermanager/node/node_allocator.go | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/go-controller/pkg/clustermanager/node/node_allocator.go b/go-controller/pkg/clustermanager/node/node_allocator.go index 63593618b2..83c3d80fde 100644 --- a/go-controller/pkg/clustermanager/node/node_allocator.go +++ b/go-controller/pkg/clustermanager/node/node_allocator.go @@ -195,27 +195,24 @@ func (na *NodeAllocator) NeedsNodeAllocation(node *corev1.Node) bool { } // ovn node check - // allocation is all or nothing, so if one field was allocated from: - // nodeSubnets, joinSubnet, layer 2 tunnel id, then all of them were if na.hasNodeSubnetAllocation() { - if util.HasNodeHostSubnetAnnotation(node, na.netInfo.GetNetworkName()) { - return false + if !util.HasNodeHostSubnetAnnotation(node, na.netInfo.GetNetworkName()) { + return true } } - if na.hasJoinSubnetAllocation() { - if util.HasNodeGatewayRouterJoinNetwork(node, na.netInfo.GetNetworkName()) { - return false + if !util.HasNodeGatewayRouterJoinNetwork(node, na.netInfo.GetNetworkName()) { + return true } } if util.IsNetworkSegmentationSupportEnabled() && na.netInfo.IsPrimaryNetwork() && util.DoesNetworkRequireTunnelIDs(na.netInfo) { - if util.HasUDNLayer2NodeGRLRPTunnelID(node, na.netInfo.GetNetworkName()) { - return false + if !util.HasUDNLayer2NodeGRLRPTunnelID(node, na.netInfo.GetNetworkName()) { + return true } } - return true + return false } From c68299e597fa02c5d7e7faad588939183b335032 Mon Sep 17 00:00:00 2001 From: Jitse Klomp Date: Tue, 3 Jun 2025 12:01:10 +0200 Subject: [PATCH 008/181] Add mermaid mkdocs plugin Signed-off-by: Jitse Klomp --- mkdocs.yml | 1 + requirements.txt | 1 + 2 files changed, 2 insertions(+) diff --git a/mkdocs.yml b/mkdocs.yml index e21134af5a..ef1b23e0cb 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -42,6 +42,7 @@ plugins: - macros: #include_dir: examples j2_line_comment_prefix: "#$" + - mermaid2 - blog: # NOTE: 
configuration options can be found at # https://squidfunk.github.io/mkdocs-material/setup/setting-up-a-blog/ diff --git a/requirements.txt b/requirements.txt index ecb270c79d..bb1c507df1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,6 +10,7 @@ mkdocs-awesome-pages-plugin mkdocs-macros-plugin mkdocs-material mkdocs-material-extensions +mkdocs-mermaid2-plugin mike pep562 Pygments From 07973c386692604deb3a1248df07710327adc61d Mon Sep 17 00:00:00 2001 From: Jitse Klomp Date: Tue, 3 Jun 2025 12:59:11 +0200 Subject: [PATCH 009/181] Add custom_fences config to mkdocs.yml Signed-off-by: Jitse Klomp --- mkdocs.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/mkdocs.yml b/mkdocs.yml index ef1b23e0cb..658b2ae20f 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -59,7 +59,11 @@ markdown_extensions: - pymdownx.details - pymdownx.highlight - pymdownx.inlinehilite - - pymdownx.superfences + - pymdownx.superfences: + custom_fences: + - name: mermaid + class: mermaid + format: !!python/name:mermaid2.fence_mermaid_custom - pymdownx.snippets: base_path: site-src check_paths: true From b56df72578329832990aedd3dc09dddab9788513 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Mon, 2 Jun 2025 16:01:51 -0400 Subject: [PATCH 010/181] Some quality of life improvements for layer 3 controllers node handling During update node events, local and remote addOrUpdate functions are called. There are a series of sync checks used to know what to configure. However, in some cases log messages were being printed no matter what, and hybrid overlay was being processed on every node event. This cleans things up so that hybrid overlay is only sync'ed when necessary, and logs are only printed when work is being done to add the local or remote node. Also, removes an old test case for hybrid overlay where the node-subnets annotation of a node was being removed. 
First introduced here: https://github.com/ovn-kubernetes/ovn-kubernetes/commit/aef135c61f2849d2a1f40bc48caa30b1d3ed30ef#diff-9ab180ea9a39f81dc8334a00ca8ea5e4cd04f9491c27dcfd910b07929c9ddbb5R193 It's not totally clear what the purpose of this test was, but we do not support clearing OVN configuration when OVNK assigned annotations are removed by the user. The node-subnets annotation should not be removed, and if is removed, it should be configured back onto the node by cluster-manager. Signed-off-by: Tim Rozet --- .../pkg/ovn/default_network_controller.go | 37 ++- go-controller/pkg/ovn/hybrid.go | 14 +- go-controller/pkg/ovn/hybrid_test.go | 223 ------------------ go-controller/pkg/ovn/master.go | 68 ++++-- .../secondary_layer3_network_controller.go | 6 +- .../ovn/zone_interconnect/zone_ic_handler.go | 2 +- 6 files changed, 89 insertions(+), 261 deletions(-) diff --git a/go-controller/pkg/ovn/default_network_controller.go b/go-controller/pkg/ovn/default_network_controller.go index 3ea00bcb17..f6b3727c55 100644 --- a/go-controller/pkg/ovn/default_network_controller.go +++ b/go-controller/pkg/ovn/default_network_controller.go @@ -15,6 +15,7 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" + hotypes "github.com/ovn-org/ovn-kubernetes/go-controller/hybrid-overlay/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/controller" egressfirewall "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1" @@ -762,6 +763,16 @@ func (h *defaultNetworkControllerEventHandler) AddResource(obj interface{}, from var aggregatedErrors []error if h.oc.isLocalZoneNode(node) { var nodeParams *nodeSyncs + hoNeedsCleanup := false + if !config.HybridOverlay.Enabled { + // check if the node has the stale annotations on it to signal that we need to clean up + if _, exists := node.Annotations[hotypes.HybridOverlayDRIP]; exists { + hoNeedsCleanup = true + } + if _, exist := 
node.Annotations[hotypes.HybridOverlayDRMAC]; exist { + hoNeedsCleanup = true + } + } if fromRetryLoop { _, nodeSync := h.oc.addNodeFailed.Load(node.Name) _, clusterRtrSync := h.oc.nodeClusterRouterPortFailed.Load(node.Name) @@ -774,7 +785,7 @@ func (h *defaultNetworkControllerEventHandler) AddResource(obj interface{}, from syncClusterRouterPort: clusterRtrSync, syncMgmtPort: mgmtSync, syncGw: gwSync, - syncHo: hoSync, + syncHo: hoSync || hoNeedsCleanup, syncZoneIC: zoneICSync} } else { nodeParams = &nodeSyncs{ @@ -782,10 +793,9 @@ func (h *defaultNetworkControllerEventHandler) AddResource(obj interface{}, from syncClusterRouterPort: true, syncMgmtPort: true, syncGw: true, - syncHo: config.HybridOverlay.Enabled, + syncHo: config.HybridOverlay.Enabled || hoNeedsCleanup, syncZoneIC: config.OVNKubernetesFeature.EnableInterconnect} } - if err = h.oc.addUpdateLocalNodeEvent(node, nodeParams); err != nil { klog.Infof("Node add failed for %s, will try again later: %v", node.Name, err) @@ -941,6 +951,16 @@ func (h *defaultNetworkControllerEventHandler) UpdateResource(oldObj, newObj int _, failed = h.oc.gatewaysFailed.Load(newNode.Name) gwSync := failed || gatewayChanged(oldNode, newNode) || nodeSubnetChange || hostCIDRsChanged(oldNode, newNode) || nodeGatewayMTUSupportChanged(oldNode, newNode) + hoNeedsCleanup := false + if !config.HybridOverlay.Enabled { + // check if the node has the stale annotations on it to signal that we need to clean up + if _, exists := newNode.Annotations[hotypes.HybridOverlayDRIP]; exists { + hoNeedsCleanup = true + } + if _, exist := newNode.Annotations[hotypes.HybridOverlayDRMAC]; exist { + hoNeedsCleanup = true + } + } _, hoSync := h.oc.hybridOverlayFailed.Load(newNode.Name) _, syncZoneIC := h.oc.syncZoneICFailed.Load(newNode.Name) syncZoneIC = syncZoneIC || zoneClusterChanged || primaryAddrChanged(oldNode, newNode) @@ -949,12 +969,12 @@ func (h *defaultNetworkControllerEventHandler) UpdateResource(oldObj, newObj int syncClusterRouterPort: 
clusterRtrSync, syncMgmtPort: mgmtSync, syncGw: gwSync, - syncHo: hoSync, + syncHo: hoSync || hoNeedsCleanup, syncZoneIC: syncZoneIC, } } else { - klog.Infof("Node %s moved from the remote zone %s to local zone %s.", - newNode.Name, util.GetNodeZone(oldNode), util.GetNodeZone(newNode)) + klog.Infof("Node %s moved from the remote zone %s to local zone %s, in network: %q", + newNode.Name, util.GetNodeZone(oldNode), util.GetNodeZone(newNode), h.oc.GetNetworkName()) // The node is now a local zone node. Trigger a full node sync. nodeSyncsParam = &nodeSyncs{ syncNode: true, @@ -964,7 +984,6 @@ func (h *defaultNetworkControllerEventHandler) UpdateResource(oldObj, newObj int syncHo: true, syncZoneIC: config.OVNKubernetesFeature.EnableInterconnect} } - if err := h.oc.addUpdateLocalNodeEvent(newNode, nodeSyncsParam); err != nil { aggregatedErrors = append(aggregatedErrors, err) } @@ -977,8 +996,8 @@ func (h *defaultNetworkControllerEventHandler) UpdateResource(oldObj, newObj int syncZoneIC = syncZoneIC || h.oc.isLocalZoneNode(oldNode) || nodeSubnetChange || zoneClusterChanged || switchToOvnNode || nodeEncapIPsChanged if syncZoneIC { - klog.Infof("Node %s in remote zone %s needs interconnect zone sync up. Zone cluster changed: %v", - newNode.Name, util.GetNodeZone(newNode), zoneClusterChanged) + klog.Infof("Node %q in remote zone %q, network %q, needs interconnect zone sync up. 
Zone cluster changed: %v", + newNode.Name, util.GetNodeZone(newNode), h.oc.GetNetworkName(), zoneClusterChanged) } if err := h.oc.addUpdateRemoteNodeEvent(newNode, syncZoneIC); err != nil { aggregatedErrors = append(aggregatedErrors, err) diff --git a/go-controller/pkg/ovn/hybrid.go b/go-controller/pkg/ovn/hybrid.go index 6386d719e9..7c84dea2aa 100644 --- a/go-controller/pkg/ovn/hybrid.go +++ b/go-controller/pkg/ovn/hybrid.go @@ -137,12 +137,18 @@ func (oc *DefaultNetworkController) handleHybridOverlayPort(node *corev1.Node, a } func (oc *DefaultNetworkController) deleteHybridOverlayPort(node *corev1.Node) error { - klog.Infof("Removing node %s hybrid overlay port", node.Name) portName := util.GetHybridOverlayPortName(node.Name) lsp := nbdb.LogicalSwitchPort{Name: portName} - sw := nbdb.LogicalSwitch{Name: oc.GetNetworkScopedSwitchName(node.Name)} - if err := libovsdbops.DeleteLogicalSwitchPorts(oc.nbClient, &sw, &lsp); err != nil { - return err + if _, err := libovsdbops.GetLogicalSwitchPort(oc.nbClient, &lsp); err != nil { + if !errors.Is(err, libovsdbclient.ErrNotFound) { + return fmt.Errorf("failed to get logical switch port for hybrid overlay port %s, err: %v", portName, err) + } + } else { + sw := nbdb.LogicalSwitch{Name: oc.GetNetworkScopedSwitchName(node.Name)} + klog.Infof("Removing node %s hybrid overlay port", node.Name) + if err := libovsdbops.DeleteLogicalSwitchPorts(oc.nbClient, &sw, &lsp); err != nil { + return err + } } if err := oc.removeHybridLRPolicySharedGW(node); err != nil { return err diff --git a/go-controller/pkg/ovn/hybrid_test.go b/go-controller/pkg/ovn/hybrid_test.go index a65294f345..1663e5a8f8 100644 --- a/go-controller/pkg/ovn/hybrid_test.go +++ b/go-controller/pkg/ovn/hybrid_test.go @@ -1475,229 +1475,6 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) - ginkgo.It("cleans up a Linux node when the OVN hostsubnet annotation is removed", func() { - app.Action = 
func(ctx *cli.Context) error { - const ( - nodeHOMAC string = "0a:58:0a:01:01:03" - hoSubnet string = "11.1.0.0/16" - nodeHOIP string = "10.1.1.3" - ) - node1 := tNode{ - Name: "node1", - NodeIP: "1.2.3.4", - NodeLRPMAC: "0a:58:0a:01:01:01", - LrpIP: "100.64.0.2", - DrLrpIP: "100.64.0.1", - PhysicalBridgeMAC: "11:22:33:44:55:66", - SystemID: "cb9ec8fa-b409-4ef3-9f42-d9283c47aac6", - NodeSubnet: "10.1.1.0/24", - GWRouter: types.GWRouterPrefix + "node1", - GatewayRouterIPMask: "172.16.16.2/24", - GatewayRouterIP: "172.16.16.2", - GatewayRouterNextHop: "172.16.16.1", - PhysicalBridgeName: "br-eth0", - NodeGWIP: "10.1.1.1/24", - NodeMgmtPortIP: "10.1.1.2", - //NodeMgmtPortMAC: "0a:58:0a:01:01:02", - NodeMgmtPortMAC: "0a:58:64:40:00:03", - DnatSnatIP: "169.254.0.1", - } - testNode := node1.k8sNode("2") - - kubeFakeClient := fake.NewSimpleClientset(&corev1.NodeList{ - Items: []corev1.Node{testNode}, - }) - egressFirewallFakeClient := &egressfirewallfake.Clientset{} - egressIPFakeClient := &egressipfake.Clientset{} - egressQoSFakeClient := &egressqosfake.Clientset{} - egressServiceFakeClient := &egressservicefake.Clientset{} - fakeClient := &util.OVNMasterClientset{ - KubeClient: kubeFakeClient, - EgressIPClient: egressIPFakeClient, - EgressFirewallClient: egressFirewallFakeClient, - EgressQoSClient: egressQoSFakeClient, - EgressServiceClient: egressServiceFakeClient, - } - - vlanID := 1024 - _, err := config.InitConfig(ctx, nil, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - config.Kubernetes.HostNetworkNamespace = "" - nodeAnnotator := kube.NewNodeAnnotator(&kube.Kube{KClient: kubeFakeClient}, testNode.Name) - l3Config := node1.gatewayConfig(config.GatewayModeShared, uint(vlanID)) - err = util.SetL3GatewayConfig(nodeAnnotator, l3Config) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = util.UpdateNodeManagementPortMACAddresses(&testNode, nodeAnnotator, - ovntest.MustParseMAC(node1.NodeMgmtPortMAC), types.DefaultNetworkName) - 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, ovntest.MustParseIPNets(node1.NodeSubnet)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = util.SetNodeHostCIDRs(nodeAnnotator, sets.New(fmt.Sprintf("%s/24", node1.NodeIP))) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = nodeAnnotator.Run() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - updatedNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - hostAddrs, err := util.ParseNodeHostCIDRsDropNetMask(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - f, err = factory.NewMasterWatchFactory(fakeClient) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = f.Start() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - expectedClusterLBGroup := newLoadBalancerGroup(types.ClusterLBGroupName) - expectedSwitchLBGroup := newLoadBalancerGroup(types.ClusterSwitchLBGroupName) - expectedRouterLBGroup := newLoadBalancerGroup(types.ClusterRouterLBGroupName) - expectedOVNClusterRouter := newOVNClusterRouter() - ovnClusterRouterLRP := &nbdb.LogicalRouterPort{ - Name: types.GWRouterToJoinSwitchPrefix + types.OVNClusterRouter, - Networks: []string{"100.64.0.1/16"}, - UUID: types.GWRouterToJoinSwitchPrefix + types.OVNClusterRouter + "-UUID", - } - expectedOVNClusterRouter.Ports = []string{ovnClusterRouterLRP.UUID} - expectedNodeSwitch := node1.logicalSwitch([]string{expectedClusterLBGroup.UUID, expectedSwitchLBGroup.UUID}) - expectedClusterRouterPortGroup := newRouterPortGroup() - expectedClusterPortGroup := newClusterPortGroup() - - dbSetup := libovsdbtest.TestSetup{ - NBData: []libovsdbtest.TestData{ - newClusterJoinSwitch(), - expectedNodeSwitch, - ovnClusterRouterLRP, - expectedOVNClusterRouter, - 
expectedClusterRouterPortGroup, - expectedClusterPortGroup, - expectedClusterLBGroup, - expectedSwitchLBGroup, - expectedRouterLBGroup, - }, - } - var libovsdbOvnNBClient, libovsdbOvnSBClient libovsdbclient.Client - libovsdbOvnNBClient, libovsdbOvnSBClient, libovsdbCleanup, err = libovsdbtest.NewNBSBTestHarness(dbSetup) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - expectedDatabaseState := []libovsdbtest.TestData{ovnClusterRouterLRP} - expectedDatabaseState = addNodeLogicalFlows(expectedDatabaseState, expectedOVNClusterRouter, expectedNodeSwitch, expectedClusterRouterPortGroup, expectedClusterPortGroup, &node1) - - clusterController, err := NewOvnController( - fakeClient, - f, - stopChan, - nil, - networkmanager.Default().Interface(), - libovsdbOvnNBClient, - libovsdbOvnSBClient, - record.NewFakeRecorder(10), - wg, - nil, - NewPortCache(stopChan), - ) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - setupCOPP := true - setupClusterController(clusterController, setupCOPP) - - //assuming all the pods have finished processing - atomic.StoreUint32(&clusterController.allInitialPodsProcessed, 1) - // Let the real code run and ensure OVN database sync - gomega.Expect(clusterController.WatchNodes()).To(gomega.Succeed()) - - gomega.Eventually(func() (map[string]string, error) { - updatedNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - return updatedNode.Annotations, nil - }, 2).Should(gomega.HaveKeyWithValue(hotypes.HybridOverlayDRMAC, nodeHOMAC)) - - subnet := ovntest.MustParseIPNet(node1.NodeSubnet) - err = clusterController.syncDefaultGatewayLogicalNetwork(updatedNode, l3GatewayConfig, []*net.IPNet{subnet}, hostAddrs.UnsortedList()) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - var clusterSubnets []*net.IPNet - for _, clusterSubnet := range config.Default.ClusterSubnets { - clusterSubnets = append(clusterSubnets, clusterSubnet.CIDR) - } - - skipSnat 
:= false - expectedDatabaseState = generateGatewayInitExpectedNB(expectedDatabaseState, expectedOVNClusterRouter, - expectedNodeSwitch, node1.Name, clusterSubnets, []*net.IPNet{subnet}, l3Config, - []*net.IPNet{classBIPAddress(node1.LrpIP)}, []*net.IPNet{classBIPAddress(node1.DrLrpIP)}, skipSnat, - node1.NodeMgmtPortIP, "1400") - - hybridSubnetStaticRoute1, hybridLogicalRouterStaticRoute, hybridSubnetLRP1, hybridSubnetLRP2, hybridLogicalSwitchPort := setupHybridOverlayOVNObjects(node1, "", hoSubnet, nodeHOIP, nodeHOMAC) - - var node1LogicalRouter *nbdb.LogicalRouter - var basicNode1StaticRoutes []string - - for _, obj := range expectedDatabaseState { - if logicalRouter, ok := obj.(*nbdb.LogicalRouter); ok { - if logicalRouter.Name == "GR_node1" { - // keep a referance so that we can edit this object - node1LogicalRouter = logicalRouter - basicNode1StaticRoutes = logicalRouter.StaticRoutes - logicalRouter.StaticRoutes = append(logicalRouter.StaticRoutes, hybridLogicalRouterStaticRoute.UUID) - } - } - } - - // keep copies of these before appending hybrid overlay elements - basicExpectedNodeSwitchPorts := expectedNodeSwitch.Ports - basicExpectedOVNClusterRouterPolicies := expectedOVNClusterRouter.Policies - basicExpectedOVNClusterStaticRoutes := expectedOVNClusterRouter.StaticRoutes - - expectedNodeSwitch.Ports = append(expectedNodeSwitch.Ports, hybridLogicalSwitchPort.UUID) - expectedOVNClusterRouter.Policies = append(expectedOVNClusterRouter.Policies, hybridSubnetLRP1.UUID, hybridSubnetLRP2.UUID) - expectedOVNClusterRouter.StaticRoutes = append(expectedOVNClusterRouter.StaticRoutes, hybridSubnetStaticRoute1.UUID) - - expectedDatabaseStateWithHybridNode := append([]libovsdbtest.TestData{hybridSubnetStaticRoute1, hybridSubnetLRP2, hybridSubnetLRP1, hybridLogicalSwitchPort, hybridLogicalRouterStaticRoute}, expectedDatabaseState...) 
- expectedStaticMACBinding := &nbdb.StaticMACBinding{ - UUID: "MAC-binding-HO-UUID", - IP: nodeHOIP, - LogicalPort: "rtos-node1", - MAC: nodeHOMAC, - OverrideDynamicMAC: true, - } - expectedDatabaseStateWithHybridNode = append(expectedDatabaseStateWithHybridNode, expectedStaticMACBinding) - gomega.Eventually(libovsdbOvnNBClient).Should(libovsdbtest.HaveData(expectedDatabaseStateWithHybridNode)) - - nodeAnnotator = kube.NewNodeAnnotator(&kube.Kube{KClient: kubeFakeClient}, testNode.Name) - util.DeleteNodeHostSubnetAnnotation(nodeAnnotator) - err = nodeAnnotator.Run() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - gomega.Eventually(func() (map[string]string, error) { - updatedNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - return updatedNode.Annotations, nil - }, 5).ShouldNot(gomega.HaveKey(hotypes.HybridOverlayDRMAC)) - - // restore values from the non-hybrid versions - expectedNodeSwitch.Ports = basicExpectedNodeSwitchPorts - expectedOVNClusterRouter.Policies = basicExpectedOVNClusterRouterPolicies - expectedOVNClusterRouter.StaticRoutes = basicExpectedOVNClusterStaticRoutes - node1LogicalRouter.StaticRoutes = basicNode1StaticRoutes - - gomega.Eventually(libovsdbOvnNBClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) - - return nil - } - err := app.Run([]string{ - app.Name, - "-cluster-subnets=" + clusterCIDR, - "-gateway-mode=shared", - "-enable-hybrid-overlay", - "-hybrid-overlay-cluster-subnets=" + hybridOverlayClusterCIDR, - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - ginkgo.It("cleans up a Linux node that has hybridOverlay annotations and database objects when hybrid overlay is disabled", func() { app.Action = func(ctx *cli.Context) error { const ( diff --git a/go-controller/pkg/ovn/master.go b/go-controller/pkg/ovn/master.go index 78b5ce2402..c2ca98a59e 100644 --- a/go-controller/pkg/ovn/master.go +++ 
b/go-controller/pkg/ovn/master.go @@ -470,6 +470,16 @@ type nodeSyncs struct { syncReroute bool } +func nodeNeedsSync(syncs *nodeSyncs) bool { + return syncs.syncNode || + syncs.syncClusterRouterPort || + syncs.syncMgmtPort || + syncs.syncGw || + syncs.syncHo || + syncs.syncZoneIC || + syncs.syncReroute +} + func (oc *DefaultNetworkController) addUpdateLocalNodeEvent(node *corev1.Node, nSyncs *nodeSyncs) error { var hostSubnets []*net.IPNet var errs []error @@ -492,7 +502,11 @@ func (oc *DefaultNetworkController) addUpdateLocalNodeEvent(node *corev1.Node, n return nil } - klog.Infof("Adding or Updating Node %q", node.Name) + if !nodeNeedsSync(nSyncs) { + return nil + } + + klog.Infof("Adding or Updating local node %q for network %q", node.Name, oc.GetNetworkName()) if nSyncs.syncNode { if hostSubnets, err = oc.addNode(node); err != nil { oc.addNodeFailed.Store(node.Name, true) @@ -509,12 +523,10 @@ func (oc *DefaultNetworkController) addUpdateLocalNodeEvent(node *corev1.Node, n } // since the nodeSync objects are created knowing if hybridOverlay is enabled this should work - if nSyncs.syncHo { + if nSyncs.syncHo && config.HybridOverlay.Enabled { if err = oc.allocateHybridOverlayDRIP(node); err != nil { errs = append(errs, err) oc.hybridOverlayFailed.Store(node.Name, true) - } else { - oc.hybridOverlayFailed.Delete(node.Name) } } @@ -551,27 +563,37 @@ func (oc *DefaultNetworkController) addUpdateLocalNodeEvent(node *corev1.Node, n } } - annotator := kube.NewNodeAnnotator(oc.kube, node.Name) - if config.HybridOverlay.Enabled { - if err := oc.handleHybridOverlayPort(node, annotator); err != nil { - errs = append(errs, fmt.Errorf("failed to set up hybrid overlay logical switch port for %s: %v", node.Name, err)) - } - } else { - // the node needs to cleanup Hybrid overlay annotations LogicalRouterPolicies and Hybrid overlay port - // if it has them and hybrid overlay is not enabled - if err := oc.deleteHybridOverlayPort(node); err != nil { - errs = append(errs, err) - } 
- if _, exist := node.Annotations[hotypes.HybridOverlayDRMAC]; exist { - annotator.Delete(hotypes.HybridOverlayDRMAC) + if nSyncs.syncHo { + annotator := kube.NewNodeAnnotator(oc.kube, node.Name) + if config.HybridOverlay.Enabled { + if err := oc.handleHybridOverlayPort(node, annotator); err != nil { + errs = append(errs, fmt.Errorf("failed to set up hybrid overlay logical switch port for %s: %v", node.Name, err)) + oc.hybridOverlayFailed.Store(node.Name, true) + } else { + oc.hybridOverlayFailed.Delete(node.Name) + } + } else { + // pedantic - node should never be stored in hybridOverlayFailed if HO is not enabled + oc.hybridOverlayFailed.Delete(node.Name) + + // the node needs to cleanup Hybrid overlay annotations LogicalRouterPolicies and Hybrid overlay port + // if it has them and hybrid overlay is not enabled + if err := oc.deleteHybridOverlayPort(node); err != nil { + errs = append(errs, err) + } else { + // only clear annotations if tear down was successful + if _, exist := node.Annotations[hotypes.HybridOverlayDRMAC]; exist { + annotator.Delete(hotypes.HybridOverlayDRMAC) + } + if _, exist := node.Annotations[hotypes.HybridOverlayDRIP]; exist { + annotator.Delete(hotypes.HybridOverlayDRIP) + } + } } - if _, exist := node.Annotations[hotypes.HybridOverlayDRIP]; exist { - annotator.Delete(hotypes.HybridOverlayDRIP) + if err := annotator.Run(); err != nil { + errs = append(errs, fmt.Errorf("failed to set hybrid overlay annotations for node %s: %v", node.Name, err)) } } - if err := annotator.Run(); err != nil { - errs = append(errs, fmt.Errorf("failed to set hybrid overlay annotations for node %s: %v", node.Name, err)) - } if nSyncs.syncGw { err := oc.syncNodeGateway(node, nil) @@ -653,8 +675,8 @@ func (oc *DefaultNetworkController) addUpdateRemoteNodeEvent(node *corev1.Node, } else { oc.syncZoneICFailed.Delete(node.Name) } + klog.V(5).Infof("Creating Interconnect resources for remote node %q on network %q took: %s", node.Name, oc.GetNetworkName(), 
time.Since(start)) } - klog.V(5).Infof("Creating Interconnect resources for node %v took: %s", node.Name, time.Since(start)) return err } diff --git a/go-controller/pkg/ovn/secondary_layer3_network_controller.go b/go-controller/pkg/ovn/secondary_layer3_network_controller.go index 65ca015ab7..cb9f82d08f 100644 --- a/go-controller/pkg/ovn/secondary_layer3_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer3_network_controller.go @@ -715,7 +715,11 @@ func (oc *SecondaryLayer3NetworkController) addUpdateLocalNodeEvent(node *corev1 return nil } - klog.Infof("Adding or Updating Node %q for network %s", node.Name, oc.GetNetworkName()) + if !nodeNeedsSync(nSyncs) { + return nil + } + + klog.Infof("Adding or Updating local node %q for network %q", node.Name, oc.GetNetworkName()) if nSyncs.syncNode { if hostSubnets, err = oc.addNode(node); err != nil { oc.addNodeFailed.Store(node.Name, true) diff --git a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go index 3e4cfa458b..d46c567c33 100644 --- a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go +++ b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go @@ -218,7 +218,7 @@ func (zic *ZoneInterconnectHandler) AddRemoteZoneNode(node *corev1.Node) error { if err := zic.createRemoteZoneNodeResources(node, nodeID); err != nil { return fmt.Errorf("creating interconnect resources for remote zone node %s for the network %s failed : err - %w", node.Name, zic.GetNetworkName(), err) } - klog.Infof("Creating Interconnect resources for node %v took: %s", node.Name, time.Since(start)) + klog.Infof("Creating Interconnect resources for node %q on network %q took: %s", node.Name, zic.GetNetworkName(), time.Since(start)) return nil } From edc159d0add6c45dfd81cefccec0333bb2d96af8 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Mon, 2 Jun 2025 17:36:01 -0400 Subject: [PATCH 011/181] Optimize ic handler a little for UDN When remote nodes are added (as 
new UDNs are created) the first remote add always fails. This is because the controller is waiting for the subnets annotation to be updated for the network. However, it only partially fails. It fails when the routes are attempting to be added, but this is after the logical switch port logic and some other parsing has already been done. Rather than execute this work twice, just bail early if the node does not have all of the annotations yet. This way we can execute the majority of the work only one time. With this change, only once all annotations are present will you see: "Creating interconnect resources for remote zone node" Signed-off-by: Tim Rozet --- .../ovn/zone_interconnect/zone_ic_handler.go | 88 ++++++++++--------- 1 file changed, 47 insertions(+), 41 deletions(-) diff --git a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go index d46c567c33..87e178ae5b 100644 --- a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go +++ b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go @@ -207,7 +207,6 @@ func (zic *ZoneInterconnectHandler) AddLocalZoneNode(node *corev1.Node) error { // // See createRemoteZoneNodeResources() below for more details. 
func (zic *ZoneInterconnectHandler) AddRemoteZoneNode(node *corev1.Node) error { start := time.Now() - klog.Infof("Creating interconnect resources for remote zone node %s for the network %s", node.Name, zic.GetNetworkName()) nodeID := util.GetNodeID(node) if nodeID == -1 { @@ -215,7 +214,50 @@ func (zic *ZoneInterconnectHandler) AddRemoteZoneNode(node *corev1.Node) error { return fmt.Errorf("failed to get node id for node - %s", node.Name) } - if err := zic.createRemoteZoneNodeResources(node, nodeID); err != nil { + nodeSubnets, err := util.ParseNodeHostSubnetAnnotation(node, zic.GetNetworkName()) + if err != nil { + err = fmt.Errorf("failed to parse node %s subnets annotation %w", node.Name, err) + if util.IsAnnotationNotSetError(err) { + // remote node may not have the annotation yet, suppress it + return types.NewSuppressedError(err) + } + return err + } + + nodeTransitSwitchPortIPs, err := util.ParseNodeTransitSwitchPortAddrs(node) + if err != nil || len(nodeTransitSwitchPortIPs) == 0 { + err = fmt.Errorf("failed to get the node transit switch port IP addresses : %w", err) + if util.IsAnnotationNotSetError(err) { + return types.NewSuppressedError(err) + } + return err + } + + var nodeGRPIPs []*net.IPNet + // only primary networks have cluster router connected to join switch+GR + // used for adding routes to GR + if !zic.IsSecondary() || (util.IsNetworkSegmentationSupportEnabled() && zic.IsPrimaryNetwork()) { + nodeGRPIPs, err = util.ParseNodeGatewayRouterJoinAddrs(node, zic.GetNetworkName()) + if err != nil { + if util.IsAnnotationNotSetError(err) { + // FIXME(tssurya): This is present for backwards compatibility + // Remove me a few months from now + var err1 error + nodeGRPIPs, err1 = util.ParseNodeGatewayRouterLRPAddrs(node) + if err1 != nil { + err1 = fmt.Errorf("failed to parse node %s Gateway router LRP Addrs annotation %w", node.Name, err1) + if util.IsAnnotationNotSetError(err1) { + return types.NewSuppressedError(err1) + } + return err1 + } + } + } + } 
+ + klog.Infof("Creating interconnect resources for remote zone node %s for the network %s", node.Name, zic.GetNetworkName()) + + if err := zic.createRemoteZoneNodeResources(node, nodeID, nodeTransitSwitchPortIPs, nodeSubnets, nodeGRPIPs); err != nil { return fmt.Errorf("creating interconnect resources for remote zone node %s for the network %s failed : err - %w", node.Name, zic.GetNetworkName(), err) } klog.Infof("Creating Interconnect resources for node %q on network %q took: %s", node.Name, zic.GetNetworkName(), time.Since(start)) @@ -403,16 +445,7 @@ func (zic *ZoneInterconnectHandler) createLocalZoneNodeResources(node *corev1.No // if the node name is ovn-worker and the network name is blue, the logical port name would be - blue.tstor.ovn-worker // - binds the remote port to the node remote chassis in SBDB // - adds static routes for the remote node via the remote port ip in the ovn_cluster_router -func (zic *ZoneInterconnectHandler) createRemoteZoneNodeResources(node *corev1.Node, nodeID int) error { - nodeTransitSwitchPortIPs, err := util.ParseNodeTransitSwitchPortAddrs(node) - if err != nil || len(nodeTransitSwitchPortIPs) == 0 { - err = fmt.Errorf("failed to get the node transit switch port IP addresses : %w", err) - if util.IsAnnotationNotSetError(err) { - return types.NewSuppressedError(err) - } - return err - } - +func (zic *ZoneInterconnectHandler) createRemoteZoneNodeResources(node *corev1.Node, nodeID int, nodeTransitSwitchPortIPs, nodeSubnets, nodeGRPIPs []*net.IPNet) error { transitRouterPortMac := util.IPAddrToHWAddr(nodeTransitSwitchPortIPs[0].IP) var transitRouterPortNetworks []string for _, ip := range nodeTransitSwitchPortIPs { @@ -438,7 +471,7 @@ func (zic *ZoneInterconnectHandler) createRemoteZoneNodeResources(node *corev1.N return err } - if err := zic.addRemoteNodeStaticRoutes(node, nodeTransitSwitchPortIPs); err != nil { + if err := zic.addRemoteNodeStaticRoutes(node, nodeTransitSwitchPortIPs, nodeSubnets, nodeGRPIPs); err != nil { return 
err } @@ -534,7 +567,7 @@ func (zic *ZoneInterconnectHandler) cleanupNodeTransitSwitchPort(nodeName string // Then the below static routes are added // ip4.dst == 10.244.0.0/24 , nexthop = 100.88.0.2 // ip4.dst == 100.64.0.2/16 , nexthop = 100.88.0.2 (only for default primary network) -func (zic *ZoneInterconnectHandler) addRemoteNodeStaticRoutes(node *corev1.Node, nodeTransitSwitchPortIPs []*net.IPNet) error { +func (zic *ZoneInterconnectHandler) addRemoteNodeStaticRoutes(node *corev1.Node, nodeTransitSwitchPortIPs, nodeSubnets, nodeGRPIPs []*net.IPNet) error { addRoute := func(prefix, nexthop string) error { logicalRouterStaticRoute := nbdb.LogicalRouterStaticRoute{ ExternalIDs: map[string]string{ @@ -554,16 +587,6 @@ func (zic *ZoneInterconnectHandler) addRemoteNodeStaticRoutes(node *corev1.Node, return nil } - nodeSubnets, err := util.ParseNodeHostSubnetAnnotation(node, zic.GetNetworkName()) - if err != nil { - err = fmt.Errorf("failed to parse node %s subnets annotation %w", node.Name, err) - if util.IsAnnotationNotSetError(err) { - // remote node may not have the annotation yet, suppress it - return types.NewSuppressedError(err) - } - return err - } - nodeSubnetStaticRoutes := zic.getStaticRoutes(nodeSubnets, nodeTransitSwitchPortIPs, false) for _, staticRoute := range nodeSubnetStaticRoutes { // Possible optimization: Add all the routes in one transaction @@ -580,23 +603,6 @@ func (zic *ZoneInterconnectHandler) addRemoteNodeStaticRoutes(node *corev1.Node, return nil } - nodeGRPIPs, err := util.ParseNodeGatewayRouterJoinAddrs(node, zic.GetNetworkName()) - if err != nil { - if util.IsAnnotationNotSetError(err) { - // FIXME(tssurya): This is present for backwards compatibility - // Remove me a few months from now - var err1 error - nodeGRPIPs, err1 = util.ParseNodeGatewayRouterLRPAddrs(node) - if err1 != nil { - err1 = fmt.Errorf("failed to parse node %s Gateway router LRP Addrs annotation %w", node.Name, err1) - if util.IsAnnotationNotSetError(err1) { - return 
types.NewSuppressedError(err1) - } - return err1 - } - } - } - nodeGRPIPStaticRoutes := zic.getStaticRoutes(nodeGRPIPs, nodeTransitSwitchPortIPs, true) for _, staticRoute := range nodeGRPIPStaticRoutes { // Possible optimization: Add all the routes in one transaction From 98518eaae261296c10ae18d20c783056ee95ee1d Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Mon, 2 Jun 2025 17:54:18 -0400 Subject: [PATCH 012/181] Minor improvement to route add for remote zone nodes Just execute the 2 route adds in the same txn Signed-off-by: Tim Rozet --- .../ovn/zone_interconnect/zone_ic_handler.go | 31 +++++++++---------- 1 file changed, 14 insertions(+), 17 deletions(-) diff --git a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go index 87e178ae5b..cc849b6c15 100644 --- a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go +++ b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go @@ -13,6 +13,7 @@ import ( utilnet "k8s.io/utils/net" libovsdbclient "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" @@ -568,6 +569,7 @@ func (zic *ZoneInterconnectHandler) cleanupNodeTransitSwitchPort(nodeName string // ip4.dst == 10.244.0.0/24 , nexthop = 100.88.0.2 // ip4.dst == 100.64.0.2/16 , nexthop = 100.88.0.2 (only for default primary network) func (zic *ZoneInterconnectHandler) addRemoteNodeStaticRoutes(node *corev1.Node, nodeTransitSwitchPortIPs, nodeSubnets, nodeGRPIPs []*net.IPNet) error { + ops := make([]ovsdb.Operation, 0, 2) addRoute := func(prefix, nexthop string) error { logicalRouterStaticRoute := nbdb.LogicalRouterStaticRoute{ ExternalIDs: map[string]string{ @@ -581,37 +583,32 @@ func (zic *ZoneInterconnectHandler) addRemoteNodeStaticRoutes(node *corev1.Node, lrsr.Nexthop == nexthop && lrsr.ExternalIDs["ic-node"] == node.Name } - if 
err := libovsdbops.CreateOrReplaceLogicalRouterStaticRouteWithPredicate(zic.nbClient, zic.networkClusterRouterName, &logicalRouterStaticRoute, p); err != nil { - return fmt.Errorf("failed to create static route: %w", err) + var err error + ops, err = libovsdbops.CreateOrReplaceLogicalRouterStaticRouteWithPredicateOps(zic.nbClient, ops, zic.networkClusterRouterName, &logicalRouterStaticRoute, p) + if err != nil { + return fmt.Errorf("failed to create static route ops: %w", err) } return nil } nodeSubnetStaticRoutes := zic.getStaticRoutes(nodeSubnets, nodeTransitSwitchPortIPs, false) for _, staticRoute := range nodeSubnetStaticRoutes { - // Possible optimization: Add all the routes in one transaction if err := addRoute(staticRoute.prefix, staticRoute.nexthop); err != nil { return fmt.Errorf("error adding static route %s - %s to the router %s : %w", staticRoute.prefix, staticRoute.nexthop, zic.networkClusterRouterName, err) } } - if zic.IsSecondary() && !(util.IsNetworkSegmentationSupportEnabled() && zic.IsPrimaryNetwork()) { - // Secondary network cluster router doesn't connect to a join switch - // or to a Gateway router. - // - // Except for UDN primary L3 networks. 
- return nil - } - - nodeGRPIPStaticRoutes := zic.getStaticRoutes(nodeGRPIPs, nodeTransitSwitchPortIPs, true) - for _, staticRoute := range nodeGRPIPStaticRoutes { - // Possible optimization: Add all the routes in one transaction - if err := addRoute(staticRoute.prefix, staticRoute.nexthop); err != nil { - return fmt.Errorf("error adding static route %s - %s to the router %s : %w", staticRoute.prefix, staticRoute.nexthop, zic.networkClusterRouterName, err) + if len(nodeGRPIPs) > 0 { + nodeGRPIPStaticRoutes := zic.getStaticRoutes(nodeGRPIPs, nodeTransitSwitchPortIPs, true) + for _, staticRoute := range nodeGRPIPStaticRoutes { + if err := addRoute(staticRoute.prefix, staticRoute.nexthop); err != nil { + return fmt.Errorf("error adding static route %s - %s to the router %s : %w", staticRoute.prefix, staticRoute.nexthop, zic.networkClusterRouterName, err) + } } } - return nil + _, err := libovsdbops.TransactAndCheck(zic.nbClient, ops) + return err } // deleteLocalNodeStaticRoutes deletes the static routes added by the function addRemoteNodeStaticRoutes From 4fa8bf0087671d2683679bd24a6e217050e09f5c Mon Sep 17 00:00:00 2001 From: Enrique Llorente Date: Tue, 3 Jun 2025 10:15:44 +0200 Subject: [PATCH 013/181] udn: Fix NAD template for join subnets field When a CUDN/UDN is create with joinSubnets field configured it should generate the net-attach-def with `joinSubnet` field, the code was using `joinSubnets` wich is not undertood by ovn-kubernetes. 
Signed-off-by: Enrique Llorente --- .../template/net-attach-def-template.go | 2 +- .../template/net-attach-def-template_test.go | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template.go b/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template.go index 8d11c0960b..a06e7085ed 100644 --- a/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template.go +++ b/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template.go @@ -192,7 +192,7 @@ func renderCNINetworkConfig(networkName, nadName string, spec SpecGetter) (map[s cniNetConf["mtu"] = mtu } if len(netConfSpec.JoinSubnet) > 0 { - cniNetConf["joinSubnets"] = netConfSpec.JoinSubnet + cniNetConf["joinSubnet"] = netConfSpec.JoinSubnet } if len(netConfSpec.Subnets) > 0 { cniNetConf["subnets"] = netConfSpec.Subnets diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template_test.go b/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template_test.go index 0c06f3a270..68f2e4022a 100644 --- a/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template_test.go +++ b/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template_test.go @@ -326,7 +326,7 @@ var _ = Describe("NetAttachDefTemplate", func() { "netAttachDefName": "mynamespace/test-net", "role": "primary", "topology": "layer3", - "joinSubnets": "100.65.0.0/16,fd99::/64", + "joinSubnet": "100.65.0.0/16,fd99::/64", "subnets": "192.168.100.0/16,2001:dbb::/60", "mtu": 1500 }`, @@ -350,7 +350,7 @@ var _ = Describe("NetAttachDefTemplate", func() { "netAttachDefName": "mynamespace/test-net", "role": "primary", "topology": "layer2", - "joinSubnets": "100.65.0.0/16,fd99::/64", + "joinSubnet": "100.65.0.0/16,fd99::/64", "subnets": "192.168.100.0/24,2001:dbb::/64", "mtu": 1500, 
"allowPersistentIPs": true @@ -376,7 +376,7 @@ var _ = Describe("NetAttachDefTemplate", func() { "netAttachDefName": "mynamespace/test-net", "role": "primary", "topology": "layer2", - "joinSubnets": "100.62.0.0/24,fd92::/64", + "joinSubnet": "100.62.0.0/24,fd92::/64", "subnets": "192.168.100.0/24,2001:dbb::/64", "mtu": 1500, "allowPersistentIPs": true @@ -461,7 +461,7 @@ var _ = Describe("NetAttachDefTemplate", func() { "netAttachDefName": "mynamespace/test-net", "role": "primary", "topology": "layer3", - "joinSubnets": "100.65.0.0/16,fd99::/64", + "joinSubnet": "100.65.0.0/16,fd99::/64", "subnets": "192.168.100.0/16,2001:dbb::/60", "mtu": 1500 }`, @@ -485,7 +485,7 @@ var _ = Describe("NetAttachDefTemplate", func() { "netAttachDefName": "mynamespace/test-net", "role": "primary", "topology": "layer2", - "joinSubnets": "100.65.0.0/16,fd99::/64", + "joinSubnet": "100.65.0.0/16,fd99::/64", "subnets": "192.168.100.0/24,2001:dbb::/64", "mtu": 1500, "allowPersistentIPs": true @@ -511,7 +511,7 @@ var _ = Describe("NetAttachDefTemplate", func() { "netAttachDefName": "mynamespace/test-net", "role": "primary", "topology": "layer2", - "joinSubnets": "100.62.0.0/24,fd92::/64", + "joinSubnet": "100.62.0.0/24,fd92::/64", "subnets": "192.168.100.0/24,2001:dbb::/64", "mtu": 1500, "allowPersistentIPs": true From 399915a64ba05538a1c41e67f91c1c1d95f3c397 Mon Sep 17 00:00:00 2001 From: Alin Gabriel Serdean Date: Sun, 8 Jun 2025 14:14:35 +0000 Subject: [PATCH 014/181] workflow: Add fix missing and apt update before trying to install VRF module Signed-off-by: Alin Gabriel Serdean --- .github/workflows/test.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index dea0289e73..748144a7cf 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -200,6 +200,7 @@ jobs: if: steps.is_pr_image_build_needed.outputs.PR_IMAGE_RESTORED != 'true' && success() run: | set -x + sudo apt update sudo apt-get install 
linux-modules-extra-$(uname -r) -y sudo modprobe vrf @@ -500,6 +501,7 @@ jobs: - name: Install VRF kernel module run: | set -x + sudo apt update sudo apt-get install linux-modules-extra-$(uname -r) -y sudo modprobe vrf From 575f3c017c8b729b836a61c0fcb409b4b910803b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Caama=C3=B1o=20Ruiz?= Date: Tue, 27 May 2025 10:30:52 +0000 Subject: [PATCH 015/181] Align e2e test timeouts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit So that ginkgo times out first and we get useful output. Signed-off-by: Jaime Caamaño Ruiz --- .github/workflows/test.yml | 4 +++- test/scripts/e2e-cp.sh | 9 +++++++-- test/scripts/e2e-kind.sh | 2 +- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 748144a7cf..7e574ecf4d 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -616,7 +616,9 @@ jobs: - name: Run Tests # e2e tests take ~60 minutes normally, 120 should be more than enough # set 3 hours for control-plane tests as these might take a while - timeout-minutes: ${{ matrix.target == 'control-plane' && 180 || matrix.target == 'control-plane-helm' && 180 || matrix.target == 'external-gateway' && 180 || 120 }} + # give 10m extra to give ginkgo chance to timeout before github so that we + # get its output + timeout-minutes: ${{ matrix.target == 'bgp' && 190 || matrix.target == 'control-plane' && 190 || matrix.target == 'control-plane-helm' && 190 || matrix.target == 'external-gateway' && 190 || 130 }} run: | # used by e2e diagnostics package export OVN_IMAGE="ovn-daemonset-fedora:pr" diff --git a/test/scripts/e2e-cp.sh b/test/scripts/e2e-cp.sh index cf9589e589..40d8071f92 100755 --- a/test/scripts/e2e-cp.sh +++ b/test/scripts/e2e-cp.sh @@ -184,13 +184,18 @@ export NUM_NODES=2 FOCUS=$(echo ${@:1} | sed 's/ /\\s/g') +# Ginkgo test timeout needs to be lower than both github's timeout and go test +# timeout to be able 
to get proper Ginkgo output when it happens. +TEST_TIMEOUT=${TEST_TIMEOUT:-180} +GO_TEST_TIMEOUT=$((TEST_TIMEOUT + 5)) + pushd e2e go mod download -go test -test.timeout 180m -v . \ +go test -test.timeout ${GO_TEST_TIMEOUT}m -v . \ -ginkgo.v \ -ginkgo.focus ${FOCUS:-.} \ - -ginkgo.timeout 3h \ + -ginkgo.timeout ${TEST_TIMEOUT}m \ -ginkgo.flake-attempts ${FLAKE_ATTEMPTS:-2} \ -ginkgo.skip="${SKIPPED_TESTS}" \ -ginkgo.junit-report=${E2E_REPORT_DIR}/junit_${E2E_REPORT_PREFIX}report.xml \ diff --git a/test/scripts/e2e-kind.sh b/test/scripts/e2e-kind.sh index 2ec08b59ff..1cab2fc05d 100755 --- a/test/scripts/e2e-kind.sh +++ b/test/scripts/e2e-kind.sh @@ -200,7 +200,7 @@ fi # timeout needs to be lower than github's timeout. Otherwise github terminates # the job and doesn't give ginkgo a chance to print status so that we know why # the timeout happened. -TEST_TIMEOUT=${TEST_TIMEOUT:-100m} +TEST_TIMEOUT=${TEST_TIMEOUT:-120m} ginkgo --nodes=${NUM_NODES} \ --focus=${FOCUS} \ From b5bc88df55c4ac0654d2350f01996f85474f43ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Caama=C3=B1o=20Ruiz?= Date: Thu, 5 Jun 2025 13:01:17 +0000 Subject: [PATCH 016/181] Bump priority of egress ClusterIP traffic drop MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We have a flow [1] to prevent leaking traffic towards a ClusterIP. However we also have a flow to prevent EIP traffic to egress before being SNATed and an additional flow to actually allow the traffic to egress in ICNI/BGP scenarios for pods on the nodes subnet [2]. The higher priority of flow [2] prevents flow [1] to be in effect. Bump priority of flow [1] since there is no case where we should leak traffic towards ClusterIPs. 
[1] cookie=0xdeff105, duration=492.235s, table=0, n_packets=0, n_bytes=0, priority=105,ipv6,in_port="patch-breth0_ov",ipv6_dst=fd00:10:96::/112 actions=drop [2] cookie=0xdeff105, duration=2308.615s, table=0, n_packets=4, n_bytes=376, priority=109,ipv6,in_port="patch-breth0_ov",dl_src=96:b0:34:18:12:7c,ipv6_src=fd00:10:244:1::/64 actions=ct(commit,zone=64000,exec(load:0x1->NXM_NX_CT_MARK[])),output:eth0 cookie=0xdeff105, duration=1991.854s, table=0, n_packets=0, n_bytes=0, priority=104,ipv6,in_port="patch-breth0_ov",ipv6_src=fd00:10:244::/48 actions=drop Signed-off-by: Jaime Caamaño Ruiz --- go-controller/pkg/node/gateway_shared_intf.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index bd7a17f8ca..2654291850 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -1676,7 +1676,7 @@ func flowsForDefaultBridge(bridge *bridgeConfiguration, extraIPs []net.IP) ([]st // at the GR load balancer or switch load balancer. It means the correct port wasn't provided. 
// nodeCIDR->serviceCIDR traffic flow is internal and it shouldn't be carried to outside the cluster dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=105, in_port=%s, %s, %s_dst=%s,"+ + fmt.Sprintf("cookie=%s, priority=115, in_port=%s, %s, %s_dst=%s,"+ "actions=drop", defaultOpenFlowCookie, netConfig.ofPortPatch, protoPrefix, protoPrefix, svcCIDR)) } } From 19f39c2cfa16a18bab3bf7b9287db7ef36dacd7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Caama=C3=B1o=20Ruiz?= Date: Tue, 27 May 2025 10:33:08 +0000 Subject: [PATCH 017/181] Change BGP e2e lane config MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change configuration in preparation for running all control plane tests: * Make both dualstack, not much value testing IPv4 single stack * Make one of the lanes noSnatGW to get signal from that as well * Enable multicast and empty LB events * Configure host to be able to route to networks from the external world * Ensure frr container is not able to route through the host/runner Signed-off-by: Jaime Caamaño Ruiz --- .github/workflows/test.yml | 8 ++++---- contrib/kind-common | 13 ++++++++++--- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 7e574ecf4d..2cb46a04ee 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -464,15 +464,15 @@ jobs: - {"target": "network-segmentation", "ha": "noHA", "gateway-mode": "shared", "ipfamily": "ipv4", "disable-snat-multiple-gws": "noSnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones"} - {"target": "network-segmentation", "ha": "noHA", "gateway-mode": "shared", "ipfamily": "ipv6", "disable-snat-multiple-gws": "noSnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones"} - {"target": "bgp", "ha": "noHA", "gateway-mode": "local", "ipfamily": "dualstack", "disable-snat-multiple-gws": "snatGW", "second-bridge": "1br", "ic": "ic-single-node-zones", "routeadvertisements": 
"advertise-default", "network-segmentation": "enable-network-segmentation"} - - {"target": "bgp", "ha": "noHA", "gateway-mode": "shared", "ipfamily": "ipv4", "disable-snat-multiple-gws": "snatGW", "second-bridge": "1br", "ic": "ic-single-node-zones", "routeadvertisements": "advertise-default", "network-segmentation": "enable-network-segmentation"} + - {"target": "bgp", "ha": "noHA", "gateway-mode": "shared", "ipfamily": "dualstack", "disable-snat-multiple-gws": "noSnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones", "routeadvertisements": "advertise-default", "network-segmentation": "enable-network-segmentation"} - {"target": "traffic-flow-test-only","ha": "noHA", "gateway-mode": "shared", "ipfamily": "ipv4", "disable-snat-multiple-gws": "noSnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones", "traffic-flow-tests": "1-24", "network-segmentation": "enable-network-segmentation"} - {"target": "tools", "ha": "noHA", "gateway-mode": "local", "ipfamily": "dualstack", "disable-snat-multiple-gws": "SnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones", "network-segmentation": "enable-network-segmentation"} needs: [ build-pr ] env: JOB_NAME: "${{ matrix.target }}-${{ matrix.ha }}-${{ matrix.gateway-mode }}-${{ matrix.ipfamily }}-${{ matrix.disable-snat-multiple-gws }}-${{ matrix.second-bridge }}-${{ matrix.ic }}" OVN_HYBRID_OVERLAY_ENABLE: ${{ (matrix.target == 'control-plane' || matrix.target == 'control-plane-helm') && (matrix.ipfamily == 'ipv4' || matrix.ipfamily == 'dualstack' ) }} - OVN_MULTICAST_ENABLE: "${{ matrix.target == 'control-plane' || matrix.target == 'control-plane-helm' || matrix.target == 'network-segmentation' }}" - OVN_EMPTY_LB_EVENTS: "${{ matrix.target == 'control-plane' || matrix.target == 'control-plane-helm' }}" + OVN_MULTICAST_ENABLE: "${{ matrix.target == 'control-plane' || matrix.target == 'control-plane-helm' || matrix.target == 'network-segmentation' || matrix.target == 'bgp' }}" + OVN_EMPTY_LB_EVENTS: "${{ 
matrix.target == 'control-plane' || matrix.target == 'control-plane-helm' || matrix.target == 'bgp' }}" OVN_HA: "${{ matrix.ha == 'HA' }}" OVN_DISABLE_SNAT_MULTIPLE_GWS: "${{ matrix.disable-snat-multiple-gws == 'noSnatGW' }}" KIND_INSTALL_METALLB: "${{ matrix.target == 'control-plane' || matrix.target == 'control-plane-helm' || matrix.target == 'network-segmentation' }}" @@ -559,7 +559,7 @@ jobs: echo OVN_TEST_EX_GW_NETWORK=xgw >> $GITHUB_ENV echo OVN_ENABLE_EX_GW_NETWORK_BRIDGE=true >> $GITHUB_ENV fi - if [[ "$JOB_NAME" == *"shard-conformance"* ]] && [ "$ADVERTISE_DEFAULT_NETWORK" == "true" ]; then + if [ "$ADVERTISE_DEFAULT_NETWORK" == "true" ]; then echo "ADVERTISE_DEFAULT_NETWORK=true" >> $GITHUB_ENV # Use proper variable declaration with default values diff --git a/contrib/kind-common b/contrib/kind-common index 66cc078d3e..e8bfb7be01 100644 --- a/contrib/kind-common +++ b/contrib/kind-common @@ -751,6 +751,13 @@ deploy_bgp_external_server() { echo "FRR kind network IPv6: ${bgp_network_frr_v6}" $OCI_BIN exec bgpserver ip -6 route replace default via "$bgp_network_frr_v6" fi + # disable the default route to make sure the container only routes accross + # directly connected or learnt networks (doing this at the very end since + # docker changes the routing table when a new network is connected) + docker exec frr ip route delete default + docker exec frr ip route + docker exec frr ip -6 route delete default + docker exec frr ip -6 route } destroy_bgp() { @@ -817,7 +824,7 @@ EOF rm -rf "${FRR_TMP_DIR}" # Add routes for pod networks dynamically into the github runner for return traffic to pass back - if [ -n "${JOB_NAME:-}" ] && [[ "$JOB_NAME" == *"shard-conformance"* ]] && [ "$ADVERTISE_DEFAULT_NETWORK" == "true" ]; then + if [ "$ADVERTISE_DEFAULT_NETWORK" = "true" ]; then echo "Adding routes for Kubernetes pod networks..." 
NODES=$(kubectl get nodes -o jsonpath='{.items[*].metadata.name}') echo "Found nodes: $NODES" @@ -835,7 +842,7 @@ EOF # Add IPv4 route if [ -n "$ipv4_subnet" ] && [ -n "$node_ipv4" ]; then echo "Adding IPv4 route for $node ($node_ipv4): $ipv4_subnet" - sudo ip route add $ipv4_subnet via $node_ipv4 + sudo ip route replace $ipv4_subnet via $node_ipv4 fi fi @@ -847,7 +854,7 @@ EOF if [ -n "$ipv6_subnet" ] && [ -n "$node_ipv6" ]; then echo "Adding IPv6 route for $node ($node_ipv6): $ipv6_subnet" - sudo ip -6 route add $ipv6_subnet via $node_ipv6 + sudo ip -6 route replace $ipv6_subnet via $node_ipv6 fi fi done From 90b88fabf98655ae675d36d71a25e4b099145ca7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Caama=C3=B1o=20Ruiz?= Date: Tue, 27 May 2025 10:36:09 +0000 Subject: [PATCH 018/181] Run almost all control plane tests in BGP lanes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Skip those test that wouldn't be supported or otherwise require additional work. 
Signed-off-by: Jaime Caamaño Ruiz --- .github/workflows/test.yml | 2 +- test/scripts/e2e-cp.sh | 157 +++++++++++++++++++------------------ 2 files changed, 82 insertions(+), 77 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 2cb46a04ee..7965b19d76 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -643,7 +643,7 @@ jobs: elif [ "${{ matrix.target }}" == "network-segmentation" ]; then make -C test control-plane WHAT="Network Segmentation" elif [ "${{ matrix.target }}" == "bgp" ]; then - make -C test control-plane WHAT="BGP" + make -C test control-plane elif [ "${{ matrix.target }}" == "tools" ]; then make -C go-controller build make -C test tools diff --git a/test/scripts/e2e-cp.sh b/test/scripts/e2e-cp.sh index 40d8071f92..59fc1cd01a 100755 --- a/test/scripts/e2e-cp.sh +++ b/test/scripts/e2e-cp.sh @@ -33,147 +33,152 @@ queries to the hostNetworked server pod on another node shall work for UDP|\ ipv4 pod" SKIPPED_TESTS="" +skip() { + if [ "$SKIPPED_TESTS" != "" ]; then + SKIPPED_TESTS+="|" + fi + SKIPPED_TESTS+=$* +} if [ "$PLATFORM_IPV4_SUPPORT" == true ]; then - if [ "$PLATFORM_IPV6_SUPPORT" == true ]; then - # No support for these features in dual-stack yet - SKIPPED_TESTS="hybrid.overlay" - else - # Skip sflow in IPv4 since it's a long test (~5 minutes) - # We're validating netflow v5 with an ipv4 cluster, sflow with an ipv6 cluster - SKIPPED_TESTS="Should validate flow data of br-int is sent to an external gateway with sflow|ipv6 pod" - fi + if [ "$PLATFORM_IPV6_SUPPORT" == true ]; then + # No support for these features in dual-stack yet + skip "hybrid.overlay" + else + # Skip sflow in IPv4 since it's a long test (~5 minutes) + # We're validating netflow v5 with an ipv4 cluster, sflow with an ipv6 cluster + skip "Should validate flow data of br-int is sent to an external gateway with sflow|ipv6 pod" + fi fi if [ "$PLATFORM_IPV4_SUPPORT" == false ]; then - SKIPPED_TESTS+="\[IPv4\]" + skip "\[IPv4\]" 
fi if [ "$OVN_HA" == false ]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi # No support for these features in no-ha mode yet # TODO streamline the db delete tests - SKIPPED_TESTS+="recovering from deleting db files while maintaining connectivity|\ -Should validate connectivity before and after deleting all the db-pods at once in HA mode" + skip "recovering from deleting db files while maintaining connectivity" + skip "Should validate connectivity before and after deleting all the db-pods at once in HA mode" else - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi - - SKIPPED_TESTS+="Should validate connectivity before and after deleting all the db-pods at once in Non-HA mode|\ - e2e br-int NetFlow export validation" + skip "Should validate connectivity before and after deleting all the db-pods at once in Non-HA mode" + skip "e2e br-int NetFlow export validation" fi if [ "$PLATFORM_IPV6_SUPPORT" == true ]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi # No support for these tests in IPv6 mode yet - SKIPPED_TESTS+=$IPV6_SKIPPED_TESTS + skip $IPV6_SKIPPED_TESTS fi if [ "$OVN_DISABLE_SNAT_MULTIPLE_GWS" == false ]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi - SKIPPED_TESTS+="e2e multiple external gateway stale conntrack entry deletion validation" + skip "e2e multiple external gateway stale conntrack entry deletion validation" fi if [ "$OVN_GATEWAY_MODE" == "shared" ]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi - SKIPPED_TESTS+="Should ensure load balancer service|LGW" # See https://github.com/ovn-org/ovn-kubernetes/issues/4138 for details + skip "Should ensure load balancer service|LGW" fi if [ "$OVN_GATEWAY_MODE" == "local" ]; then - # See https://github.com/ovn-org/ovn-kubernetes/labels/ci-ipv6 for details: + # See https://github.com/ovn-org/ovn-kubernetes/labels/ci-ipv6 for details if [ "$PLATFORM_IPV6_SUPPORT" == true ]; then - if [ "$SKIPPED_TESTS" != "" 
]; then - SKIPPED_TESTS+="|" - fi - SKIPPED_TESTS+="Should be allowed by nodeport services|\ -Should successfully create then remove a static pod|\ -Should validate connectivity from a pod to a non-node host address on same node|\ -Should validate connectivity within a namespace of pods on separate nodes|\ -Services" + skip "Should be allowed by nodeport services" + skip "Should successfully create then remove a static pod" + skip "Should validate connectivity from a pod to a non-node host address on same node" + skip "Should validate connectivity within a namespace of pods on separate nodes" + skip "Services" fi fi # skipping the egress ip legacy health check test because it requires two # sequenced rollouts of both ovnkube-node and ovnkube-master that take a lot of # time. -SKIPPED_TESTS+="${SKIPPED_TESTS:+|}disabling egress nodes impeding Legacy health check" +skip "disabling egress nodes impeding Legacy health check" if [ "$ENABLE_MULTI_NET" != "true" ]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi - SKIPPED_TESTS+="Multi Homing" + skip "Multi Homing" fi if [ "$OVN_NETWORK_QOS_ENABLE" != "true" ]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi - SKIPPED_TESTS+="e2e NetworkQoS validation" + skip "e2e NetworkQoS validation" fi # Only run Node IP/MAC address migration tests if they are explicitly requested IP_MIGRATION_TESTS="Node IP and MAC address migration" if [[ "${WHAT}" != "${IP_MIGRATION_TESTS}"* ]]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi - SKIPPED_TESTS+="Node IP and MAC address migration" + skip "Node IP and MAC address migration" fi # Only run Multi node zones interconnect tests if they are explicitly requested MULTI_NODE_ZONES_TESTS="Multi node zones interconnect" if [[ "${WHAT}" != "${MULTI_NODE_ZONES_TESTS}"* ]]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi - SKIPPED_TESTS+="Multi node zones interconnect" + skip "Multi node zones interconnect" fi # Only 
run external gateway tests if they are explicitly requested EXTERNAL_GATEWAY_TESTS="External Gateway" if [[ "${WHAT}" != "${EXTERNAL_GATEWAY_TESTS}"* ]]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi - SKIPPED_TESTS+="External Gateway" + skip "External Gateway" fi # Only run kubevirt virtual machines tests if they are explicitly requested KV_LIVE_MIGRATION_TESTS="Kubevirt Virtual Machines" if [[ "${WHAT}" != "${KV_LIVE_MIGRATION_TESTS}"* ]]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi - SKIPPED_TESTS+=$KV_LIVE_MIGRATION_TESTS + skip $KV_LIVE_MIGRATION_TESTS fi # Only run network segmentation tests if they are explicitly requested NETWORK_SEGMENTATION_TESTS="Network Segmentation" if [[ "${WHAT}" != "${NETWORK_SEGMENTATION_TESTS}"* ]]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi - SKIPPED_TESTS+=$NETWORK_SEGMENTATION_TESTS + skip $NETWORK_SEGMENTATION_TESTS fi -# Only run bgp tests if they are explicitly requested BGP_TESTS="BGP" -if [[ "${WHAT}" != "${BGP_TESTS}"* ]]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" +if [ "$ENABLE_ROUTE_ADVERTISEMENTS" != true ]; then + skip $BGP_TESTS +else + if [ "$ADVERTISE_DEFAULT_NETWORK" = true ]; then + # Some test don't work when the default network is advertised, either because + # the configuration that the test excercises does not make sense for an advertised network, or + # there is some bug or functional gap + # call out case by case + + # pod reached from default network through secondary interface, asymetric, configuration does not make sense + # TODO: perhaps the secondary network attached pods should not be attached to default network + skip "Multi Homing A single pod with an OVN-K secondary network attached to a localnet network mapped to breth0 can be reached by a client pod in the default network on the same node" + skip "Multi Homing A single pod with an OVN-K secondary network attached to a localnet network mapped to breth0 
can be reached by a client pod in the default network on a different node" + + # these tests require metallb but the configuration we do for it is not compatible with the configuration we do to advertise the default network + # TODO: consolidate configuration + skip "Load Balancer Service Tests with MetalLB" + skip "EgressService" + + # tests that specifically expect the node SNAT to happen + # TODO: expect the pod IP where it makes sense + skip "e2e egress firewall policy validation with external containers" + skip "e2e egress IP validation Cluster Default Network \[OVN network\] Using different methods to disable a node's availability for egress Should validate the egress IP functionality against remote hosts" + skip "e2e egress IP validation Cluster Default Network \[OVN network\] Should validate the egress IP SNAT functionality against host-networked pods" + skip "e2e egress IP validation Cluster Default Network Should validate egress IP logic when one pod is managed by more than one egressIP object" + skip "e2e egress IP validation Cluster Default Network Should re-assign egress IPs when node readiness / reachability goes down/up" + skip "Pod to external server PMTUD when a client ovnk pod targeting an external server is created when tests are run towards the agnhost echo server queries to the hostNetworked server pod on another node shall work for UDP" + + # https://issues.redhat.com/browse/OCPBUGS-55028 + skip "e2e egress IP validation Cluster Default Network \[secondary-host-eip\]" + + # https://issues.redhat.com/browse/OCPBUGS-50636 + skip "Services of type NodePort should listen on each host addresses" + skip "Services of type NodePort should work on secondary node interfaces for ETP=local and ETP=cluster when backend pods are also served by EgressIP" + + # https://github.com/ovn-kubernetes/ovn-kubernetes/issues/5240 + skip "e2e control plane test node readiness according to its defaults interface MTU size should get node not ready with a too small MTU" + 
+ # buggy tests that don't work in dual stack mode + skip "Service Hairpin SNAT Should ensure service hairpin traffic is NOT SNATed to hairpin masquerade IP; GR LB" + skip "Services when a nodePort service targeting a pod with hostNetwork:false is created when tests are run towards the agnhost echo service queries to the nodePort service shall work for TCP" + skip "Services when a nodePort service targeting a pod with hostNetwork:true is created when tests are run towards the agnhost echo service queries to the nodePort service shall work for TCP" + skip "Services when a nodePort service targeting a pod with hostNetwork:false is created when tests are run towards the agnhost echo service queries to the nodePort service shall work for UDP" + skip "Services when a nodePort service targeting a pod with hostNetwork:true is created when tests are run towards the agnhost echo service queries to the nodePort service shall work for UDP" fi - SKIPPED_TESTS+=$BGP_TESTS fi # setting these is required to make RuntimeClass tests work ... :/ @@ -182,7 +187,7 @@ export KUBE_CONTAINER_RUNTIME_ENDPOINT=unix:///run/containerd/containerd.sock export KUBE_CONTAINER_RUNTIME_NAME=containerd export NUM_NODES=2 -FOCUS=$(echo ${@:1} | sed 's/ /\\s/g') +FOCUS=$(echo "${@:1}" | sed 's/ /\\s/g') # Ginkgo test timeout needs to be lower than both github's timeout and go test # timeout to be able to get proper Ginkgo output when it happens. 
From f84d3f34252561e2ded05fcf39d026b615268107 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Caama=C3=B1o=20Ruiz?= Date: Thu, 5 Jun 2025 18:10:54 +0000 Subject: [PATCH 019/181] Fix HO test flake MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jaime Caamaño Ruiz --- go-controller/pkg/ovn/master_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-controller/pkg/ovn/master_test.go b/go-controller/pkg/ovn/master_test.go index 914bf7f326..0c3ba9e7a8 100644 --- a/go-controller/pkg/ovn/master_test.go +++ b/go-controller/pkg/ovn/master_test.go @@ -1639,7 +1639,7 @@ var _ = ginkgo.Describe("Default network controller operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("adding the node becomes possible") - gomega.Expect(oc.retryNodes.ResourceHandler.AddResource(&testNode, false)).To(gomega.Succeed()) + gomega.Eventually(oc.retryNodes.ResourceHandler.AddResource).WithArguments(&testNode, false).Should(gomega.Succeed()) return nil } From 9554ba6e988904cb3341ba4a91de520526e9436a Mon Sep 17 00:00:00 2001 From: Yossi Boaron Date: Thu, 13 Mar 2025 10:09:19 +0200 Subject: [PATCH 020/181] Add dontSNAT subnets rules to mgmtport-snat This PR adds rules to prevent SNAT if source IP belongs to the mgmtport-no-snat-subnets-v4 or mgmtport-no-snat-subnets-v6 sets, which store IPv4 and IPv6 subnets, respectively. 
Signed-off-by: Yossi Boaron --- .../pkg/node/managementport/port_linux.go | 28 +++++++++++++++++++ go-controller/pkg/types/const.go | 6 ++++ 2 files changed, 34 insertions(+) diff --git a/go-controller/pkg/node/managementport/port_linux.go b/go-controller/pkg/node/managementport/port_linux.go index 378e09b238..4fbd561017 100644 --- a/go-controller/pkg/node/managementport/port_linux.go +++ b/go-controller/pkg/node/managementport/port_linux.go @@ -321,6 +321,18 @@ func setupManagementPortNFTSets() error { Comment: knftables.PtrTo("eTP:Local short-circuit not subject to management port SNAT (IPv6)"), Type: "ipv6_addr . inet_proto . inet_service", }) + tx.Add(&knftables.Set{ + Name: types.NFTMgmtPortNoSNATSubnetsV4, + Comment: knftables.PtrTo("subnets not subject to management port SNAT (IPv4)"), + Type: "ipv4_addr", + Flags: []knftables.SetFlag{knftables.IntervalFlag}, + }) + tx.Add(&knftables.Set{ + Name: types.NFTMgmtPortNoSNATSubnetsV6, + Comment: knftables.PtrTo("subnets not subject to management port SNAT (IPv6)"), + Type: "ipv6_addr", + Flags: []knftables.SetFlag{knftables.IntervalFlag}, + }) err = nft.Run(context.TODO(), tx) if err != nil { @@ -402,6 +414,14 @@ func setupManagementPortNFTChain(interfaceName string, cfg *managementPortConfig "return", ), }) + tx.Add(&knftables.Rule{ + Chain: nftMgmtPortChain, + Rule: knftables.Concat( + "ip saddr", "@", types.NFTMgmtPortNoSNATSubnetsV4, + counterIfDebug, + "return", + ), + }) tx.Add(&knftables.Rule{ Chain: nftMgmtPortChain, Rule: knftables.Concat( @@ -441,6 +461,14 @@ func setupManagementPortNFTChain(interfaceName string, cfg *managementPortConfig "return", ), }) + tx.Add(&knftables.Rule{ + Chain: nftMgmtPortChain, + Rule: knftables.Concat( + "ip6 saddr", "@", types.NFTMgmtPortNoSNATSubnetsV6, + counterIfDebug, + "return", + ), + }) tx.Add(&knftables.Rule{ Chain: nftMgmtPortChain, Rule: knftables.Concat( diff --git a/go-controller/pkg/types/const.go b/go-controller/pkg/types/const.go index 92e5fc54c1..2acd2d5a23 
100644 --- a/go-controller/pkg/types/const.go +++ b/go-controller/pkg/types/const.go @@ -330,4 +330,10 @@ const ( MetricOvsNamespace = "ovs" MetricOvsSubsystemVswitchd = "vswitchd" MetricOvsSubsystemDB = "db" + + // "mgmtport-no-snat-subnets-v4" and "mgmtport-no-snat-subnets-v6" are sets containing + // subnets, indicating traffic that should not be SNATted when passing through the + // management port. + NFTMgmtPortNoSNATSubnetsV4 = "mgmtport-no-snat-subnets-v4" + NFTMgmtPortNoSNATSubnetsV6 = "mgmtport-no-snat-subnets-v6" ) From 707776527e18d065ecf9f00a9d6ad4b298dc0345 Mon Sep 17 00:00:00 2001 From: Yossi Boaron Date: Mon, 14 Apr 2025 14:42:41 +0300 Subject: [PATCH 021/181] Configure mgmtport-no-snat-subnets sets elements Currently traffic gets SNATed at ovn-k8s-mp0 within the mgmtport-snat chain. Since OVNK has transitioned to nftables, this behavior can no longer be overridden. Previously, with iptables, SNAT could be avoided by adding a higher-priority rule in the POSTROUTING chain. However, with nftables, all rules are evaluated before making a final decision, making it impossible to skip SNAT. Some applications, like Submariner, need to preserve the source IP when traffic reaches the destination pod, as certain use cases depend on it. This PR updates the mgmtport-no-snat-subnets-v4 and mgmtport-no-snat-subnets-v6 nftables sets based on the node's annotation values. 
Signed-off-by: Yossi Boaron --- .../pkg/node/managementport/port_linux.go | 51 +++++++++++++++++++ go-controller/pkg/node/obj_retry_node.go | 23 ++++++++- go-controller/pkg/ovnwebhook/nodeadmission.go | 1 + go-controller/pkg/util/node_annotations.go | 43 ++++++++++++++-- 4 files changed, 112 insertions(+), 6 deletions(-) diff --git a/go-controller/pkg/node/managementport/port_linux.go b/go-controller/pkg/node/managementport/port_linux.go index 4fbd561017..480c6f2789 100644 --- a/go-controller/pkg/node/managementport/port_linux.go +++ b/go-controller/pkg/node/managementport/port_linux.go @@ -485,6 +485,57 @@ func setupManagementPortNFTChain(interfaceName string, cfg *managementPortConfig return nil } +func UpdateNoSNATSubnetsSets(node *corev1.Node, getSubnetsFn func(*corev1.Node) ([]string, error)) error { + subnetsList, err := getSubnetsFn(node) + if err != nil { + return fmt.Errorf("error retrieving subnets list: %w", err) + } + + subNetV4 := make([]*knftables.Element, 0) + subNetV6 := make([]*knftables.Element, 0) + + for _, subnet := range subnetsList { + if utilnet.IPFamilyOfCIDRString(subnet) == utilnet.IPv4 { + subNetV4 = append(subNetV4, + &knftables.Element{ + Set: types.NFTMgmtPortNoSNATSubnetsV4, + Key: []string{subnet}, + }, + ) + } + if utilnet.IPFamilyOfCIDRString(subnet) == utilnet.IPv6 { + subNetV6 = append(subNetV6, + &knftables.Element{ + Set: types.NFTMgmtPortNoSNATSubnetsV6, + Key: []string{subnet}, + }, + ) + } + + } + nft, err := nodenft.GetNFTablesHelper() + if err != nil { + return fmt.Errorf("failed to get nftables: %v", err) + } + + tx := nft.NewTransaction() + tx.Flush(&knftables.Set{ + Name: types.NFTMgmtPortNoSNATSubnetsV4, + }) + tx.Flush(&knftables.Set{ + Name: types.NFTMgmtPortNoSNATSubnetsV6, + }) + + for _, elem := range subNetV4 { + tx.Add(elem) + } + for _, elem := range subNetV6 { + tx.Add(elem) + } + + return nft.Run(context.TODO(), tx) +} + // createPlatformManagementPort creates a management port attached to the node switch // 
that lets the node access its pods via their private IP address. This is used // for health checking and other management tasks. diff --git a/go-controller/pkg/node/obj_retry_node.go b/go-controller/pkg/node/obj_retry_node.go index 148bb3cc40..9c9657678e 100644 --- a/go-controller/pkg/node/obj_retry_node.go +++ b/go-controller/pkg/node/obj_retry_node.go @@ -11,6 +11,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/managementport" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/retry" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -121,7 +122,7 @@ func (h *nodeEventHandler) AreResourcesEqual(obj1, obj2 interface{}) (bool, erro if !ok { return false, fmt.Errorf("could not cast obj2 of type %T to *kapi.Node", obj2) } - return reflect.DeepEqual(node1.Status.Addresses, node2.Status.Addresses), nil + return reflect.DeepEqual(node1.Status.Addresses, node2.Status.Addresses) && reflect.DeepEqual(node1.Annotations, node2.Annotations), nil default: return false, fmt.Errorf("no object comparison for type %s", h.objType) @@ -175,6 +176,13 @@ func (h *nodeEventHandler) AddResource(obj interface{}, _ bool) error { node := obj.(*corev1.Node) // if it's our node that is changing, then nothing to do as we dont add our own IP to the nftables rules if node.Name == h.nc.name { + if util.NodeDontSNATSubnetAnnotationExist(node) { + err := managementport.UpdateNoSNATSubnetsSets(node, util.ParseNodeDontSNATSubnetsList) + if err != nil { + return fmt.Errorf("error updating no snat subnets sets: %w", err) + } + } + return nil } return h.nc.addOrUpdateNode(node) @@ -218,6 +226,15 @@ func (h *nodeEventHandler) UpdateResource(oldObj, newObj interface{}, _ bool) er // if it's our node that is changing, then nothing to do as we dont add our own IP to the nftables rules if 
newNode.Name == h.nc.name { + + // if node's dont SNAT subnet annotation changed sync nftables + if !reflect.DeepEqual(oldNode.Annotations, newNode.Annotations) && + util.NodeDontSNATSubnetAnnotationChanged(oldNode, newNode) { + err := managementport.UpdateNoSNATSubnetsSets(newNode, util.ParseNodeDontSNATSubnetsList) + if err != nil { + return fmt.Errorf("error updating no snat subnets sets: %w", err) + } + } return nil } @@ -273,6 +290,10 @@ func (h *nodeEventHandler) DeleteResource(obj, _ interface{}) error { case factory.NodeType: h.nc.deleteNode(obj.(*corev1.Node)) + _ = managementport.UpdateNoSNATSubnetsSets(obj.(*corev1.Node), func(_ *corev1.Node) ([]string, error) { + return []string{}, nil + }) + return nil default: diff --git a/go-controller/pkg/ovnwebhook/nodeadmission.go b/go-controller/pkg/ovnwebhook/nodeadmission.go index b21a51bc87..08509903c9 100644 --- a/go-controller/pkg/ovnwebhook/nodeadmission.go +++ b/go-controller/pkg/ovnwebhook/nodeadmission.go @@ -34,6 +34,7 @@ var commonNodeAnnotationChecks = map[string]checkNodeAnnot{ util.OvnNodeMasqCIDR: nil, util.OvnNodeGatewayMtuSupport: nil, util.OvnNodeManagementPort: nil, + util.OvnNodeDontSNATSubnets: nil, util.OvnNodeChassisID: func(v annotationChange, _ string) error { if v.action == removed { return fmt.Errorf("%s cannot be removed", util.OvnNodeChassisID) diff --git a/go-controller/pkg/util/node_annotations.go b/go-controller/pkg/util/node_annotations.go index d3be36f2db..4e9a984748 100644 --- a/go-controller/pkg/util/node_annotations.go +++ b/go-controller/pkg/util/node_annotations.go @@ -154,6 +154,9 @@ const ( // ovnNodeEncapIPs is used to indicate encap IPs set on the node OVNNodeEncapIPs = "k8s.ovn.org/node-encap-ips" + + // OvnNodeDontSNATSubnets is a user assigned source subnets that should avoid SNAT at ovn-k8s-mp0 interface + OvnNodeDontSNATSubnets = "k8s.ovn.org/node-ingress-snat-exclude-subnets" ) type L3GatewayConfig struct { @@ -1115,15 +1118,45 @@ func 
ParseNodeHostCIDRsExcludeOVNNetworks(node *corev1.Node) ([]string, error) { } func ParseNodeHostCIDRsList(node *corev1.Node) ([]string, error) { - addrAnnotation, ok := node.Annotations[OVNNodeHostCIDRs] + return parseNodeAnnotationList(node, OVNNodeHostCIDRs) +} + +func ParseNodeDontSNATSubnetsList(node *corev1.Node) ([]string, error) { + return parseNodeAnnotationList(node, OvnNodeDontSNATSubnets) +} + +// NodeDontSNATSubnetAnnotationChanged returns true if the OvnNodeDontSNATSubnets in the corev1.Nodes doesn't match +func NodeDontSNATSubnetAnnotationChanged(oldNode, newNode *corev1.Node) bool { + oldVal, oldOk := oldNode.Annotations[OvnNodeDontSNATSubnets] + newVal, newOk := newNode.Annotations[OvnNodeDontSNATSubnets] + + if oldOk != newOk { + return true + } + + if oldOk && newOk && oldVal != newVal { + return true + } + + return false +} + +// NodeDontSNATSubnetAnnotationExist returns true OvnNodeDontSNATSubnets annotation key exists in node annotation +func NodeDontSNATSubnetAnnotationExist(node *corev1.Node) bool { + _, ok := node.Annotations[OvnNodeDontSNATSubnets] + return ok +} + +func parseNodeAnnotationList(node *corev1.Node, annotationKey string) ([]string, error) { + annotationValue, ok := node.Annotations[annotationKey] if !ok { - return nil, newAnnotationNotSetError("%s annotation not found for node %q", OVNNodeHostCIDRs, node.Name) + return []string{}, nil } var cfg []string - if err := json.Unmarshal([]byte(addrAnnotation), &cfg); err != nil { - return nil, fmt.Errorf("failed to unmarshal host cidrs annotation %s for node %q: %v", - addrAnnotation, node.Name, err) + if err := json.Unmarshal([]byte(annotationValue), &cfg); err != nil { + return nil, fmt.Errorf("failed to unmarshal %s annotation %s for node %q: %v", + annotationKey, annotationValue, node.Name, err) } return cfg, nil } From 182ba9c2e12c58e530b13d54f73078687fb5fcb1 Mon Sep 17 00:00:00 2001 From: Yossi Boaron Date: Thu, 29 May 2025 20:51:17 +0300 Subject: [PATCH 022/181] Unit tests for 
node ingress snat exclude annotation Signed-off-by: Yossi Boaron --- .../default_node_network_controller_test.go | 323 ++++++++++++++++++ .../pkg/node/gateway_init_linux_test.go | 3 + .../pkg/node/managementport/port_linux.go | 4 +- .../node/managementport/port_linux_test.go | 2 +- .../pkg/util/node_annotations_unit_test.go | 171 ++++++++++ 5 files changed, 500 insertions(+), 3 deletions(-) diff --git a/go-controller/pkg/node/default_node_network_controller_test.go b/go-controller/pkg/node/default_node_network_controller_test.go index 368b333800..cb7087ff1b 100644 --- a/go-controller/pkg/node/default_node_network_controller_test.go +++ b/go-controller/pkg/node/default_node_network_controller_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net" + "strings" "sync" "time" @@ -21,6 +22,7 @@ import ( adminpolicybasedrouteclient "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned/fake" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube/mocks" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/managementport" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" @@ -1238,4 +1240,325 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2002:db8:1::4 } }) + Describe("node ingress snat exclude subnets", func() { + + var ( + testNS ns.NetNS + nc *DefaultNodeNetworkController + app *cli.App + ) + + const ( + nodeName = "my-node" + ) + + BeforeEach(func() { + var err error + testNS, err = testutils.NewNS() + Expect(err).NotTo(HaveOccurred()) + Expect(config.PrepareTestConfig()).To(Succeed()) + + app = cli.NewApp() + app.Name = "test" + app.Flags = config.Flags + }) + + AfterEach(func() { + util.ResetNetLinkOpMockInst() // other tests in this package rely directly on 
netlink (e.g. gateway_init_linux_test.go) + Expect(testNS.Close()).To(Succeed()) + }) + + Context("with a cluster in IPv4 mode", func() { + const ( + ethName string = "lo1337" + nodeIP string = "169.254.254.60" + ethCIDR string = nodeIP + "/24" + ) + var link netlink.Link + + BeforeEach(func() { + config.IPv4Mode = true + config.IPv6Mode = false + config.Gateway.Mode = config.GatewayModeShared + + // Note we must do this in default netNS because + // nc.WatchNodes() will spawn goroutines which we cannot lock to the testNS + ovntest.AddLink(ethName) + + var err error + link, err = netlink.LinkByName(ethName) + Expect(err).NotTo(HaveOccurred()) + err = netlink.LinkSetUp(link) + Expect(err).NotTo(HaveOccurred()) + + // Add an IP address + addr, err := netlink.ParseAddr(ethCIDR) + Expect(err).NotTo(HaveOccurred()) + addr.Scope = int(netlink.SCOPE_UNIVERSE) + err = netlink.AddrAdd(link, addr) + Expect(err).NotTo(HaveOccurred()) + + }) + + AfterEach(func() { + err := netlink.LinkDel(link) + Expect(err).NotTo(HaveOccurred()) + }) + + ovntest.OnSupportedPlatformsIt("empty annotation on startup", func() { + + app.Action = func(_ *cli.Context) error { + node := corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Annotations: map[string]string{}, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: nodeIP, + }, + }, + }, + } + + nft := nodenft.SetFakeNFTablesHelper() + + kubeFakeClient := fake.NewSimpleClientset(&corev1.NodeList{ + Items: []corev1.Node{node}, + }) + fakeClient := &util.OVNNodeClientset{ + KubeClient: kubeFakeClient, + AdminPolicyRouteClient: adminpolicybasedrouteclient.NewSimpleClientset(), + NetworkAttchDefClient: nadfake.NewSimpleClientset(), + } + + stop := make(chan struct{}) + wf, err := factory.NewNodeWatchFactory(fakeClient, nodeName) + Expect(err).NotTo(HaveOccurred()) + wg := &sync.WaitGroup{} + defer func() { + close(stop) + wg.Wait() + wf.Shutdown() + }() + + err = 
wf.Start() + Expect(err).NotTo(HaveOccurred()) + routeManager := routemanager.NewController() + cnnci := NewCommonNodeNetworkControllerInfo(kubeFakeClient, fakeClient.AdminPolicyRouteClient, wf, nil, nodeName, routeManager) + nc = newDefaultNodeNetworkController(cnnci, stop, wg, routeManager, nil) + nc.initRetryFrameworkForNode() + err = setupPMTUDNFTSets() + Expect(err).NotTo(HaveOccurred()) + err = setupPMTUDNFTChain() + Expect(err).NotTo(HaveOccurred()) + defaultNetConfig := &bridgeUDNConfiguration{ + ofPortPatch: "patch-breth0_ov", + } + nc.Gateway = &gateway{ + openflowManager: &openflowManager{ + flowCache: map[string][]string{}, + defaultBridge: &bridgeConfiguration{ + netConfig: map[string]*bridgeUDNConfiguration{ + types.DefaultNetworkName: defaultNetConfig, + }, + }, + }, + } + + err = managementport.SetupManagementPortNFTSets() + Expect(err).NotTo(HaveOccurred()) + + // must run route manager manually which is usually started with nc.Start() + wg.Add(1) + go func() { + defer GinkgoRecover() + defer wg.Done() + nc.routeManager.Run(stop, 10*time.Second) + Expect(err).NotTo(HaveOccurred()) + }() + By("no nftables elements should present at startup") + + err = nc.WatchNodes() + Expect(err).NotTo(HaveOccurred()) + Expect(nft.Dump()).NotTo(ContainSubstring("add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.168.1.0/24 }")) + Expect(nft.Dump()).NotTo(ContainSubstring("add element inet ovn-kubernetes mgmtport-no-snat-subnets-v6 { fd00::/64 }")) + + By("adding subnets to node annotation should update nftables elements") + node.Annotations[util.OvnNodeDontSNATSubnets] = `["192.167.1.0/24"]` + + _, err = kubeFakeClient.CoreV1().Nodes().Update(context.TODO(), &node, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Eventually(func() bool { + cleanDump := strings.ReplaceAll(nft.Dump(), "\r", "") + return strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.167.1.0/24 }") && + 
!strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.168.1.0/24 }") && + !strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v6 { fd00::/64 }") + }).WithTimeout(2 * time.Second).Should(BeTrue()) + + By("adding extra subnets to node annotation should update nftables elements") + + node.Annotations[util.OvnNodeDontSNATSubnets] = `["192.167.1.0/24","fd00::/64","192.169.1.0/24","fd11::/64"]` + + _, err = kubeFakeClient.CoreV1().Nodes().Update(context.TODO(), &node, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Eventually(func() bool { + cleanDump := strings.ReplaceAll(nft.Dump(), "\r", "") + return strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.167.1.0/24 }") && + strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.169.1.0/24 }") && + strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v6 { fd00::/64 }") + }).WithTimeout(2 * time.Second).Should(BeTrue()) + + By("deleting node should remove nftables elements") + err = kubeFakeClient.CoreV1().Nodes().Delete(context.TODO(), nodeName, metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Eventually(func() bool { + cleanDump := strings.ReplaceAll(nft.Dump(), "\r", "") + return !strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.167.1.0/24 }") && + !strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.169.1.0/24 }") && + !strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v6 { fd00::/64 }") + + }).WithTimeout(2 * time.Second).Should(BeTrue()) + return nil + } + + err := app.Run([]string{app.Name}) + Expect(err).NotTo(HaveOccurred()) + }) + + ovntest.OnSupportedPlatformsIt("non-empty annotation on startup", func() { + + app.Action = func(_ *cli.Context) error { + node := 
corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Annotations: map[string]string{ + util.OvnNodeDontSNATSubnets: `["192.168.1.0/24","fd00::/64"]`, + }, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: nodeIP, + }, + }, + }, + } + + nft := nodenft.SetFakeNFTablesHelper() + + kubeFakeClient := fake.NewSimpleClientset(&corev1.NodeList{ + Items: []corev1.Node{node}, + }) + fakeClient := &util.OVNNodeClientset{ + KubeClient: kubeFakeClient, + AdminPolicyRouteClient: adminpolicybasedrouteclient.NewSimpleClientset(), + NetworkAttchDefClient: nadfake.NewSimpleClientset(), + } + + stop := make(chan struct{}) + wf, err := factory.NewNodeWatchFactory(fakeClient, nodeName) + Expect(err).NotTo(HaveOccurred()) + wg := &sync.WaitGroup{} + defer func() { + close(stop) + wg.Wait() + wf.Shutdown() + }() + + err = wf.Start() + Expect(err).NotTo(HaveOccurred()) + routeManager := routemanager.NewController() + cnnci := NewCommonNodeNetworkControllerInfo(kubeFakeClient, fakeClient.AdminPolicyRouteClient, wf, nil, nodeName, routeManager) + nc = newDefaultNodeNetworkController(cnnci, stop, wg, routeManager, nil) + nc.initRetryFrameworkForNode() + err = setupPMTUDNFTSets() + Expect(err).NotTo(HaveOccurred()) + err = setupPMTUDNFTChain() + Expect(err).NotTo(HaveOccurred()) + defaultNetConfig := &bridgeUDNConfiguration{ + ofPortPatch: "patch-breth0_ov", + } + nc.Gateway = &gateway{ + openflowManager: &openflowManager{ + flowCache: map[string][]string{}, + defaultBridge: &bridgeConfiguration{ + netConfig: map[string]*bridgeUDNConfiguration{ + types.DefaultNetworkName: defaultNetConfig, + }, + }, + }, + } + + err = managementport.SetupManagementPortNFTSets() + Expect(err).NotTo(HaveOccurred()) + + // must run route manager manually which is usually started with nc.Start() + wg.Add(1) + go func() { + defer GinkgoRecover() + defer wg.Done() + nc.routeManager.Run(stop, 10*time.Second) + 
Expect(err).NotTo(HaveOccurred()) + }() + By("expected nftables elements should present at startup") + + err = nc.WatchNodes() + Expect(err).NotTo(HaveOccurred()) + Expect(nft.Dump()).To(ContainSubstring("add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.168.1.0/24 }")) + Expect(nft.Dump()).To(ContainSubstring("add element inet ovn-kubernetes mgmtport-no-snat-subnets-v6 { fd00::/64 }")) + + By("editing subnets on node annotation should update nftables elements") + node.Annotations[util.OvnNodeDontSNATSubnets] = `["192.167.1.0/24"]` + + _, err = kubeFakeClient.CoreV1().Nodes().Update(context.TODO(), &node, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Eventually(func() bool { + cleanDump := strings.ReplaceAll(nft.Dump(), "\r", "") + return strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.167.1.0/24 }") && + !strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.168.1.0/24 }") && + !strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v6 { fd00::/64 }") + }).WithTimeout(2 * time.Second).Should(BeTrue()) + + By("adding extra subnets to node annotation should update nftables elements") + + node.Annotations[util.OvnNodeDontSNATSubnets] = `["192.167.1.0/24","fd00::/64","192.169.1.0/24","fd11::/64"]` + + _, err = kubeFakeClient.CoreV1().Nodes().Update(context.TODO(), &node, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Eventually(func() bool { + cleanDump := strings.ReplaceAll(nft.Dump(), "\r", "") + return strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.167.1.0/24 }") && + strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.169.1.0/24 }") && + strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v6 { fd00::/64 }") + }).WithTimeout(2 * time.Second).Should(BeTrue()) + + By("deleting 
node should remove nftables elements") + err = kubeFakeClient.CoreV1().Nodes().Delete(context.TODO(), nodeName, metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Eventually(func() bool { + cleanDump := strings.ReplaceAll(nft.Dump(), "\r", "") + return !strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.167.1.0/24 }") && + !strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.169.1.0/24 }") && + !strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v6 { fd00::/64 }") + + }).WithTimeout(2 * time.Second).Should(BeTrue()) + return nil + } + + err := app.Run([]string{app.Name}) + Expect(err).NotTo(HaveOccurred()) + }) + + }) + }) }) diff --git a/go-controller/pkg/node/gateway_init_linux_test.go b/go-controller/pkg/node/gateway_init_linux_test.go index 47162a98d0..e9f248c419 100644 --- a/go-controller/pkg/node/gateway_init_linux_test.go +++ b/go-controller/pkg/node/gateway_init_linux_test.go @@ -56,11 +56,14 @@ add table inet ovn-kubernetes add set inet ovn-kubernetes mgmtport-no-snat-nodeports { type inet_proto . inet_service ; comment "NodePorts not subject to management port SNAT" ; } add set inet ovn-kubernetes mgmtport-no-snat-services-v4 { type ipv4_addr . inet_proto . inet_service ; comment "eTP:Local short-circuit not subject to management port SNAT (IPv4)" ; } add set inet ovn-kubernetes mgmtport-no-snat-services-v6 { type ipv6_addr . inet_proto . 
inet_service ; comment "eTP:Local short-circuit not subject to management port SNAT (IPv6)" ; } +add set inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { type ipv4_addr ; flags interval ; comment "subnets not subject to management port SNAT (IPv4)" ; } +add set inet ovn-kubernetes mgmtport-no-snat-subnets-v6 { type ipv6_addr ; flags interval ; comment "subnets not subject to management port SNAT (IPv6)" ; } add chain inet ovn-kubernetes mgmtport-snat { type nat hook postrouting priority 100 ; comment "OVN SNAT to Management Port" ; } add rule inet ovn-kubernetes mgmtport-snat oifname != %s return add rule inet ovn-kubernetes mgmtport-snat meta nfproto ipv4 ip saddr 10.1.1.2 counter return add rule inet ovn-kubernetes mgmtport-snat meta l4proto . th dport @mgmtport-no-snat-nodeports counter return add rule inet ovn-kubernetes mgmtport-snat ip daddr . meta l4proto . th dport @mgmtport-no-snat-services-v4 counter return +add rule inet ovn-kubernetes mgmtport-snat ip saddr @mgmtport-no-snat-subnets-v4 counter return add rule inet ovn-kubernetes mgmtport-snat counter snat ip to 10.1.1.2 ` diff --git a/go-controller/pkg/node/managementport/port_linux.go b/go-controller/pkg/node/managementport/port_linux.go index 480c6f2789..f6ea8676dd 100644 --- a/go-controller/pkg/node/managementport/port_linux.go +++ b/go-controller/pkg/node/managementport/port_linux.go @@ -101,7 +101,7 @@ func NewManagementPortController( } // setup NFT sets early as gateway initialization depends on it - err = setupManagementPortNFTSets() + err = SetupManagementPortNFTSets() if err != nil { return nil, err } @@ -299,7 +299,7 @@ func setupManagementPortConfig(link netlink.Link, cfg *managementPortConfig, rou // setupManagementPortNFTSets sets up the NFT sets that the management port SNAR // rules rely on. These sets are written to by other componets so they are setup // independantly and as early as possible. 
-func setupManagementPortNFTSets() error { +func SetupManagementPortNFTSets() error { nft, err := nodenft.GetNFTablesHelper() if err != nil { return err diff --git a/go-controller/pkg/node/managementport/port_linux_test.go b/go-controller/pkg/node/managementport/port_linux_test.go index d6d99d7577..13d7641c11 100644 --- a/go-controller/pkg/node/managementport/port_linux_test.go +++ b/go-controller/pkg/node/managementport/port_linux_test.go @@ -783,7 +783,7 @@ var _ = Describe("Management Port tests", func() { ipv4: &fakeMgmtPortIPFamilyConfig, netInfo: netInfo, } - err := setupManagementPortNFTSets() + err := SetupManagementPortNFTSets() Expect(err).NotTo(HaveOccurred()) err = setupManagementPortNFTChain(types.K8sMgmtIntfName, &fakeMgmtPortConfig) Expect(err).NotTo(HaveOccurred()) diff --git a/go-controller/pkg/util/node_annotations_unit_test.go b/go-controller/pkg/util/node_annotations_unit_test.go index 463a83f31b..f987684e82 100644 --- a/go-controller/pkg/util/node_annotations_unit_test.go +++ b/go-controller/pkg/util/node_annotations_unit_test.go @@ -829,3 +829,174 @@ func TestParseUDNLayer2NodeGRLRPTunnelIDs(t *testing.T) { }) } } + +func TestNodeDontSNATSubnetAnnotationChanged(t *testing.T) { + tests := []struct { + desc string + oldNode *corev1.Node + newNode *corev1.Node + result bool + }{ + { + desc: "annotation added", + oldNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + }, + newNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + OvnNodeDontSNATSubnets: `["192.168.1.0/24"]`, + }, + }, + }, + result: true, + }, + { + desc: "annotation removed", + oldNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + OvnNodeDontSNATSubnets: `["192.168.1.0/24"]`, + }, + }, + }, + newNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + }, + result: true, + }, + { + desc: "annotation value changed", + oldNode: 
&corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + OvnNodeDontSNATSubnets: `["192.168.1.0/24"]`, + }, + }, + }, + newNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + OvnNodeDontSNATSubnets: `["10.0.0.0/16"]`, + }, + }, + }, + result: true, + }, + { + desc: "false: annotation unchanged", + oldNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + OvnNodeDontSNATSubnets: `["192.168.1.0/24"]`, + }, + }, + }, + newNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + OvnNodeDontSNATSubnets: `["192.168.1.0/24"]`, + }, + }, + }, + result: false, + }, + { + desc: "annotation absent in both", + oldNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + }, + newNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + }, + result: false, + }, + } + + for i, tc := range tests { + t.Run(fmt.Sprintf("%d:%s", i, tc.desc), func(t *testing.T) { + result := NodeDontSNATSubnetAnnotationChanged(tc.oldNode, tc.newNode) + assert.Equal(t, tc.result, result) + }) + } +} + +func TestParseNodeDontSNATSubnetsList(t *testing.T) { + tests := []struct { + desc string + node *corev1.Node + expected []string + expectError bool + }{ + { + desc: "no annotation present", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-node", + Annotations: map[string]string{}, + }, + }, + expected: []string{}, + expectError: false, + }, + { + desc: "valid annotation list with IPv4 and IPv6 CIDRs", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node2", + Annotations: map[string]string{ + OvnNodeDontSNATSubnets: `["192.168.1.0/24", "fd00::/64", "10.0.0.0/16"]`, + }, + }, + }, + expected: []string{"192.168.1.0/24", "fd00::/64", "10.0.0.0/16"}, + expectError: false, + }, + { + desc: "invalid annotation value (not JSON)", + node: &corev1.Node{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "node3", + Annotations: map[string]string{ + OvnNodeDontSNATSubnets: `not-a-json`, + }, + }, + }, + expected: nil, + expectError: true, + }, + { + desc: "empty JSON array annotation", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node4", + Annotations: map[string]string{ + OvnNodeDontSNATSubnets: `[]`, + }, + }, + }, + expected: []string{}, + expectError: false, + }, + } + + for i, tc := range tests { + t.Run(fmt.Sprintf("%d:%s", i, tc.desc), func(t *testing.T) { + result, err := ParseNodeDontSNATSubnetsList(tc.node) + if tc.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tc.expected, result) + } + }) + } +} From cb32656996ea7b4718b0985cbc53c4d9ef74a10f Mon Sep 17 00:00:00 2001 From: thisisobate Date: Tue, 10 Jun 2025 14:28:08 +0100 Subject: [PATCH 023/181] chore: update footer with new LF trademark disclaimer Signed-off-by: thisisobate --- mkdocs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mkdocs.yml b/mkdocs.yml index e21134af5a..2f9d9cd495 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -7,7 +7,7 @@ extra_css: - stylesheets/extra.css site_dir: site docs_dir: docs -copyright: The Linux Foundation® (TLF) has registered trademarks and uses trademarks. For a list of TLF trademarks, see Trademark Usage. +copyright: Copyright © OVN-Kubernetes a Series of LF Projects, LLC. For website terms of use, trademark policy and other project policies please see LF Projects Policies. theme: name: material icon: From 188309de2876c686fb9c5e19e8efd01af3226873 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Fri, 6 Jun 2025 17:27:34 -0400 Subject: [PATCH 024/181] Perf optimization: Stop every node event from triggering EIP Node update Everytime the node updates it is triggering addEgressNode, which does a route add operation libovsdb txn for default network and every UDN, initiated from the default controller egress node logic. Only runs when needed now. 
Signed-off-by: Tim Rozet --- .../pkg/ovn/default_network_controller.go | 41 +++++++++++++++---- 1 file changed, 33 insertions(+), 8 deletions(-) diff --git a/go-controller/pkg/ovn/default_network_controller.go b/go-controller/pkg/ovn/default_network_controller.go index f6b3727c55..d6a0231ea6 100644 --- a/go-controller/pkg/ovn/default_network_controller.go +++ b/go-controller/pkg/ovn/default_network_controller.go @@ -131,6 +131,7 @@ type DefaultNetworkController struct { syncZoneICFailed sync.Map syncHostNetAddrSetFailed sync.Map syncEIPNodeRerouteFailed sync.Map + syncEIPNodeFailed sync.Map // variable to determine if all pods present on the node during startup have been processed // updated atomically @@ -843,8 +844,10 @@ func (h *defaultNetworkControllerEventHandler) AddResource(obj interface{}, from h.oc.eIPC.nodeZoneState.UnlockKey(node.Name) shouldSyncReroute := true + shouldSyncEIPNode := true if fromRetryLoop { _, shouldSyncReroute = h.oc.syncEIPNodeRerouteFailed.Load(node.Name) + _, shouldSyncEIPNode = h.oc.syncEIPNodeFailed.Load(node.Name) } if shouldSyncReroute { @@ -862,10 +865,19 @@ func (h *defaultNetworkControllerEventHandler) AddResource(obj interface{}, from h.oc.syncEIPNodeRerouteFailed.Store(node.Name, true) return err } + h.oc.syncEIPNodeRerouteFailed.Delete(node.Name) } - // Add routing specific to Egress IP NOTE: GARP configuration that - // Egress IP depends on is added from the gateway reconciliation logic - return h.oc.eIPC.addEgressNode(node) + if shouldSyncEIPNode { + // Add routing specific to Egress IP NOTE: GARP configuration that + // Egress IP depends on is added from the gateway reconciliation logic + err := h.oc.eIPC.addEgressNode(node) + if err != nil { + h.oc.syncEIPNodeFailed.Store(node.Name, true) + return err + } + h.oc.syncEIPNodeFailed.Delete(node.Name) + } + return nil case factory.NamespaceType: ns, ok := obj.(*corev1.Namespace) @@ -1038,24 +1050,36 @@ func (h *defaultNetworkControllerEventHandler) UpdateResource(oldObj, 
newObj int h.oc.eIPC.nodeZoneState.Store(newNode.Name, h.oc.isLocalZoneNode(newNode)) h.oc.eIPC.nodeZoneState.UnlockKey(newNode.Name) - _, failed := h.oc.syncEIPNodeRerouteFailed.Load(newNode.Name) + _, syncEIPNodeRerouteFailed := h.oc.syncEIPNodeRerouteFailed.Load(newNode.Name) // node moved from remote -> local or previously failed reroute config - if (!h.oc.isLocalZoneNode(oldNode) || failed) && h.oc.isLocalZoneNode(newNode) { + if (!h.oc.isLocalZoneNode(oldNode) || syncEIPNodeRerouteFailed) && h.oc.isLocalZoneNode(newNode) { if err := h.oc.eIPC.ensureDefaultNoRerouteQoSRules(newNode.Name); err != nil { return err } } // update the nodeIP in the default-reRoute (102 priority) destination address-set - if failed || util.NodeHostCIDRsAnnotationChanged(oldNode, newNode) { + if syncEIPNodeRerouteFailed || util.NodeHostCIDRsAnnotationChanged(oldNode, newNode) { klog.Infof("Egress IP detected IP address change for node %s. Updating no re-route policies", newNode.Name) err := h.oc.eIPC.ensureDefaultNoRerouteNodePolicies() if err != nil { + h.oc.syncEIPNodeRerouteFailed.Store(newNode.Name, true) return err } + h.oc.syncEIPNodeRerouteFailed.Delete(newNode.Name) } - h.oc.syncEIPNodeRerouteFailed.Delete(newNode.Name) - return h.oc.eIPC.addEgressNode(newNode) + + _, syncEIPNodeFailed := h.oc.syncEIPNodeFailed.Load(newNode.Name) + // update only if the GR join IP changed for default network + if syncEIPNodeFailed || joinCIDRChanged(oldNode, newNode, h.oc.GetNetworkName()) { + err := h.oc.eIPC.addEgressNode(newNode) + if err != nil { + h.oc.syncEIPNodeFailed.Store(newNode.Name, true) + return err + } + h.oc.syncEIPNodeFailed.Delete(newNode.Name) + } + return nil case factory.NamespaceType: oldNs, newNs := oldObj.(*corev1.Namespace), newObj.(*corev1.Namespace) @@ -1118,6 +1142,7 @@ func (h *defaultNetworkControllerEventHandler) DeleteResource(obj, cachedObj int h.oc.eIPC.nodeZoneState.Delete(node.Name) h.oc.eIPC.nodeZoneState.UnlockKey(node.Name) 
h.oc.syncEIPNodeRerouteFailed.Delete(node.Name) + h.oc.syncEIPNodeFailed.Delete(node.Name) return nil case factory.NamespaceType: From bf5b8d41d2d137021c7703df3f9bce839d55f735 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Fri, 6 Jun 2025 17:34:07 -0400 Subject: [PATCH 025/181] Stop calling CreateDefaultRouteToExternal for UDNs by default This is unnecessary because there is another UDN path that will call this code: secondary_layer2/3_controller -> addUpdateLocalNodeEvent -> ensureRouterPoliciesForNetwork -> CreateDefaultRouteToExternal Signed-off-by: Tim Rozet --- go-controller/pkg/ovn/egressip.go | 24 +-- go-controller/pkg/ovn/egressip_udn_l3_test.go | 190 +++++++++++++++++- .../secondary_layer3_network_controller.go | 2 - 3 files changed, 187 insertions(+), 29 deletions(-) diff --git a/go-controller/pkg/ovn/egressip.go b/go-controller/pkg/ovn/egressip.go index 5ca127140b..924008a45a 100644 --- a/go-controller/pkg/ovn/egressip.go +++ b/go-controller/pkg/ovn/egressip.go @@ -2050,28 +2050,10 @@ func (e *EgressIPController) addEgressNode(node *corev1.Node) error { // NOTE3: When the node gets deleted we do not remove this route intentionally because // on IC if the node is gone, then the ovn_cluster_router is also gone along with all // the routes on it. 
- processNetworkFn := func(ni util.NetInfo) error { - if ni.TopologyType() == types.Layer2Topology || len(ni.Subnets()) == 0 { - return nil - } - if err := libovsdbutil.CreateDefaultRouteToExternal(e.nbClient, ni.GetNetworkScopedClusterRouterName(), - ni.GetNetworkScopedGWRouterName(node.Name), ni.Subnets()); err != nil { - return fmt.Errorf("failed to create route to external for network %s: %v", ni.GetNetworkName(), err) - } - return nil - } ni := e.networkManager.GetNetwork(types.DefaultNetworkName) - if ni == nil { - return fmt.Errorf("failed to get default network from NAD controller") - } - if err := processNetworkFn(ni); err != nil { - return fmt.Errorf("failed to process default network: %v", err) - } - if !isEgressIPForUDNSupported() { - return nil - } - if err := e.networkManager.DoWithLock(processNetworkFn); err != nil { - return fmt.Errorf("failed to process all user defined networks route to external: %v", err) + if err := libovsdbutil.CreateDefaultRouteToExternal(e.nbClient, ni.GetNetworkScopedClusterRouterName(), + ni.GetNetworkScopedGWRouterName(node.Name), ni.Subnets()); err != nil { + return fmt.Errorf("failed to create route to external for network %s: %v", ni.GetNetworkName(), err) } } } diff --git a/go-controller/pkg/ovn/egressip_udn_l3_test.go b/go-controller/pkg/ovn/egressip_udn_l3_test.go index 5dae356208..1a5497d599 100644 --- a/go-controller/pkg/ovn/egressip_udn_l3_test.go +++ b/go-controller/pkg/ovn/egressip_udn_l3_test.go @@ -17,6 +17,7 @@ import ( k8stypes "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/retry" "k8s.io/klog/v2" + "k8s.io/utils/ptr" ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" @@ -98,6 +99,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol config.OVNKubernetesFeature.EnableMultiNetwork = true config.Gateway.Mode = config.GatewayModeShared 
config.OVNKubernetesFeature.EgressIPNodeHealthCheckPort = 1234 + config.Gateway.V4MasqueradeSubnet = dummyMasqueradeSubnet().String() app = cli.NewApp() app.Name = "test" @@ -1195,9 +1197,11 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol // Add pod IPs to UDN cache iUDN, nUDN, _ := net.ParseCIDR(v4Pod1IPNode1Net1 + "/23") nUDN.IP = iUDN + secConInfo.bnc.zone = node1.Name secConInfo.bnc.logicalPortCache.add(&egressPodUDNLocal, "", util.GetNADName(nad.Namespace, nad.Name), "", nil, []*net.IPNet{nUDN}) _, err = fakeOvn.fakeClient.EgressIPClient.K8sV1().EgressIPs().Create(context.TODO(), &eIP, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(secConInfo.bnc.WatchNodes()).To(gomega.Succeed()) egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) egressIPServedPodsASCDNv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}, ovntypes.DefaultNetworkName, DefaultNetworkControllerName) egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4, node2IPv4}) @@ -1325,6 +1329,19 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name, Networks: []string{nodeLogicalRouterIfAddrV4}, }, + &nbdb.NAT{ + UUID: networkName1_ + node1Name + "-masqueradeNAT-UUID", + ExternalIDs: map[string]string{ + "k8s.ovn.org/topology": "layer3", + "k8s.ovn.org/network": networkName1, + }, + ExternalIP: "169.254.169.14", + LogicalIP: node1UDNSubnet.String(), + LogicalPort: ptr.To("rtos-" + networkName1_ + node1Name), + Match: "eth.dst == 0a:58:14:80:00:02", + Type: nbdb.NATTypeSNAT, + Options: map[string]string{"stateless": "false"}, + }, &nbdb.LogicalRouter{ Name: netInfo.GetNetworkScopedClusterRouterName(), UUID: netInfo.GetNetworkScopedClusterRouterName() + "-UUID", @@ -1333,6 +1350,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol 
fmt.Sprintf("%s-no-reroute-reply-traffic", netInfo.GetNetworkName()), getReRoutePolicyUUID(eipNamespace2, podName2, IPFamilyValueV4, netInfo.GetNetworkName())}, StaticRoutes: []string{fmt.Sprintf("%s-reroute-static-route-UUID", netInfo.GetNetworkName())}, + Nat: []string{networkName1_ + node1Name + "-masqueradeNAT-UUID"}, }, &nbdb.LogicalRouter{ UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", @@ -1345,14 +1363,57 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol &nbdb.LogicalSwitchPort{ UUID: "k8s-" + networkName1_ + node1Name + "-UUID", Name: "k8s-" + networkName1_ + node1Name, - Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + Addresses: []string{"0a:58:14:80:00:02 " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "stor-" + networkName1_ + node1Name + "-UUID", + Name: "stor-" + networkName1_ + node1Name, + Addresses: []string{"router"}, + Options: map[string]string{"router-port": "rtos-" + networkName1_ + node1Name}, + Type: "router", + }, + &nbdb.ACL{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-NetpolNode-UUID", + Direction: nbdb.ACLDirectionToLport, + Action: nbdb.ACLActionAllowRelated, + ExternalIDs: map[string]string{ + "k8s.ovn.org/name": networkName1_ + node1Name, + "ip": util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String(), + "k8s.ovn.org/id": fmt.Sprintf("%s-network-controller:NetpolNode:%s:%s", networkName1, networkName1_+node1Name, util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()), + "k8s.ovn.org/owner-controller": networkName1 + "-network-controller", + "k8s.ovn.org/owner-type": "NetpolNode", + }, + Match: fmt.Sprintf("ip4.src==%s", util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()), + Meter: ptr.To(ovntypes.OvnACLLoggingMeter), + Priority: ovntypes.PrimaryUDNAllowPriority, + Tier: ovntypes.DefaultACLTier, }, &nbdb.LogicalSwitch{ UUID: 
netInfo.GetNetworkScopedSwitchName(node1.Name) + "-UUID", Name: netInfo.GetNetworkScopedSwitchName(node1.Name), - Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID", "stor-" + networkName1_ + node1Name + "-UUID"}, ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, QOSRules: []string{fmt.Sprintf("%s-QoS-UUID", netInfo.GetNetworkName())}, + OtherConfig: map[string]string{ + "exclude_ips": util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String(), + "subnet": node1UDNSubnet.String(), + }, + ACLs: []string{netInfo.GetNetworkScopedSwitchName(node1.Name) + "-NetpolNode-UUID"}, + }, + &nbdb.LogicalSwitch{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "TRANSIT-UUID", + Name: networkName1_ + ovntypes.TransitSwitch, + ExternalIDs: map[string]string{ + ovntypes.NetworkExternalID: netInfo.GetNetworkName(), + ovntypes.TopologyExternalID: ovntypes.Layer3Topology, + ovntypes.NetworkRoleExternalID: ovntypes.NetworkRolePrimary}, + OtherConfig: map[string]string{ + "mcast_snoop": "true", + "mcast_querier": "false", + "mcast_flood_unregistered": "true", + "interconn-ts": networkName1_ + ovntypes.TransitSwitch, + "requested-tnl-key": "16711685", + }, }, getNoReRouteReplyTrafficPolicyForController(netInfo.GetNetworkName(), DefaultNetworkControllerName), getDefaultQoSRule(false, netInfo.GetNetworkName(), DefaultNetworkControllerName), @@ -1457,6 +1518,19 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name, Networks: []string{nodeLogicalRouterIfAddrV4}, }, + &nbdb.NAT{ + UUID: networkName1_ + node1Name + "-masqueradeNAT-UUID", + ExternalIDs: map[string]string{ + "k8s.ovn.org/topology": "layer3", + "k8s.ovn.org/network": networkName1, + }, + ExternalIP: "169.254.169.14", + LogicalIP: 
node1UDNSubnet.String(), + LogicalPort: ptr.To("rtos-" + networkName1_ + node1Name), + Match: "eth.dst == 0a:58:14:80:00:02", + Type: nbdb.NATTypeSNAT, + Options: map[string]string{"stateless": "false"}, + }, &nbdb.LogicalRouter{ Name: netInfo.GetNetworkScopedClusterRouterName(), UUID: netInfo.GetNetworkScopedClusterRouterName() + "-UUID", @@ -1465,6 +1539,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol fmt.Sprintf("%s-no-reroute-reply-traffic", netInfo.GetNetworkName()), }, StaticRoutes: []string{fmt.Sprintf("%s-reroute-static-route-UUID", netInfo.GetNetworkName())}, + Nat: []string{networkName1_ + node1Name + "-masqueradeNAT-UUID"}, }, &nbdb.LogicalRouter{ UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", @@ -1475,14 +1550,57 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol &nbdb.LogicalSwitchPort{ UUID: "k8s-" + networkName1_ + node1Name + "-UUID", Name: "k8s-" + networkName1_ + node1Name, - Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + Addresses: []string{"0a:58:14:80:00:02 " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "stor-" + networkName1_ + node1Name + "-UUID", + Name: "stor-" + networkName1_ + node1Name, + Addresses: []string{"router"}, + Options: map[string]string{"router-port": "rtos-" + networkName1_ + node1Name}, + Type: "router", + }, + &nbdb.ACL{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-NetpolNode-UUID", + Direction: nbdb.ACLDirectionToLport, + Action: nbdb.ACLActionAllowRelated, + ExternalIDs: map[string]string{ + "k8s.ovn.org/name": networkName1_ + node1Name, + "ip": util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String(), + "k8s.ovn.org/id": fmt.Sprintf("%s-network-controller:NetpolNode:%s:%s", networkName1, networkName1_+node1Name, util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()), + "k8s.ovn.org/owner-controller": 
networkName1 + "-network-controller", + "k8s.ovn.org/owner-type": "NetpolNode", + }, + Match: fmt.Sprintf("ip4.src==%s", util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()), + Meter: ptr.To(ovntypes.OvnACLLoggingMeter), + Priority: ovntypes.PrimaryUDNAllowPriority, + Tier: ovntypes.DefaultACLTier, }, &nbdb.LogicalSwitch{ UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-UUID", Name: netInfo.GetNetworkScopedSwitchName(node1.Name), - Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID", "stor-" + networkName1_ + node1Name + "-UUID"}, ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, QOSRules: []string{fmt.Sprintf("%s-QoS-UUID", netInfo.GetNetworkName())}, + OtherConfig: map[string]string{ + "exclude_ips": util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String(), + "subnet": node1UDNSubnet.String(), + }, + ACLs: []string{netInfo.GetNetworkScopedSwitchName(node1.Name) + "-NetpolNode-UUID"}, + }, + &nbdb.LogicalSwitch{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "TRANSIT-UUID", + Name: networkName1_ + ovntypes.TransitSwitch, + ExternalIDs: map[string]string{ + ovntypes.NetworkExternalID: netInfo.GetNetworkName(), + ovntypes.TopologyExternalID: ovntypes.Layer3Topology, + ovntypes.NetworkRoleExternalID: ovntypes.NetworkRolePrimary}, + OtherConfig: map[string]string{ + "mcast_snoop": "true", + "mcast_querier": "false", + "mcast_flood_unregistered": "true", + "interconn-ts": networkName1_ + ovntypes.TransitSwitch, + "requested-tnl-key": "16711685", + }, }, getNoReRouteReplyTrafficPolicyForController(netInfo.GetNetworkName(), DefaultNetworkControllerName), getDefaultQoSRule(false, netInfo.GetNetworkName(), DefaultNetworkControllerName), @@ -2415,6 +2533,8 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol gomega.Expect(err).NotTo(gomega.HaveOccurred()) 
secConInfo, ok := fakeOvn.secondaryControllers[networkName1] gomega.Expect(ok).To(gomega.BeTrue()) + secConInfo.bnc.zone = node1.Name + gomega.Expect(secConInfo.bnc.WatchNodes()).To(gomega.Succeed()) // Add pod IPs to UDN cache iUDN, nUDN, _ := net.ParseCIDR(v4Pod1IPNode1Net1 + "/23") nUDN.IP = iUDN @@ -2553,6 +2673,19 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name, Networks: []string{nodeLogicalRouterIfAddrV4}, }, + &nbdb.NAT{ + UUID: networkName1_ + node1Name + "-masqueradeNAT-UUID", + ExternalIDs: map[string]string{ + "k8s.ovn.org/topology": "layer3", + "k8s.ovn.org/network": networkName1, + }, + ExternalIP: "169.254.169.14", + LogicalIP: node1UDNSubnet.String(), + LogicalPort: ptr.To("rtos-" + networkName1_ + node1Name), + Match: "eth.dst == 0a:58:14:80:00:02", + Type: nbdb.NATTypeSNAT, + Options: map[string]string{"stateless": "false"}, + }, &nbdb.LogicalRouter{ Name: netInfo.GetNetworkScopedClusterRouterName(), UUID: netInfo.GetNetworkScopedClusterRouterName() + "-UUID", @@ -2561,6 +2694,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol fmt.Sprintf("%s-no-reroute-reply-traffic", netInfo.GetNetworkName()), getReRoutePolicyUUID(eipNamespace2, podName2, IPFamilyValueV4, netInfo.GetNetworkName())}, StaticRoutes: []string{fmt.Sprintf("%s-reroute-static-route-UUID", netInfo.GetNetworkName())}, + Nat: []string{networkName1_ + node1Name + "-masqueradeNAT-UUID"}, }, &nbdb.LogicalRouter{ UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", @@ -2573,14 +2707,58 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol &nbdb.LogicalSwitchPort{ UUID: "k8s-" + networkName1_ + node1Name + "-UUID", Name: "k8s-" + networkName1_ + node1Name, - Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + Addresses: 
[]string{"0a:58:14:80:00:02 " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "stor-" + networkName1_ + node1Name + "-UUID", + Name: "stor-" + networkName1_ + node1Name, + Addresses: []string{"router"}, + Options: map[string]string{"router-port": "rtos-" + networkName1_ + node1Name}, + Type: "router", + }, + &nbdb.ACL{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-NetpolNode-UUID", + Direction: nbdb.ACLDirectionToLport, + Action: nbdb.ACLActionAllowRelated, + ExternalIDs: map[string]string{ + "k8s.ovn.org/name": networkName1_ + node1Name, + "ip": util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String(), + "k8s.ovn.org/id": fmt.Sprintf("%s-network-controller:NetpolNode:%s:%s", networkName1, networkName1_+node1Name, util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()), + "k8s.ovn.org/owner-controller": networkName1 + "-network-controller", + "k8s.ovn.org/owner-type": "NetpolNode", + }, + Match: fmt.Sprintf("ip4.src==%s", util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()), + Meter: ptr.To(ovntypes.OvnACLLoggingMeter), + Priority: ovntypes.PrimaryUDNAllowPriority, + Tier: ovntypes.DefaultACLTier, }, &nbdb.LogicalSwitch{ UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-UUID", Name: netInfo.GetNetworkScopedSwitchName(node1.Name), - Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID", "stor-" + networkName1_ + node1Name + "-UUID"}, ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, QOSRules: []string{fmt.Sprintf("%s-QoS-UUID", netInfo.GetNetworkName())}, + OtherConfig: map[string]string{ + "exclude_ips": util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String(), + "subnet": node1UDNSubnet.String(), + }, + ACLs: []string{netInfo.GetNetworkScopedSwitchName(node1.Name) + "-NetpolNode-UUID"}, + }, + &nbdb.LogicalSwitch{ + UUID: 
netInfo.GetNetworkScopedSwitchName(node1.Name) + "TRANSIT-UUID", + Name: networkName1_ + ovntypes.TransitSwitch, + ExternalIDs: map[string]string{ + ovntypes.NetworkExternalID: netInfo.GetNetworkName(), + ovntypes.TopologyExternalID: ovntypes.Layer3Topology, + ovntypes.NetworkRoleExternalID: ovntypes.NetworkRolePrimary, + }, + OtherConfig: map[string]string{ + "mcast_snoop": "true", + "mcast_querier": "false", + "mcast_flood_unregistered": "true", + "interconn-ts": networkName1_ + ovntypes.TransitSwitch, + "requested-tnl-key": "16711685", + }, }, getNoReRouteReplyTrafficPolicyForController(netInfo.GetNetworkName(), DefaultNetworkControllerName), getDefaultQoSRule(false, netInfo.GetNetworkName(), DefaultNetworkControllerName), diff --git a/go-controller/pkg/ovn/secondary_layer3_network_controller.go b/go-controller/pkg/ovn/secondary_layer3_network_controller.go index cb9f82d08f..9005020934 100644 --- a/go-controller/pkg/ovn/secondary_layer3_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer3_network_controller.go @@ -107,7 +107,6 @@ func (h *secondaryLayer3NetworkControllerEventHandler) AddResource(obj interface if !ok { return fmt.Errorf("could not cast %T object to *kapi.Node", obj) } - if h.oc.isLocalZoneNode(node) { var nodeParams *nodeSyncs if fromRetryLoop { @@ -704,7 +703,6 @@ func (oc *SecondaryLayer3NetworkController) addUpdateLocalNodeEvent(node *corev1 var hostSubnets []*net.IPNet var errs []error var err error - _, _ = oc.localZoneNodes.LoadOrStore(node.Name, true) if noHostSubnet := util.NoHostSubnet(node); noHostSubnet { From a0083457163cfa9d882a1b601a81c58daa1941aa Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Fri, 6 Jun 2025 17:47:06 -0400 Subject: [PATCH 026/181] L2 and L3 UDN should reconfigure reroute policies when join IP changes Signed-off-by: Tim Rozet --- go-controller/pkg/ovn/secondary_layer2_network_controller.go | 3 ++- go-controller/pkg/ovn/secondary_layer3_network_controller.go | 3 ++- 2 files changed, 4 insertions(+), 2 
deletions(-) diff --git a/go-controller/pkg/ovn/secondary_layer2_network_controller.go b/go-controller/pkg/ovn/secondary_layer2_network_controller.go index c11ca2a2ae..69662caa96 100644 --- a/go-controller/pkg/ovn/secondary_layer2_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer2_network_controller.go @@ -180,7 +180,8 @@ func (h *secondaryLayer2NetworkControllerEventHandler) UpdateResource(oldObj, ne hostCIDRsChanged(oldNode, newNode) || nodeGatewayMTUSupportChanged(oldNode, newNode) _, syncRerouteFailed := h.oc.syncEIPNodeRerouteFailed.Load(newNode.Name) - shouldSyncReroute := syncRerouteFailed || util.NodeHostCIDRsAnnotationChanged(oldNode, newNode) + shouldSyncReroute := syncRerouteFailed || util.NodeHostCIDRsAnnotationChanged(oldNode, newNode) || + joinCIDRChanged(oldNode, newNode, h.oc.GetNetworkName()) nodeSyncsParam = &nodeSyncs{ syncMgmtPort: shouldSyncMgmtPort, syncGw: shouldSyncGW, diff --git a/go-controller/pkg/ovn/secondary_layer3_network_controller.go b/go-controller/pkg/ovn/secondary_layer3_network_controller.go index 9005020934..f8601a6b37 100644 --- a/go-controller/pkg/ovn/secondary_layer3_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer3_network_controller.go @@ -186,7 +186,8 @@ func (h *secondaryLayer3NetworkControllerEventHandler) UpdateResource(oldObj, ne hostCIDRsChanged(oldNode, newNode) || nodeGatewayMTUSupportChanged(oldNode, newNode) _, failed = h.oc.syncEIPNodeRerouteFailed.Load(newNode.Name) - syncReroute := failed || util.NodeHostCIDRsAnnotationChanged(oldNode, newNode) + syncReroute := failed || util.NodeHostCIDRsAnnotationChanged(oldNode, newNode) || + joinCIDRChanged(oldNode, newNode, h.oc.GetNetworkName()) nodeSyncsParam = &nodeSyncs{ syncNode: nodeSync, syncClusterRouterPort: clusterRtrSync, From 5308cbf6e439f53b7ace90a1fbe005fda4729349 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Fri, 6 Jun 2025 18:49:08 -0400 Subject: [PATCH 027/181] CreateDefaultRouteToExternal should use node GR IP annotations 
This function is called from many different threads. Relying on nbdb for the GR IP is not safe here, as the GR IP could be changing due to a k8s event, and the route will be wrongly configured with an old IP still in OVN NBDB. Signed-off-by: Tim Rozet --- go-controller/pkg/kubevirt/router.go | 11 +++++++++- go-controller/pkg/libovsdb/util/router.go | 7 +------ .../pkg/libovsdb/util/router_test.go | 5 ++++- .../egressservice/egressservice_zone.go | 2 +- .../egressservice/egressservice_zone_node.go | 8 +++++++- go-controller/pkg/ovn/egressip.go | 14 ++++++++++--- go-controller/pkg/ovn/egressip_test.go | 2 ++ go-controller/pkg/ovn/egressip_udn_l2_test.go | 8 ++++---- go-controller/pkg/ovn/egressip_udn_l3_test.go | 20 +++++++++++++++---- go-controller/pkg/ovn/egressservices_test.go | 1 + go-controller/pkg/ovn/kubevirt_test.go | 3 +++ go-controller/pkg/ovn/ovn.go | 2 +- .../secondary_layer2_network_controller.go | 2 +- .../secondary_layer3_network_controller.go | 2 +- 14 files changed, 63 insertions(+), 24 deletions(-) diff --git a/go-controller/pkg/kubevirt/router.go b/go-controller/pkg/kubevirt/router.go index f6354d50f9..06a6499e1f 100644 --- a/go-controller/pkg/kubevirt/router.go +++ b/go-controller/pkg/kubevirt/router.go @@ -95,7 +95,16 @@ func EnsureLocalZonePodAddressesToNodeRoute(watchFactory *factory.WatchFactory, if config.OVNKubernetesFeature.EnableInterconnect { // NOTE: EIP & ESVC use same route and if this is already present thanks to those features, // this will be a no-op - if err := libovsdbutil.CreateDefaultRouteToExternal(nbClient, types.OVNClusterRouter, types.GWRouterPrefix+pod.Spec.NodeName, clusterSubnets); err != nil { + node, err := watchFactory.GetNode(pod.Spec.NodeName) + if err != nil { + return fmt.Errorf("failed getting to list node %q for pod %s/%s: %w", pod.Spec.NodeName, pod.Namespace, pod.Name, err) + } + gatewayIPs, err := util.ParseNodeGatewayRouterJoinAddrs(node, types.DefaultNetworkName) + if err != nil { + return fmt.Errorf("failed 
to get default network gateway router join IPs for node %q: %w", node.Name, err) + } + if err := libovsdbutil.CreateDefaultRouteToExternal(nbClient, types.OVNClusterRouter, + types.GWRouterPrefix+pod.Spec.NodeName, clusterSubnets, gatewayIPs); err != nil { return err } } diff --git a/go-controller/pkg/libovsdb/util/router.go b/go-controller/pkg/libovsdb/util/router.go index 12d7db3b27..12ee755d28 100644 --- a/go-controller/pkg/libovsdb/util/router.go +++ b/go-controller/pkg/libovsdb/util/router.go @@ -13,7 +13,6 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -34,11 +33,7 @@ import ( // (TODO: FIXME): With this route, we are officially breaking support for IC with zones that have multiple-nodes // NOTE: This route is exactly the same as what is added by pod-live-migration feature and we keep the route exactly // same across the 3 features so that if the route already exists on the node, this is just a no-op -func CreateDefaultRouteToExternal(nbClient libovsdbclient.Client, clusterRouter, gwRouterName string, clusterSubnets []config.CIDRNetworkEntry) error { - gatewayIPs, err := GetLRPAddrs(nbClient, types.GWRouterToJoinSwitchPrefix+gwRouterName) - if err != nil { - return fmt.Errorf("attempt at finding node gateway router %s network information failed, err: %w", gwRouterName, err) - } +func CreateDefaultRouteToExternal(nbClient libovsdbclient.Client, clusterRouter, gwRouterName string, clusterSubnets []config.CIDRNetworkEntry, gatewayIPs []*net.IPNet) error { for _, clusterSubnet := range clusterSubnets { isClusterSubnetIPV6 := utilnet.IsIPv6String(clusterSubnet.CIDR.IP.String()) gatewayIP, err := util.MatchFirstIPNetFamily(isClusterSubnetIPV6, gatewayIPs) diff --git 
a/go-controller/pkg/libovsdb/util/router_test.go b/go-controller/pkg/libovsdb/util/router_test.go index e2047f57a8..6b0b325189 100644 --- a/go-controller/pkg/libovsdb/util/router_test.go +++ b/go-controller/pkg/libovsdb/util/router_test.go @@ -31,6 +31,9 @@ func TestCreateDefaultRouteToExternal(t *testing.T) { gwRouterPortName := types.GWRouterToJoinSwitchPrefix + gwRouterName gwRouterIPAddressV4 := "100.64.0.3" gwRouterIPAddressV6 := "fd98::3" + gwRouterIPAddressV4CIDR := fmt.Sprintf("%s/32", gwRouterIPAddressV4) + gwRouterIPAddressV6CIDR := fmt.Sprintf("%s/128", gwRouterIPAddressV6) + gatewayIPs := []*net.IPNet{ovntest.MustParseIPNet(gwRouterIPAddressV4CIDR), ovntest.MustParseIPNet(gwRouterIPAddressV6CIDR)} gwRouterPort := &nbdb.LogicalRouterPort{ UUID: gwRouterPortName + "-uuid", Name: gwRouterPortName, @@ -228,7 +231,7 @@ func TestCreateDefaultRouteToExternal(t *testing.T) { tc.preTestAction() } - if err = CreateDefaultRouteToExternal(nbClient, ovnClusterRouterName, gwRouterName, config.Default.ClusterSubnets); err != nil { + if err = CreateDefaultRouteToExternal(nbClient, ovnClusterRouterName, gwRouterName, config.Default.ClusterSubnets, gatewayIPs); err != nil { t.Fatal(fmt.Errorf("failed to run CreateDefaultRouteToExternal: %v", err)) } diff --git a/go-controller/pkg/ovn/controller/egressservice/egressservice_zone.go b/go-controller/pkg/ovn/controller/egressservice/egressservice_zone.go index 5f19a1001b..23f9f8f665 100644 --- a/go-controller/pkg/ovn/controller/egressservice/egressservice_zone.go +++ b/go-controller/pkg/ovn/controller/egressservice/egressservice_zone.go @@ -50,7 +50,7 @@ type InitClusterEgressPoliciesFunc func(client libovsdbclient.Client, addressSet type EnsureNoRerouteNodePoliciesFunc func(client libovsdbclient.Client, addressSetFactory addressset.AddressSetFactory, networkName, controllerName, clusterRouter string, nodeLister corelisters.NodeLister, v4, v6 bool) error type DeleteLegacyDefaultNoRerouteNodePoliciesFunc func(nbClient 
libovsdbclient.Client, clusterRouter, nodeName string) error -type CreateDefaultRouteToExternalFunc func(nbClient libovsdbclient.Client, clusterRouter, gwRouterName string, clusterSubnets []config.CIDRNetworkEntry) error +type CreateDefaultRouteToExternalFunc func(nbClient libovsdbclient.Client, clusterRouter, gwRouterName string, clusterSubnets []config.CIDRNetworkEntry, gatewayIPs []*net.IPNet) error type Controller struct { // network information diff --git a/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_node.go b/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_node.go index b3a5f6c103..94db811bb2 100644 --- a/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_node.go +++ b/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_node.go @@ -151,9 +151,15 @@ func (c *Controller) syncNode(key string) error { return nil } + gatewayIPs, err := util.ParseNodeGatewayRouterJoinAddrs(n, types.DefaultNetworkName) + if err != nil { + return fmt.Errorf("failed to get default network gateway router join IPs for node %q: %w", n.Name, err) + } + // At this point the node exists and is ready if config.OVNKubernetesFeature.EnableInterconnect && c.zone != types.OvnDefaultZone && c.isNodeInLocalZone(n) { - if err := c.createDefaultRouteToExternalForIC(c.nbClient, c.GetNetworkScopedClusterRouterName(), c.GetNetworkScopedGWRouterName(nodeName), c.Subnets()); err != nil { + if err := c.createDefaultRouteToExternalForIC(c.nbClient, c.GetNetworkScopedClusterRouterName(), + c.GetNetworkScopedGWRouterName(nodeName), c.Subnets(), gatewayIPs); err != nil { return err } } diff --git a/go-controller/pkg/ovn/egressip.go b/go-controller/pkg/ovn/egressip.go index 924008a45a..e79b9b29c5 100644 --- a/go-controller/pkg/ovn/egressip.go +++ b/go-controller/pkg/ovn/egressip.go @@ -2051,8 +2051,12 @@ func (e *EgressIPController) addEgressNode(node *corev1.Node) error { // on IC if the node is gone, then the ovn_cluster_router is also gone 
along with all // the routes on it. ni := e.networkManager.GetNetwork(types.DefaultNetworkName) + gatewayIPs, err := util.ParseNodeGatewayRouterJoinAddrs(node, types.DefaultNetworkName) + if err != nil { + return fmt.Errorf("failed to get default network gateway router join IPs for node %q: %w", node.Name, err) + } if err := libovsdbutil.CreateDefaultRouteToExternal(e.nbClient, ni.GetNetworkScopedClusterRouterName(), - ni.GetNetworkScopedGWRouterName(node.Name), ni.Subnets()); err != nil { + ni.GetNetworkScopedGWRouterName(node.Name), ni.Subnets(), gatewayIPs); err != nil { return fmt.Errorf("failed to create route to external for network %s: %v", ni.GetNetworkName(), err) } } @@ -3121,7 +3125,7 @@ func createDefaultNoRerouteServicePolicies(nbClient libovsdbclient.Client, netwo return nil } -func (e *EgressIPController) ensureRouterPoliciesForNetwork(ni util.NetInfo) error { +func (e *EgressIPController) ensureRouterPoliciesForNetwork(ni util.NetInfo, node *corev1.Node) error { e.nodeUpdateMutex.Lock() defer e.nodeUpdateMutex.Unlock() subnetEntries := ni.Subnets() @@ -3146,8 +3150,12 @@ func (e *EgressIPController) ensureRouterPoliciesForNetwork(ni util.NetInfo) err return fmt.Errorf("failed to ensure no reroute node policies for network %s: %v", ni.GetNetworkName(), err) } if config.OVNKubernetesFeature.EnableInterconnect && ni.TopologyType() == types.Layer3Topology { + gatewayIPs, err := util.ParseNodeGatewayRouterJoinAddrs(node, ni.GetNetworkName()) + if err != nil { + return fmt.Errorf("failed to get %q network gateway router join IPs for node %q, err: %w", ni.GetNetworkName(), node.Name, err) + } if err := libovsdbutil.CreateDefaultRouteToExternal(e.nbClient, routerName, - ni.GetNetworkScopedGWRouterName(localNode), subnetEntries); err != nil { + ni.GetNetworkScopedGWRouterName(localNode), subnetEntries, gatewayIPs); err != nil { return fmt.Errorf("failed to create route to external for network %s: %v", ni.GetNetworkName(), err) } } diff --git 
a/go-controller/pkg/ovn/egressip_test.go b/go-controller/pkg/ovn/egressip_test.go index 36c2a2e5f7..b0e5ad142a 100644 --- a/go-controller/pkg/ovn/egressip_test.go +++ b/go-controller/pkg/ovn/egressip_test.go @@ -5418,6 +5418,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" "k8s.ovn.org/node-transit-switch-port-ifaddr": "{\"ipv4\":\"100.88.0.2/16\"}", // used only for ic=true test "k8s.ovn.org/zone-name": node1Zone, // used only for ic=true test util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}}`, nodeLogicalRouterIPv4[0]), } if node1Zone != "global" { annotations["k8s.ovn.org/remote-zone-migrated"] = node1Zone // used only for ic=true test @@ -5432,6 +5433,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" "k8s.ovn.org/node-transit-switch-port-ifaddr": "{\"ipv4\":\"100.88.0.3/16\"}", // used only for ic=true test "k8s.ovn.org/zone-name": node2Zone, // used only for ic=true test util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}}`, node2LogicalRouterIPv4[0]), } if node2Zone != "global" { annotations["k8s.ovn.org/remote-zone-migrated"] = node2Zone // used only for ic=true test diff --git a/go-controller/pkg/ovn/egressip_udn_l2_test.go b/go-controller/pkg/ovn/egressip_udn_l2_test.go index 1432743e53..23a930b2ef 100644 --- a/go-controller/pkg/ovn/egressip_udn_l2_test.go +++ b/go-controller/pkg/ovn/egressip_udn_l2_test.go @@ -304,7 +304,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol fakeOvn.controller.eIPC.nodeZoneState.Store(node2Name, false) fakeOvn.controller.eIPC.zone = node1.Name fakeOvn.controller.zone = node1.Name - err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo) + err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo, &node1) 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.eIPController.ensureSwitchPoliciesForNode(netInfo, node1Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -670,7 +670,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer fakeOvn.networkManager.Stop() // simulate Start() of secondary network controller - err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(secConInfo.bnc.GetNetInfo()) + err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(secConInfo.bnc.GetNetInfo(), &node1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.eIPController.ensureSwitchPoliciesForNode(secConInfo.bnc.GetNetInfo(), node1Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1662,7 +1662,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol defer fakeOvn.networkManager.Stop() err = fakeOvn.controller.WatchEgressNodes() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo) + err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo, &node1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.eIPController.ensureSwitchPoliciesForNode(netInfo, node1Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2026,7 +2026,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol err = fakeOvn.networkManager.Start() gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer fakeOvn.networkManager.Stop() - err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo) + err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo, &node1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.eIPController.ensureSwitchPoliciesForNode(netInfo, node1Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/go-controller/pkg/ovn/egressip_udn_l3_test.go b/go-controller/pkg/ovn/egressip_udn_l3_test.go index 
1a5497d599..dc01cc84c4 100644 --- a/go-controller/pkg/ovn/egressip_udn_l3_test.go +++ b/go-controller/pkg/ovn/egressip_udn_l3_test.go @@ -161,6 +161,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node1Tsp), "k8s.ovn.org/zone-name": node1Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, nodeLogicalRouterIPv4[0], networkName1, nodeLogicalRouterIPv4[0]), } labels := map[string]string{ "k8s.ovn.org/egress-assignable": "", @@ -172,6 +173,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node2Tsp), "k8s.ovn.org/zone-name": node2Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, node2LogicalRouterIPv4[0], networkName1, node2LogicalRouterIPv4[0]), } node2 := getNodeObj(node2Name, node2Annotations, labels) eIP := egressipv1.EgressIP{ @@ -297,7 +299,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol fakeOvn.controller.eIPC.nodeZoneState.Store(node2Name, false) fakeOvn.controller.eIPC.zone = node1.Name fakeOvn.controller.zone = node1.Name - err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo) + err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo, &node1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.eIPController.ensureSwitchPoliciesForNode(netInfo, node1Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -537,6 +539,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/zone-name": node1Name, "k8s.ovn.org/remote-zone-migrated": node1Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", 
node1IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, nodeLogicalRouterIPv4[0], networkName1, nodeLogicalRouterIPv4[0]), } labels := map[string]string{ "k8s.ovn.org/egress-assignable": "", @@ -549,6 +552,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/zone-name": node2Name, "k8s.ovn.org/remote-zone-migrated": node2Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, node2LogicalRouterIPv4[0], networkName1, node2LogicalRouterIPv4[0]), } node2 := getNodeObj(node2Name, node2Annotations, labels) twoNodeStatus := []egressipv1.EgressIPStatusItem{ @@ -670,7 +674,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer fakeOvn.networkManager.Stop() // simulate Start() of secondary network controller - err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(secConInfo.bnc.GetNetInfo()) + err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(secConInfo.bnc.GetNetInfo(), &node1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.eIPController.ensureSwitchPoliciesForNode(secConInfo.bnc.GetNetInfo(), node1Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1056,6 +1060,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/zone-name": node1Name, "k8s.ovn.org/remote-zone-migrated": node1Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, nodeLogicalRouterIPv4[0], networkName1, nodeLogicalRouterIPv4[0]), } labels := map[string]string{ "k8s.ovn.org/egress-assignable": "", @@ -1068,6 +1073,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/zone-name": node2Name, 
"k8s.ovn.org/remote-zone-migrated": node2Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, node2LogicalRouterIPv4[0], networkName1, node2LogicalRouterIPv4[0]), } node2 := getNodeObj(node2Name, node2Annotations, labels) twoNodeStatus := []egressipv1.EgressIPStatusItem{ @@ -1665,6 +1671,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/zone-name": node1Name, "k8s.ovn.org/remote-zone-migrated": node1Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, nodeLogicalRouterIPv4[0], networkName1, nodeLogicalRouterIPv4[0]), } labels := map[string]string{ "k8s.ovn.org/egress-assignable": "", @@ -1677,6 +1684,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/zone-name": node2Name, "k8s.ovn.org/remote-zone-migrated": node2Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, node2LogicalRouterIPv4[0], networkName1, node2LogicalRouterIPv4[0]), } node2 := getNodeObj(node2Name, node2Annotations, labels) twoNodeStatus := []egressipv1.EgressIPStatusItem{ @@ -1798,7 +1806,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol err = fakeOvn.networkManager.Start() gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer fakeOvn.networkManager.Stop() - err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo) + err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo, &node1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.eIPController.ensureSwitchPoliciesForNode(netInfo, node1Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2033,6 +2041,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user 
defined network with topol "k8s.ovn.org/zone-name": node1Name, "k8s.ovn.org/remote-zone-migrated": node1Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, nodeLogicalRouterIPv4[0], networkName1, nodeLogicalRouterIPv4[0]), } labels := map[string]string{ "k8s.ovn.org/egress-assignable": "", @@ -2045,6 +2054,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/zone-name": node2Name, "k8s.ovn.org/remote-zone-migrated": node2Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, node2LogicalRouterIPv4[0], networkName1, node2LogicalRouterIPv4[0]), } node2 := getNodeObj(node2Name, node2Annotations, labels) twoNodeStatus := []egressipv1.EgressIPStatusItem{ @@ -2170,7 +2180,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol err = fakeOvn.networkManager.Start() gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer fakeOvn.networkManager.Stop() - err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo) + err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo, &node1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.eIPController.ensureSwitchPoliciesForNode(netInfo, node1Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2392,6 +2402,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/zone-name": node1Name, "k8s.ovn.org/remote-zone-migrated": node1Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, nodeLogicalRouterIPv4[0], networkName1, nodeLogicalRouterIPv4[0]), } labels := map[string]string{ "k8s.ovn.org/egress-assignable": "", @@ -2404,6 +2415,7 @@ var _ = ginkgo.Describe("EgressIP 
Operations for user defined network with topol "k8s.ovn.org/zone-name": node2Name, "k8s.ovn.org/remote-zone-migrated": node2Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, node2LogicalRouterIPv4[0], networkName1, node2LogicalRouterIPv4[0]), } node2 := getNodeObj(node2Name, node2Annotations, labels) twoNodeStatus := []egressipv1.EgressIPStatusItem{ diff --git a/go-controller/pkg/ovn/egressservices_test.go b/go-controller/pkg/ovn/egressservices_test.go index cf2b6cb3af..5b47022a9d 100644 --- a/go-controller/pkg/ovn/egressservices_test.go +++ b/go-controller/pkg/ovn/egressservices_test.go @@ -956,6 +956,7 @@ var _ = ginkgo.Describe("OVN Egress Service Operations", func() { config.IPv6Mode = true config.OVNKubernetesFeature.EnableInterconnect = interconnectEnabled node1 := nodeFor(node1Name, node1IPv4, node1IPv6, node1IPv4Subnet, node1IPv6Subnet, node1transitIPv4, node1transitIPv6) + node1.Annotations[util.OVNNodeGRLRPAddrs] = `{"default":{"ipv4":"100.64.0.2/16", "ipv6":"fef0::56/16"}}` node2 := nodeFor(node2Name, node2IPv4, node2IPv6, node2IPv4Subnet, node2IPv6Subnet, node2transitIPv4, node2transitIPv6) clusterRouter := &nbdb.LogicalRouter{ diff --git a/go-controller/pkg/ovn/kubevirt_test.go b/go-controller/pkg/ovn/kubevirt_test.go index 1a7dd5fcae..b7c80c6399 100644 --- a/go-controller/pkg/ovn/kubevirt_test.go +++ b/go-controller/pkg/ovn/kubevirt_test.go @@ -665,6 +665,7 @@ var _ = Describe("OVN Kubevirt Operations", func() { Annotations: map[string]string{ "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf(`{"ipv4": %q, "ipv6": %q}`, nodeByName[node1].transitSwitchPortIPv4, nodeByName[node1].transitSwitchPortIPv6), "k8s.ovn.org/node-subnets": fmt.Sprintf(`{"default":[%q,%q]}`, nodeByName[node1].subnetIPv4, nodeByName[node1].subnetIPv6), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s", "ipv6":"%s"}}`, 
nodeByName[node1].lrpNetworkIPv4, nodeByName[node1].lrpNetworkIPv6), }, }, }, @@ -674,6 +675,7 @@ var _ = Describe("OVN Kubevirt Operations", func() { Annotations: map[string]string{ "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf(`{"ipv4": %q, "ipv6": %q}`, nodeByName[node2].transitSwitchPortIPv4, nodeByName[node2].transitSwitchPortIPv6), "k8s.ovn.org/node-subnets": fmt.Sprintf(`{"default":[%q,%q]}`, nodeByName[node2].subnetIPv4, nodeByName[node2].subnetIPv6), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s", "ipv6":"%s"}}`, nodeByName[node2].lrpNetworkIPv4, nodeByName[node2].lrpNetworkIPv6), }, }, }, @@ -683,6 +685,7 @@ var _ = Describe("OVN Kubevirt Operations", func() { Annotations: map[string]string{ "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf(`{"ipv4": %q, "ipv6": %q}`, nodeByName[node3].transitSwitchPortIPv4, nodeByName[node3].transitSwitchPortIPv6), "k8s.ovn.org/node-subnets": fmt.Sprintf(`{"default":[%q,%q]}`, nodeByName[node3].subnetIPv4, nodeByName[node3].subnetIPv6), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s", "ipv6":"%s"}}`, nodeByName[node3].lrpNetworkIPv4, nodeByName[node3].lrpNetworkIPv6), }, }, }, diff --git a/go-controller/pkg/ovn/ovn.go b/go-controller/pkg/ovn/ovn.go index 7a1aad8ed7..692c768d95 100644 --- a/go-controller/pkg/ovn/ovn.go +++ b/go-controller/pkg/ovn/ovn.go @@ -497,7 +497,7 @@ func (oc *DefaultNetworkController) InitEgressServiceZoneController() (*egresssv return nil } // used only when IC=true - createDefaultNodeRouteToExternal := func(_ libovsdbclient.Client, _, _ string, _ []config.CIDRNetworkEntry) error { + createDefaultNodeRouteToExternal := func(_ libovsdbclient.Client, _, _ string, _ []config.CIDRNetworkEntry, _ []*net.IPNet) error { return nil } diff --git a/go-controller/pkg/ovn/secondary_layer2_network_controller.go b/go-controller/pkg/ovn/secondary_layer2_network_controller.go index 69662caa96..fd15ab6684 100644 --- 
a/go-controller/pkg/ovn/secondary_layer2_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer2_network_controller.go @@ -634,7 +634,7 @@ func (oc *SecondaryLayer2NetworkController) addUpdateLocalNodeEvent(node *corev1 if config.OVNKubernetesFeature.EnableEgressIP && nSyncs.syncReroute { rerouteFailed := false - if err := oc.eIPController.ensureRouterPoliciesForNetwork(oc.GetNetInfo()); err != nil { + if err := oc.eIPController.ensureRouterPoliciesForNetwork(oc.GetNetInfo(), node); err != nil { errs = append(errs, fmt.Errorf("failed to ensure EgressIP router policies for network %s: %v", oc.GetNetworkName(), err)) rerouteFailed = true } diff --git a/go-controller/pkg/ovn/secondary_layer3_network_controller.go b/go-controller/pkg/ovn/secondary_layer3_network_controller.go index f8601a6b37..a6c2d500bd 100644 --- a/go-controller/pkg/ovn/secondary_layer3_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer3_network_controller.go @@ -820,7 +820,7 @@ func (oc *SecondaryLayer3NetworkController) addUpdateLocalNodeEvent(node *corev1 if config.OVNKubernetesFeature.EnableEgressIP && util.IsNetworkSegmentationSupportEnabled() && oc.IsPrimaryNetwork() && nSyncs.syncReroute { rerouteFailed := false - if err = oc.eIPController.ensureRouterPoliciesForNetwork(oc.GetNetInfo()); err != nil { + if err = oc.eIPController.ensureRouterPoliciesForNetwork(oc.GetNetInfo(), node); err != nil { errs = append(errs, fmt.Errorf("failed to ensure EgressIP router polices for network %s: %v", oc.GetNetworkName(), err)) rerouteFailed = true } From 304975a0f0af8be29c6089428eb82c5702e9c4f2 Mon Sep 17 00:00:00 2001 From: Yun Zhou Date: Fri, 30 May 2025 17:38:04 -0700 Subject: [PATCH 028/181] Add node deletion unit testing case for zone_ic_handler Signed-off-by: Yun Zhou --- .../zone_interconnect/zone_ic_handler_test.go | 141 ++++++++++++++++++ 1 file changed, 141 insertions(+) diff --git a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler_test.go 
b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler_test.go index e2cbeb3c8b..8af1215714 100644 --- a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler_test.go +++ b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler_test.go @@ -89,6 +89,15 @@ func invokeICHandlerAddNodeFunction(zone string, icHandler *ZoneInterconnectHand return nil } +func invokeICHandlerDeleteNodeFunction(icHandler *ZoneInterconnectHandler, nodes ...*corev1.Node) error { + for _, node := range nodes { + err := icHandler.DeleteNode(node) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + return nil +} + func checkInterconnectResources(zone string, netName string, nbClient libovsdbclient.Client, testNodesRouteInfo map[string]map[string]string, nodes ...*corev1.Node) error { localZoneNodes := []*corev1.Node{} remoteZoneNodes := []*corev1.Node{} @@ -250,6 +259,7 @@ var _ = ginkgo.Describe("Zone Interconnect Operations", func() { initialNBDB []libovsdbtest.TestData initialSBDB []libovsdbtest.TestData testNodesRouteInfo map[string]map[string]string + nodeRouteInfoMap map[string]map[string]map[string]string ) const ( @@ -736,6 +746,137 @@ var _ = ginkgo.Describe("Zone Interconnect Operations", func() { }) }) + ginkgo.Context("Two secondary networks", func() { + ginkgo.BeforeEach(func() { + testNode1 = corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + Annotations: map[string]string{ + ovnNodeChassisIDAnnotatin: "cb9ec8fa-b409-4ef3-9f42-d9283c47aac6", + ovnNodeZoneNameAnnotation: "global", + ovnNodeIDAnnotaton: "2", + ovnNodeSubnetsAnnotation: "{\"red\":[\"10.244.2.0/24\"], \"blue\":[\"11.244.2.0/24\"]}", + ovnTransitSwitchPortAddrAnnotation: "{\"ipv4\":\"100.88.0.2/16\"}", + util.OVNNodeGRLRPAddrs: "{\"default\":{\"ipv4\":\"100.64.0.2/16\"}}", + ovnNodeNetworkIDsAnnotation: "{\"red\":\"2\", \"blue\":\"1\"}", + }, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{{Type: corev1.NodeInternalIP, Address: "10.0.0.10"}}, + }, + } + // node2 is a remote 
zone node + testNode2 = corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node2", + Annotations: map[string]string{ + ovnNodeChassisIDAnnotatin: "cb9ec8fa-b409-4ef3-9f42-d9283c47aac7", + ovnNodeZoneNameAnnotation: "foo", + ovnNodeIDAnnotaton: "3", + ovnNodeSubnetsAnnotation: "{\"red\":[\"10.244.3.0/24\"], \"blue\":[\"11.244.3.0/24\"]}", + ovnTransitSwitchPortAddrAnnotation: "{\"ipv4\":\"100.88.0.3/16\"}", + util.OVNNodeGRLRPAddrs: "{\"defalut\":{\"ipv4\":\"100.64.0.3/16\"}}", + ovnNodeNetworkIDsAnnotation: "{\"red\":\"2\", \"blue\":\"1\"}", + }, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{{Type: corev1.NodeInternalIP, Address: "10.0.0.11"}}, + }, + } + // node3 is a remote zone node + testNode3 = corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node3", + Annotations: map[string]string{ + ovnNodeChassisIDAnnotatin: "cb9ec8fa-b409-4ef3-9f42-d9283c47aac8", + ovnNodeZoneNameAnnotation: "foo", + ovnNodeIDAnnotaton: "4", + ovnNodeSubnetsAnnotation: "{\"red\":[\"10.244.4.0/24\"], \"blue\":[\"11.244.4.0/24\"]}", + ovnTransitSwitchPortAddrAnnotation: "{\"ipv4\":\"100.88.0.4/16\"}", + util.OVNNodeGRLRPAddrs: "{\"default\":{\"ipv4\":\"100.64.0.4/16\"}}", + ovnNodeNetworkIDsAnnotation: "{\"red\":\"2\", \"blue\":\"1\"}", + }, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{{Type: corev1.NodeInternalIP, Address: "10.0.0.12"}}, + }, + } + + nodeRouteInfoMap = map[string]map[string]map[string]string{ + "red": { + "node1": {"node-subnets": "10.244.2.0/24", "ts-ip": "100.88.0.2", "host-route": "100.64.0.2/32"}, + "node2": {"node-subnets": "10.244.3.0/24", "ts-ip": "100.88.0.3", "host-route": "100.64.0.3/32"}, + "node3": {"node-subnets": "10.244.4.0/24", "ts-ip": "100.88.0.4", "host-route": "100.64.0.4/32"}, + }, + "blue": { + "node1": {"node-subnets": "11.244.2.0/24", "ts-ip": "100.88.0.2", "host-route": "100.64.0.2/32"}, + "node2": {"node-subnets": "11.244.3.0/24", "ts-ip": "100.88.0.3", "host-route": "100.64.0.3/32"}, + 
"node3": {"node-subnets": "11.244.4.0/24", "ts-ip": "100.88.0.4", "host-route": "100.64.0.4/32"}, + }, + } + initialNBDB = []libovsdbtest.TestData{ + newOVNClusterRouter("blue"), + newOVNClusterRouter("red"), + } + + initialSBDB = []libovsdbtest.TestData{ + &node1Chassis, &node2Chassis, &node3Chassis} + }) + + ginkgo.It("Delete remote node", func() { + app.Action = func(ctx *cli.Context) error { + dbSetup := libovsdbtest.TestSetup{ + NBData: initialNBDB, + SBData: initialSBDB, + } + + _, err := config.InitConfig(ctx, nil, nil) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + config.Kubernetes.HostNetworkNamespace = "" + + var libovsdbOvnNBClient, libovsdbOvnSBClient libovsdbclient.Client + libovsdbOvnNBClient, libovsdbOvnSBClient, libovsdbCleanup, err = libovsdbtest.NewNBSBTestHarness(dbSetup) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + zoneICHandler := map[string]*ZoneInterconnectHandler{} + for _, netName := range []string{"red", "blue"} { + err = createTransitSwitchPortBindings(libovsdbOvnSBClient, netName, &testNode1, &testNode2, &testNode3) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + netInfo, err := util.NewNetInfo(&ovncnitypes.NetConf{NetConf: cnitypes.NetConf{Name: netName}, Topology: types.Layer3Topology}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + zoneICHandler[netName] = NewZoneInterconnectHandler(netInfo, libovsdbOvnNBClient, libovsdbOvnSBClient, nil) + err = zoneICHandler[netName].createOrUpdateTransitSwitch(1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = invokeICHandlerAddNodeFunction("global", zoneICHandler[netName], &testNode1, &testNode2, &testNode3) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = checkInterconnectResources("global", netName, libovsdbOvnNBClient, nodeRouteInfoMap[netName], &testNode1, &testNode2, &testNode3) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + // Check the logical entities are as expected when a remote node is deleted + ginkgo.By("Delete remote node 
\"red\"") + delete(nodeRouteInfoMap["red"], "node3") + err = invokeICHandlerDeleteNodeFunction(zoneICHandler["red"], &testNode3) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = checkInterconnectResources("global", "red", libovsdbOvnNBClient, nodeRouteInfoMap["red"], &testNode1, &testNode2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = checkInterconnectResources("global", "blue", libovsdbOvnNBClient, nodeRouteInfoMap["blue"], &testNode1, &testNode2, &testNode3) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return nil + } + + err := app.Run([]string{ + app.Name, + "-cluster-subnets=" + clusterCIDR, + "-init-cluster-manager", + "-zone-join-switch-subnets=" + joinSubnetCIDR, + "-enable-interconnect", + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + }) + ginkgo.Context("Error scenarios", func() { ginkgo.It("Missing annotations and error scenarios for local node", func() { app.Action = func(ctx *cli.Context) error { From 61f57e2aff4351a9eaddec5439a97bd19d1a8f81 Mon Sep 17 00:00:00 2001 From: Yun Zhou Date: Thu, 5 Jun 2025 13:12:33 -0700 Subject: [PATCH 029/181] Delete logical static routes only if they belong to the specified router Do not try to delete the logical router static route from the specified logical router if the route does not belong to the router.
Signed-off-by: Yun Zhou --- go-controller/pkg/libovsdb/ops/router.go | 37 +++---- go-controller/pkg/libovsdb/ops/router_test.go | 96 +++++++++++++++++++ 2 files changed, 109 insertions(+), 24 deletions(-) diff --git a/go-controller/pkg/libovsdb/ops/router.go b/go-controller/pkg/libovsdb/ops/router.go index 3d5a6fc255..df87307918 100644 --- a/go-controller/pkg/libovsdb/ops/router.go +++ b/go-controller/pkg/libovsdb/ops/router.go @@ -761,8 +761,8 @@ func CreateOrReplaceLogicalRouterStaticRouteWithPredicateOps( } // DeleteLogicalRouterStaticRoutesWithPredicate looks up logical router static -// routes from the cache based on a given predicate, deletes them and removes -// them from the provided logical router +// routes from the logical router of the specified name based on a given predicate, +// deletes them and removes them from the provided logical router func DeleteLogicalRouterStaticRoutesWithPredicate(nbClient libovsdbclient.Client, routerName string, p logicalRouterStaticRoutePredicate) error { var ops []ovsdb.Operation var err error @@ -775,32 +775,21 @@ func DeleteLogicalRouterStaticRoutesWithPredicate(nbClient libovsdbclient.Client } // DeleteLogicalRouterStaticRoutesWithPredicateOps looks up logical router static -// routes from the cache based on a given predicate, and returns the ops to delete -// them and remove them from the provided logical router +// routes from the logical router of the specified name based on a given predicate, +// and returns the ops to delete them and remove them from the provided logical router func DeleteLogicalRouterStaticRoutesWithPredicateOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, routerName string, p logicalRouterStaticRoutePredicate) ([]ovsdb.Operation, error) { - router := &nbdb.LogicalRouter{ - Name: routerName, + lrsrs, err := GetRouterLogicalRouterStaticRoutesWithPredicate(nbClient, &nbdb.LogicalRouter{Name: routerName}, p) + if err != nil { + if errors.Is(err, libovsdbclient.ErrNotFound) { + return ops, 
nil + } + return nil, fmt.Errorf("unable to find logical router static routes with predicate on router %s: %w", routerName, err) } - deleted := []*nbdb.LogicalRouterStaticRoute{} - opModels := []operationModel{ - { - ModelPredicate: p, - ExistingResult: &deleted, - DoAfter: func() { router.StaticRoutes = extractUUIDsFromModels(deleted) }, - ErrNotFound: false, - BulkOp: true, - }, - { - Model: router, - OnModelMutations: []interface{}{&router.StaticRoutes}, - ErrNotFound: false, - BulkOp: false, - }, + if len(lrsrs) == 0 { + return ops, nil } - - m := newModelClient(nbClient) - return m.DeleteOps(ops, opModels...) + return DeleteLogicalRouterStaticRoutesOps(nbClient, ops, routerName, lrsrs...) } // DeleteLogicalRouterStaticRoutesOps deletes the logical router static routes and diff --git a/go-controller/pkg/libovsdb/ops/router_test.go b/go-controller/pkg/libovsdb/ops/router_test.go index fd4879ebd6..579814b27e 100644 --- a/go-controller/pkg/libovsdb/ops/router_test.go +++ b/go-controller/pkg/libovsdb/ops/router_test.go @@ -306,3 +306,99 @@ func TestDeleteRoutersWithPredicateOps(t *testing.T) { }) } } + +func TestDeleteLogicalRouterStaticRoutes(t *testing.T) { + fakeRouter1LRSR1 := &nbdb.LogicalRouterStaticRoute{ + UUID: buildNamedUUID(), + IPPrefix: "192.168.1.0/24", + Nexthop: "192.168.1.0", + ExternalIDs: map[string]string{"id": "v1"}, + } + + fakeRouter1LRSR2 := &nbdb.LogicalRouterStaticRoute{ + UUID: buildNamedUUID(), + IPPrefix: "192.169.1.0/24", + Nexthop: "192.169.1.0", + ExternalIDs: map[string]string{"id": "v2"}, + } + + fakeRouter2LRSR1 := &nbdb.LogicalRouterStaticRoute{ + UUID: buildNamedUUID(), + IPPrefix: "192.170.1.0/24", + Nexthop: "192.170.1.0", + ExternalIDs: map[string]string{"id": "v1"}, + } + + tests := []struct { + desc string + expectErr bool + routerName string + lrsrs []*nbdb.LogicalRouterStaticRoute + initialNbdb libovsdbtest.TestSetup + expectedNbdb libovsdbtest.TestSetup + }{ + { + desc: "delete logical router static route with predicate 
will only delete static route from the specified router", + initialNbdb: libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + fakeRouter1LRSR1, + fakeRouter1LRSR2, + fakeRouter2LRSR1, + &nbdb.LogicalRouter{ + Name: "rtr1", + UUID: buildNamedUUID(), + StaticRoutes: []string{fakeRouter1LRSR1.UUID, fakeRouter1LRSR2.UUID}, + }, + &nbdb.LogicalRouter{ + Name: "rtr2", + UUID: buildNamedUUID(), + StaticRoutes: []string{fakeRouter2LRSR1.UUID}, + }, + }, + }, + expectedNbdb: libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + fakeRouter1LRSR2, + fakeRouter2LRSR1, + &nbdb.LogicalRouter{ + Name: "rtr1", + UUID: buildNamedUUID(), + StaticRoutes: []string{fakeRouter1LRSR2.UUID}, + }, + &nbdb.LogicalRouter{ + Name: "rtr2", + UUID: buildNamedUUID(), + StaticRoutes: []string{fakeRouter2LRSR1.UUID}, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + nbClient, cleanup, err := libovsdbtest.NewNBTestHarness(tt.initialNbdb, nil) + if err != nil { + t.Fatalf("test: \"%s\" failed to set up test harness: %v", tt.desc, err) + } + t.Cleanup(cleanup.Cleanup) + + err = DeleteLogicalRouterStaticRoutesWithPredicate(nbClient, "rtr1", func(item *nbdb.LogicalRouterStaticRoute) bool { + return item.ExternalIDs["id"] == "v1" + }) + if err != nil && !tt.expectErr { + t.Fatal(fmt.Errorf("DeleteLogicalRouterStaticRoutesWithPredicate() error = %v", err)) + } + + matcher := libovsdbtest.HaveData(tt.expectedNbdb.NBData) + success, err := matcher.Match(nbClient) + + if !success { + t.Fatal(fmt.Errorf("test: \"%s\" didn't match expected with actual, err: %v", tt.desc, matcher.FailureMessage(nbClient))) + } + if err != nil { + t.Fatal(fmt.Errorf("test: \"%s\" encountered error: %v", tt.desc, err)) + } + }) + } +} From d14d8483505700c60c808d87daa2bbe6c29efe08 Mon Sep 17 00:00:00 2001 From: Yun Zhou Date: Thu, 29 May 2025 10:01:36 -0700 Subject: [PATCH 030/181] remote node deletion failure due to libovsdb integrity violation error ovnkube-controller 
is trying to delete logical static route from the router it does not belong, which ends with the error: "referential integrity violation: cannot delete Logical_Router_Static_Route row ... because of 1 remaining references" Signed-off-by: Yun Zhou --- .../pkg/ovn/zone_interconnect/zone_ic_handler.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go index cc849b6c15..f484bc1528 100644 --- a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go +++ b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go @@ -515,7 +515,9 @@ func (zic *ZoneInterconnectHandler) cleanupNode(nodeName string) error { return err } - // Delete any static routes in the cluster router for this node + // Delete any static routes in the cluster router for this node. + // skip types.NetworkExternalID check in the predicate function as this static route may be deleted + // before types.NetworkExternalID external-ids is set correctly during upgrade. p := func(lrsr *nbdb.LogicalRouterStaticRoute) bool { return lrsr.ExternalIDs["ic-node"] == nodeName } @@ -573,11 +575,15 @@ func (zic *ZoneInterconnectHandler) addRemoteNodeStaticRoutes(node *corev1.Node, addRoute := func(prefix, nexthop string) error { logicalRouterStaticRoute := nbdb.LogicalRouterStaticRoute{ ExternalIDs: map[string]string{ - "ic-node": node.Name, + "ic-node": node.Name, + types.NetworkExternalID: zic.GetNetworkName(), }, Nexthop: nexthop, IPPrefix: prefix, } + // Note that because logical router static routes were originally created without types.NetworkExternalID + // external-ids, skip types.NetworkExternalID check in the predicate function to replace existing static route + // with correct external-ids on an upgrade scenario. 
p := func(lrsr *nbdb.LogicalRouterStaticRoute) bool { return lrsr.IPPrefix == prefix && lrsr.Nexthop == nexthop && @@ -613,6 +619,8 @@ func (zic *ZoneInterconnectHandler) addRemoteNodeStaticRoutes(node *corev1.Node, // deleteLocalNodeStaticRoutes deletes the static routes added by the function addRemoteNodeStaticRoutes func (zic *ZoneInterconnectHandler) deleteLocalNodeStaticRoutes(node *corev1.Node, nodeTransitSwitchPortIPs []*net.IPNet) error { + // skip types.NetworkExternalID check in the predicate function as this static route may be deleted + // before types.NetworkExternalID external-ids is set correctly during upgrade. deleteRoute := func(prefix, nexthop string) error { p := func(lrsr *nbdb.LogicalRouterStaticRoute) bool { return lrsr.IPPrefix == prefix && From 10be5961a8663d801ec16b34ba60633d867cfa8d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Jun 2025 15:35:32 +0000 Subject: [PATCH 031/181] Bump golang.org/x/crypto Bumps the go_modules group with 1 update in the /test/e2e directory: [golang.org/x/crypto](https://github.com/golang/crypto). Updates `golang.org/x/crypto` from 0.24.0 to 0.31.0 - [Commits](https://github.com/golang/crypto/compare/v0.24.0...v0.31.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-type: indirect dependency-group: go_modules ... 
Signed-off-by: dependabot[bot] --- test/e2e/go.mod | 10 +++++----- test/e2e/go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/test/e2e/go.mod b/test/e2e/go.mod index 6a865f71ee..d1d514d1f9 100644 --- a/test/e2e/go.mod +++ b/test/e2e/go.mod @@ -12,7 +12,7 @@ require ( github.com/onsi/ginkgo/v2 v2.22.0 github.com/onsi/gomega v1.36.1 github.com/pkg/errors v0.9.1 - golang.org/x/sync v0.8.0 + golang.org/x/sync v0.11.0 k8s.io/api v0.32.3 k8s.io/apimachinery v0.32.3 k8s.io/client-go v0.32.3 @@ -145,13 +145,13 @@ require ( go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.28.0 // indirect + golang.org/x/crypto v0.35.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/sys v0.30.0 // indirect + golang.org/x/term v0.29.0 // indirect + golang.org/x/text v0.22.0 // indirect golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.26.0 // indirect google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect diff --git a/test/e2e/go.sum b/test/e2e/go.sum index 6fee7ac542..239bd56b7a 100644 --- a/test/e2e/go.sum +++ b/test/e2e/go.sum @@ -581,8 +581,8 @@ golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod 
h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= +golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -690,8 +690,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= +golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -752,15 +752,15 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 
h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= +golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= +golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -771,8 +771,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= +golang.org/x/text 
v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 2412b5068f042c9ccd4a3c3e4e8a586e99877aeb Mon Sep 17 00:00:00 2001 From: Sebastian Sch Date: Mon, 3 Feb 2025 15:11:50 +0200 Subject: [PATCH 032/181] stop adding events to NAD if the network type is not ovn-k If NADs like bridge,macvlan or others exist we should not record an error event for it Also in case the NAD is not ovn-k for example multus we support chain plugins. Signed-off-by: Sebastian Sch --- go-controller/pkg/config/cni.go | 8 ++++---- .../pkg/networkmanager/nad_controller.go | 5 +++++ .../pkg/networkmanager/nad_controller_test.go | 15 +++++++++++++++ go-controller/pkg/util/multi_network.go | 3 +++ go-controller/pkg/util/multi_network_test.go | 12 +++++++++++- 5 files changed, 38 insertions(+), 5 deletions(-) diff --git a/go-controller/pkg/config/cni.go b/go-controller/pkg/config/cni.go index 3d935c5c6a..3bec2d286f 100644 --- a/go-controller/pkg/config/cni.go +++ b/go-controller/pkg/config/cni.go @@ -120,10 +120,6 @@ func parseNetConfSingle(bytes []byte) (*ovncnitypes.NetConf, error) { } func parseNetConfList(confList *libcni.NetworkConfigList) (*ovncnitypes.NetConf, error) { - if len(confList.Plugins) > 1 { - return nil, ErrorChainingNotSupported - } - netconf := &ovncnitypes.NetConf{MTU: Default.MTU} if err := json.Unmarshal(confList.Plugins[0].Bytes, netconf); err != nil { return nil, err @@ -134,6 +130,10 @@ func parseNetConfList(confList *libcni.NetworkConfigList) (*ovncnitypes.NetConf, return nil, ErrorAttachDefNotOvnManaged } + if len(confList.Plugins) > 1 { + return nil, ErrorChainingNotSupported + } + netconf.Name = confList.Name netconf.CNIVersion = 
confList.CNIVersion diff --git a/go-controller/pkg/networkmanager/nad_controller.go b/go-controller/pkg/networkmanager/nad_controller.go index b0c6a3198a..a212566ce1 100644 --- a/go-controller/pkg/networkmanager/nad_controller.go +++ b/go-controller/pkg/networkmanager/nad_controller.go @@ -274,6 +274,11 @@ func (c *nadController) syncNAD(key string, nad *nettypes.NetworkAttachmentDefin if nad != nil { nadNetwork, err = util.ParseNADInfo(nad) if err != nil { + // in case the type for the NAD is not ovn-k we should not record the error event + if err.Error() == util.ErrorAttachDefNotOvnManaged.Error() { + return nil + } + if c.recorder != nil { c.recorder.Eventf(&corev1.ObjectReference{Kind: nad.Kind, Namespace: nad.Namespace, Name: nad.Name}, corev1.EventTypeWarning, "InvalidConfig", "Failed to parse network config: %v", err.Error()) diff --git a/go-controller/pkg/networkmanager/nad_controller_test.go b/go-controller/pkg/networkmanager/nad_controller_test.go index c8a59b30b4..1ce5ad9168 100644 --- a/go-controller/pkg/networkmanager/nad_controller_test.go +++ b/go-controller/pkg/networkmanager/nad_controller_test.go @@ -469,6 +469,21 @@ func TestNADController(t *testing.T) { }, }, }, + { + name: "non ovn-k NAD added", + args: []args{ + { + nad: "test/nad_1", + network: &ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{ + Name: "test", + Type: "sriov", + }, + }, + wantErr: false, + }, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/go-controller/pkg/util/multi_network.go b/go-controller/pkg/util/multi_network.go index b1679462f3..2cf3d906f6 100644 --- a/go-controller/pkg/util/multi_network.go +++ b/go-controller/pkg/util/multi_network.go @@ -1151,6 +1151,9 @@ func ParseNADInfo(nad *nettypes.NetworkAttachmentDefinition) (NetInfo, error) { func ParseNetConf(netattachdef *nettypes.NetworkAttachmentDefinition) (*ovncnitypes.NetConf, error) { netconf, err := config.ParseNetConf([]byte(netattachdef.Spec.Config)) if err != nil { + if 
err.Error() == ErrorAttachDefNotOvnManaged.Error() { + return nil, err + } return nil, fmt.Errorf("error parsing Network Attachment Definition %s/%s: %v", netattachdef.Namespace, netattachdef.Name, err) } diff --git a/go-controller/pkg/util/multi_network_test.go b/go-controller/pkg/util/multi_network_test.go index 56f18d058a..daaaf920a5 100644 --- a/go-controller/pkg/util/multi_network_test.go +++ b/go-controller/pkg/util/multi_network_test.go @@ -180,7 +180,7 @@ func TestParseNetconf(t *testing.T) { "netAttachDefName": "default/tenantred" } `, - expectedError: fmt.Errorf("error parsing Network Attachment Definition ns1/nad1: net-attach-def not managed by OVN"), + expectedError: fmt.Errorf("net-attach-def not managed by OVN"), }, { desc: "attachment definition with IPAM key defined, using a wrong type", @@ -1154,6 +1154,16 @@ func TestSubnetOverlapCheck(t *testing.T) { } `, }, + { + desc: "return error when the network is not ovnk", + inputNetAttachDefConfigSpec: ` + { + "name": "test", + "type": "sriov-cni" + } + `, + expectedError: ErrorAttachDefNotOvnManaged, + }, } for _, test := range tests { From 23c3b5a8ebf02a8c532f8f524b4866d5764c4e13 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Thu, 12 Jun 2025 10:17:17 +0200 Subject: [PATCH 033/181] [ACL tier] Rename BuildACL to BuildACLWithDefaultTier This helps to avoid confusion about defaulting the ACL tier. Update BuildACL to require the tier as an argument. 
Signed-off-by: Nadia Pinaeva --- go-controller/pkg/libovsdb/util/acl.go | 13 ++++++++++--- .../pkg/ovn/admin_network_policy_test.go | 2 +- .../ovn/base_network_controller_multicast.go | 8 ++++---- .../pkg/ovn/base_network_controller_policy.go | 10 +++++----- go-controller/pkg/ovn/egressfirewall.go | 2 +- go-controller/pkg/ovn/gateway_test.go | 2 +- go-controller/pkg/ovn/gress_policy.go | 4 ++-- go-controller/pkg/ovn/udn_isolation.go | 18 +++++++++--------- go-controller/pkg/types/const.go | 7 +++---- 9 files changed, 36 insertions(+), 30 deletions(-) diff --git a/go-controller/pkg/libovsdb/util/acl.go b/go-controller/pkg/libovsdb/util/acl.go index dbb6c2b3e5..798c1b773a 100644 --- a/go-controller/pkg/libovsdb/util/acl.go +++ b/go-controller/pkg/libovsdb/util/acl.go @@ -88,11 +88,18 @@ func GetACLName(dbIDs *libovsdbops.DbObjectIDs) string { return fmt.Sprintf("%.63s", aclName) } +// BuildACLWithDefaultTier is used for the most ACL-related features with the default ACL tier. +// That includes egress firewall, network policy, multicast. +func BuildACLWithDefaultTier(dbIDs *libovsdbops.DbObjectIDs, priority int, match, action string, logLevels *ACLLoggingLevels, + aclT ACLPipelineType) *nbdb.ACL { + return BuildACL(dbIDs, priority, match, action, logLevels, aclT, types.DefaultACLTier) +} + // BuildACL should be used to build ACL instead of directly calling libovsdbops.BuildACL. 
// It can properly set and reset log settings for ACL based on ACLLoggingLevels, and // set acl.Name and acl.ExternalIDs based on given DbIDs func BuildACL(dbIDs *libovsdbops.DbObjectIDs, priority int, match, action string, logLevels *ACLLoggingLevels, - aclT ACLPipelineType) *nbdb.ACL { + aclT ACLPipelineType, tier int) *nbdb.ACL { var options map[string]string var direction string switch aclT { @@ -122,13 +129,13 @@ func BuildACL(dbIDs *libovsdbops.DbObjectIDs, priority int, match, action string log, externalIDs, options, - types.DefaultACLTier, + tier, ) return ACL } func BuildANPACL(dbIDs *libovsdbops.DbObjectIDs, priority int, match, action string, aclT ACLPipelineType, logLevels *ACLLoggingLevels) *nbdb.ACL { - anpACL := BuildACL(dbIDs, priority, match, action, logLevels, aclT) + anpACL := BuildACLWithDefaultTier(dbIDs, priority, match, action, logLevels, aclT) anpACL.Tier = GetACLTier(dbIDs) return anpACL } diff --git a/go-controller/pkg/ovn/admin_network_policy_test.go b/go-controller/pkg/ovn/admin_network_policy_test.go index 82eac3cf9d..152ee0c0a8 100644 --- a/go-controller/pkg/ovn/admin_network_policy_test.go +++ b/go-controller/pkg/ovn/admin_network_policy_test.go @@ -94,7 +94,7 @@ func getANPGressACL(action, anpName, direction string, rulePriority int32, ruleIndex int32, ports *[]anpapi.AdminNetworkPolicyPort, namedPorts map[string][]libovsdbutil.NamedNetworkPolicyPort, banp bool) []*nbdb.ACL { retACLs := []*nbdb.ACL{} - // we are not using BuildACL and instead manually building it on purpose so that the code path for BuildACL is also tested + // we are not using BuildACLWithDefaultTier and instead manually building it on purpose so that the code path for BuildACLWithDefaultTier is also tested acl := nbdb.ACL{} acl.Action = action acl.Severity = nil diff --git a/go-controller/pkg/ovn/base_network_controller_multicast.go b/go-controller/pkg/ovn/base_network_controller_multicast.go index eadb47882a..6f413177d5 100644 --- 
a/go-controller/pkg/ovn/base_network_controller_multicast.go +++ b/go-controller/pkg/ovn/base_network_controller_multicast.go @@ -119,13 +119,13 @@ func (bnc *BaseNetworkController) createMulticastAllowPolicy(ns string, nsInfo * egressMatch := libovsdbutil.GetACLMatch(portGroupName, bnc.getMulticastACLEgrMatch(), aclDir) dbIDs := getNamespaceMcastACLDbIDs(ns, aclDir, bnc.controllerName) aclPipeline := libovsdbutil.ACLDirectionToACLPipeline(aclDir) - egressACL := libovsdbutil.BuildACL(dbIDs, types.DefaultMcastAllowPriority, egressMatch, nbdb.ACLActionAllow, nil, aclPipeline) + egressACL := libovsdbutil.BuildACLWithDefaultTier(dbIDs, types.DefaultMcastAllowPriority, egressMatch, nbdb.ACLActionAllow, nil, aclPipeline) aclDir = libovsdbutil.ACLIngress ingressMatch := libovsdbutil.GetACLMatch(portGroupName, bnc.getMulticastACLIgrMatch(nsInfo), aclDir) dbIDs = getNamespaceMcastACLDbIDs(ns, aclDir, bnc.controllerName) aclPipeline = libovsdbutil.ACLDirectionToACLPipeline(aclDir) - ingressACL := libovsdbutil.BuildACL(dbIDs, types.DefaultMcastAllowPriority, ingressMatch, nbdb.ACLActionAllow, nil, aclPipeline) + ingressACL := libovsdbutil.BuildACLWithDefaultTier(dbIDs, types.DefaultMcastAllowPriority, ingressMatch, nbdb.ACLActionAllow, nil, aclPipeline) acls := []*nbdb.ACL{egressACL, ingressACL} ops, err := libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, nil, bnc.GetSamplingConfig(), acls...) 
@@ -186,7 +186,7 @@ func (bnc *BaseNetworkController) createDefaultDenyMulticastPolicy() error { for _, aclDir := range []libovsdbutil.ACLDirection{libovsdbutil.ACLEgress, libovsdbutil.ACLIngress} { dbIDs := getDefaultMcastACLDbIDs(mcastDefaultDenyID, aclDir, bnc.controllerName) aclPipeline := libovsdbutil.ACLDirectionToACLPipeline(aclDir) - acl := libovsdbutil.BuildACL(dbIDs, types.DefaultMcastDenyPriority, match, nbdb.ACLActionDrop, nil, aclPipeline) + acl := libovsdbutil.BuildACLWithDefaultTier(dbIDs, types.DefaultMcastDenyPriority, match, nbdb.ACLActionDrop, nil, aclPipeline) acls = append(acls, acl) } ops, err := libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, nil, bnc.GetSamplingConfig(), acls...) @@ -228,7 +228,7 @@ func (bnc *BaseNetworkController) createDefaultAllowMulticastPolicy() error { match := libovsdbutil.GetACLMatch(rtrPGName, mcastMatch, aclDir) dbIDs := getDefaultMcastACLDbIDs(mcastAllowInterNodeID, aclDir, bnc.controllerName) aclPipeline := libovsdbutil.ACLDirectionToACLPipeline(aclDir) - acl := libovsdbutil.BuildACL(dbIDs, types.DefaultMcastAllowPriority, match, nbdb.ACLActionAllow, nil, aclPipeline) + acl := libovsdbutil.BuildACLWithDefaultTier(dbIDs, types.DefaultMcastAllowPriority, match, nbdb.ACLActionAllow, nil, aclPipeline) acls = append(acls, acl) } diff --git a/go-controller/pkg/ovn/base_network_controller_policy.go b/go-controller/pkg/ovn/base_network_controller_policy.go index f4c10bfacf..95665068c7 100644 --- a/go-controller/pkg/ovn/base_network_controller_policy.go +++ b/go-controller/pkg/ovn/base_network_controller_policy.go @@ -246,11 +246,11 @@ func (bnc *BaseNetworkController) addHairpinAllowACL() error { } ingressACLIDs := bnc.getNetpolDefaultACLDbIDs(string(knet.PolicyTypeIngress)) - ingressACL := libovsdbutil.BuildACL(ingressACLIDs, types.DefaultAllowPriority, match, + ingressACL := libovsdbutil.BuildACLWithDefaultTier(ingressACLIDs, types.DefaultAllowPriority, match, nbdb.ACLActionAllowRelated, nil, 
libovsdbutil.LportIngress) egressACLIDs := bnc.getNetpolDefaultACLDbIDs(string(knet.PolicyTypeEgress)) - egressACL := libovsdbutil.BuildACL(egressACLIDs, types.DefaultAllowPriority, match, + egressACL := libovsdbutil.BuildACLWithDefaultTier(egressACLIDs, types.DefaultAllowPriority, match, nbdb.ACLActionAllowRelated, nil, libovsdbutil.LportEgressAfterLB) ops, err := libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, nil, nil, ingressACL, egressACL) @@ -329,7 +329,7 @@ func (bnc *BaseNetworkController) addAllowACLFromNode(switchName string, mgmtPor } match := fmt.Sprintf("%s.src==%s", ipFamily, mgmtPortIP.String()) dbIDs := getAllowFromNodeACLDbIDs(switchName, mgmtPortIP.String(), bnc.controllerName) - nodeACL := libovsdbutil.BuildACL(dbIDs, types.DefaultAllowPriority, match, + nodeACL := libovsdbutil.BuildACLWithDefaultTier(dbIDs, types.DefaultAllowPriority, match, nbdb.ACLActionAllowRelated, nil, libovsdbutil.LportIngress) ops, err := libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, nil, bnc.GetSamplingConfig(), nodeACL) @@ -382,9 +382,9 @@ func (bnc *BaseNetworkController) buildDenyACLs(namespace, pgName string, aclLog allowMatch := libovsdbutil.GetACLMatch(pgName, arpAllowPolicyMatch, aclDir) aclPipeline := libovsdbutil.ACLDirectionToACLPipeline(aclDir) - denyACL = libovsdbutil.BuildACL(bnc.getDefaultDenyPolicyACLIDs(namespace, aclDir, defaultDenyACL), + denyACL = libovsdbutil.BuildACLWithDefaultTier(bnc.getDefaultDenyPolicyACLIDs(namespace, aclDir, defaultDenyACL), types.DefaultDenyPriority, denyMatch, nbdb.ACLActionDrop, aclLogging, aclPipeline) - allowACL = libovsdbutil.BuildACL(bnc.getDefaultDenyPolicyACLIDs(namespace, aclDir, arpAllowACL), + allowACL = libovsdbutil.BuildACLWithDefaultTier(bnc.getDefaultDenyPolicyACLIDs(namespace, aclDir, arpAllowACL), types.DefaultAllowPriority, allowMatch, nbdb.ACLActionAllow, nil, aclPipeline) return } diff --git a/go-controller/pkg/ovn/egressfirewall.go b/go-controller/pkg/ovn/egressfirewall.go index 4e49505d04..9618c1b5a9 
100644 --- a/go-controller/pkg/ovn/egressfirewall.go +++ b/go-controller/pkg/ovn/egressfirewall.go @@ -467,7 +467,7 @@ func (oc *DefaultNetworkController) addEgressFirewallRules(ef *egressFirewall, p func (oc *DefaultNetworkController) createEgressFirewallACLOps(ops []ovsdb.Operation, ruleIdx int, match, action, namespace, pgName string, aclLogging *libovsdbutil.ACLLoggingLevels) ([]ovsdb.Operation, error) { aclIDs := oc.getEgressFirewallACLDbIDs(namespace, ruleIdx) priority := types.EgressFirewallStartPriority - ruleIdx - egressFirewallACL := libovsdbutil.BuildACL( + egressFirewallACL := libovsdbutil.BuildACLWithDefaultTier( aclIDs, priority, match, diff --git a/go-controller/pkg/ovn/gateway_test.go b/go-controller/pkg/ovn/gateway_test.go index 57f5fb4be2..b7a29739e4 100644 --- a/go-controller/pkg/ovn/gateway_test.go +++ b/go-controller/pkg/ovn/gateway_test.go @@ -43,7 +43,7 @@ func generateAdvertisedUDNIsolationExpectedNB(testData []libovsdbtest.TestData, passMatches = append(passMatches, fmt.Sprintf("(%s.src == %s && %s.dst == %s)", ipPrefix, subnet, ipPrefix, subnet)) } - passACL := libovsdbutil.BuildACL( + passACL := libovsdbutil.BuildACLWithDefaultTier( GetAdvertisedNetworkSubnetsPassACLdbIDs(DefaultNetworkControllerName, networkName, networkID), types.AdvertisedNetworkPassPriority, strings.Join(passMatches, " || "), diff --git a/go-controller/pkg/ovn/gress_policy.go b/go-controller/pkg/ovn/gress_policy.go index c8445e6ed5..cb152f1e8b 100644 --- a/go-controller/pkg/ovn/gress_policy.go +++ b/go-controller/pkg/ovn/gress_policy.go @@ -281,7 +281,7 @@ func (gp *gressPolicy) buildLocalPodACLs(portGroupName string, aclLogging *libov ipBlockMatches := gp.getMatchFromIPBlock(lportMatch, l4Match) for ipBlockIdx, ipBlockMatch := range ipBlockMatches { aclIDs := gp.getNetpolACLDbIDs(ipBlockIdx, protocol) - acl := libovsdbutil.BuildACL(aclIDs, types.DefaultAllowPriority, ipBlockMatch, action, + acl := libovsdbutil.BuildACLWithDefaultTier(aclIDs, 
types.DefaultAllowPriority, ipBlockMatch, action, aclLogging, gp.aclPipeline) createdACLs = append(createdACLs, acl) } @@ -302,7 +302,7 @@ func (gp *gressPolicy) buildLocalPodACLs(portGroupName string, aclLogging *libov addrSetMatch = fmt.Sprintf("%s && %s && %s", l3Match, l4Match, lportMatch) } aclIDs := gp.getNetpolACLDbIDs(emptyIdx, protocol) - acl := libovsdbutil.BuildACL(aclIDs, types.DefaultAllowPriority, addrSetMatch, action, + acl := libovsdbutil.BuildACLWithDefaultTier(aclIDs, types.DefaultAllowPriority, addrSetMatch, action, aclLogging, gp.aclPipeline) if l3Match == "" { // if l3Match is empty, then no address sets are selected for a given gressPolicy. diff --git a/go-controller/pkg/ovn/udn_isolation.go b/go-controller/pkg/ovn/udn_isolation.go index 6c44489f9c..98c716d6cc 100644 --- a/go-controller/pkg/ovn/udn_isolation.go +++ b/go-controller/pkg/ovn/udn_isolation.go @@ -63,7 +63,7 @@ func (oc *DefaultNetworkController) setupUDNACLs(mgmtPortIPs []net.IP) error { pgName := libovsdbutil.GetPortGroupName(pgIDs) egressDenyIDs := oc.getUDNACLDbIDs(DenySecondaryACL, libovsdbutil.ACLEgress) match := libovsdbutil.GetACLMatch(pgName, "", libovsdbutil.ACLEgress) - egressDenyACL := libovsdbutil.BuildACL(egressDenyIDs, types.PrimaryUDNDenyPriority, match, nbdb.ACLActionDrop, nil, libovsdbutil.LportEgress) + egressDenyACL := libovsdbutil.BuildACLWithDefaultTier(egressDenyIDs, types.PrimaryUDNDenyPriority, match, nbdb.ACLActionDrop, nil, libovsdbutil.LportEgress) getARPMatch := func(direction libovsdbutil.ACLDirection) string { match := "(" @@ -89,15 +89,15 @@ func (oc *DefaultNetworkController) setupUDNACLs(mgmtPortIPs []net.IP) error { egressARPIDs := oc.getUDNACLDbIDs(AllowHostARPACL, libovsdbutil.ACLEgress) match = libovsdbutil.GetACLMatch(pgName, getARPMatch(libovsdbutil.ACLEgress), libovsdbutil.ACLEgress) - egressARPACL := libovsdbutil.BuildACL(egressARPIDs, types.PrimaryUDNAllowPriority, match, nbdb.ACLActionAllow, nil, libovsdbutil.LportEgress) + egressARPACL 
:= libovsdbutil.BuildACLWithDefaultTier(egressARPIDs, types.PrimaryUDNAllowPriority, match, nbdb.ACLActionAllow, nil, libovsdbutil.LportEgress) ingressDenyIDs := oc.getUDNACLDbIDs(DenySecondaryACL, libovsdbutil.ACLIngress) match = libovsdbutil.GetACLMatch(pgName, "", libovsdbutil.ACLIngress) - ingressDenyACL := libovsdbutil.BuildACL(ingressDenyIDs, types.PrimaryUDNDenyPriority, match, nbdb.ACLActionDrop, nil, libovsdbutil.LportIngress) + ingressDenyACL := libovsdbutil.BuildACLWithDefaultTier(ingressDenyIDs, types.PrimaryUDNDenyPriority, match, nbdb.ACLActionDrop, nil, libovsdbutil.LportIngress) ingressARPIDs := oc.getUDNACLDbIDs(AllowHostARPACL, libovsdbutil.ACLIngress) match = libovsdbutil.GetACLMatch(pgName, getARPMatch(libovsdbutil.ACLIngress), libovsdbutil.ACLIngress) - ingressARPACL := libovsdbutil.BuildACL(ingressARPIDs, types.PrimaryUDNAllowPriority, match, nbdb.ACLActionAllow, nil, libovsdbutil.LportIngress) + ingressARPACL := libovsdbutil.BuildACLWithDefaultTier(ingressARPIDs, types.PrimaryUDNAllowPriority, match, nbdb.ACLActionAllow, nil, libovsdbutil.LportIngress) ingressAllowIDs := oc.getUDNACLDbIDs(AllowHostSecondaryACL, libovsdbutil.ACLIngress) match = "(" @@ -114,7 +114,7 @@ func (oc *DefaultNetworkController) setupUDNACLs(mgmtPortIPs []net.IP) error { } match += ")" match = libovsdbutil.GetACLMatch(pgName, match, libovsdbutil.ACLIngress) - ingressAllowACL := libovsdbutil.BuildACL(ingressAllowIDs, types.PrimaryUDNAllowPriority, match, nbdb.ACLActionAllowRelated, nil, libovsdbutil.LportIngress) + ingressAllowACL := libovsdbutil.BuildACLWithDefaultTier(ingressAllowIDs, types.PrimaryUDNAllowPriority, match, nbdb.ACLActionAllowRelated, nil, libovsdbutil.LportIngress) ops, err := libovsdbops.CreateOrUpdateACLsOps(oc.nbClient, nil, oc.GetSamplingConfig(), egressDenyACL, egressARPACL, ingressARPACL, ingressDenyACL, ingressAllowACL) if err != nil { @@ -199,11 +199,11 @@ func (oc *DefaultNetworkController) setUDNPodOpenPortsOps(podNamespacedName stri 
ingressMatch, egressMatch, parseErr := getPortsMatches(podAnnotations, lspName) // don't return on parseErr, as we need to cleanup potentially present ACLs from the previous config ingressIDs := oc.getUDNOpenPortDbIDs(podNamespacedName, libovsdbutil.ACLIngress) - ingressACL := libovsdbutil.BuildACL(ingressIDs, types.PrimaryUDNAllowPriority, + ingressACL := libovsdbutil.BuildACLWithDefaultTier(ingressIDs, types.PrimaryUDNAllowPriority, ingressMatch, nbdb.ACLActionAllowRelated, nil, libovsdbutil.LportIngress) egressIDs := oc.getUDNOpenPortDbIDs(podNamespacedName, libovsdbutil.ACLEgress) - egressACL := libovsdbutil.BuildACL(egressIDs, types.PrimaryUDNAllowPriority, + egressACL := libovsdbutil.BuildACLWithDefaultTier(egressIDs, types.PrimaryUDNAllowPriority, egressMatch, nbdb.ACLActionAllow, nil, libovsdbutil.LportEgress) var err error @@ -282,7 +282,7 @@ func BuildAdvertisedNetworkSubnetsDropACL(advertisedNetworkSubnetsAddressSet add dropMatches = append(dropMatches, fmt.Sprintf("(ip6.src == $%s && ip6.dst == $%s)", v6AddrSet, v6AddrSet)) } - dropACL := libovsdbutil.BuildACL( + dropACL := libovsdbutil.BuildACLWithDefaultTier( GetAdvertisedNetworkSubnetsDropACLdbIDs(), types.AdvertisedNetworkDenyPriority, strings.Join(dropMatches, " || "), @@ -325,7 +325,7 @@ func (bnc *BaseNetworkController) addAdvertisedNetworkIsolation(nodeName string) ops = append(ops, addrOps...) 
if len(passMatches) > 0 { - passACL := libovsdbutil.BuildACL( + passACL := libovsdbutil.BuildACLWithDefaultTier( GetAdvertisedNetworkSubnetsPassACLdbIDs(bnc.controllerName, bnc.GetNetworkName(), bnc.GetNetworkID()), types.AdvertisedNetworkPassPriority, strings.Join(passMatches, " || "), diff --git a/go-controller/pkg/types/const.go b/go-controller/pkg/types/const.go index 2acd2d5a23..452421d289 100644 --- a/go-controller/pkg/types/const.go +++ b/go-controller/pkg/types/const.go @@ -74,7 +74,7 @@ const ( TransitSwitchToRouterPrefix = "tstor-" RouterToTransitSwitchPrefix = "rtots-" - // ACL Default Tier Priorities + // DefaultACLTier Priorities // Default routed multicast allow acl rule priority DefaultRoutedMcastAllowPriority = 1013 @@ -91,7 +91,8 @@ const ( // Deny priority for isolated advertised networks AdvertisedNetworkDenyPriority = 1050 - // ACL PlaceHolderACL Tier Priorities + // PrimaryACLTier Priorities + PrimaryUDNAllowPriority = 1001 // Default deny acl rule priority PrimaryUDNDenyPriority = 1000 @@ -99,8 +100,6 @@ const ( // ACL Tiers // Tier 0 is called Primary as it is evaluated before any other feature-related Tiers. // Currently used for User Defined Network Feature. - // NOTE: When we upgrade from an OVN version without tiers to the new version with - // tiers, all values in the new ACL.Tier column will be set to 0. PrimaryACLTier = 0 // Default Tier for all ACLs DefaultACLTier = 2 From 150775e15930467140bb43a56b71fa6c42e458e1 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Thu, 12 Jun 2025 10:19:10 +0200 Subject: [PATCH 034/181] [UDN isolation] Fix ACLs tier: move to the highest-prio Primary tier. Start using new BuildACL for all functions that need non-default tier. 
Signed-off-by: Nadia Pinaeva --- go-controller/pkg/libovsdb/util/acl.go | 3 +- go-controller/pkg/ovn/udn_isolation.go | 37 ++++++++------ go-controller/pkg/ovn/udn_isolation_test.go | 53 +++++++++++++++++++++ 3 files changed, 76 insertions(+), 17 deletions(-) create mode 100644 go-controller/pkg/ovn/udn_isolation_test.go diff --git a/go-controller/pkg/libovsdb/util/acl.go b/go-controller/pkg/libovsdb/util/acl.go index 798c1b773a..71608aac15 100644 --- a/go-controller/pkg/libovsdb/util/acl.go +++ b/go-controller/pkg/libovsdb/util/acl.go @@ -135,8 +135,7 @@ func BuildACL(dbIDs *libovsdbops.DbObjectIDs, priority int, match, action string } func BuildANPACL(dbIDs *libovsdbops.DbObjectIDs, priority int, match, action string, aclT ACLPipelineType, logLevels *ACLLoggingLevels) *nbdb.ACL { - anpACL := BuildACLWithDefaultTier(dbIDs, priority, match, action, logLevels, aclT) - anpACL.Tier = GetACLTier(dbIDs) + anpACL := BuildACL(dbIDs, priority, match, action, logLevels, aclT, GetACLTier(dbIDs)) return anpACL } diff --git a/go-controller/pkg/ovn/udn_isolation.go b/go-controller/pkg/ovn/udn_isolation.go index 98c716d6cc..0a69592aa3 100644 --- a/go-controller/pkg/ovn/udn_isolation.go +++ b/go-controller/pkg/ovn/udn_isolation.go @@ -30,6 +30,8 @@ const ( DenySecondaryACL = "DenySecondary" // OpenPortACLPrefix is used to build per-pod ACLs, pod name should be added to the prefix to build a unique name OpenPortACLPrefix = "OpenPort-" + // the same tier is used for all UDN isolation ACLs + isolationTier = types.PrimaryACLTier ) // setupUDNACLs should be called after the node's management port was configured @@ -63,7 +65,8 @@ func (oc *DefaultNetworkController) setupUDNACLs(mgmtPortIPs []net.IP) error { pgName := libovsdbutil.GetPortGroupName(pgIDs) egressDenyIDs := oc.getUDNACLDbIDs(DenySecondaryACL, libovsdbutil.ACLEgress) match := libovsdbutil.GetACLMatch(pgName, "", libovsdbutil.ACLEgress) - egressDenyACL := libovsdbutil.BuildACLWithDefaultTier(egressDenyIDs, 
types.PrimaryUDNDenyPriority, match, nbdb.ACLActionDrop, nil, libovsdbutil.LportEgress) + egressDenyACL := libovsdbutil.BuildACL(egressDenyIDs, types.PrimaryUDNDenyPriority, match, nbdb.ACLActionDrop, + nil, libovsdbutil.LportEgress, isolationTier) getARPMatch := func(direction libovsdbutil.ACLDirection) string { match := "(" @@ -89,15 +92,18 @@ func (oc *DefaultNetworkController) setupUDNACLs(mgmtPortIPs []net.IP) error { egressARPIDs := oc.getUDNACLDbIDs(AllowHostARPACL, libovsdbutil.ACLEgress) match = libovsdbutil.GetACLMatch(pgName, getARPMatch(libovsdbutil.ACLEgress), libovsdbutil.ACLEgress) - egressARPACL := libovsdbutil.BuildACLWithDefaultTier(egressARPIDs, types.PrimaryUDNAllowPriority, match, nbdb.ACLActionAllow, nil, libovsdbutil.LportEgress) + egressARPACL := libovsdbutil.BuildACL(egressARPIDs, types.PrimaryUDNAllowPriority, match, nbdb.ACLActionAllow, + nil, libovsdbutil.LportEgress, isolationTier) ingressDenyIDs := oc.getUDNACLDbIDs(DenySecondaryACL, libovsdbutil.ACLIngress) match = libovsdbutil.GetACLMatch(pgName, "", libovsdbutil.ACLIngress) - ingressDenyACL := libovsdbutil.BuildACLWithDefaultTier(ingressDenyIDs, types.PrimaryUDNDenyPriority, match, nbdb.ACLActionDrop, nil, libovsdbutil.LportIngress) + ingressDenyACL := libovsdbutil.BuildACL(ingressDenyIDs, types.PrimaryUDNDenyPriority, match, nbdb.ACLActionDrop, + nil, libovsdbutil.LportIngress, isolationTier) ingressARPIDs := oc.getUDNACLDbIDs(AllowHostARPACL, libovsdbutil.ACLIngress) match = libovsdbutil.GetACLMatch(pgName, getARPMatch(libovsdbutil.ACLIngress), libovsdbutil.ACLIngress) - ingressARPACL := libovsdbutil.BuildACLWithDefaultTier(ingressARPIDs, types.PrimaryUDNAllowPriority, match, nbdb.ACLActionAllow, nil, libovsdbutil.LportIngress) + ingressARPACL := libovsdbutil.BuildACL(ingressARPIDs, types.PrimaryUDNAllowPriority, match, nbdb.ACLActionAllow, + nil, libovsdbutil.LportIngress, isolationTier) ingressAllowIDs := oc.getUDNACLDbIDs(AllowHostSecondaryACL, libovsdbutil.ACLIngress) match = 
"(" @@ -114,7 +120,8 @@ func (oc *DefaultNetworkController) setupUDNACLs(mgmtPortIPs []net.IP) error { } match += ")" match = libovsdbutil.GetACLMatch(pgName, match, libovsdbutil.ACLIngress) - ingressAllowACL := libovsdbutil.BuildACLWithDefaultTier(ingressAllowIDs, types.PrimaryUDNAllowPriority, match, nbdb.ACLActionAllowRelated, nil, libovsdbutil.LportIngress) + ingressAllowACL := libovsdbutil.BuildACL(ingressAllowIDs, types.PrimaryUDNAllowPriority, match, nbdb.ACLActionAllowRelated, + nil, libovsdbutil.LportIngress, isolationTier) ops, err := libovsdbops.CreateOrUpdateACLsOps(oc.nbClient, nil, oc.GetSamplingConfig(), egressDenyACL, egressARPACL, ingressARPACL, ingressDenyACL, ingressAllowACL) if err != nil { @@ -199,12 +206,12 @@ func (oc *DefaultNetworkController) setUDNPodOpenPortsOps(podNamespacedName stri ingressMatch, egressMatch, parseErr := getPortsMatches(podAnnotations, lspName) // don't return on parseErr, as we need to cleanup potentially present ACLs from the previous config ingressIDs := oc.getUDNOpenPortDbIDs(podNamespacedName, libovsdbutil.ACLIngress) - ingressACL := libovsdbutil.BuildACLWithDefaultTier(ingressIDs, types.PrimaryUDNAllowPriority, - ingressMatch, nbdb.ACLActionAllowRelated, nil, libovsdbutil.LportIngress) + ingressACL := libovsdbutil.BuildACL(ingressIDs, types.PrimaryUDNAllowPriority, + ingressMatch, nbdb.ACLActionAllowRelated, nil, libovsdbutil.LportIngress, isolationTier) egressIDs := oc.getUDNOpenPortDbIDs(podNamespacedName, libovsdbutil.ACLEgress) - egressACL := libovsdbutil.BuildACLWithDefaultTier(egressIDs, types.PrimaryUDNAllowPriority, - egressMatch, nbdb.ACLActionAllow, nil, libovsdbutil.LportEgress) + egressACL := libovsdbutil.BuildACL(egressIDs, types.PrimaryUDNAllowPriority, + egressMatch, nbdb.ACLActionAllow, nil, libovsdbutil.LportEgress, isolationTier) var err error if ingressMatch == "" && egressMatch == "" || parseErr != nil { @@ -282,14 +289,14 @@ func 
BuildAdvertisedNetworkSubnetsDropACL(advertisedNetworkSubnetsAddressSet add dropMatches = append(dropMatches, fmt.Sprintf("(ip6.src == $%s && ip6.dst == $%s)", v6AddrSet, v6AddrSet)) } - dropACL := libovsdbutil.BuildACLWithDefaultTier( + dropACL := libovsdbutil.BuildACL( GetAdvertisedNetworkSubnetsDropACLdbIDs(), types.AdvertisedNetworkDenyPriority, strings.Join(dropMatches, " || "), nbdb.ACLActionDrop, nil, - libovsdbutil.LportEgressAfterLB) - dropACL.Tier = types.PrimaryACLTier + libovsdbutil.LportEgressAfterLB, + isolationTier) return dropACL } @@ -325,14 +332,14 @@ func (bnc *BaseNetworkController) addAdvertisedNetworkIsolation(nodeName string) ops = append(ops, addrOps...) if len(passMatches) > 0 { - passACL := libovsdbutil.BuildACLWithDefaultTier( + passACL := libovsdbutil.BuildACL( GetAdvertisedNetworkSubnetsPassACLdbIDs(bnc.controllerName, bnc.GetNetworkName(), bnc.GetNetworkID()), types.AdvertisedNetworkPassPriority, strings.Join(passMatches, " || "), nbdb.ACLActionPass, nil, - libovsdbutil.LportEgressAfterLB) - passACL.Tier = types.PrimaryACLTier + libovsdbutil.LportEgressAfterLB, + isolationTier) ops, err = libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, ops, nil, passACL) if err != nil { diff --git a/go-controller/pkg/ovn/udn_isolation_test.go b/go-controller/pkg/ovn/udn_isolation_test.go new file mode 100644 index 0000000000..2b3afda328 --- /dev/null +++ b/go-controller/pkg/ovn/udn_isolation_test.go @@ -0,0 +1,53 @@ +package ovn + +import ( + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("UDN Isolation", func() { + BeforeEach(func() { + Expect(config.PrepareTestConfig()).To(Succeed()) + }) + + It("ACLs should be updated to the Primary tier ", func() { + config.OVNKubernetesFeature.EnableMultiNetwork = true + config.OVNKubernetesFeature.EnableNetworkSegmentation = true + fakeController := getFakeController(DefaultNetworkControllerName) + + // build port group with one ACL that has default tier + pgIDs := fakeController.getSecondaryPodsPortGroupDbIDs() + pgName := libovsdbutil.GetPortGroupName(pgIDs) + egressDenyIDs := fakeController.getUDNACLDbIDs(DenySecondaryACL, libovsdbutil.ACLEgress) + match := libovsdbutil.GetACLMatch(pgName, "", libovsdbutil.ACLEgress) + // in the real code we use BuildACL here instead of BuildACLWithDefaultTier + egressDenyACL := libovsdbutil.BuildACLWithDefaultTier(egressDenyIDs, types.PrimaryUDNDenyPriority, match, nbdb.ACLActionDrop, + nil, libovsdbutil.LportEgress) + // required to make sure port group correctly references the ACL + egressDenyACL.UUID = egressDenyIDs.String() + "-UUID" + pg := libovsdbutil.BuildPortGroup(pgIDs, nil, []*nbdb.ACL{egressDenyACL}) + + nbClient, nbCleanup, err := libovsdbtest.NewNBTestHarness(libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{egressDenyACL, pg}, + }, nil) + Expect(err).NotTo(HaveOccurred()) + defer nbCleanup.Cleanup() + fakeController.nbClient = nbClient + + // now run the setupUDNACLs function which should create all ACLs and update the existing ACLs to the Primary tier + Expect(fakeController.setupUDNACLs(nil)).To(Succeed()) + + // verify that the egressDenyACL is updated to the Primary 0 + acls, err := libovsdbops.FindACLs(nbClient, []*nbdb.ACL{egressDenyACL}) + Expect(err).NotTo(HaveOccurred()) + Expect(acls).To(HaveLen(1)) + Expect(acls[0].Tier).To(Equal(types.PrimaryACLTier)) + }) +}) From 228d44402b2163074e3279e5e8080a9fdf8aa7d8 Mon Sep 17 00:00:00 2001 From: Martin Kennelly Date: Fri, 16 May 2025 04:36:39 +0100 
Subject: [PATCH 035/181] GH VM: remove more items after disk space limit reached Prune volumes Delete swap file Signed-off-by: Martin Kennelly --- .github/workflows/test.yml | 37 ++++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index faf2754076..67eb711b23 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -312,6 +312,7 @@ jobs: - name: Free up disk space run: | + df -h sudo rm -rf /usr/local/lib/android/sdk sudo apt-get update sudo eatmydata apt-get purge --auto-remove -y \ @@ -320,6 +321,17 @@ jobs: llvm-* microsoft-edge-stable mono-* \ msbuild mysql-server-core-* php-* php7* \ powershell temurin-* zulu-* + # clean unused packages + sudo apt-get autoclean + sudo apt-get autoremove -y + # clean apt cache + sudo apt-get clean + sudo docker system prune -af --volumes + df -h + sudo swapon --show + sudo swapoff -a + sudo rm -f /mnt/swapfile + df -h - name: Download test-image-master uses: actions/download-artifact@v4 @@ -503,6 +515,7 @@ jobs: - name: Free up disk space run: | + df -h sudo rm -rf /usr/local/lib/android/sdk sudo apt-get update sudo eatmydata apt-get purge --auto-remove -y \ @@ -511,7 +524,17 @@ jobs: llvm-* microsoft-edge-stable mono-* \ msbuild mysql-server-core-* php-* php7* \ powershell temurin-* zulu-* - sudo docker system prune -af + # clean unused packages + sudo apt-get autoclean + sudo apt-get autoremove -y + # clean apt cache + sudo apt-get clean + sudo docker system prune -af --volumes + df -h + sudo swapon --show + sudo swapoff -a + sudo rm -f /mnt/swapfile + df -h - name: Setup /mnt/runner directory run: | @@ -723,6 +746,7 @@ jobs: - name: Free up disk space run: | + df -h sudo rm -rf /usr/local/lib/android/sdk sudo apt-get update sudo eatmydata apt-get purge --auto-remove -y \ @@ -731,6 +755,17 @@ jobs: llvm-* microsoft-edge-stable mono-* \ msbuild mysql-server-core-* php-* php7* \ powershell temurin-* zulu-* + 
# clean unused packages + sudo apt-get autoclean + sudo apt-get autoremove -y + # clean apt cache + sudo apt-get clean + sudo docker system prune -af --volumes + df -h + sudo swapon --show + sudo swapoff -a + sudo rm -f /mnt/swapfile + df -h - name: Disable ufw # For IPv6 and Dualstack, ufw (Uncomplicated Firewall) should be disabled. From deff5e64ccc6069437bf7abf08f61522f73501a1 Mon Sep 17 00:00:00 2001 From: Peng Liu Date: Fri, 6 Jun 2025 05:24:48 +0000 Subject: [PATCH 036/181] Add the IP rule for a UDN only when it is advertised to the default VRF When a UDN is advertised to a non-default VRF, we shall not add the IP rule to the default VRF. Otherwise, if another UDN is advertised to the default VRF with the same subnet, the ingress traffic intended for the second UDN cannot be correctly routed to its respective VRF. Signed-off-by: Peng Liu --- go-controller/pkg/node/gateway_udn.go | 194 ++++++++++++-------- go-controller/pkg/node/gateway_udn_test.go | 200 ++++++++++++++++++++- 2 files changed, 315 insertions(+), 79 deletions(-) diff --git a/go-controller/pkg/node/gateway_udn.go b/go-controller/pkg/node/gateway_udn.go index 7b755806fd..b207a4f009 100644 --- a/go-controller/pkg/node/gateway_udn.go +++ b/go-controller/pkg/node/gateway_udn.go @@ -89,6 +89,10 @@ type UserDefinedNetworkGateway struct { // gwInterfaceIndex holds the link index of gateway interface gwInterfaceIndex int + + // save BGP state at the start of reconciliation loop run to handle it consistently throughout the run + isNetworkAdvertisedToDefaultVRF bool + isNetworkAdvertised bool } // UTILS Needed for UDN (also leveraged for default netInfo) in bridgeConfiguration @@ -366,18 +370,18 @@ func (udng *UserDefinedNetworkGateway) AddNetwork() error { return fmt.Errorf("could not add VRF %s routes for network %s, err: %v", vrfDeviceName, udng.GetNetworkName(), err) } - isNetworkAdvertised := util.IsPodNetworkAdvertisedAtNode(udng.NetInfo, udng.node.Name) + udng.updateAdvertisementStatus() // create
the iprules for this network - if err = udng.updateUDNVRFIPRules(isNetworkAdvertised); err != nil { + if err = udng.updateUDNVRFIPRules(); err != nil { return fmt.Errorf("failed to update IP rules for network %s: %w", udng.GetNetworkName(), err) } - if err = udng.updateAdvertisedUDNIsolationRules(isNetworkAdvertised); err != nil { + if err = udng.updateAdvertisedUDNIsolationRules(); err != nil { return fmt.Errorf("failed to update isolation rules for network %s: %w", udng.GetNetworkName(), err) } - if err := udng.updateUDNVRFIPRoute(isNetworkAdvertised); err != nil { + if err := udng.updateUDNVRFIPRoute(); err != nil { return fmt.Errorf("failed to update ip routes for network %s: %w", udng.GetNetworkName(), err) } @@ -455,18 +459,16 @@ func (udng *UserDefinedNetworkGateway) DelNetwork() error { } } - if util.IsPodNetworkAdvertisedAtNode(udng.NetInfo, udng.node.Name) { - err := udng.updateAdvertisedUDNIsolationRules(false) - if err != nil { - return fmt.Errorf("failed to remove advertised UDN isolation rules for network %s: %w", udng.GetNetworkName(), err) - } + err := udng.deleteAdvertisedUDNIsolationRules() + if err != nil { + return fmt.Errorf("failed to remove advertised UDN isolation rules for network %s: %w", udng.GetNetworkName(), err) } if err := udng.delMarkChain(); err != nil { return err } // delete the management port interface for this network - err := udng.deleteUDNManagementPort() + err = udng.deleteUDNManagementPort() if err != nil { return err } @@ -622,8 +624,7 @@ func (udng *UserDefinedNetworkGateway) computeRoutesForUDN(mpLink netlink.Link) // Route2: Add default route: default via 172.18.0.1 dev breth0 mtu 1400 // necessary for UDN CNI and host-networked pods default traffic to go to node's gatewayIP - isNetworkAdvertised := util.IsPodNetworkAdvertisedAtNode(udng.NetInfo, udng.node.Name) - defaultRoute, err := udng.getDefaultRoute(isNetworkAdvertised) + defaultRoute, err := udng.getDefaultRouteWithAdvertisedCheck() if err != nil { return nil, 
fmt.Errorf("unable to add default route for network %s, err: %v", udng.GetNetworkName(), err) } @@ -724,15 +725,7 @@ func (udng *UserDefinedNetworkGateway) computeRoutesForUDN(mpLink netlink.Link) return retVal, nil } -func (udng *UserDefinedNetworkGateway) getDefaultRoute(isNetworkAdvertised bool) ([]netlink.Route, error) { - vrfs := udng.GetPodNetworkAdvertisedOnNodeVRFs(udng.node.Name) - // If the network is advertised on a non default VRF then we should only consider routes received from external BGP - // device and not send any traffic based on default route similar to one present in default VRF. This is more important - // for VRF-Lite usecase where we need traffic to leave from vlan device instead of default gateway interface. - if isNetworkAdvertised && !slices.Contains(vrfs, types.DefaultNetworkName) { - return nil, nil - } - +func (udng *UserDefinedNetworkGateway) getDefaultRoute() ([]netlink.Route, error) { networkMTU := udng.NetInfo.MTU() if networkMTU == 0 { networkMTU = config.Default.MTU @@ -757,6 +750,16 @@ func (udng *UserDefinedNetworkGateway) getDefaultRoute(isNetworkAdvertised bool) return retVal, nil } +func (udng *UserDefinedNetworkGateway) getDefaultRouteWithAdvertisedCheck() ([]netlink.Route, error) { + // If the network is advertised on a non default VRF then we should only consider routes received from external BGP + // device and not send any traffic based on default route similar to one present in default VRF. This is more important + // for VRF-Lite usecase where we need traffic to leave from vlan device instead of default gateway interface. 
+ if udng.isNetworkAdvertised && !udng.isNetworkAdvertisedToDefaultVRF { + return nil, nil + } + return udng.getDefaultRoute() +} + // getV4MasqueradeIP returns the V4 management port masqueradeIP for this network func (udng *UserDefinedNetworkGateway) getV4MasqueradeIP() (*net.IPNet, error) { if !config.IPv4Mode { @@ -789,12 +792,15 @@ func (udng *UserDefinedNetworkGateway) getV6MasqueradeIP() (*net.IPNet, error) { // 2000: from all to 169.254.0.12 lookup 1007 // 2000: from all fwmark 0x1002 lookup 1009 // 2000: from all to 169.254.0.14 lookup 1009 -// If the network is advertised, an example of the rules we set for a network is: +// If the network is advertised to the default VRF, an example of the rules we set for a network is: // 2000: from all fwmark 0x1001 lookup 1007 // 2000: from all to 10.132.0.0/14 lookup 1007 // 2000: from all fwmark 0x1001 lookup 1009 // 2000: from all to 10.134.0.0/14 lookup 1009 -func (udng *UserDefinedNetworkGateway) constructUDNVRFIPRules(isNetworkAdvertised bool) ([]netlink.Rule, []netlink.Rule, error) { +// If the network is advertised to a non-default VRF, an example of the rules we set for a network is: +// 2000: from all fwmark 0x1001 lookup 1007 +// 2000: from all fwmark 0x1001 lookup 1009 +func (udng *UserDefinedNetworkGateway) constructUDNVRFIPRules() ([]netlink.Rule, []netlink.Rule, error) { var addIPRules []netlink.Rule var delIPRules []netlink.Rule var masqIPRules []netlink.Rule @@ -827,12 +833,18 @@ func (udng *UserDefinedNetworkGateway) constructUDNVRFIPRules(isNetworkAdvertise } } switch { - case !isNetworkAdvertised: - addIPRules = append(addIPRules, masqIPRules...) - delIPRules = append(delIPRules, subnetIPRules...) - default: + case udng.isNetworkAdvertisedToDefaultVRF: + // the network is advertised to the default VRF + delIPRules = append(delIPRules, masqIPRules...) addIPRules = append(addIPRules, subnetIPRules...)
+ case udng.isNetworkAdvertised: + // the network is advertised to a non-default VRF delIPRules = append(delIPRules, masqIPRules...) + delIPRules = append(delIPRules, subnetIPRules...) + default: + // the network is not advertised + delIPRules = append(delIPRules, subnetIPRules...) + addIPRules = append(addIPRules, masqIPRules...) } return addIPRules, delIPRules, nil } @@ -928,19 +940,20 @@ func (udng *UserDefinedNetworkGateway) doReconcile() error { return fmt.Errorf("openflow manager with default bridge configuration has not been provided for network %s", udng.GetNetworkName()) } + udng.updateAdvertisementStatus() + // update bridge configuration - isNetworkAdvertised := util.IsPodNetworkAdvertisedAtNode(udng.NetInfo, udng.node.Name) netConfig := udng.openflowManager.defaultBridge.getNetworkBridgeConfig(udng.GetNetworkName()) if netConfig == nil { return fmt.Errorf("missing bridge configuration for network %s", udng.GetNetworkName()) } - netConfig.advertised.Store(isNetworkAdvertised) + netConfig.advertised.Store(udng.isNetworkAdvertised) - if err := udng.updateUDNVRFIPRules(isNetworkAdvertised); err != nil { + if err := udng.updateUDNVRFIPRules(); err != nil { return fmt.Errorf("error while updating ip rule for UDN %s: %s", udng.GetNetworkName(), err) } - if err := udng.updateUDNVRFIPRoute(isNetworkAdvertised); err != nil { + if err := udng.updateUDNVRFIPRoute(); err != nil { return fmt.Errorf("error while updating ip route for UDN %s: %s", udng.GetNetworkName(), err) } @@ -954,16 +967,16 @@ func (udng *UserDefinedNetworkGateway) doReconcile() error { // let's sync these flows immediately udng.openflowManager.requestFlowSync() - if err := udng.updateAdvertisedUDNIsolationRules(isNetworkAdvertised); err != nil { + if err := udng.updateAdvertisedUDNIsolationRules(); err != nil { return fmt.Errorf("error while updating advertised UDN isolation rules for network %s: %w", udng.GetNetworkName(), err) } return nil } // updateUDNVRFIPRules updates IP rules for a network 
depending on whether the -// network is advertised or not -func (udng *UserDefinedNetworkGateway) updateUDNVRFIPRules(isNetworkAdvertised bool) error { - addIPRules, deleteIPRules, err := udng.constructUDNVRFIPRules(isNetworkAdvertised) +// network is advertised to the default VRF or not +func (udng *UserDefinedNetworkGateway) updateUDNVRFIPRules() error { + addIPRules, deleteIPRules, err := udng.constructUDNVRFIPRules() if err != nil { return fmt.Errorf("unable to get iprules for network %s, err: %v", udng.GetNetworkName(), err) } @@ -982,30 +995,40 @@ func (udng *UserDefinedNetworkGateway) updateUDNVRFIPRules(isNetworkAdvertised b } // Add or remove default route from a vrf device based on the network is -// advertised on its own network or default network -func (udng *UserDefinedNetworkGateway) updateUDNVRFIPRoute(isNetworkAdvertised bool) error { - vrfs := udng.GetPodNetworkAdvertisedOnNodeVRFs(udng.node.Name) - if isNetworkAdvertised && !slices.Contains(vrfs, types.DefaultNetworkName) { +// advertised on its own network or the default network +func (udng *UserDefinedNetworkGateway) updateUDNVRFIPRoute() error { + vrfName := util.GetNetworkVRFName(udng.NetInfo) + + switch { + case udng.isNetworkAdvertised && !udng.isNetworkAdvertisedToDefaultVRF: + // Remove default route for networks advertised to non-default VRF if err := udng.removeDefaultRouteFromVRF(); err != nil { - return fmt.Errorf("error while removing default route from VRF %s corresponding to network %s: %s", - util.GetNetworkVRFName(udng.NetInfo), udng.GetNetworkName(), err) + return fmt.Errorf("failed to remove default route from VRF %s for network %s: %v", + vrfName, udng.GetNetworkName(), err) } - } else if !isNetworkAdvertised || slices.Contains(vrfs, types.DefaultNetworkName) { - defaultRoute, err := udng.getDefaultRoute(isNetworkAdvertised) + + default: + // Add default route for networks that are either: + // - not advertised + // - advertised to default VRF + defaultRoute, err := 
udng.getDefaultRouteWithAdvertisedCheck() if err != nil { - return fmt.Errorf("unable to get default route for network %s, err: %v", udng.GetNetworkName(), err) + return fmt.Errorf("failed to get default route for network %s: %v", + udng.GetNetworkName(), err) } - if err = udng.vrfManager.AddVRFRoutes(util.GetNetworkVRFName(udng.NetInfo), defaultRoute); err != nil { - return fmt.Errorf("error while adding default route to VRF %s corresponding to network %s, err: %v", - util.GetNetworkVRFName(udng.NetInfo), udng.GetNetworkName(), err) + + if err = udng.vrfManager.AddVRFRoutes(vrfName, defaultRoute); err != nil { + return fmt.Errorf("failed to add default route to VRF %s for network %s: %v", + vrfName, udng.GetNetworkName(), err) } } + return nil } func (udng *UserDefinedNetworkGateway) removeDefaultRouteFromVRF() error { vrfDeviceName := util.GetNetworkVRFName(udng.NetInfo) - defaultRoute, err := udng.getDefaultRoute(false) + defaultRoute, err := udng.getDefaultRoute() if err != nil { return fmt.Errorf("unable to get default route for network %s, err: %v", udng.GetNetworkName(), err) } @@ -1034,39 +1057,22 @@ func (udng *UserDefinedNetworkGateway) removeDefaultRouteFromVRF() error { // comment "advertised UDNs V4 subnets" // elements = { 10.10.0.0/16 comment "cluster_udn_l3network" } // } -func (udng *UserDefinedNetworkGateway) updateAdvertisedUDNIsolationRules(isNetworkAdvertised bool) error { +func (udng *UserDefinedNetworkGateway) updateAdvertisedUDNIsolationRules() error { + switch { + case udng.isNetworkAdvertised: + return udng.addAdvertisedUDNIsolationRules() + default: + return udng.deleteAdvertisedUDNIsolationRules() + } +} + +func (udng *UserDefinedNetworkGateway) addAdvertisedUDNIsolationRules() error { nft, err := nodenft.GetNFTablesHelper() if err != nil { return fmt.Errorf("failed to get nftables helper: %v", err) } tx := nft.NewTransaction() - if !isNetworkAdvertised { - existingV4, err := nft.ListElements(context.TODO(), "set", 
nftablesAdvertisedUDNsSetV4) - if err != nil { - if !knftables.IsNotFound(err) { - return fmt.Errorf("could not list existing items in %s set: %w", nftablesAdvertisedUDNsSetV4, err) - } - } - existingV6, err := nft.ListElements(context.TODO(), "set", nftablesAdvertisedUDNsSetV6) - if err != nil { - if !knftables.IsNotFound(err) { - return fmt.Errorf("could not list existing items in %s set: %w", nftablesAdvertisedUDNsSetV6, err) - } - } - - for _, elem := range append(existingV4, existingV6...) { - if elem.Comment != nil && *elem.Comment == udng.GetNetworkName() { - tx.Delete(elem) - } - } - - if tx.NumOperations() == 0 { - return nil - } - return nft.Run(context.TODO(), tx) - } - for _, udnNet := range udng.Subnets() { set := nftablesAdvertisedUDNsSetV4 if utilnet.IsIPv6CIDR(udnNet.CIDR) { @@ -1085,3 +1091,41 @@ func (udng *UserDefinedNetworkGateway) updateAdvertisedUDNIsolationRules(isNetwo } return nft.Run(context.TODO(), tx) } + +func (udng *UserDefinedNetworkGateway) deleteAdvertisedUDNIsolationRules() error { + nft, err := nodenft.GetNFTablesHelper() + if err != nil { + return fmt.Errorf("failed to get nftables helper: %v", err) + } + tx := nft.NewTransaction() + + existingV4, err := nft.ListElements(context.TODO(), "set", nftablesAdvertisedUDNsSetV4) + if err != nil { + if !knftables.IsNotFound(err) { + return fmt.Errorf("could not list existing items in %s set: %w", nftablesAdvertisedUDNsSetV4, err) + } + } + existingV6, err := nft.ListElements(context.TODO(), "set", nftablesAdvertisedUDNsSetV6) + if err != nil { + if !knftables.IsNotFound(err) { + return fmt.Errorf("could not list existing items in %s set: %w", nftablesAdvertisedUDNsSetV6, err) + } + } + + for _, elem := range append(existingV4, existingV6...) 
{ + if elem.Comment != nil && *elem.Comment == udng.GetNetworkName() { + tx.Delete(elem) + } + } + + if tx.NumOperations() == 0 { + return nil + } + return nft.Run(context.TODO(), tx) +} + +func (udng *UserDefinedNetworkGateway) updateAdvertisementStatus() { + vrfs := udng.GetPodNetworkAdvertisedOnNodeVRFs(udng.node.Name) + udng.isNetworkAdvertised = len(vrfs) > 0 + udng.isNetworkAdvertisedToDefaultVRF = slices.Contains(vrfs, types.DefaultNetworkName) +} diff --git a/go-controller/pkg/node/gateway_udn_test.go b/go-controller/pkg/node/gateway_udn_test.go index 1227163480..ac964dfeec 100644 --- a/go-controller/pkg/node/gateway_udn_test.go +++ b/go-controller/pkg/node/gateway_udn_test.go @@ -1754,7 +1754,7 @@ func TestConstructUDNVRFIPRules(t *testing.T) { }) g.Expect(err).NotTo(HaveOccurred()) udnGateway.vrfTableId = test.vrftableID - rules, delRules, err := udnGateway.constructUDNVRFIPRules(false) + rules, delRules, err := udnGateway.constructUDNVRFIPRules() g.Expect(err).ToNot(HaveOccurred()) for i, rule := range rules { g.Expect(rule.Priority).To(Equal(test.expectedRules[i].priority)) @@ -1776,7 +1776,7 @@ func TestConstructUDNVRFIPRules(t *testing.T) { } } -func TestConstructUDNVRFIPRulesPodNetworkAdvertised(t *testing.T) { +func TestConstructUDNVRFIPRulesPodNetworkAdvertisedToTheDefaultNetwork(t *testing.T) { type testRule struct { priority int family int @@ -1941,7 +1941,198 @@ func TestConstructUDNVRFIPRulesPodNetworkAdvertised(t *testing.T) { }) g.Expect(err).NotTo(HaveOccurred()) udnGateway.vrfTableId = test.vrftableID - rules, delRules, err := udnGateway.constructUDNVRFIPRules(true) + udnGateway.isNetworkAdvertised = true + udnGateway.isNetworkAdvertisedToDefaultVRF = true + rules, delRules, err := udnGateway.constructUDNVRFIPRules() + g.Expect(err).ToNot(HaveOccurred()) + for i, rule := range rules { + g.Expect(rule.Priority).To(Equal(test.expectedRules[i].priority)) + g.Expect(rule.Table).To(Equal(test.expectedRules[i].table)) + 
g.Expect(rule.Family).To(Equal(test.expectedRules[i].family)) + if rule.Dst != nil { + g.Expect(*rule.Dst).To(Equal(test.expectedRules[i].dst)) + } else { + g.Expect(rule.Mark).To(Equal(test.expectedRules[i].mark)) + } + } + for i, rule := range delRules { + g.Expect(rule.Priority).To(Equal(test.deleteRules[i].priority)) + g.Expect(rule.Table).To(Equal(test.deleteRules[i].table)) + g.Expect(rule.Family).To(Equal(test.deleteRules[i].family)) + g.Expect(*rule.Dst).To(Equal(test.deleteRules[i].dst)) + } + }) + } +} + +func TestConstructUDNVRFIPRulesPodNetworkAdvertisedToNoneDefaultNetwork(t *testing.T) { + type testRule struct { + priority int + family int + table int + mark uint32 + dst net.IPNet + } + type testConfig struct { + desc string + vrftableID int + v4mode bool + v6mode bool + expectedRules []testRule + deleteRules []testRule + } + + tests := []testConfig{ + { + desc: "v4 rule test", + vrftableID: 1007, + expectedRules: []testRule{ + { + priority: UDNMasqueradeIPRulePriority, + family: netlink.FAMILY_V4, + table: 1007, + mark: 0x1003, + }, + }, + deleteRules: []testRule{ + { + priority: UDNMasqueradeIPRulePriority, + family: netlink.FAMILY_V4, + table: 1007, + dst: *util.GetIPNetFullMaskFromIP(ovntest.MustParseIP("169.254.0.16")), + }, + { + priority: UDNMasqueradeIPRulePriority, + family: netlink.FAMILY_V4, + table: 1007, + dst: *ovntest.MustParseIPNet("100.128.0.0/16"), + }, + }, + v4mode: true, + }, + { + desc: "v6 rule test", + vrftableID: 1009, + expectedRules: []testRule{ + { + priority: UDNMasqueradeIPRulePriority, + family: netlink.FAMILY_V6, + table: 1009, + mark: 0x1003, + }, + }, + deleteRules: []testRule{ + { + priority: UDNMasqueradeIPRulePriority, + family: netlink.FAMILY_V6, + table: 1009, + dst: *util.GetIPNetFullMaskFromIP(ovntest.MustParseIP("fd69::10")), + }, + { + priority: UDNMasqueradeIPRulePriority, + family: netlink.FAMILY_V6, + table: 1009, + dst: *ovntest.MustParseIPNet("ae70::/60"), + }, + }, + v6mode: true, + }, + { + desc: 
"dualstack rule test", + vrftableID: 1010, + expectedRules: []testRule{ + { + priority: UDNMasqueradeIPRulePriority, + family: netlink.FAMILY_V4, + table: 1010, + mark: 0x1003, + }, + { + priority: UDNMasqueradeIPRulePriority, + family: netlink.FAMILY_V6, + table: 1010, + mark: 0x1003, + }, + }, + deleteRules: []testRule{ + { + priority: UDNMasqueradeIPRulePriority, + family: netlink.FAMILY_V4, + table: 1010, + dst: *util.GetIPNetFullMaskFromIP(ovntest.MustParseIP("169.254.0.16")), + }, + { + priority: UDNMasqueradeIPRulePriority, + family: netlink.FAMILY_V6, + table: 1010, + dst: *util.GetIPNetFullMaskFromIP(ovntest.MustParseIP("fd69::10")), + }, + { + priority: UDNMasqueradeIPRulePriority, + family: netlink.FAMILY_V4, + table: 1010, + dst: *ovntest.MustParseIPNet("100.128.0.0/16"), + }, + { + priority: UDNMasqueradeIPRulePriority, + family: netlink.FAMILY_V6, + table: 1010, + dst: *ovntest.MustParseIPNet("ae70::/60"), + }, + }, + v4mode: true, + v6mode: true, + }, + } + config.Gateway.V6MasqueradeSubnet = "fd69::/112" + config.Gateway.V4MasqueradeSubnet = "169.254.0.0/16" + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + g := NewWithT(t) + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + }, + } + config.IPv4Mode = test.v4mode + config.IPv6Mode = test.v6mode + cidr := "" + if config.IPv4Mode { + cidr = "100.128.0.0/16/24" + } + if config.IPv4Mode && config.IPv6Mode { + cidr += ",ae70::/60" + } else if config.IPv6Mode { + cidr = "ae70::/60" + } + nad := ovntest.GenerateNAD("bluenet", "rednad", "greenamespace", + types.Layer3Topology, cidr, types.NetworkRolePrimary) + ovntest.AnnotateNADWithNetworkID("3", nad) + netInfo, err := util.ParseNADInfo(nad) + g.Expect(err).ToNot(HaveOccurred()) + mutableNetInfo := util.NewMutableNetInfo(netInfo) + mutableNetInfo.SetPodNetworkAdvertisedVRFs(map[string][]string{node.Name: {"bluenet"}}) + ofm := getDummyOpenflowManager() + // create dummy gateway interface(Need to run this test 
as root) + err = netlink.LinkAdd(&netlink.Dummy{ + LinkAttrs: netlink.LinkAttrs{ + Name: "breth0", + }, + }) + g.Expect(err).NotTo(HaveOccurred()) + udnGateway, err := NewUserDefinedNetworkGateway(mutableNetInfo, node, nil, nil, nil, nil, &gateway{openflowManager: ofm}) + g.Expect(err).NotTo(HaveOccurred()) + // delete dummy gateway interface after creating UDN gateway(Need to run this test as root) + err = netlink.LinkDel(&netlink.Dummy{ + LinkAttrs: netlink.LinkAttrs{ + Name: "breth0", + }, + }) + g.Expect(err).NotTo(HaveOccurred()) + udnGateway.vrfTableId = test.vrftableID + udnGateway.isNetworkAdvertised = true + udnGateway.isNetworkAdvertisedToDefaultVRF = false + rules, delRules, err := udnGateway.constructUDNVRFIPRules() g.Expect(err).ToNot(HaveOccurred()) for i, rule := range rules { g.Expect(rule.Priority).To(Equal(test.expectedRules[i].priority)) @@ -2072,7 +2263,8 @@ func TestUserDefinedNetworkGateway_updateAdvertisedUDNIsolationRules(t *testing. udng := &UserDefinedNetworkGateway{ NetInfo: netInfo, } - err = udng.updateAdvertisedUDNIsolationRules(tt.isNetworkAdvertised) + udng.isNetworkAdvertised = tt.isNetworkAdvertised + err = udng.updateAdvertisedUDNIsolationRules() g.Expect(err).NotTo(HaveOccurred()) v4Elems, err := nft.ListElements(context.TODO(), "set", nftablesAdvertisedUDNsSetV4) From 424a653f38514940a99fbfe528b587fdafd2e687 Mon Sep 17 00:00:00 2001 From: Patryk Diak Date: Fri, 13 Jun 2025 11:24:28 +0200 Subject: [PATCH 037/181] NodeTracker: Only update the node if the subnet changed for the specific network Previously every update `k8s.ovn.org/node-subnets` caused a call to `nt.updateNode` on every network. 
Signed-off-by: Patryk Diak --- go-controller/pkg/ovn/controller/services/node_tracker.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/go-controller/pkg/ovn/controller/services/node_tracker.go b/go-controller/pkg/ovn/controller/services/node_tracker.go index 0ee0997eda..9fecf577c1 100644 --- a/go-controller/pkg/ovn/controller/services/node_tracker.go +++ b/go-controller/pkg/ovn/controller/services/node_tracker.go @@ -119,7 +119,7 @@ func (nt *nodeTracker) Start(nodeInformer coreinformers.NodeInformer) (cache.Res // - node changes its zone // - node becomes a hybrid overlay node from a ovn node or vice verse // . No need to trigger update for any other field change. - if util.NodeSubnetAnnotationChanged(oldObj, newObj) || + if util.NodeSubnetAnnotationChangedForNetwork(oldObj, newObj, nt.netInfo.GetNetworkName()) || util.NodeL3GatewayAnnotationChanged(oldObj, newObj) || oldObj.Name != newObj.Name || util.NodeHostCIDRsAnnotationChanged(oldObj, newObj) || @@ -169,7 +169,7 @@ func (nt *nodeTracker) updateNodeInfo(nodeName, switchName, routerName, chassisI ni.podSubnets = append(ni.podSubnets, *podSubnets[i]) // de-pointer } - klog.Infof("Node %s switch + router changed, syncing services", nodeName) + klog.Infof("Node %s switch + router changed, syncing services in network %q", nodeName, nt.netInfo.GetNetworkName()) nt.Lock() defer nt.Unlock() @@ -208,7 +208,7 @@ func (nt *nodeTracker) removeNode(nodeName string) { // The switch exists when the HostSubnet annotation is set. // The gateway router will exist sometime after the L3Gateway annotation is set. 
func (nt *nodeTracker) updateNode(node *corev1.Node) { - klog.V(2).Infof("Processing possible switch / router updates for node %s", node.Name) + klog.V(2).Infof("Processing possible switch / router updates for node %s in network %q", node.Name, nt.netInfo.GetNetworkName()) var hsn []*net.IPNet var err error if nt.netInfo.TopologyType() == types.Layer2Topology { From 1fb898316de8119429412f0f7c2e8d44c0d3a864 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Mon, 16 Jun 2025 10:33:02 +0200 Subject: [PATCH 038/181] [e2e] kubevirt: generate test name that is compatible with file path. When collecting the logs after a failed test run, test name is used as a part of the file path, and double quotes are not allowed: ``` The following characters are not allowed in files that are uploaded due to limitations with certain file systems such as NTFS. To maintain file system agnostic behavior, these characters are intentionally not allowed to prevent potential problems with downloads on different file systems. 
``` Signed-off-by: Nadia Pinaeva --- test/e2e/kubevirt.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/kubevirt.go b/test/e2e/kubevirt.go index aa0a6a246c..9587e520e9 100644 --- a/test/e2e/kubevirt.go +++ b/test/e2e/kubevirt.go @@ -1947,7 +1947,7 @@ ip route add %[3]s via %[4]s if td.ingress != "" { ingress = td.ingress } - return fmt.Sprintf("after %s of %s with %s/%s with %q ingress", td.test.description, td.resource.description, role, td.topology, ingress) + return fmt.Sprintf("after %s of %s with %s/%s with %s ingress", td.test.description, td.resource.description, role, td.topology, ingress) }, Entry(nil, testData{ resource: virtualMachine, From 70fe56c65704da7377742d9e6de00e6d94e13754 Mon Sep 17 00:00:00 2001 From: Martin Kennelly Date: Tue, 4 Mar 2025 10:57:19 +0000 Subject: [PATCH 039/181] Add labels to filter e2e tests Signed-off-by: Martin Kennelly --- test/e2e/acl_logging.go | 8 +- test/e2e/e2e_suite_test.go | 27 +--- test/e2e/egress_firewall.go | 3 +- test/e2e/egress_services.go | 3 +- test/e2e/egressip.go | 3 +- test/e2e/egressqos.go | 3 +- test/e2e/external_gateways.go | 3 +- test/e2e/feature/features.go | 31 ++++ test/e2e/gateway_mtu.go | 3 +- test/e2e/kubevirt.go | 3 +- test/e2e/label/component.go | 7 + test/e2e/label/label.go | 42 +++++ test/e2e/label/override.go | 5 + test/e2e/multi_node_zones_interconnect.go | 3 +- test/e2e/multicast.go | 3 +- test/e2e/multihoming.go | 3 +- test/e2e/network_segmentation.go | 3 +- ...work_segmentation_endpointslices_mirror.go | 8 +- test/e2e/network_segmentation_policy.go | 3 +- test/e2e/network_segmentation_services.go | 3 +- test/e2e/node_ip_mac_migration.go | 3 +- test/e2e/ovspinning.go | 3 +- test/e2e/service.go | 7 +- test/e2e/status_manager.go | 3 +- test/e2e/testcontext.go | 143 ++++++++++++++++++ test/e2e/unidling.go | 3 +- 26 files changed, 279 insertions(+), 50 deletions(-) create mode 100644 test/e2e/feature/features.go create mode 100644 test/e2e/label/component.go create 
mode 100644 test/e2e/label/label.go create mode 100644 test/e2e/label/override.go create mode 100644 test/e2e/testcontext.go diff --git a/test/e2e/acl_logging.go b/test/e2e/acl_logging.go index 0ea81c6f71..c5c129769b 100644 --- a/test/e2e/acl_logging.go +++ b/test/e2e/acl_logging.go @@ -9,6 +9,8 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" + v1 "k8s.io/api/core/v1" knet "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -25,7 +27,7 @@ const ( pokeInterval = 1 * time.Second ) -var _ = Describe("ACL Logging for NetworkPolicy", func() { +var _ = Describe("ACL Logging for NetworkPolicy", feature.NetworkPolicy, func() { const ( denyAllPolicyName = "default-deny-all" initialDenyACLSeverity = "alert" @@ -172,7 +174,7 @@ var _ = Describe("ACL Logging for NetworkPolicy", func() { }) }) -var _ = Describe("ACL Logging for AdminNetworkPolicy and BaselineAdminNetworkPolicy", func() { +var _ = Describe("ACL Logging for AdminNetworkPolicy and BaselineAdminNetworkPolicy", feature.AdminNetworkPolicy, feature.BaselineNetworkPolicy, func() { const ( initialDenyACLSeverity = "alert" initialAllowACLSeverity = "notice" @@ -487,7 +489,7 @@ var _ = Describe("ACL Logging for AdminNetworkPolicy and BaselineAdminNetworkPol }) }) -var _ = Describe("ACL Logging for EgressFirewall", func() { +var _ = Describe("ACL Logging for EgressFirewall", feature.EgressFirewall, func() { const ( denyAllPolicyName = "default-deny-all" initialDenyACLSeverity = "alert" diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 0359b3461b..d96b488297 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -13,13 +13,13 @@ import ( "github.com/ovn-org/ovn-kubernetes/test/e2e/diagnostics" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" "github.com/ovn-org/ovn-kubernetes/test/e2e/ipalloc" + "github.com/ovn-org/ovn-kubernetes/test/e2e/label" + clientset 
"k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog" "k8s.io/kubernetes/test/e2e/framework" e2econfig "k8s.io/kubernetes/test/e2e/framework/config" - "k8s.io/kubernetes/test/e2e/framework/testfiles" - "k8s.io/kubernetes/test/utils/image" ) // https://github.com/kubernetes/kubernetes/blob/v1.16.4/test/e2e/e2e_test.go#L62 @@ -55,26 +55,7 @@ var _ = ginkgo.BeforeSuite(func() { func TestMain(m *testing.M) { // Register test flags, then parse flags. handleFlags() - - if framework.TestContext.ListImages { - for _, v := range image.GetImageConfigs() { - fmt.Println(v.GetE2EImage()) - } - os.Exit(0) - } - // reset provider to skeleton as Kubernetes test framework expects a supported provider - framework.TestContext.Provider = "skeleton" - framework.AfterReadingAllFlags(&framework.TestContext) - - // TODO: Deprecating repo-root over time... instead just use gobindata_util.go , see #23987. - // Right now it is still needed, for example by - // test/e2e/framework/ingress/ingress_utils.go - // for providing the optional secret.yaml file and by - // test/e2e/framework/util.go for cluster/log-dump. 
- if framework.TestContext.RepoRoot != "" { - testfiles.AddFileSource(testfiles.RootFileSource{Root: framework.TestContext.RepoRoot}) - } - + ProcessTestContextAndSetupLogging() os.Exit(m.Run()) } @@ -88,5 +69,5 @@ func TestE2E(t *testing.T) { } } gomega.RegisterFailHandler(framework.Fail) - ginkgo.RunSpecs(t, "E2E Suite") + ginkgo.RunSpecs(t, "E2E Suite", label.ComponentName()) } diff --git a/test/e2e/egress_firewall.go b/test/e2e/egress_firewall.go index e5a3f8518a..32974beb1c 100644 --- a/test/e2e/egress_firewall.go +++ b/test/e2e/egress_firewall.go @@ -19,6 +19,7 @@ import ( "github.com/onsi/ginkgo/extensions/table" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,7 +35,7 @@ import ( // Validate the egress firewall policies by applying a policy and verify // that both explicitly allowed traffic and implicitly denied traffic // is properly handled as defined in the crd configuration in the test. 
-var _ = ginkgo.Describe("e2e egress firewall policy validation", func() { +var _ = ginkgo.Describe("e2e egress firewall policy validation", feature.EgressFirewall, func() { const ( svcname string = "egress-firewall-policy" egressFirewallYamlFile string = "egress-fw.yml" diff --git a/test/e2e/egress_services.go b/test/e2e/egress_services.go index eb9cb38942..2afcb2edc8 100644 --- a/test/e2e/egress_services.go +++ b/test/e2e/egress_services.go @@ -13,6 +13,7 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" @@ -32,7 +33,7 @@ import ( utilnet "k8s.io/utils/net" ) -var _ = ginkgo.Describe("EgressService", func() { +var _ = ginkgo.Describe("EgressService", feature.EgressService, func() { const ( egressServiceYAML = "egress_service.yaml" externalContainerName = "external-container-for-egress-service" diff --git a/test/e2e/egressip.go b/test/e2e/egressip.go index 162af8fad0..7faad7185e 100644 --- a/test/e2e/egressip.go +++ b/test/e2e/egressip.go @@ -20,6 +20,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" @@ -377,7 +378,7 @@ type egressIPs struct { Items []egressIP `json:"items"` } -var _ = ginkgo.DescribeTableSubtree("e2e egress IP validation", func(netConfigParams networkAttachmentConfigParams) { +var _ = ginkgo.DescribeTableSubtree("e2e egress IP validation", feature.EgressIP, func(netConfigParams networkAttachmentConfigParams) { //FIXME: tests for CDN are designed for single stack clusters (IPv4 or IPv6) and 
must choose a single IP family for dual stack clusters. // Remove this restriction and allow the tests to detect if an IP family support is available. const ( diff --git a/test/e2e/egressqos.go b/test/e2e/egressqos.go index 4f6b282027..0d32a9a514 100644 --- a/test/e2e/egressqos.go +++ b/test/e2e/egressqos.go @@ -10,6 +10,7 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "golang.org/x/sync/errgroup" v1 "k8s.io/api/core/v1" @@ -19,7 +20,7 @@ import ( e2enode "k8s.io/kubernetes/test/e2e/framework/node" ) -var _ = ginkgo.Describe("e2e EgressQoS validation", func() { +var _ = ginkgo.Describe("e2e EgressQoS validation", feature.EgressQos, func() { const ( egressQoSYaml = "egressqos.yaml" srcPodName = "src-dscp-pod" diff --git a/test/e2e/external_gateways.go b/test/e2e/external_gateways.go index c7bf83d9f9..4a119ae96b 100644 --- a/test/e2e/external_gateways.go +++ b/test/e2e/external_gateways.go @@ -12,6 +12,7 @@ import ( "time" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" @@ -74,7 +75,7 @@ type gatewayTestIPs struct { targetIPs []string } -var _ = ginkgo.Describe("External Gateway", func() { +var _ = ginkgo.Describe("External Gateway", feature.ExternalGateway, func() { const ( gwTCPPort = 80 diff --git a/test/e2e/feature/features.go b/test/e2e/feature/features.go new file mode 100644 index 0000000000..842b0474e6 --- /dev/null +++ b/test/e2e/feature/features.go @@ -0,0 +1,31 @@ +package feature + +import ( + "github.com/onsi/ginkgo/v2" + "github.com/ovn-org/ovn-kubernetes/test/e2e/label" +) + +var ( + Service = New("Service") + NetworkPolicy = New("NetworkPolicy") + AdminNetworkPolicy = New("AdminNetworkPolicy") + BaselineNetworkPolicy = 
New("BaselineNetworkPolicy") + NetworkSegmentation = New("NetworkSegmentation") + EgressIP = New("EgressIP") + EgressService = New("EgressService") + EgressFirewall = New("EgressFirewall") + EgressQos = New("EgressQos") + ExternalGateway = New("ExternalGateway") + DisablePacketMTUCheck = New("DisablePacketMTUCheck") + VirtualMachineSupport = New("VirtualMachineSupport") + Interconnect = New("Interconnect") + Multicast = New("Multicast") + MultiHoming = New("MultiHoming") + NodeIPMACMigration = New("NodeIPMACMigration") + OVSCPUPin = New("OVSCPUPin") + Unidle = New("Unidle") +) + +func New(name string) ginkgo.Labels { + return label.New("Feature", name).GinkgoLabel() +} diff --git a/test/e2e/gateway_mtu.go b/test/e2e/gateway_mtu.go index 386ecba5d3..ec3b3b48d9 100644 --- a/test/e2e/gateway_mtu.go +++ b/test/e2e/gateway_mtu.go @@ -5,12 +5,13 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" v1 "k8s.io/api/core/v1" e2enode "k8s.io/kubernetes/test/e2e/framework/node" ) -var _ = ginkgo.Describe("Check whether gateway-mtu-support annotation on node is set based on disable-pkt-mtu-check value", func() { +var _ = ginkgo.Describe("Check whether gateway-mtu-support annotation on node is set based on disable-pkt-mtu-check value", feature.DisablePacketMTUCheck, func() { var nodes *v1.NodeList f := wrappedTestFramework("gateway-mtu-support") diff --git a/test/e2e/kubevirt.go b/test/e2e/kubevirt.go index 9587e520e9..d6a774ec4d 100644 --- a/test/e2e/kubevirt.go +++ b/test/e2e/kubevirt.go @@ -24,6 +24,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" "github.com/ovn-org/ovn-kubernetes/test/e2e/diagnostics" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" infraapi 
"github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api"
@@ -97,7 +98,7 @@ func newControllerRuntimeClient() (crclient.Client, error) {
 	})
 }
 
-var _ = Describe("Kubevirt Virtual Machines", func() {
+var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, func() {
 	var (
 		fr  = wrappedTestFramework("kv-live-migration")
 		d   = diagnostics.New(fr)
diff --git a/test/e2e/label/component.go b/test/e2e/label/component.go
new file mode 100644
index 0000000000..59e61165c5
--- /dev/null
+++ b/test/e2e/label/component.go
@@ -0,0 +1,7 @@
+package label
+
+import "github.com/onsi/ginkgo/v2"
+
+func ComponentName() ginkgo.Labels {
+	return NewComponent("ovn-kubernetes")
+}
diff --git a/test/e2e/label/label.go b/test/e2e/label/label.go
new file mode 100644
index 0000000000..6f81c9ceb1
--- /dev/null
+++ b/test/e2e/label/label.go
@@ -0,0 +1,42 @@
+package label
+
+import "github.com/onsi/ginkgo/v2"
+
+// Label is a wrapper for ginkgo label. We need a wrapper because we want to constrain inputs. If Key and Value are not
+// empty, then it will be concatenated together separated by ':'. If Key is not empty and Value is empty, then only the Key is used.
+type Label struct {
+	// Key is mandatory
+	Key string
+	// Value is optional
+	Value string
+}
+
+func (l Label) GinkgoLabel() ginkgo.Labels {
+	if l.Value == "" {
+		return ginkgo.Label(l.Key)
+	}
+	return ginkgo.Label(l.Key + ":" + l.Value)
+}
+
+func NewComponent(name string) ginkgo.Labels {
+	return New(name, "").GinkgoLabel()
+}
+
+func New(parts ...string) Label {
+	if len(parts) == 0 || len(parts) > 2 {
+		panic("invalid number of label constituents")
+	}
+	key, val := processOverrides(parts[0]), processOverrides(parts[1])
+	return Label{
+		Key:   key,
+		Value: val,
+	}
+}
+
+func processOverrides(s string) string {
+	overRide, ok := overrideMap[s]
+	if !ok {
+		return s
+	}
+	return overRide
+}
diff --git a/test/e2e/label/override.go b/test/e2e/label/override.go
new file mode 100644
index 0000000000..31aa0fa0cd
--- /dev/null
+++ b/test/e2e/label/override.go
@@ -0,0 +1,5 @@
+package label
+
+// overrideMap is used to rewrite label key and/or values. For example, if you want to rewrite Feature to a downstream specific name,
+// you would add "Feature" as a key to the overrides map and the value to be what you wish to rewrite it to.
+var overrideMap = map[string]string{} diff --git a/test/e2e/multi_node_zones_interconnect.go b/test/e2e/multi_node_zones_interconnect.go index 5737ec3680..0a358cd7ea 100644 --- a/test/e2e/multi_node_zones_interconnect.go +++ b/test/e2e/multi_node_zones_interconnect.go @@ -9,6 +9,7 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -87,7 +88,7 @@ func checkPodsInterconnectivity(clientPod, serverPod *v1.Pod, namespace string, return nil } -var _ = ginkgo.Describe("Multi node zones interconnect", func() { +var _ = ginkgo.Describe("Multi node zones interconnect", feature.Interconnect, func() { const ( serverPodNodeName = "ovn-control-plane" diff --git a/test/e2e/multicast.go b/test/e2e/multicast.go index f90cf37b5f..d9b2bc3d9c 100644 --- a/test/e2e/multicast.go +++ b/test/e2e/multicast.go @@ -8,6 +8,7 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -24,7 +25,7 @@ type nodeInfo struct { nodeIP string } -var _ = ginkgo.Describe("Multicast", func() { +var _ = ginkgo.Describe("Multicast", feature.Multicast, func() { fr := wrappedTestFramework("multicast") diff --git a/test/e2e/multihoming.go b/test/e2e/multihoming.go index 49884ab548..a2f611676b 100644 --- a/test/e2e/multihoming.go +++ b/test/e2e/multihoming.go @@ -14,6 +14,7 @@ import ( "github.com/onsi/ginkgo/v2" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/docker/docker/client" v1 "k8s.io/api/core/v1" @@ -37,7 +38,7 @@ const ( nodeHostnameKey = "kubernetes.io/hostname" ) -var _ = Describe("Multi Homing", func() { +var _ = Describe("Multi Homing", feature.MultiHoming, func() { const ( podName = "tinypod" secondaryNetworkCIDR = "10.128.0.0/16" diff --git a/test/e2e/network_segmentation.go b/test/e2e/network_segmentation.go index a3105f2ab0..83fc059678 100644 --- a/test/e2e/network_segmentation.go +++ b/test/e2e/network_segmentation.go @@ -20,6 +20,7 @@ import ( "github.com/onsi/ginkgo/v2" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" v1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" @@ -44,7 +45,7 @@ const openDefaultPortsAnnotation = "k8s.ovn.org/open-default-ports" const RequiredUDNNamespaceLabel = "k8s.ovn.org/primary-user-defined-network" const OvnPodAnnotationName = "k8s.ovn.org/pod-networks" -var _ = Describe("Network Segmentation", func() { +var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { f := wrappedTestFramework("network-segmentation") // disable automatic namespace creation, we need to add the required UDN label f.SkipNamespaceCreation = true diff --git a/test/e2e/network_segmentation_endpointslices_mirror.go b/test/e2e/network_segmentation_endpointslices_mirror.go index 171073bdae..3790b2d568 100644 --- a/test/e2e/network_segmentation_endpointslices_mirror.go +++ b/test/e2e/network_segmentation_endpointslices_mirror.go @@ -5,14 +5,14 @@ import ( "fmt" "time" - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" nadclient "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" @@ -23,7 +23,7 @@ import ( e2eservice "k8s.io/kubernetes/test/e2e/framework/service" ) -var _ = Describe("Network Segmentation EndpointSlices mirroring", func() { +var _ = Describe("Network Segmentation EndpointSlices mirroring", feature.NetworkSegmentation, func() { f := wrappedTestFramework("endpointslices-mirror") f.SkipNamespaceCreation = true Context("a user defined primary network", func() { diff --git a/test/e2e/network_segmentation_policy.go b/test/e2e/network_segmentation_policy.go index 10e2b0f0e7..30bc1dc0a5 100644 --- a/test/e2e/network_segmentation_policy.go +++ b/test/e2e/network_segmentation_policy.go @@ -9,6 +9,7 @@ import ( nadclient "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" v1 "k8s.io/api/core/v1" knet "k8s.io/api/networking/v1" @@ -18,7 +19,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" ) -var _ = ginkgo.Describe("Network Segmentation: Network Policies", func() { +var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.NetworkSegmentation, func() { f := wrappedTestFramework("network-segmentation") f.SkipNamespaceCreation = true diff --git a/test/e2e/network_segmentation_services.go b/test/e2e/network_segmentation_services.go index d580bc190f..6f0822064f 100644 --- 
a/test/e2e/network_segmentation_services.go +++ b/test/e2e/network_segmentation_services.go @@ -12,6 +12,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" @@ -30,7 +31,7 @@ import ( utilnet "k8s.io/utils/net" ) -var _ = Describe("Network Segmentation: services", func() { +var _ = Describe("Network Segmentation: services", feature.NetworkSegmentation, func() { f := wrappedTestFramework("udn-services") f.SkipNamespaceCreation = true diff --git a/test/e2e/node_ip_mac_migration.go b/test/e2e/node_ip_mac_migration.go index d84ce6d737..a74d161c0d 100644 --- a/test/e2e/node_ip_mac_migration.go +++ b/test/e2e/node_ip_mac_migration.go @@ -18,6 +18,7 @@ import ( . "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" @@ -35,7 +36,7 @@ import ( utilnet "k8s.io/utils/net" ) -var _ = Describe("Node IP and MAC address migration", func() { +var _ = Describe("Node IP and MAC address migration", feature.NodeIPMACMigration, func() { const ( namespacePrefix = "node-ip-migration" podWorkerNodeName = "primary" diff --git a/test/e2e/ovspinning.go b/test/e2e/ovspinning.go index af72285ead..f3d94b530b 100644 --- a/test/e2e/ovspinning.go +++ b/test/e2e/ovspinning.go @@ -7,13 +7,14 @@ import ( "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" "k8s.io/kubernetes/test/e2e/framework" e2enode 
"k8s.io/kubernetes/test/e2e/framework/node" ) -var _ = ginkgo.Describe("OVS CPU affinity pinning", func() { +var _ = ginkgo.Describe("OVS CPU affinity pinning", feature.OVSCPUPin, func() { f := wrappedTestFramework("ovspinning") diff --git a/test/e2e/service.go b/test/e2e/service.go index 664a01e8ea..0df017d523 100644 --- a/test/e2e/service.go +++ b/test/e2e/service.go @@ -17,6 +17,7 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" @@ -52,7 +53,7 @@ var ( reportPath string ) -var _ = ginkgo.Describe("Services", func() { +var _ = ginkgo.Describe("Services", feature.Service, func() { const ( serviceName = "testservice" echoServerPodNameTemplate = "echo-server-pod-%d" @@ -1424,7 +1425,7 @@ func getServiceBackendsFromPod(execPod *v1.Pod, serviceIP string, servicePort in // service ip; if the traffic was DNAT-ed to the same src pod (hairpin/loopback case) - // the srcIP of reply traffic is SNATed to the special masqurade IP 169.254.0.5 // or "fd69::5" -var _ = ginkgo.Describe("Service Hairpin SNAT", func() { +var _ = ginkgo.Describe("Service Hairpin SNAT", feature.Service, func() { const ( svcName = "service-hairpin-test" backendName = "hairpin-backend-pod" @@ -1522,7 +1523,7 @@ var _ = ginkgo.Describe("Service Hairpin SNAT", func() { }) -var _ = ginkgo.Describe("Load Balancer Service Tests with MetalLB", func() { +var _ = ginkgo.Describe("Load Balancer Service Tests with MetalLB", feature.Service, func() { const ( svcName = "lbservice-test" diff --git a/test/e2e/status_manager.go b/test/e2e/status_manager.go index b6e7a9bfeb..bae96224ae 100644 --- a/test/e2e/status_manager.go +++ b/test/e2e/status_manager.go @@ -9,6 +9,7 @@ import ( 
"github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -16,7 +17,7 @@ import ( e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" ) -var _ = ginkgo.Describe("Status manager validation", func() { +var _ = ginkgo.Describe("Status manager validation", feature.EgressFirewall, func() { const ( svcname string = "status-manager" egressFirewallYamlFile string = "egress-fw.yml" diff --git a/test/e2e/testcontext.go b/test/e2e/testcontext.go new file mode 100644 index 0000000000..1b8104ab44 --- /dev/null +++ b/test/e2e/testcontext.go @@ -0,0 +1,143 @@ +package e2e + +import ( + "errors" + "os" + "path" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/reporters" + ginkgotypes "github.com/onsi/ginkgo/v2/types" + "github.com/onsi/gomega" + + "k8s.io/klog/v2" + "k8s.io/kubernetes/test/e2e/framework" +) + +// ProcessTestContextAndSetupLogging copied up k8 e2e test framework pkg because we need to remove the label check. +func ProcessTestContextAndSetupLogging() { + t := &framework.TestContext + // default copied from k8 e2e test framework pkg + // Reconfigure gomega defaults. The poll interval should be suitable + // for most tests. The timeouts are more subjective and tests may want + // to override them, but these defaults are still better for E2E than the + // ones from Gomega (1s timeout, 10ms interval). 
+ var defaultTimeouts = framework.TimeoutContext{ + Poll: 2 * time.Second, // from the former e2e/framework/pod poll interval + PodStart: 5 * time.Minute, + PodStartShort: 2 * time.Minute, + PodStartSlow: 15 * time.Minute, + PodDelete: 5 * time.Minute, + ClaimProvision: 5 * time.Minute, + ClaimProvisionShort: 1 * time.Minute, + DataSourceProvision: 5 * time.Minute, + ClaimBound: 3 * time.Minute, + PVReclaim: 3 * time.Minute, + PVBound: 3 * time.Minute, + PVCreate: 3 * time.Minute, + PVDelete: 5 * time.Minute, + PVDeleteSlow: 20 * time.Minute, + SnapshotCreate: 5 * time.Minute, + SnapshotDelete: 5 * time.Minute, + SnapshotControllerMetrics: 5 * time.Minute, + SystemPodsStartup: 10 * time.Minute, + NodeSchedulable: 30 * time.Minute, + SystemDaemonsetStartup: 5 * time.Minute, + NodeNotReady: 3 * time.Minute, + } + gomega.SetDefaultEventuallyPollingInterval(defaultTimeouts.Poll) + gomega.SetDefaultConsistentlyPollingInterval(defaultTimeouts.Poll) + gomega.SetDefaultEventuallyTimeout(defaultTimeouts.PodStart) + gomega.SetDefaultConsistentlyDuration(defaultTimeouts.PodStartShort) + + // Allow 1% of nodes to be unready (statistically) - relevant for large clusters. + if t.AllowedNotReadyNodes == 0 { + t.AllowedNotReadyNodes = t.CloudConfig.NumNodes / 100 + } + + // Make sure that all test runs have a valid TestContext.CloudConfig.Provider. + // TODO: whether and how long this code is needed is getting discussed + // in https://github.com/kubernetes/kubernetes/issues/70194. + if t.Provider == "" { + t.Provider = "skeleton" + } + + var err error + t.CloudConfig.Provider, err = framework.SetupProviderConfig(t.Provider) + if err != nil { + if os.IsNotExist(errors.Unwrap(err)) { + klog.Errorf("Unknown provider %q. ", t.Provider) + } else { + klog.Errorf("Failed to setup provider config for %q: %v", t.Provider, err) + } + os.Exit(1) + } + + if t.ReportDir != "" { + // Create the directory before running the suite. 
If + // --report-dir is not unusable, we should report + // that as soon as possible. This will be done by each worker + // in parallel, so we will get "exists" error in most of them. + if err := os.MkdirAll(t.ReportDir, 0777); err != nil && !os.IsExist(err) { + klog.Errorf("Create report dir: %v", err) + os.Exit(1) + } + ginkgoDir := path.Join(t.ReportDir, "ginkgo") + if t.ReportCompleteGinkgo || t.ReportCompleteJUnit { + if err := os.MkdirAll(ginkgoDir, 0777); err != nil && !os.IsExist(err) { + klog.Errorf("Create /ginkgo: %v", err) + os.Exit(1) + } + } + + if t.ReportCompleteGinkgo { + ginkgo.ReportAfterSuite("Ginkgo JSON report", func(report ginkgo.Report) { + gomega.Expect(reporters.GenerateJSONReport(report, path.Join(ginkgoDir, "report.json"))).NotTo(gomega.HaveOccurred()) + }) + ginkgo.ReportAfterSuite("JUnit XML report", func(report ginkgo.Report) { + gomega.Expect(reporters.GenerateJUnitReport(report, path.Join(ginkgoDir, "report.xml"))).NotTo(gomega.HaveOccurred()) + }) + } + + ginkgo.ReportAfterSuite("OVN-Kubernetes e2e JUnit report", func(report ginkgo.Report) { + // With Ginkgo v1, we used to write one file per + // parallel node. Now Ginkgo v2 automatically merges + // all results into a report for us. The 01 suffix is + // kept in case that users expect files to be called + // "junit_.xml". + junitReport := path.Join(t.ReportDir, "junit_"+t.ReportPrefix+"01.xml") + + // writeJUnitReport generates a JUnit file in the e2e + // report directory that is shorter than the one + // normally written by `ginkgo --junit-report`. This is + // needed because the full report can become too large + // for tools like Spyglass + // (https://github.com/kubernetes/kubernetes/issues/111510). + gomega.Expect(writeJUnitReport(report, junitReport)).NotTo(gomega.HaveOccurred()) + }) + } +} + +// writeJUnitReport generates a JUnit file that is shorter than the one +// normally written by `ginkgo --junit-report`. 
This is needed because the full +// report can become too large for tools like Spyglass +// (https://github.com/kubernetes/kubernetes/issues/111510). +func writeJUnitReport(report ginkgo.Report, filename string) error { + config := reporters.JunitReportConfig{ + // Remove details for specs where we don't care. + OmitTimelinesForSpecState: ginkgotypes.SpecStatePassed | ginkgotypes.SpecStateSkipped, + + // Don't write . The same text is + // also in the full text for the failure. If we were to write + // both, then tools like kettle and spyglass would concatenate + // the two strings and thus show duplicated information. + OmitFailureMessageAttr: true, + + // All labels are also part of the spec texts in inline [] tags, + // so we don't need to write them separately. + OmitSpecLabels: true, + } + + return reporters.GenerateJUnitReportWithConfig(report, filename, config) +} diff --git a/test/e2e/unidling.go b/test/e2e/unidling.go index 9566b3190f..9f7535a9b2 100644 --- a/test/e2e/unidling.go +++ b/test/e2e/unidling.go @@ -14,6 +14,7 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -37,7 +38,7 @@ const ( // Validate that Services with the well-known annotation k8s.ovn.org/idled-at // generate a NeedPods Event if the service doesn´t have endpoints and // OVN EmptyLB-Backends feature is enabled -var _ = ginkgo.Describe("Unidling", func() { +var _ = ginkgo.Describe("Unidling", feature.Unidle, func() { const ( serviceName = "empty-service" From 19be786c5ae7074f8e0885fe81898642ef7b7f8e Mon Sep 17 00:00:00 2001 From: Artyom Babiy Date: Thu, 19 Jun 2025 20:05:23 +0300 Subject: [PATCH 040/181] use slash as path separator for some sysctl commands Convert `.` path separators to `/` when enabling forwarding for bridgeName, interfaceName and mgmtPortName to avoid errors when those names contain `.` characters e.g. 
`foo.200` Fixes: #5283 Signed-off-by: Artyom Babiy --- go-controller/pkg/node/gateway_init.go | 6 ++++-- go-controller/pkg/node/gateway_init_linux_test.go | 6 +++--- go-controller/pkg/node/gateway_udn.go | 13 +++++++++---- go-controller/pkg/node/gateway_udn_test.go | 6 +++--- 4 files changed, 19 insertions(+), 12 deletions(-) diff --git a/go-controller/pkg/node/gateway_init.go b/go-controller/pkg/node/gateway_init.go index c7553f7d0d..28e0fa669b 100644 --- a/go-controller/pkg/node/gateway_init.go +++ b/go-controller/pkg/node/gateway_init.go @@ -27,8 +27,10 @@ import ( func bridgedGatewayNodeSetup(nodeName, bridgeName, physicalNetworkName string) (string, error) { // IPv6 forwarding is enabled globally if config.IPv4Mode { - stdout, stderr, err := util.RunSysctl("-w", fmt.Sprintf("net.ipv4.conf.%s.forwarding=1", bridgeName)) - if err != nil || stdout != fmt.Sprintf("net.ipv4.conf.%s.forwarding = 1", bridgeName) { + // we use forward slash as path separator to allow dotted bridgeName e.g. 
foo.200 + stdout, stderr, err := util.RunSysctl("-w", fmt.Sprintf("net/ipv4/conf/%s/forwarding=1", bridgeName)) + // sysctl output enforces dot as path separator + if err != nil || stdout != fmt.Sprintf("net.ipv4.conf.%s.forwarding = 1", strings.ReplaceAll(bridgeName, ".", "/")) { return "", fmt.Errorf("could not set the correct forwarding value for interface %s: stdout: %v, stderr: %v, err: %v", bridgeName, stdout, stderr, err) } diff --git a/go-controller/pkg/node/gateway_init_linux_test.go b/go-controller/pkg/node/gateway_init_linux_test.go index e9f248c419..0f6eab05ce 100644 --- a/go-controller/pkg/node/gateway_init_linux_test.go +++ b/go-controller/pkg/node/gateway_init_linux_test.go @@ -166,7 +166,7 @@ func shareGatewayInterfaceTest(app *cli.App, testNS ns.NetNS, }) if config.IPv4Mode { fexec.AddFakeCmd(&ovntest.ExpectedCmd{ - Cmd: "sysctl -w net.ipv4.conf.breth0.forwarding=1", + Cmd: "sysctl -w net/ipv4/conf/breth0/forwarding=1", Output: "net.ipv4.conf.breth0.forwarding = 1", }) } @@ -595,7 +595,7 @@ func shareGatewayInterfaceDPUTest(app *cli.App, testNS ns.NetNS, }) if config.IPv4Mode { fexec.AddFakeCmd(&ovntest.ExpectedCmd{ - Cmd: "sysctl -w net.ipv4.conf.brp0.forwarding=1", + Cmd: "sysctl -w net/ipv4/conf/brp0/forwarding=1", Output: "net.ipv4.conf.brp0.forwarding = 1", }) } @@ -1057,7 +1057,7 @@ OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0` }) if config.IPv4Mode { fexec.AddFakeCmd(&ovntest.ExpectedCmd{ - Cmd: "sysctl -w net.ipv4.conf.breth0.forwarding=1", + Cmd: "sysctl -w net/ipv4/conf/breth0/forwarding=1", Output: "net.ipv4.conf.breth0.forwarding = 1", }) } diff --git a/go-controller/pkg/node/gateway_udn.go b/go-controller/pkg/node/gateway_udn.go index b207a4f009..7ab5b50cc9 100644 --- a/go-controller/pkg/node/gateway_udn.go +++ b/go-controller/pkg/node/gateway_udn.go @@ -5,6 +5,7 @@ import ( "fmt" "net" "slices" + "strings" "sync/atomic" "time" @@ -522,8 +523,10 @@ func (udng *UserDefinedNetworkGateway) addUDNManagementPort() 
(netlink.Link, err // STEP3 // IPv6 forwarding is enabled globally if ipv4, _ := udng.IPMode(); ipv4 { - stdout, stderr, err := util.RunSysctl("-w", fmt.Sprintf("net.ipv4.conf.%s.forwarding=1", interfaceName)) - if err != nil || stdout != fmt.Sprintf("net.ipv4.conf.%s.forwarding = 1", interfaceName) { + // we use forward slash as path separator to allow dotted interfaceName e.g. foo.200 + stdout, stderr, err := util.RunSysctl("-w", fmt.Sprintf("net/ipv4/conf/%s/forwarding=1", interfaceName)) + // sysctl output enforces dot as path separator + if err != nil || stdout != fmt.Sprintf("net.ipv4.conf.%s.forwarding = 1", strings.ReplaceAll(interfaceName, ".", "/")) { return nil, fmt.Errorf("could not set the correct forwarding value for interface %s: stdout: %v, stderr: %v, err: %v", interfaceName, stdout, stderr, err) } @@ -891,8 +894,10 @@ func addRPFilterLooseModeForManagementPort(mgmtPortName string) error { rpFilterLooseMode := "2" // TODO: Convert testing framework to mock golang module utilities. Example: // result, err := sysctl.Sysctl(fmt.Sprintf("net/ipv4/conf/%s/rp_filter", types.K8sMgmtIntfName), rpFilterLooseMode) - stdout, stderr, err := util.RunSysctl("-w", fmt.Sprintf("net.ipv4.conf.%s.rp_filter=%s", mgmtPortName, rpFilterLooseMode)) - if err != nil || stdout != fmt.Sprintf("net.ipv4.conf.%s.rp_filter = %s", mgmtPortName, rpFilterLooseMode) { + // we use forward slash as path separator to allow dotted mgmtPortName e.g. 
foo.200 + stdout, stderr, err := util.RunSysctl("-w", fmt.Sprintf("net/ipv4/conf/%s/rp_filter=%s", mgmtPortName, rpFilterLooseMode)) + // sysctl output enforces dot as path separator + if err != nil || stdout != fmt.Sprintf("net.ipv4.conf.%s.rp_filter = %s", strings.ReplaceAll(mgmtPortName, ".", "/"), rpFilterLooseMode) { return fmt.Errorf("could not set the correct rp_filter value for interface %s: stdout: %v, stderr: %v, err: %v", mgmtPortName, stdout, stderr, err) } diff --git a/go-controller/pkg/node/gateway_udn_test.go b/go-controller/pkg/node/gateway_udn_test.go index ac964dfeec..8c38c7ec5b 100644 --- a/go-controller/pkg/node/gateway_udn_test.go +++ b/go-controller/pkg/node/gateway_udn_test.go @@ -59,14 +59,14 @@ func getCreationFakeCommands(fexec *ovntest.FakeExec, mgtPort, mgtPortMAC, netNa }) fexec.AddFakeCmd(&ovntest.ExpectedCmd{ - Cmd: "sysctl -w net.ipv4.conf." + mgtPort + ".forwarding=1", + Cmd: "sysctl -w net/ipv4/conf/" + mgtPort + "/forwarding=1", Output: "net.ipv4.conf." 
+ mgtPort + ".forwarding = 1", }) } func getRPFilterLooseModeFakeCommands(fexec *ovntest.FakeExec) { fexec.AddFakeCmd(&ovntest.ExpectedCmd{ - Cmd: "sysctl -w net.ipv4.conf.ovn-k8s-mp3.rp_filter=2", + Cmd: "sysctl -w net/ipv4/conf/ovn-k8s-mp3/rp_filter=2", Output: "net.ipv4.conf.ovn-k8s-mp3.rp_filter = 2", }) } @@ -148,7 +148,7 @@ func setUpGatewayFakeOVSCommands(fexec *ovntest.FakeExec) { }) if config.IPv4Mode { fexec.AddFakeCmd(&ovntest.ExpectedCmd{ - Cmd: "sysctl -w net.ipv4.conf.breth0.forwarding=1", + Cmd: "sysctl -w net/ipv4/conf/breth0/forwarding=1", Output: "net.ipv4.conf.breth0.forwarding = 1", }) } From 21e4f0eb5e2e3467bbf33487ddafd8ea0e1dbda0 Mon Sep 17 00:00:00 2001 From: Surya Seetharaman Date: Sun, 8 Jun 2025 22:06:38 +0200 Subject: [PATCH 041/181] Fix the host drop rules to match on new state When we did the NFT rules to block traffic going from host to advertised UDN pod subnets, we did not mean to also block replies from host to advertised UDN pod subnets for traffic initiated by UDN pods. Given the rules lie in OUTPUT table this would match on replies as well, so traffic like pod to kube-apiserver host-networked pod backend is broken because of this. Let's change the rule to only match on NEW state which is what we wanted to do in the original change. The current rules unintentionally block traffic in reverse direction. 
Signed-off-by: Surya Seetharaman --- go-controller/pkg/node/gateway_shared_intf.go | 8 +- test/e2e/route_advertisements.go | 75 ++++++++++++++++++- 2 files changed, 78 insertions(+), 5 deletions(-) diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index 2654291850..d763089082 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -3040,8 +3040,8 @@ func getIPv(ipnet *net.IPNet) string { // chain udn-bgp-drop { // comment "Drop traffic generated locally towards advertised UDN subnets" // type filter hook output priority filter; policy accept; -// ip daddr @advertised-udn-subnets-v4 counter packets 0 bytes 0 drop -// ip6 daddr @advertised-udn-subnets-v6 counter packets 0 bytes 0 drop +// ct state new ip daddr @advertised-udn-subnets-v4 counter packets 0 bytes 0 drop +// ct state new ip6 daddr @advertised-udn-subnets-v6 counter packets 0 bytes 0 drop // } func configureAdvertisedUDNIsolationNFTables() error { counterIfDebug := "" @@ -3083,11 +3083,11 @@ func configureAdvertisedUDNIsolationNFTables() error { tx.Add(&knftables.Rule{ Chain: nftablesUDNBGPOutputChain, - Rule: knftables.Concat(fmt.Sprintf("ip daddr @%s", nftablesAdvertisedUDNsSetV4), counterIfDebug, "drop"), + Rule: knftables.Concat("ct state new", fmt.Sprintf("ip daddr @%s", nftablesAdvertisedUDNsSetV4), counterIfDebug, "drop"), }) tx.Add(&knftables.Rule{ Chain: nftablesUDNBGPOutputChain, - Rule: knftables.Concat(fmt.Sprintf("ip6 daddr @%s", nftablesAdvertisedUDNsSetV6), counterIfDebug, "drop"), + Rule: knftables.Concat("ct state new", fmt.Sprintf("ip6 daddr @%s", nftablesAdvertisedUDNsSetV6), counterIfDebug, "drop"), }) return nft.Run(context.TODO(), tx) } diff --git a/test/e2e/route_advertisements.go b/test/e2e/route_advertisements.go index bee77d639f..f6dcdfc800 100644 --- a/test/e2e/route_advertisements.go +++ b/test/e2e/route_advertisements.go @@ -3,6 +3,7 @@ package e2e import ( 
"context" "fmt" + "math/rand" "net" "strings" @@ -19,6 +20,7 @@ import ( infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -532,7 +534,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" var svcNetA, svcNetB, svcNetDefault *corev1.Service var cudnA, cudnB *udnv1.ClusterUserDefinedNetwork var ra *rav1.RouteAdvertisements - + var hostNetworkPort int ginkgo.BeforeEach(func() { if cudnATemplate.Spec.Network.Topology == udnv1.NetworkTopologyLayer2 && isLocalGWModeEnabled() { e2eskipper.Skipf("Advertising Layer2 UDNs is not currently supported in LGW") @@ -584,6 +586,30 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" nodes, err = e2enode.GetReadySchedulableNodes(context.TODO(), f.ClientSet) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(len(nodes.Items)).To(gomega.BeNumerically(">", 2)) + // create host networked pod + ginkgo.By("Creating host network pods on each node") + // get random port in case the test retries and port is already in use on host node + min := 25000 + max := 25999 + hostNetworkPort = rand.Intn(max-min+1) + min + framework.Logf("Random host networked port chosen: %d", hostNetworkPort) + for _, node := range nodes.Items { + // this creates a udp / http netexec listener which is able to receive the "hostname" + // command. 
We use this to validate that each endpoint is received at least once + args := []string{ + "netexec", + fmt.Sprintf("--http-port=%d", hostNetworkPort), + fmt.Sprintf("--udp-port=%d", hostNetworkPort), + } + + // create host networked Pods + _, err := createPod(f, node.Name+"-hostnet-ep", node.Name, f.Namespace.Name, []string{}, map[string]string{}, func(p *v1.Pod) { + p.Spec.Containers[0].Args = args + p.Spec.HostNetwork = true + }) + + framework.ExpectNoError(err) + } ginkgo.By("Setting up pods and services") podsNetA = []*corev1.Pod{} @@ -901,6 +927,53 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" framework.ExpectNoError(err) return clientNode, "", net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true }), + ginkgo.Entry("UDN pod to local node should not work", + func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + clientPod := podsNetA[0] + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), clientPod.Spec.NodeName, metav1.GetOptions{}) + framework.ExpectNoError(err) + nodeIP := node.Status.Addresses[ipFamilyIndex].Address + // FIXME: add the host process socket to the VRF for this test to work. + // This scenario is something that is not supported yet. So the test will continue to fail. + // This works the same on both normal UDNs and advertised UDNs. + // So because the process is not bound to the VRF, packet reaches the host but kernel sends a RESET. So its not code 28 but code7. 
+ // 10:59:55.351067 319594f193d4d_3 P ifindex 191 0a:58:5d:5d:01:05 ethertype IPv4 (0x0800), length 80: (tos 0x0, ttl 64, id 57264, + // offset 0, flags [DF], proto TCP (6), length 60) + // 93.93.1.5.36363 > 172.18.0.2.25022: Flags [S], cksum 0x0aa5 (incorrect -> 0xe0b7), seq 3879759281, win 65280, + // options [mss 1360,sackOK,TS val 3006752321 ecr 0,nop,wscale 7], length 0 + // 10:59:55.352404 ovn-k8s-mp87 In ifindex 186 0a:58:5d:5d:01:01 ethertype IPv4 (0x0800), length 80: (tos 0x0, ttl 63, id 57264, + // offset 0, flags [DF], proto TCP (6), length 60) + // 93.93.1.5.36363 > 172.18.0.2.25022: Flags [S], cksum 0xe0b7 (correct), seq 3879759281, win 65280, + // options [mss 1360,sackOK,TS val 3006752321 ecr 0,nop,wscale 7], length 0 + // 10:59:55.352461 ovn-k8s-mp87 Out ifindex 186 0a:58:5d:5d:01:02 ethertype IPv4 (0x0800), length 60: (tos 0x0, ttl 64, id 0, + // offset 0, flags [DF], proto TCP (6), length 40) + // 172.18.0.2.25022 > 93.93.1.5.36363: Flags [R.], cksum 0x609d (correct), seq 0, ack 3879759282, win 0, length 0 + // 10:59:55.352927 319594f193d4d_3 Out ifindex 191 0a:58:5d:5d:01:02 ethertype IPv4 (0x0800), length 60: (tos 0x0, ttl 64, id 0, + // offset 0, flags [DF], proto TCP (6), length 40) + // 172.18.0.2.25022 > 93.93.1.5.36363: Flags [R.], cksum 0x609d (correct), seq 0, ack 1, win 0, length 0 + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(hostNetworkPort)) + "/hostname", "", true + }), + ginkgo.Entry("UDN pod to a different node should work", + func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + clientPod := podsNetA[0] + // podsNetA[0] and podsNetA[2] are on different nodes so we can pick the node of podsNetA[2] as the different node destination + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), podsNetA[2].Spec.NodeName, metav1.GetOptions{}) + framework.ExpectNoError(err) + nodeIP := node.Status.Addresses[ipFamilyIndex].Address 
+ errBool := false + out := "" + if cudnATemplate.Spec.Network.Topology == udnv1.NetworkTopologyLayer2 { + // FIXME: fix asymmetry in L2 UDNs + // bad behaviour: packet is coming from other node -> entering eth0 -> breth0 and here kernel drops the packet since + // rp_filter is set to 1 in breth0 and there is an iprule that sends the packet to mpX interface so kernel sees the packet + // having return path different from the incoming interface. + // The SNAT to nodeIP should fix this. + // this causes curl timeout with code 28 + errBool = true + out = curlConnectionTimeoutCode + } + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(hostNetworkPort)) + "/hostname", out, errBool + }), ) }, From abd8e0fd8700d60c0902543f1e6b92b564e2c1e7 Mon Sep 17 00:00:00 2001 From: PGhiorzo Date: Mon, 23 Jun 2025 16:22:40 +0200 Subject: [PATCH 042/181] Modified line 277 to let kind-helm.sh run also behind a proxy Signed-off-by: PGhiorzo --- contrib/kind-helm.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/contrib/kind-helm.sh b/contrib/kind-helm.sh index c682c94ac7..8a22c0a234 100755 --- a/contrib/kind-helm.sh +++ b/contrib/kind-helm.sh @@ -274,7 +274,12 @@ build_ovn_image() { # Find all built executables, but ignore the 'windows' directory if it exists find ../../go-controller/_output/go/bin/ -maxdepth 1 -type f -exec cp -f {} . \; echo "ref: $(git rev-parse --symbolic-full-name HEAD) commit: $(git rev-parse HEAD)" > git_info - $OCI_BIN build -t "${OVN_IMAGE}" -f Dockerfile.fedora . + $OCI_BIN build \ + --build-arg http_proxy="$http_proxy" \ + --build-arg https_proxy="$https_proxy" \ + --network=host \ + -t "${OVN_IMAGE}" \ + -f Dockerfile.fedora . 
popd } From ff1b163cc83b072a1c534f912a92a0f4a0b0b9c8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Jun 2025 04:47:39 +0000 Subject: [PATCH 043/181] Bump the go_modules group across 3 directories with 4 updates Bumps the go_modules group with 2 updates in the /go-controller directory: [golang.org/x/net](https://github.com/golang/net) and [k8s.io/kubernetes](https://github.com/kubernetes/kubernetes). Bumps the go_modules group with 1 update in the /test/conformance directory: [golang.org/x/net](https://github.com/golang/net). Bumps the go_modules group with 3 updates in the /test/e2e directory: [golang.org/x/net](https://github.com/golang/net), [k8s.io/kubernetes](https://github.com/kubernetes/kubernetes) and [github.com/docker/docker](https://github.com/docker/docker). Updates `golang.org/x/net` from 0.30.0 to 0.38.0 - [Commits](https://github.com/golang/net/compare/v0.30.0...v0.38.0) Updates `k8s.io/kubernetes` from 1.32.3 to 1.32.6 - [Release notes](https://github.com/kubernetes/kubernetes/releases) - [Commits](https://github.com/kubernetes/kubernetes/compare/v1.32.3...v1.32.6) Updates `golang.org/x/crypto` from 0.28.0 to 0.36.0 - [Commits](https://github.com/golang/crypto/compare/v0.28.0...v0.36.0) Updates `golang.org/x/net` from 0.23.0 to 0.38.0 - [Commits](https://github.com/golang/net/compare/v0.30.0...v0.38.0) Updates `golang.org/x/net` from 0.30.0 to 0.38.0 - [Commits](https://github.com/golang/net/compare/v0.30.0...v0.38.0) Updates `k8s.io/kubernetes` from 1.32.3 to 1.32.6 - [Release notes](https://github.com/kubernetes/kubernetes/releases) - [Commits](https://github.com/kubernetes/kubernetes/compare/v1.32.3...v1.32.6) Updates `golang.org/x/crypto` from 0.35.0 to 0.36.0 - [Commits](https://github.com/golang/crypto/compare/v0.28.0...v0.36.0) Updates `github.com/docker/docker` from 26.1.4+incompatible to 26.1.5+incompatible - [Release notes](https://github.com/docker/docker/releases) - 
[Commits](https://github.com/docker/docker/compare/v26.1.4...v26.1.5) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-version: 0.38.0 dependency-type: direct:production dependency-group: go_modules - dependency-name: k8s.io/kubernetes dependency-version: 1.32.6 dependency-type: direct:production dependency-group: go_modules - dependency-name: golang.org/x/crypto dependency-version: 0.36.0 dependency-type: indirect dependency-group: go_modules - dependency-name: golang.org/x/net dependency-version: 0.38.0 dependency-type: indirect dependency-group: go_modules - dependency-name: golang.org/x/net dependency-version: 0.38.0 dependency-type: indirect dependency-group: go_modules - dependency-name: k8s.io/kubernetes dependency-version: 1.32.6 dependency-type: direct:production dependency-group: go_modules - dependency-name: golang.org/x/crypto dependency-version: 0.36.0 dependency-type: indirect dependency-group: go_modules - dependency-name: github.com/docker/docker dependency-version: 26.1.5+incompatible dependency-type: direct:production dependency-group: go_modules ... 
Signed-off-by: dependabot[bot] --- go-controller/go.mod | 14 +- go-controller/go.sum | 28 +- .../golang.org/x/net/context/context.go | 112 +- .../vendor/golang.org/x/net/context/go17.go | 72 - .../vendor/golang.org/x/net/context/go19.go | 20 - .../golang.org/x/net/context/pre_go17.go | 300 ---- .../golang.org/x/net/context/pre_go19.go | 109 -- .../golang.org/x/net/html/atom/table.go | 1256 +++++++++-------- .../vendor/golang.org/x/net/html/doc.go | 7 +- .../vendor/golang.org/x/net/html/doctype.go | 2 +- .../vendor/golang.org/x/net/html/foreign.go | 3 +- .../vendor/golang.org/x/net/html/iter.go | 56 + .../vendor/golang.org/x/net/html/node.go | 4 + .../vendor/golang.org/x/net/html/parse.go | 12 +- .../vendor/golang.org/x/net/html/token.go | 18 +- .../x/net/http2/client_conn_pool.go | 8 +- .../vendor/golang.org/x/net/http2/config.go | 2 +- .../golang.org/x/net/http2/config_go124.go | 2 +- .../vendor/golang.org/x/net/http2/frame.go | 15 +- .../vendor/golang.org/x/net/http2/http2.go | 59 +- .../vendor/golang.org/x/net/http2/server.go | 185 ++- .../golang.org/x/net/http2/transport.go | 690 +++++---- .../golang.org/x/net/http2/unencrypted.go | 32 + .../vendor/golang.org/x/net/http2/write.go | 3 +- .../x/net/internal/httpcommon/ascii.go | 53 + .../httpcommon}/headermap.go | 24 +- .../x/net/internal/httpcommon/request.go | 467 ++++++ .../net/internal/socket/zsys_openbsd_ppc64.go | 28 +- .../internal/socket/zsys_openbsd_riscv64.go | 28 +- .../vendor/golang.org/x/net/proxy/per_host.go | 8 +- .../golang.org/x/net/websocket/websocket.go | 5 +- .../golang.org/x/sync/errgroup/errgroup.go | 3 +- .../golang.org/x/sync/errgroup/go120.go | 13 - .../golang.org/x/sync/errgroup/pre_go120.go | 14 - .../vendor/golang.org/x/sys/unix/auxv.go | 36 + .../golang.org/x/sys/unix/auxv_unsupported.go | 13 + .../golang.org/x/sys/unix/ioctl_linux.go | 96 ++ .../vendor/golang.org/x/sys/unix/mkerrors.sh | 12 + .../x/sys/unix/syscall_dragonfly.go | 12 + .../golang.org/x/sys/unix/syscall_linux.go | 1 + 
.../golang.org/x/sys/unix/syscall_solaris.go | 87 ++ .../x/sys/unix/syscall_zos_s390x.go | 104 +- .../golang.org/x/sys/unix/zerrors_linux.go | 51 +- .../x/sys/unix/zerrors_linux_386.go | 23 + .../x/sys/unix/zerrors_linux_amd64.go | 23 + .../x/sys/unix/zerrors_linux_arm.go | 23 + .../x/sys/unix/zerrors_linux_arm64.go | 25 + .../x/sys/unix/zerrors_linux_loong64.go | 23 + .../x/sys/unix/zerrors_linux_mips.go | 23 + .../x/sys/unix/zerrors_linux_mips64.go | 23 + .../x/sys/unix/zerrors_linux_mips64le.go | 23 + .../x/sys/unix/zerrors_linux_mipsle.go | 23 + .../x/sys/unix/zerrors_linux_ppc.go | 23 + .../x/sys/unix/zerrors_linux_ppc64.go | 23 + .../x/sys/unix/zerrors_linux_ppc64le.go | 23 + .../x/sys/unix/zerrors_linux_riscv64.go | 23 + .../x/sys/unix/zerrors_linux_s390x.go | 23 + .../x/sys/unix/zerrors_linux_sparc64.go | 23 + .../golang.org/x/sys/unix/zsyscall_linux.go | 10 + .../x/sys/unix/zsyscall_solaris_amd64.go | 114 ++ .../x/sys/unix/zsysnum_linux_386.go | 4 + .../x/sys/unix/zsysnum_linux_amd64.go | 4 + .../x/sys/unix/zsysnum_linux_arm.go | 4 + .../x/sys/unix/zsysnum_linux_arm64.go | 4 + .../x/sys/unix/zsysnum_linux_loong64.go | 4 + .../x/sys/unix/zsysnum_linux_mips.go | 4 + .../x/sys/unix/zsysnum_linux_mips64.go | 4 + .../x/sys/unix/zsysnum_linux_mips64le.go | 4 + .../x/sys/unix/zsysnum_linux_mipsle.go | 4 + .../x/sys/unix/zsysnum_linux_ppc.go | 4 + .../x/sys/unix/zsysnum_linux_ppc64.go | 4 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 4 + .../x/sys/unix/zsysnum_linux_riscv64.go | 4 + .../x/sys/unix/zsysnum_linux_s390x.go | 4 + .../x/sys/unix/zsysnum_linux_sparc64.go | 4 + .../x/sys/unix/ztypes_darwin_amd64.go | 60 + .../x/sys/unix/ztypes_darwin_arm64.go | 60 + .../golang.org/x/sys/unix/ztypes_linux.go | 144 +- .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 6 + .../golang.org/x/sys/windows/dll_windows.go | 11 +- .../x/sys/windows/syscall_windows.go | 36 +- .../golang.org/x/sys/windows/types_windows.go | 127 ++ .../x/sys/windows/zsyscall_windows.go | 71 + 
.../vendor/golang.org/x/term/README.md | 11 +- .../golang.org/x/text/language/parse.go | 2 +- go-controller/vendor/modules.txt | 27 +- test/conformance/go.mod | 10 +- test/conformance/go.sum | 20 +- test/e2e/go.mod | 16 +- test/e2e/go.sum | 32 +- 90 files changed, 3399 insertions(+), 1829 deletions(-) delete mode 100644 go-controller/vendor/golang.org/x/net/context/go17.go delete mode 100644 go-controller/vendor/golang.org/x/net/context/go19.go delete mode 100644 go-controller/vendor/golang.org/x/net/context/pre_go17.go delete mode 100644 go-controller/vendor/golang.org/x/net/context/pre_go19.go create mode 100644 go-controller/vendor/golang.org/x/net/html/iter.go create mode 100644 go-controller/vendor/golang.org/x/net/http2/unencrypted.go create mode 100644 go-controller/vendor/golang.org/x/net/internal/httpcommon/ascii.go rename go-controller/vendor/golang.org/x/net/{http2 => internal/httpcommon}/headermap.go (74%) create mode 100644 go-controller/vendor/golang.org/x/net/internal/httpcommon/request.go delete mode 100644 go-controller/vendor/golang.org/x/sync/errgroup/go120.go delete mode 100644 go-controller/vendor/golang.org/x/sync/errgroup/pre_go120.go create mode 100644 go-controller/vendor/golang.org/x/sys/unix/auxv.go create mode 100644 go-controller/vendor/golang.org/x/sys/unix/auxv_unsupported.go diff --git a/go-controller/go.mod b/go-controller/go.mod index a7b86b1ed1..7868b6ca26 100644 --- a/go-controller/go.mod +++ b/go-controller/go.mod @@ -47,9 +47,9 @@ require ( github.com/urfave/cli/v2 v2.27.2 github.com/vishvananda/netlink v1.3.1-0.20250206174618-62fb240731fa golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 - golang.org/x/net v0.30.0 - golang.org/x/sync v0.8.0 - golang.org/x/sys v0.26.0 + golang.org/x/net v0.38.0 + golang.org/x/sync v0.12.0 + golang.org/x/sys v0.31.0 golang.org/x/time v0.7.0 google.golang.org/grpc v1.65.0 google.golang.org/grpc/security/advancedtls v0.0.0-20240425232638-1e8b9b7fc655 @@ -62,7 +62,7 @@ require ( k8s.io/client-go 
v0.32.3 k8s.io/component-helpers v0.32.3 k8s.io/klog/v2 v2.130.1 - k8s.io/kubernetes v1.32.3 + k8s.io/kubernetes v1.32.6 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 kubevirt.io/api v1.0.0-alpha.0 sigs.k8s.io/controller-runtime v0.20.3 @@ -124,10 +124,10 @@ require ( github.com/x448/float16 v0.8.4 // indirect github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/crypto v0.28.0 // indirect + golang.org/x/crypto v0.36.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect golang.org/x/tools v0.26.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect diff --git a/go-controller/go.sum b/go-controller/go.sum index 93bf3489f5..3dcc3208b3 100644 --- a/go-controller/go.sum +++ b/go-controller/go.sum @@ -841,8 +841,8 @@ golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= 
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -934,8 +934,8 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -958,8 +958,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1047,14 +1047,14 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1064,8 +1064,8 @@ golang.org/x/text 
v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1368,8 +1368,8 @@ k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lV k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= -k8s.io/kubernetes v1.32.3 h1:2A58BlNME8NwsMawmnM6InYo3Jf35Nw5G79q46kXwoA= -k8s.io/kubernetes v1.32.3/go.mod h1:GvhiBeolvSRzBpFlgM0z/Bbu3Oxs9w3P6XfEgYaMi8k= +k8s.io/kubernetes v1.32.6 h1:tp1gRjOqZjaoFBek5PN6eSmODdS1QRrH5UKiFP8ZByg= +k8s.io/kubernetes v1.32.6/go.mod h1:REY0Gok66BTTrbGyZaFMNKO9JhxvgBDW9B7aksWRFoY= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= diff --git 
a/go-controller/vendor/golang.org/x/net/context/context.go b/go-controller/vendor/golang.org/x/net/context/context.go index cf66309c4a..db1c95fab1 100644 --- a/go-controller/vendor/golang.org/x/net/context/context.go +++ b/go-controller/vendor/golang.org/x/net/context/context.go @@ -3,29 +3,31 @@ // license that can be found in the LICENSE file. // Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries +// cancellation signals, and other request-scoped values across API boundaries // and between processes. // As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. +// name [context], and migrating to it can be done automatically with [go fix]. // -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// Incoming requests to a server should create a [Context], and outgoing +// calls to servers should accept a Context. The chain of function +// calls between them must propagate the Context, optionally replacing +// it with a derived Context created using [WithCancel], [WithDeadline], +// [WithTimeout], or [WithValue]. // // Programs that use Contexts should follow these rules to keep interfaces // consistent across packages and enable static analysis tools to check context // propagation: // // Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first +// explicitly to each function that needs it. This is discussed further in +// https://go.dev/blog/context-and-structs. 
The Context should be the first // parameter, typically named ctx: // // func DoSomething(ctx context.Context, arg Arg) error { // // ... use ctx ... // } // -// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// Do not pass a nil [Context], even if a function permits it. Pass [context.TODO] // if you are unsure about which Context to use. // // Use context Values only for request-scoped data that transits processes and @@ -34,9 +36,30 @@ // The same Context may be passed to functions running in different goroutines; // Contexts are safe for simultaneous use by multiple goroutines. // -// See http://blog.golang.org/context for example code for a server that uses +// See https://go.dev/blog/context for example code for a server that uses // Contexts. -package context // import "golang.org/x/net/context" +// +// [go fix]: https://go.dev/cmd/go#hdr-Update_packages_to_use_new_APIs +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +// A Context carries a deadline, a cancellation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// Canceled is the error returned by [Context.Err] when the context is canceled +// for some reason other than its deadline passing. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by [Context.Err] when the context is canceled +// due to its deadline passing. +var DeadlineExceeded = context.DeadlineExceeded // Background returns a non-nil, empty Context. It is never canceled, has no // values, and has no deadline. It is typically used by the main function, @@ -49,8 +72,73 @@ func Background() Context { // TODO returns a non-nil, empty Context. 
Code should use context.TODO when // it's unclear which Context to use or it is not yet available (because the // surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. +// parameter). func TODO() Context { return todo } + +var ( + background = context.Background() + todo = context.TODO() +) + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// A CancelFunc may be called by multiple goroutines simultaneously. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc + +// WithCancel returns a derived context that points to the parent context +// but has a new Done channel. The returned context's Done channel is closed +// when the returned cancel function is called or when the parent context's +// Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + return context.WithCancel(parent) +} + +// WithDeadline returns a derived context that points to the parent context +// but has the deadline adjusted to be no later than d. If the parent's +// deadline is already earlier than d, WithDeadline(parent, d) is semantically +// equivalent to parent. The returned [Context.Done] channel is closed when +// the deadline expires, when the returned cancel function is called, +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete. 
+func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) { + return context.WithDeadline(parent, d) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return context.WithTimeout(parent, timeout) +} + +// WithValue returns a derived context that points to the parent Context. +// In the derived context, the value associated with key is val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The provided key must be comparable and should not be of type +// string or any other built-in type to avoid collisions between +// packages using context. Users of WithValue should define their own +// types for keys. To avoid allocating when assigning to an +// interface{}, context keys often have concrete type +// struct{}. Alternatively, exported context key variables' static +// type should be a pointer or interface. +func WithValue(parent Context, key, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/go-controller/vendor/golang.org/x/net/context/go17.go b/go-controller/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index 0c1b867937..0000000000 --- a/go-controller/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, f -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, f -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/go-controller/vendor/golang.org/x/net/context/go19.go b/go-controller/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index e31e35a904..0000000000 --- a/go-controller/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. 
-type CancelFunc = context.CancelFunc diff --git a/go-controller/vendor/golang.org/x/net/context/pre_go17.go b/go-controller/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 065ff3dfa5..0000000000 --- a/go-controller/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. 
-func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. 
-type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. 
- return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/go-controller/vendor/golang.org/x/net/context/pre_go19.go b/go-controller/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index ec5a638033..0000000000 --- a/go-controller/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. 
-// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. 
- // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. 
-type CancelFunc func() diff --git a/go-controller/vendor/golang.org/x/net/html/atom/table.go b/go-controller/vendor/golang.org/x/net/html/atom/table.go index 2a938864cb..b460e6f722 100644 --- a/go-controller/vendor/golang.org/x/net/html/atom/table.go +++ b/go-controller/vendor/golang.org/x/net/html/atom/table.go @@ -11,23 +11,23 @@ const ( AcceptCharset Atom = 0x1a0e Accesskey Atom = 0x2c09 Acronym Atom = 0xaa07 - Action Atom = 0x27206 - Address Atom = 0x6f307 + Action Atom = 0x26506 + Address Atom = 0x6f107 Align Atom = 0xb105 - Allowfullscreen Atom = 0x2080f + Allowfullscreen Atom = 0x3280f Allowpaymentrequest Atom = 0xc113 Allowusermedia Atom = 0xdd0e Alt Atom = 0xf303 Annotation Atom = 0x1c90a AnnotationXml Atom = 0x1c90e - Applet Atom = 0x31906 - Area Atom = 0x35604 - Article Atom = 0x3fc07 + Applet Atom = 0x30806 + Area Atom = 0x35004 + Article Atom = 0x3f607 As Atom = 0x3c02 Aside Atom = 0x10705 Async Atom = 0xff05 Audio Atom = 0x11505 - Autocomplete Atom = 0x2780c + Autocomplete Atom = 0x26b0c Autofocus Atom = 0x12109 Autoplay Atom = 0x13c08 B Atom = 0x101 @@ -43,34 +43,34 @@ const ( Br Atom = 0x202 Button Atom = 0x19106 Canvas Atom = 0x10306 - Caption Atom = 0x23107 - Center Atom = 0x22006 - Challenge Atom = 0x29b09 + Caption Atom = 0x22407 + Center Atom = 0x21306 + Challenge Atom = 0x28e09 Charset Atom = 0x2107 - Checked Atom = 0x47907 + Checked Atom = 0x5b507 Cite Atom = 0x19c04 - Class Atom = 0x56405 - Code Atom = 0x5c504 + Class Atom = 0x55805 + Code Atom = 0x5ee04 Col Atom = 0x1ab03 Colgroup Atom = 0x1ab08 Color Atom = 0x1bf05 Cols Atom = 0x1c404 Colspan Atom = 0x1c407 Command Atom = 0x1d707 - Content Atom = 0x58b07 - Contenteditable Atom = 0x58b0f - Contextmenu Atom = 0x3800b + Content Atom = 0x57b07 + Contenteditable Atom = 0x57b0f + Contextmenu Atom = 0x37a0b Controls Atom = 0x1de08 - Coords Atom = 0x1ea06 - Crossorigin Atom = 0x1fb0b - Data Atom = 0x4a504 - Datalist Atom = 0x4a508 - Datetime Atom = 0x2b808 - Dd Atom = 0x2d702 + Coords Atom = 
0x1f006 + Crossorigin Atom = 0x1fa0b + Data Atom = 0x49904 + Datalist Atom = 0x49908 + Datetime Atom = 0x2ab08 + Dd Atom = 0x2bf02 Default Atom = 0x10a07 - Defer Atom = 0x5c705 - Del Atom = 0x45203 - Desc Atom = 0x56104 + Defer Atom = 0x5f005 + Del Atom = 0x44c03 + Desc Atom = 0x55504 Details Atom = 0x7207 Dfn Atom = 0x8703 Dialog Atom = 0xbb06 @@ -78,106 +78,106 @@ const ( Dirname Atom = 0x9307 Disabled Atom = 0x16408 Div Atom = 0x16b03 - Dl Atom = 0x5e602 - Download Atom = 0x46308 + Dl Atom = 0x5d602 + Download Atom = 0x45d08 Draggable Atom = 0x17a09 - Dropzone Atom = 0x40508 - Dt Atom = 0x64b02 + Dropzone Atom = 0x3ff08 + Dt Atom = 0x64002 Em Atom = 0x6e02 Embed Atom = 0x6e05 - Enctype Atom = 0x28d07 - Face Atom = 0x21e04 - Fieldset Atom = 0x22608 - Figcaption Atom = 0x22e0a - Figure Atom = 0x24806 + Enctype Atom = 0x28007 + Face Atom = 0x21104 + Fieldset Atom = 0x21908 + Figcaption Atom = 0x2210a + Figure Atom = 0x23b06 Font Atom = 0x3f04 Footer Atom = 0xf606 - For Atom = 0x25403 - ForeignObject Atom = 0x2540d - Foreignobject Atom = 0x2610d - Form Atom = 0x26e04 - Formaction Atom = 0x26e0a - Formenctype Atom = 0x2890b - Formmethod Atom = 0x2a40a - Formnovalidate Atom = 0x2ae0e - Formtarget Atom = 0x2c00a + For Atom = 0x24703 + ForeignObject Atom = 0x2470d + Foreignobject Atom = 0x2540d + Form Atom = 0x26104 + Formaction Atom = 0x2610a + Formenctype Atom = 0x27c0b + Formmethod Atom = 0x2970a + Formnovalidate Atom = 0x2a10e + Formtarget Atom = 0x2b30a Frame Atom = 0x8b05 Frameset Atom = 0x8b08 H1 Atom = 0x15c02 - H2 Atom = 0x2de02 - H3 Atom = 0x30d02 - H4 Atom = 0x34502 - H5 Atom = 0x34f02 - H6 Atom = 0x64d02 - Head Atom = 0x33104 - Header Atom = 0x33106 - Headers Atom = 0x33107 + H2 Atom = 0x56102 + H3 Atom = 0x2cd02 + H4 Atom = 0x2fc02 + H5 Atom = 0x33f02 + H6 Atom = 0x34902 + Head Atom = 0x32004 + Header Atom = 0x32006 + Headers Atom = 0x32007 Height Atom = 0x5206 - Hgroup Atom = 0x2ca06 - Hidden Atom = 0x2d506 - High Atom = 0x2db04 + Hgroup Atom = 0x64206 + 
Hidden Atom = 0x2bd06 + High Atom = 0x2ca04 Hr Atom = 0x15702 - Href Atom = 0x2e004 - Hreflang Atom = 0x2e008 + Href Atom = 0x2cf04 + Hreflang Atom = 0x2cf08 Html Atom = 0x5604 - HttpEquiv Atom = 0x2e80a + HttpEquiv Atom = 0x2d70a I Atom = 0x601 - Icon Atom = 0x58a04 + Icon Atom = 0x57a04 Id Atom = 0x10902 - Iframe Atom = 0x2fc06 - Image Atom = 0x30205 - Img Atom = 0x30703 - Input Atom = 0x44b05 - Inputmode Atom = 0x44b09 - Ins Atom = 0x20403 - Integrity Atom = 0x23f09 + Iframe Atom = 0x2eb06 + Image Atom = 0x2f105 + Img Atom = 0x2f603 + Input Atom = 0x44505 + Inputmode Atom = 0x44509 + Ins Atom = 0x20303 + Integrity Atom = 0x23209 Is Atom = 0x16502 - Isindex Atom = 0x30f07 - Ismap Atom = 0x31605 - Itemid Atom = 0x38b06 + Isindex Atom = 0x2fe07 + Ismap Atom = 0x30505 + Itemid Atom = 0x38506 Itemprop Atom = 0x19d08 - Itemref Atom = 0x3cd07 - Itemscope Atom = 0x67109 - Itemtype Atom = 0x31f08 + Itemref Atom = 0x3c707 + Itemscope Atom = 0x66f09 + Itemtype Atom = 0x30e08 Kbd Atom = 0xb903 Keygen Atom = 0x3206 Keytype Atom = 0xd607 Kind Atom = 0x17704 Label Atom = 0x5905 - Lang Atom = 0x2e404 + Lang Atom = 0x2d304 Legend Atom = 0x18106 Li Atom = 0xb202 Link Atom = 0x17404 - List Atom = 0x4a904 - Listing Atom = 0x4a907 + List Atom = 0x49d04 + Listing Atom = 0x49d07 Loop Atom = 0x5d04 Low Atom = 0xc303 Main Atom = 0x1004 Malignmark Atom = 0xb00a - Manifest Atom = 0x6d708 - Map Atom = 0x31803 + Manifest Atom = 0x6d508 + Map Atom = 0x30703 Mark Atom = 0xb604 - Marquee Atom = 0x32707 - Math Atom = 0x32e04 - Max Atom = 0x33d03 - Maxlength Atom = 0x33d09 + Marquee Atom = 0x31607 + Math Atom = 0x31d04 + Max Atom = 0x33703 + Maxlength Atom = 0x33709 Media Atom = 0xe605 Mediagroup Atom = 0xe60a - Menu Atom = 0x38704 - Menuitem Atom = 0x38708 - Meta Atom = 0x4b804 + Menu Atom = 0x38104 + Menuitem Atom = 0x38108 + Meta Atom = 0x4ac04 Meter Atom = 0x9805 - Method Atom = 0x2a806 - Mglyph Atom = 0x30806 - Mi Atom = 0x34702 - Min Atom = 0x34703 - Minlength Atom = 0x34709 - Mn Atom = 
0x2b102 + Method Atom = 0x29b06 + Mglyph Atom = 0x2f706 + Mi Atom = 0x34102 + Min Atom = 0x34103 + Minlength Atom = 0x34109 + Mn Atom = 0x2a402 Mo Atom = 0xa402 - Ms Atom = 0x67402 - Mtext Atom = 0x35105 - Multiple Atom = 0x35f08 - Muted Atom = 0x36705 + Ms Atom = 0x67202 + Mtext Atom = 0x34b05 + Multiple Atom = 0x35908 + Muted Atom = 0x36105 Name Atom = 0x9604 Nav Atom = 0x1303 Nobr Atom = 0x3704 @@ -185,101 +185,101 @@ const ( Noframes Atom = 0x8908 Nomodule Atom = 0xa208 Nonce Atom = 0x1a605 - Noscript Atom = 0x21608 - Novalidate Atom = 0x2b20a - Object Atom = 0x26806 + Noscript Atom = 0x2c208 + Novalidate Atom = 0x2a50a + Object Atom = 0x25b06 Ol Atom = 0x13702 Onabort Atom = 0x19507 - Onafterprint Atom = 0x2360c - Onautocomplete Atom = 0x2760e - Onautocompleteerror Atom = 0x27613 - Onauxclick Atom = 0x61f0a - Onbeforeprint Atom = 0x69e0d - Onbeforeunload Atom = 0x6e70e - Onblur Atom = 0x56d06 + Onafterprint Atom = 0x2290c + Onautocomplete Atom = 0x2690e + Onautocompleteerror Atom = 0x26913 + Onauxclick Atom = 0x6140a + Onbeforeprint Atom = 0x69c0d + Onbeforeunload Atom = 0x6e50e + Onblur Atom = 0x1ea06 Oncancel Atom = 0x11908 Oncanplay Atom = 0x14d09 Oncanplaythrough Atom = 0x14d10 - Onchange Atom = 0x41b08 - Onclick Atom = 0x2f507 - Onclose Atom = 0x36c07 - Oncontextmenu Atom = 0x37e0d - Oncopy Atom = 0x39106 - Oncuechange Atom = 0x3970b - Oncut Atom = 0x3a205 - Ondblclick Atom = 0x3a70a - Ondrag Atom = 0x3b106 - Ondragend Atom = 0x3b109 - Ondragenter Atom = 0x3ba0b - Ondragexit Atom = 0x3c50a - Ondragleave Atom = 0x3df0b - Ondragover Atom = 0x3ea0a - Ondragstart Atom = 0x3f40b - Ondrop Atom = 0x40306 - Ondurationchange Atom = 0x41310 - Onemptied Atom = 0x40a09 - Onended Atom = 0x42307 - Onerror Atom = 0x42a07 - Onfocus Atom = 0x43107 - Onhashchange Atom = 0x43d0c - Oninput Atom = 0x44907 - Oninvalid Atom = 0x45509 - Onkeydown Atom = 0x45e09 - Onkeypress Atom = 0x46b0a - Onkeyup Atom = 0x48007 - Onlanguagechange Atom = 0x48d10 - Onload Atom = 0x49d06 - 
Onloadeddata Atom = 0x49d0c - Onloadedmetadata Atom = 0x4b010 - Onloadend Atom = 0x4c609 - Onloadstart Atom = 0x4cf0b - Onmessage Atom = 0x4da09 - Onmessageerror Atom = 0x4da0e - Onmousedown Atom = 0x4e80b - Onmouseenter Atom = 0x4f30c - Onmouseleave Atom = 0x4ff0c - Onmousemove Atom = 0x50b0b - Onmouseout Atom = 0x5160a - Onmouseover Atom = 0x5230b - Onmouseup Atom = 0x52e09 - Onmousewheel Atom = 0x53c0c - Onoffline Atom = 0x54809 - Ononline Atom = 0x55108 - Onpagehide Atom = 0x5590a - Onpageshow Atom = 0x5730a - Onpaste Atom = 0x57f07 - Onpause Atom = 0x59a07 - Onplay Atom = 0x5a406 - Onplaying Atom = 0x5a409 - Onpopstate Atom = 0x5ad0a - Onprogress Atom = 0x5b70a - Onratechange Atom = 0x5cc0c - Onrejectionhandled Atom = 0x5d812 - Onreset Atom = 0x5ea07 - Onresize Atom = 0x5f108 - Onscroll Atom = 0x60008 - Onsecuritypolicyviolation Atom = 0x60819 - Onseeked Atom = 0x62908 - Onseeking Atom = 0x63109 - Onselect Atom = 0x63a08 - Onshow Atom = 0x64406 - Onsort Atom = 0x64f06 - Onstalled Atom = 0x65909 - Onstorage Atom = 0x66209 - Onsubmit Atom = 0x66b08 - Onsuspend Atom = 0x67b09 + Onchange Atom = 0x41508 + Onclick Atom = 0x2e407 + Onclose Atom = 0x36607 + Oncontextmenu Atom = 0x3780d + Oncopy Atom = 0x38b06 + Oncuechange Atom = 0x3910b + Oncut Atom = 0x39c05 + Ondblclick Atom = 0x3a10a + Ondrag Atom = 0x3ab06 + Ondragend Atom = 0x3ab09 + Ondragenter Atom = 0x3b40b + Ondragexit Atom = 0x3bf0a + Ondragleave Atom = 0x3d90b + Ondragover Atom = 0x3e40a + Ondragstart Atom = 0x3ee0b + Ondrop Atom = 0x3fd06 + Ondurationchange Atom = 0x40d10 + Onemptied Atom = 0x40409 + Onended Atom = 0x41d07 + Onerror Atom = 0x42407 + Onfocus Atom = 0x42b07 + Onhashchange Atom = 0x4370c + Oninput Atom = 0x44307 + Oninvalid Atom = 0x44f09 + Onkeydown Atom = 0x45809 + Onkeypress Atom = 0x4650a + Onkeyup Atom = 0x47407 + Onlanguagechange Atom = 0x48110 + Onload Atom = 0x49106 + Onloadeddata Atom = 0x4910c + Onloadedmetadata Atom = 0x4a410 + Onloadend Atom = 0x4ba09 + Onloadstart Atom = 0x4c30b 
+ Onmessage Atom = 0x4ce09 + Onmessageerror Atom = 0x4ce0e + Onmousedown Atom = 0x4dc0b + Onmouseenter Atom = 0x4e70c + Onmouseleave Atom = 0x4f30c + Onmousemove Atom = 0x4ff0b + Onmouseout Atom = 0x50a0a + Onmouseover Atom = 0x5170b + Onmouseup Atom = 0x52209 + Onmousewheel Atom = 0x5300c + Onoffline Atom = 0x53c09 + Ononline Atom = 0x54508 + Onpagehide Atom = 0x54d0a + Onpageshow Atom = 0x5630a + Onpaste Atom = 0x56f07 + Onpause Atom = 0x58a07 + Onplay Atom = 0x59406 + Onplaying Atom = 0x59409 + Onpopstate Atom = 0x59d0a + Onprogress Atom = 0x5a70a + Onratechange Atom = 0x5bc0c + Onrejectionhandled Atom = 0x5c812 + Onreset Atom = 0x5da07 + Onresize Atom = 0x5e108 + Onscroll Atom = 0x5f508 + Onsecuritypolicyviolation Atom = 0x5fd19 + Onseeked Atom = 0x61e08 + Onseeking Atom = 0x62609 + Onselect Atom = 0x62f08 + Onshow Atom = 0x63906 + Onsort Atom = 0x64d06 + Onstalled Atom = 0x65709 + Onstorage Atom = 0x66009 + Onsubmit Atom = 0x66908 + Onsuspend Atom = 0x67909 Ontimeupdate Atom = 0x400c - Ontoggle Atom = 0x68408 - Onunhandledrejection Atom = 0x68c14 - Onunload Atom = 0x6ab08 - Onvolumechange Atom = 0x6b30e - Onwaiting Atom = 0x6c109 - Onwheel Atom = 0x6ca07 + Ontoggle Atom = 0x68208 + Onunhandledrejection Atom = 0x68a14 + Onunload Atom = 0x6a908 + Onvolumechange Atom = 0x6b10e + Onwaiting Atom = 0x6bf09 + Onwheel Atom = 0x6c807 Open Atom = 0x1a304 Optgroup Atom = 0x5f08 - Optimum Atom = 0x6d107 - Option Atom = 0x6e306 - Output Atom = 0x51d06 + Optimum Atom = 0x6cf07 + Option Atom = 0x6e106 + Output Atom = 0x51106 P Atom = 0xc01 Param Atom = 0xc05 Pattern Atom = 0x6607 @@ -288,466 +288,468 @@ const ( Placeholder Atom = 0x1310b Plaintext Atom = 0x1b209 Playsinline Atom = 0x1400b - Poster Atom = 0x2cf06 - Pre Atom = 0x47003 - Preload Atom = 0x48607 - Progress Atom = 0x5b908 - Prompt Atom = 0x53606 - Public Atom = 0x58606 + Poster Atom = 0x64706 + Pre Atom = 0x46a03 + Preload Atom = 0x47a07 + Progress Atom = 0x5a908 + Prompt Atom = 0x52a06 + Public Atom = 0x57606 Q 
Atom = 0xcf01 Radiogroup Atom = 0x30a Rb Atom = 0x3a02 - Readonly Atom = 0x35708 - Referrerpolicy Atom = 0x3d10e - Rel Atom = 0x48703 - Required Atom = 0x24c08 + Readonly Atom = 0x35108 + Referrerpolicy Atom = 0x3cb0e + Rel Atom = 0x47b03 + Required Atom = 0x23f08 Reversed Atom = 0x8008 Rows Atom = 0x9c04 Rowspan Atom = 0x9c07 - Rp Atom = 0x23c02 + Rp Atom = 0x22f02 Rt Atom = 0x19a02 Rtc Atom = 0x19a03 Ruby Atom = 0xfb04 S Atom = 0x2501 Samp Atom = 0x7804 Sandbox Atom = 0x12907 - Scope Atom = 0x67505 - Scoped Atom = 0x67506 - Script Atom = 0x21806 - Seamless Atom = 0x37108 - Section Atom = 0x56807 - Select Atom = 0x63c06 - Selected Atom = 0x63c08 - Shape Atom = 0x1e505 - Size Atom = 0x5f504 - Sizes Atom = 0x5f505 - Slot Atom = 0x1ef04 - Small Atom = 0x20605 - Sortable Atom = 0x65108 - Sorted Atom = 0x33706 - Source Atom = 0x37806 - Spacer Atom = 0x43706 + Scope Atom = 0x67305 + Scoped Atom = 0x67306 + Script Atom = 0x2c406 + Seamless Atom = 0x36b08 + Search Atom = 0x55c06 + Section Atom = 0x1e507 + Select Atom = 0x63106 + Selected Atom = 0x63108 + Shape Atom = 0x1f505 + Size Atom = 0x5e504 + Sizes Atom = 0x5e505 + Slot Atom = 0x20504 + Small Atom = 0x32605 + Sortable Atom = 0x64f08 + Sorted Atom = 0x37206 + Source Atom = 0x43106 + Spacer Atom = 0x46e06 Span Atom = 0x9f04 - Spellcheck Atom = 0x4740a - Src Atom = 0x5c003 - Srcdoc Atom = 0x5c006 - Srclang Atom = 0x5f907 - Srcset Atom = 0x6f906 - Start Atom = 0x3fa05 - Step Atom = 0x58304 + Spellcheck Atom = 0x5b00a + Src Atom = 0x5e903 + Srcdoc Atom = 0x5e906 + Srclang Atom = 0x6f707 + Srcset Atom = 0x6fe06 + Start Atom = 0x3f405 + Step Atom = 0x57304 Strike Atom = 0xd206 - Strong Atom = 0x6dd06 - Style Atom = 0x6ff05 - Sub Atom = 0x66d03 - Summary Atom = 0x70407 - Sup Atom = 0x70b03 - Svg Atom = 0x70e03 - System Atom = 0x71106 - Tabindex Atom = 0x4be08 - Table Atom = 0x59505 - Target Atom = 0x2c406 + Strong Atom = 0x6db06 + Style Atom = 0x70405 + Sub Atom = 0x66b03 + Summary Atom = 0x70907 + Sup Atom = 0x71003 + Svg 
Atom = 0x71303 + System Atom = 0x71606 + Tabindex Atom = 0x4b208 + Table Atom = 0x58505 + Target Atom = 0x2b706 Tbody Atom = 0x2705 Td Atom = 0x9202 - Template Atom = 0x71408 - Textarea Atom = 0x35208 + Template Atom = 0x71908 + Textarea Atom = 0x34c08 Tfoot Atom = 0xf505 Th Atom = 0x15602 - Thead Atom = 0x33005 + Thead Atom = 0x31f05 Time Atom = 0x4204 Title Atom = 0x11005 Tr Atom = 0xcc02 Track Atom = 0x1ba05 - Translate Atom = 0x1f209 + Translate Atom = 0x20809 Tt Atom = 0x6802 Type Atom = 0xd904 - Typemustmatch Atom = 0x2900d + Typemustmatch Atom = 0x2830d U Atom = 0xb01 Ul Atom = 0xa702 Updateviacache Atom = 0x460e - Usemap Atom = 0x59e06 + Usemap Atom = 0x58e06 Value Atom = 0x1505 Var Atom = 0x16d03 - Video Atom = 0x2f105 - Wbr Atom = 0x57c03 - Width Atom = 0x64905 - Workertype Atom = 0x71c0a - Wrap Atom = 0x72604 + Video Atom = 0x2e005 + Wbr Atom = 0x56c03 + Width Atom = 0x63e05 + Workertype Atom = 0x7210a + Wrap Atom = 0x72b04 Xmp Atom = 0x12f03 ) -const hash0 = 0x81cdf10e +const hash0 = 0x84f70e16 const maxAtomLen = 25 var table = [1 << 9]Atom{ - 0x1: 0xe60a, // mediagroup - 0x2: 0x2e404, // lang - 0x4: 0x2c09, // accesskey - 0x5: 0x8b08, // frameset - 0x7: 0x63a08, // onselect - 0x8: 0x71106, // system - 0xa: 0x64905, // width - 0xc: 0x2890b, // formenctype - 0xd: 0x13702, // ol - 0xe: 0x3970b, // oncuechange - 0x10: 0x14b03, // bdo - 0x11: 0x11505, // audio - 0x12: 0x17a09, // draggable - 0x14: 0x2f105, // video - 0x15: 0x2b102, // mn - 0x16: 0x38704, // menu - 0x17: 0x2cf06, // poster - 0x19: 0xf606, // footer - 0x1a: 0x2a806, // method - 0x1b: 0x2b808, // datetime - 0x1c: 0x19507, // onabort - 0x1d: 0x460e, // updateviacache - 0x1e: 0xff05, // async - 0x1f: 0x49d06, // onload - 0x21: 0x11908, // oncancel - 0x22: 0x62908, // onseeked - 0x23: 0x30205, // image - 0x24: 0x5d812, // onrejectionhandled - 0x26: 0x17404, // link - 0x27: 0x51d06, // output - 0x28: 0x33104, // head - 0x29: 0x4ff0c, // onmouseleave - 0x2a: 0x57f07, // onpaste - 0x2b: 0x5a409, // 
onplaying - 0x2c: 0x1c407, // colspan - 0x2f: 0x1bf05, // color - 0x30: 0x5f504, // size - 0x31: 0x2e80a, // http-equiv - 0x33: 0x601, // i - 0x34: 0x5590a, // onpagehide - 0x35: 0x68c14, // onunhandledrejection - 0x37: 0x42a07, // onerror - 0x3a: 0x3b08, // basefont - 0x3f: 0x1303, // nav - 0x40: 0x17704, // kind - 0x41: 0x35708, // readonly - 0x42: 0x30806, // mglyph - 0x44: 0xb202, // li - 0x46: 0x2d506, // hidden - 0x47: 0x70e03, // svg - 0x48: 0x58304, // step - 0x49: 0x23f09, // integrity - 0x4a: 0x58606, // public - 0x4c: 0x1ab03, // col - 0x4d: 0x1870a, // blockquote - 0x4e: 0x34f02, // h5 - 0x50: 0x5b908, // progress - 0x51: 0x5f505, // sizes - 0x52: 0x34502, // h4 - 0x56: 0x33005, // thead - 0x57: 0xd607, // keytype - 0x58: 0x5b70a, // onprogress - 0x59: 0x44b09, // inputmode - 0x5a: 0x3b109, // ondragend - 0x5d: 0x3a205, // oncut - 0x5e: 0x43706, // spacer - 0x5f: 0x1ab08, // colgroup - 0x62: 0x16502, // is - 0x65: 0x3c02, // as - 0x66: 0x54809, // onoffline - 0x67: 0x33706, // sorted - 0x69: 0x48d10, // onlanguagechange - 0x6c: 0x43d0c, // onhashchange - 0x6d: 0x9604, // name - 0x6e: 0xf505, // tfoot - 0x6f: 0x56104, // desc - 0x70: 0x33d03, // max - 0x72: 0x1ea06, // coords - 0x73: 0x30d02, // h3 - 0x74: 0x6e70e, // onbeforeunload - 0x75: 0x9c04, // rows - 0x76: 0x63c06, // select - 0x77: 0x9805, // meter - 0x78: 0x38b06, // itemid - 0x79: 0x53c0c, // onmousewheel - 0x7a: 0x5c006, // srcdoc - 0x7d: 0x1ba05, // track - 0x7f: 0x31f08, // itemtype - 0x82: 0xa402, // mo - 0x83: 0x41b08, // onchange - 0x84: 0x33107, // headers - 0x85: 0x5cc0c, // onratechange - 0x86: 0x60819, // onsecuritypolicyviolation - 0x88: 0x4a508, // datalist - 0x89: 0x4e80b, // onmousedown - 0x8a: 0x1ef04, // slot - 0x8b: 0x4b010, // onloadedmetadata - 0x8c: 0x1a06, // accept - 0x8d: 0x26806, // object - 0x91: 0x6b30e, // onvolumechange - 0x92: 0x2107, // charset - 0x93: 0x27613, // onautocompleteerror - 0x94: 0xc113, // allowpaymentrequest - 0x95: 0x2804, // body - 0x96: 0x10a07, 
// default - 0x97: 0x63c08, // selected - 0x98: 0x21e04, // face - 0x99: 0x1e505, // shape - 0x9b: 0x68408, // ontoggle - 0x9e: 0x64b02, // dt - 0x9f: 0xb604, // mark - 0xa1: 0xb01, // u - 0xa4: 0x6ab08, // onunload - 0xa5: 0x5d04, // loop - 0xa6: 0x16408, // disabled - 0xaa: 0x42307, // onended - 0xab: 0xb00a, // malignmark - 0xad: 0x67b09, // onsuspend - 0xae: 0x35105, // mtext - 0xaf: 0x64f06, // onsort - 0xb0: 0x19d08, // itemprop - 0xb3: 0x67109, // itemscope - 0xb4: 0x17305, // blink - 0xb6: 0x3b106, // ondrag - 0xb7: 0xa702, // ul - 0xb8: 0x26e04, // form - 0xb9: 0x12907, // sandbox - 0xba: 0x8b05, // frame - 0xbb: 0x1505, // value - 0xbc: 0x66209, // onstorage - 0xbf: 0xaa07, // acronym - 0xc0: 0x19a02, // rt - 0xc2: 0x202, // br - 0xc3: 0x22608, // fieldset - 0xc4: 0x2900d, // typemustmatch - 0xc5: 0xa208, // nomodule - 0xc6: 0x6c07, // noembed - 0xc7: 0x69e0d, // onbeforeprint - 0xc8: 0x19106, // button - 0xc9: 0x2f507, // onclick - 0xca: 0x70407, // summary - 0xcd: 0xfb04, // ruby - 0xce: 0x56405, // class - 0xcf: 0x3f40b, // ondragstart - 0xd0: 0x23107, // caption - 0xd4: 0xdd0e, // allowusermedia - 0xd5: 0x4cf0b, // onloadstart - 0xd9: 0x16b03, // div - 0xda: 0x4a904, // list - 0xdb: 0x32e04, // math - 0xdc: 0x44b05, // input - 0xdf: 0x3ea0a, // ondragover - 0xe0: 0x2de02, // h2 - 0xe2: 0x1b209, // plaintext - 0xe4: 0x4f30c, // onmouseenter - 0xe7: 0x47907, // checked - 0xe8: 0x47003, // pre - 0xea: 0x35f08, // multiple - 0xeb: 0xba03, // bdi - 0xec: 0x33d09, // maxlength - 0xed: 0xcf01, // q - 0xee: 0x61f0a, // onauxclick - 0xf0: 0x57c03, // wbr - 0xf2: 0x3b04, // base - 0xf3: 0x6e306, // option - 0xf5: 0x41310, // ondurationchange - 0xf7: 0x8908, // noframes - 0xf9: 0x40508, // dropzone - 0xfb: 0x67505, // scope - 0xfc: 0x8008, // reversed - 0xfd: 0x3ba0b, // ondragenter - 0xfe: 0x3fa05, // start - 0xff: 0x12f03, // xmp - 0x100: 0x5f907, // srclang - 0x101: 0x30703, // img - 0x104: 0x101, // b - 0x105: 0x25403, // for - 0x106: 0x10705, // aside - 
0x107: 0x44907, // oninput - 0x108: 0x35604, // area - 0x109: 0x2a40a, // formmethod - 0x10a: 0x72604, // wrap - 0x10c: 0x23c02, // rp - 0x10d: 0x46b0a, // onkeypress - 0x10e: 0x6802, // tt - 0x110: 0x34702, // mi - 0x111: 0x36705, // muted - 0x112: 0xf303, // alt - 0x113: 0x5c504, // code - 0x114: 0x6e02, // em - 0x115: 0x3c50a, // ondragexit - 0x117: 0x9f04, // span - 0x119: 0x6d708, // manifest - 0x11a: 0x38708, // menuitem - 0x11b: 0x58b07, // content - 0x11d: 0x6c109, // onwaiting - 0x11f: 0x4c609, // onloadend - 0x121: 0x37e0d, // oncontextmenu - 0x123: 0x56d06, // onblur - 0x124: 0x3fc07, // article - 0x125: 0x9303, // dir - 0x126: 0xef04, // ping - 0x127: 0x24c08, // required - 0x128: 0x45509, // oninvalid - 0x129: 0xb105, // align - 0x12b: 0x58a04, // icon - 0x12c: 0x64d02, // h6 - 0x12d: 0x1c404, // cols - 0x12e: 0x22e0a, // figcaption - 0x12f: 0x45e09, // onkeydown - 0x130: 0x66b08, // onsubmit - 0x131: 0x14d09, // oncanplay - 0x132: 0x70b03, // sup - 0x133: 0xc01, // p - 0x135: 0x40a09, // onemptied - 0x136: 0x39106, // oncopy - 0x137: 0x19c04, // cite - 0x138: 0x3a70a, // ondblclick - 0x13a: 0x50b0b, // onmousemove - 0x13c: 0x66d03, // sub - 0x13d: 0x48703, // rel - 0x13e: 0x5f08, // optgroup - 0x142: 0x9c07, // rowspan - 0x143: 0x37806, // source - 0x144: 0x21608, // noscript - 0x145: 0x1a304, // open - 0x146: 0x20403, // ins - 0x147: 0x2540d, // foreignObject - 0x148: 0x5ad0a, // onpopstate - 0x14a: 0x28d07, // enctype - 0x14b: 0x2760e, // onautocomplete - 0x14c: 0x35208, // textarea - 0x14e: 0x2780c, // autocomplete - 0x14f: 0x15702, // hr - 0x150: 0x1de08, // controls - 0x151: 0x10902, // id - 0x153: 0x2360c, // onafterprint - 0x155: 0x2610d, // foreignobject - 0x156: 0x32707, // marquee - 0x157: 0x59a07, // onpause - 0x158: 0x5e602, // dl - 0x159: 0x5206, // height - 0x15a: 0x34703, // min - 0x15b: 0x9307, // dirname - 0x15c: 0x1f209, // translate - 0x15d: 0x5604, // html - 0x15e: 0x34709, // minlength - 0x15f: 0x48607, // preload - 0x160: 
0x71408, // template - 0x161: 0x3df0b, // ondragleave - 0x162: 0x3a02, // rb - 0x164: 0x5c003, // src - 0x165: 0x6dd06, // strong - 0x167: 0x7804, // samp - 0x168: 0x6f307, // address - 0x169: 0x55108, // ononline - 0x16b: 0x1310b, // placeholder - 0x16c: 0x2c406, // target - 0x16d: 0x20605, // small - 0x16e: 0x6ca07, // onwheel - 0x16f: 0x1c90a, // annotation - 0x170: 0x4740a, // spellcheck - 0x171: 0x7207, // details - 0x172: 0x10306, // canvas - 0x173: 0x12109, // autofocus - 0x174: 0xc05, // param - 0x176: 0x46308, // download - 0x177: 0x45203, // del - 0x178: 0x36c07, // onclose - 0x179: 0xb903, // kbd - 0x17a: 0x31906, // applet - 0x17b: 0x2e004, // href - 0x17c: 0x5f108, // onresize - 0x17e: 0x49d0c, // onloadeddata - 0x180: 0xcc02, // tr - 0x181: 0x2c00a, // formtarget - 0x182: 0x11005, // title - 0x183: 0x6ff05, // style - 0x184: 0xd206, // strike - 0x185: 0x59e06, // usemap - 0x186: 0x2fc06, // iframe - 0x187: 0x1004, // main - 0x189: 0x7b07, // picture - 0x18c: 0x31605, // ismap - 0x18e: 0x4a504, // data - 0x18f: 0x5905, // label - 0x191: 0x3d10e, // referrerpolicy - 0x192: 0x15602, // th - 0x194: 0x53606, // prompt - 0x195: 0x56807, // section - 0x197: 0x6d107, // optimum - 0x198: 0x2db04, // high - 0x199: 0x15c02, // h1 - 0x19a: 0x65909, // onstalled - 0x19b: 0x16d03, // var - 0x19c: 0x4204, // time - 0x19e: 0x67402, // ms - 0x19f: 0x33106, // header - 0x1a0: 0x4da09, // onmessage - 0x1a1: 0x1a605, // nonce - 0x1a2: 0x26e0a, // formaction - 0x1a3: 0x22006, // center - 0x1a4: 0x3704, // nobr - 0x1a5: 0x59505, // table - 0x1a6: 0x4a907, // listing - 0x1a7: 0x18106, // legend - 0x1a9: 0x29b09, // challenge - 0x1aa: 0x24806, // figure - 0x1ab: 0xe605, // media - 0x1ae: 0xd904, // type - 0x1af: 0x3f04, // font - 0x1b0: 0x4da0e, // onmessageerror - 0x1b1: 0x37108, // seamless - 0x1b2: 0x8703, // dfn - 0x1b3: 0x5c705, // defer - 0x1b4: 0xc303, // low - 0x1b5: 0x19a03, // rtc - 0x1b6: 0x5230b, // onmouseover - 0x1b7: 0x2b20a, // novalidate - 0x1b8: 0x71c0a, // 
workertype - 0x1ba: 0x3cd07, // itemref - 0x1bd: 0x1, // a - 0x1be: 0x31803, // map - 0x1bf: 0x400c, // ontimeupdate - 0x1c0: 0x15e07, // bgsound - 0x1c1: 0x3206, // keygen - 0x1c2: 0x2705, // tbody - 0x1c5: 0x64406, // onshow - 0x1c7: 0x2501, // s - 0x1c8: 0x6607, // pattern - 0x1cc: 0x14d10, // oncanplaythrough - 0x1ce: 0x2d702, // dd - 0x1cf: 0x6f906, // srcset - 0x1d0: 0x17003, // big - 0x1d2: 0x65108, // sortable - 0x1d3: 0x48007, // onkeyup - 0x1d5: 0x5a406, // onplay - 0x1d7: 0x4b804, // meta - 0x1d8: 0x40306, // ondrop - 0x1da: 0x60008, // onscroll - 0x1db: 0x1fb0b, // crossorigin - 0x1dc: 0x5730a, // onpageshow - 0x1dd: 0x4, // abbr - 0x1de: 0x9202, // td - 0x1df: 0x58b0f, // contenteditable - 0x1e0: 0x27206, // action - 0x1e1: 0x1400b, // playsinline - 0x1e2: 0x43107, // onfocus - 0x1e3: 0x2e008, // hreflang - 0x1e5: 0x5160a, // onmouseout - 0x1e6: 0x5ea07, // onreset - 0x1e7: 0x13c08, // autoplay - 0x1e8: 0x63109, // onseeking - 0x1ea: 0x67506, // scoped - 0x1ec: 0x30a, // radiogroup - 0x1ee: 0x3800b, // contextmenu - 0x1ef: 0x52e09, // onmouseup - 0x1f1: 0x2ca06, // hgroup - 0x1f2: 0x2080f, // allowfullscreen - 0x1f3: 0x4be08, // tabindex - 0x1f6: 0x30f07, // isindex - 0x1f7: 0x1a0e, // accept-charset - 0x1f8: 0x2ae0e, // formnovalidate - 0x1fb: 0x1c90e, // annotation-xml - 0x1fc: 0x6e05, // embed - 0x1fd: 0x21806, // script - 0x1fe: 0xbb06, // dialog - 0x1ff: 0x1d707, // command + 0x1: 0x3ff08, // dropzone + 0x2: 0x3b08, // basefont + 0x3: 0x23209, // integrity + 0x4: 0x43106, // source + 0x5: 0x2c09, // accesskey + 0x6: 0x1a06, // accept + 0x7: 0x6c807, // onwheel + 0xb: 0x47407, // onkeyup + 0xc: 0x32007, // headers + 0xd: 0x67306, // scoped + 0xe: 0x67909, // onsuspend + 0xf: 0x8908, // noframes + 0x10: 0x1fa0b, // crossorigin + 0x11: 0x2e407, // onclick + 0x12: 0x3f405, // start + 0x13: 0x37a0b, // contextmenu + 0x14: 0x5e903, // src + 0x15: 0x1c404, // cols + 0x16: 0xbb06, // dialog + 0x17: 0x47a07, // preload + 0x18: 0x3c707, // itemref + 0x1b: 
0x2f105, // image + 0x1d: 0x4ba09, // onloadend + 0x1e: 0x45d08, // download + 0x1f: 0x46a03, // pre + 0x23: 0x2970a, // formmethod + 0x24: 0x71303, // svg + 0x25: 0xcf01, // q + 0x26: 0x64002, // dt + 0x27: 0x1de08, // controls + 0x2a: 0x2804, // body + 0x2b: 0xd206, // strike + 0x2c: 0x3910b, // oncuechange + 0x2d: 0x4c30b, // onloadstart + 0x2e: 0x2fe07, // isindex + 0x2f: 0xb202, // li + 0x30: 0x1400b, // playsinline + 0x31: 0x34102, // mi + 0x32: 0x30806, // applet + 0x33: 0x4ce09, // onmessage + 0x35: 0x13702, // ol + 0x36: 0x1a304, // open + 0x39: 0x14d09, // oncanplay + 0x3a: 0x6bf09, // onwaiting + 0x3b: 0x11908, // oncancel + 0x3c: 0x6a908, // onunload + 0x3e: 0x53c09, // onoffline + 0x3f: 0x1a0e, // accept-charset + 0x40: 0x32004, // head + 0x42: 0x3ab09, // ondragend + 0x43: 0x1310b, // placeholder + 0x44: 0x2b30a, // formtarget + 0x45: 0x2540d, // foreignobject + 0x47: 0x400c, // ontimeupdate + 0x48: 0xdd0e, // allowusermedia + 0x4a: 0x69c0d, // onbeforeprint + 0x4b: 0x5604, // html + 0x4c: 0x9f04, // span + 0x4d: 0x64206, // hgroup + 0x4e: 0x16408, // disabled + 0x4f: 0x4204, // time + 0x51: 0x42b07, // onfocus + 0x53: 0xb00a, // malignmark + 0x55: 0x4650a, // onkeypress + 0x56: 0x55805, // class + 0x57: 0x1ab08, // colgroup + 0x58: 0x33709, // maxlength + 0x59: 0x5a908, // progress + 0x5b: 0x70405, // style + 0x5c: 0x2a10e, // formnovalidate + 0x5e: 0x38b06, // oncopy + 0x60: 0x26104, // form + 0x61: 0xf606, // footer + 0x64: 0x30a, // radiogroup + 0x66: 0xfb04, // ruby + 0x67: 0x4ff0b, // onmousemove + 0x68: 0x19d08, // itemprop + 0x69: 0x2d70a, // http-equiv + 0x6a: 0x15602, // th + 0x6c: 0x6e02, // em + 0x6d: 0x38108, // menuitem + 0x6e: 0x63106, // select + 0x6f: 0x48110, // onlanguagechange + 0x70: 0x31f05, // thead + 0x71: 0x15c02, // h1 + 0x72: 0x5e906, // srcdoc + 0x75: 0x9604, // name + 0x76: 0x19106, // button + 0x77: 0x55504, // desc + 0x78: 0x17704, // kind + 0x79: 0x1bf05, // color + 0x7c: 0x58e06, // usemap + 0x7d: 0x30e08, // itemtype 
+ 0x7f: 0x6d508, // manifest + 0x81: 0x5300c, // onmousewheel + 0x82: 0x4dc0b, // onmousedown + 0x84: 0xc05, // param + 0x85: 0x2e005, // video + 0x86: 0x4910c, // onloadeddata + 0x87: 0x6f107, // address + 0x8c: 0xef04, // ping + 0x8d: 0x24703, // for + 0x8f: 0x62f08, // onselect + 0x90: 0x30703, // map + 0x92: 0xc01, // p + 0x93: 0x8008, // reversed + 0x94: 0x54d0a, // onpagehide + 0x95: 0x3206, // keygen + 0x96: 0x34109, // minlength + 0x97: 0x3e40a, // ondragover + 0x98: 0x42407, // onerror + 0x9a: 0x2107, // charset + 0x9b: 0x29b06, // method + 0x9c: 0x101, // b + 0x9d: 0x68208, // ontoggle + 0x9e: 0x2bd06, // hidden + 0xa0: 0x3f607, // article + 0xa2: 0x63906, // onshow + 0xa3: 0x64d06, // onsort + 0xa5: 0x57b0f, // contenteditable + 0xa6: 0x66908, // onsubmit + 0xa8: 0x44f09, // oninvalid + 0xaa: 0x202, // br + 0xab: 0x10902, // id + 0xac: 0x5d04, // loop + 0xad: 0x5630a, // onpageshow + 0xb0: 0x2cf04, // href + 0xb2: 0x2210a, // figcaption + 0xb3: 0x2690e, // onautocomplete + 0xb4: 0x49106, // onload + 0xb6: 0x9c04, // rows + 0xb7: 0x1a605, // nonce + 0xb8: 0x68a14, // onunhandledrejection + 0xbb: 0x21306, // center + 0xbc: 0x59406, // onplay + 0xbd: 0x33f02, // h5 + 0xbe: 0x49d07, // listing + 0xbf: 0x57606, // public + 0xc2: 0x23b06, // figure + 0xc3: 0x57a04, // icon + 0xc4: 0x1ab03, // col + 0xc5: 0x47b03, // rel + 0xc6: 0xe605, // media + 0xc7: 0x12109, // autofocus + 0xc8: 0x19a02, // rt + 0xca: 0x2d304, // lang + 0xcc: 0x49908, // datalist + 0xce: 0x2eb06, // iframe + 0xcf: 0x36105, // muted + 0xd0: 0x6140a, // onauxclick + 0xd2: 0x3c02, // as + 0xd6: 0x3fd06, // ondrop + 0xd7: 0x1c90a, // annotation + 0xd8: 0x21908, // fieldset + 0xdb: 0x2cf08, // hreflang + 0xdc: 0x4e70c, // onmouseenter + 0xdd: 0x2a402, // mn + 0xde: 0xe60a, // mediagroup + 0xdf: 0x9805, // meter + 0xe0: 0x56c03, // wbr + 0xe2: 0x63e05, // width + 0xe3: 0x2290c, // onafterprint + 0xe4: 0x30505, // ismap + 0xe5: 0x1505, // value + 0xe7: 0x1303, // nav + 0xe8: 0x54508, // ononline + 
0xe9: 0xb604, // mark + 0xea: 0xc303, // low + 0xeb: 0x3ee0b, // ondragstart + 0xef: 0x12f03, // xmp + 0xf0: 0x22407, // caption + 0xf1: 0xd904, // type + 0xf2: 0x70907, // summary + 0xf3: 0x6802, // tt + 0xf4: 0x20809, // translate + 0xf5: 0x1870a, // blockquote + 0xf8: 0x15702, // hr + 0xfa: 0x2705, // tbody + 0xfc: 0x7b07, // picture + 0xfd: 0x5206, // height + 0xfe: 0x19c04, // cite + 0xff: 0x2501, // s + 0x101: 0xff05, // async + 0x102: 0x56f07, // onpaste + 0x103: 0x19507, // onabort + 0x104: 0x2b706, // target + 0x105: 0x14b03, // bdo + 0x106: 0x1f006, // coords + 0x107: 0x5e108, // onresize + 0x108: 0x71908, // template + 0x10a: 0x3a02, // rb + 0x10b: 0x2a50a, // novalidate + 0x10c: 0x460e, // updateviacache + 0x10d: 0x71003, // sup + 0x10e: 0x6c07, // noembed + 0x10f: 0x16b03, // div + 0x110: 0x6f707, // srclang + 0x111: 0x17a09, // draggable + 0x112: 0x67305, // scope + 0x113: 0x5905, // label + 0x114: 0x22f02, // rp + 0x115: 0x23f08, // required + 0x116: 0x3780d, // oncontextmenu + 0x117: 0x5e504, // size + 0x118: 0x5b00a, // spellcheck + 0x119: 0x3f04, // font + 0x11a: 0x9c07, // rowspan + 0x11b: 0x10a07, // default + 0x11d: 0x44307, // oninput + 0x11e: 0x38506, // itemid + 0x11f: 0x5ee04, // code + 0x120: 0xaa07, // acronym + 0x121: 0x3b04, // base + 0x125: 0x2470d, // foreignObject + 0x126: 0x2ca04, // high + 0x127: 0x3cb0e, // referrerpolicy + 0x128: 0x33703, // max + 0x129: 0x59d0a, // onpopstate + 0x12a: 0x2fc02, // h4 + 0x12b: 0x4ac04, // meta + 0x12c: 0x17305, // blink + 0x12e: 0x5f508, // onscroll + 0x12f: 0x59409, // onplaying + 0x130: 0xc113, // allowpaymentrequest + 0x131: 0x19a03, // rtc + 0x132: 0x72b04, // wrap + 0x134: 0x8b08, // frameset + 0x135: 0x32605, // small + 0x137: 0x32006, // header + 0x138: 0x40409, // onemptied + 0x139: 0x34902, // h6 + 0x13a: 0x35908, // multiple + 0x13c: 0x52a06, // prompt + 0x13f: 0x28e09, // challenge + 0x141: 0x4370c, // onhashchange + 0x142: 0x57b07, // content + 0x143: 0x1c90e, // annotation-xml + 
0x144: 0x36607, // onclose + 0x145: 0x14d10, // oncanplaythrough + 0x148: 0x5170b, // onmouseover + 0x149: 0x64f08, // sortable + 0x14a: 0xa402, // mo + 0x14b: 0x2cd02, // h3 + 0x14c: 0x2c406, // script + 0x14d: 0x41d07, // onended + 0x14f: 0x64706, // poster + 0x150: 0x7210a, // workertype + 0x153: 0x1f505, // shape + 0x154: 0x4, // abbr + 0x155: 0x1, // a + 0x156: 0x2bf02, // dd + 0x157: 0x71606, // system + 0x158: 0x4ce0e, // onmessageerror + 0x159: 0x36b08, // seamless + 0x15a: 0x2610a, // formaction + 0x15b: 0x6e106, // option + 0x15c: 0x31d04, // math + 0x15d: 0x62609, // onseeking + 0x15e: 0x39c05, // oncut + 0x15f: 0x44c03, // del + 0x160: 0x11005, // title + 0x161: 0x11505, // audio + 0x162: 0x63108, // selected + 0x165: 0x3b40b, // ondragenter + 0x166: 0x46e06, // spacer + 0x167: 0x4a410, // onloadedmetadata + 0x168: 0x44505, // input + 0x16a: 0x58505, // table + 0x16b: 0x41508, // onchange + 0x16e: 0x5f005, // defer + 0x171: 0x50a0a, // onmouseout + 0x172: 0x20504, // slot + 0x175: 0x3704, // nobr + 0x177: 0x1d707, // command + 0x17a: 0x7207, // details + 0x17b: 0x38104, // menu + 0x17c: 0xb903, // kbd + 0x17d: 0x57304, // step + 0x17e: 0x20303, // ins + 0x17f: 0x13c08, // autoplay + 0x182: 0x34103, // min + 0x183: 0x17404, // link + 0x185: 0x40d10, // ondurationchange + 0x186: 0x9202, // td + 0x187: 0x8b05, // frame + 0x18a: 0x2ab08, // datetime + 0x18b: 0x44509, // inputmode + 0x18c: 0x35108, // readonly + 0x18d: 0x21104, // face + 0x18f: 0x5e505, // sizes + 0x191: 0x4b208, // tabindex + 0x192: 0x6db06, // strong + 0x193: 0xba03, // bdi + 0x194: 0x6fe06, // srcset + 0x196: 0x67202, // ms + 0x197: 0x5b507, // checked + 0x198: 0xb105, // align + 0x199: 0x1e507, // section + 0x19b: 0x6e05, // embed + 0x19d: 0x15e07, // bgsound + 0x1a2: 0x49d04, // list + 0x1a3: 0x61e08, // onseeked + 0x1a4: 0x66009, // onstorage + 0x1a5: 0x2f603, // img + 0x1a6: 0xf505, // tfoot + 0x1a9: 0x26913, // onautocompleteerror + 0x1aa: 0x5fd19, // onsecuritypolicyviolation + 
0x1ad: 0x9303, // dir + 0x1ae: 0x9307, // dirname + 0x1b0: 0x5a70a, // onprogress + 0x1b2: 0x65709, // onstalled + 0x1b5: 0x66f09, // itemscope + 0x1b6: 0x49904, // data + 0x1b7: 0x3d90b, // ondragleave + 0x1b8: 0x56102, // h2 + 0x1b9: 0x2f706, // mglyph + 0x1ba: 0x16502, // is + 0x1bb: 0x6e50e, // onbeforeunload + 0x1bc: 0x2830d, // typemustmatch + 0x1bd: 0x3ab06, // ondrag + 0x1be: 0x5da07, // onreset + 0x1c0: 0x51106, // output + 0x1c1: 0x12907, // sandbox + 0x1c2: 0x1b209, // plaintext + 0x1c4: 0x34c08, // textarea + 0x1c7: 0xd607, // keytype + 0x1c8: 0x34b05, // mtext + 0x1c9: 0x6b10e, // onvolumechange + 0x1ca: 0x1ea06, // onblur + 0x1cb: 0x58a07, // onpause + 0x1cd: 0x5bc0c, // onratechange + 0x1ce: 0x10705, // aside + 0x1cf: 0x6cf07, // optimum + 0x1d1: 0x45809, // onkeydown + 0x1d2: 0x1c407, // colspan + 0x1d3: 0x1004, // main + 0x1d4: 0x66b03, // sub + 0x1d5: 0x25b06, // object + 0x1d6: 0x55c06, // search + 0x1d7: 0x37206, // sorted + 0x1d8: 0x17003, // big + 0x1d9: 0xb01, // u + 0x1db: 0x26b0c, // autocomplete + 0x1dc: 0xcc02, // tr + 0x1dd: 0xf303, // alt + 0x1df: 0x7804, // samp + 0x1e0: 0x5c812, // onrejectionhandled + 0x1e1: 0x4f30c, // onmouseleave + 0x1e2: 0x28007, // enctype + 0x1e3: 0xa208, // nomodule + 0x1e5: 0x3280f, // allowfullscreen + 0x1e6: 0x5f08, // optgroup + 0x1e8: 0x27c0b, // formenctype + 0x1e9: 0x18106, // legend + 0x1ea: 0x10306, // canvas + 0x1eb: 0x6607, // pattern + 0x1ec: 0x2c208, // noscript + 0x1ed: 0x601, // i + 0x1ee: 0x5d602, // dl + 0x1ef: 0xa702, // ul + 0x1f2: 0x52209, // onmouseup + 0x1f4: 0x1ba05, // track + 0x1f7: 0x3a10a, // ondblclick + 0x1f8: 0x3bf0a, // ondragexit + 0x1fa: 0x8703, // dfn + 0x1fc: 0x26506, // action + 0x1fd: 0x35004, // area + 0x1fe: 0x31607, // marquee + 0x1ff: 0x16d03, // var } const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" + @@ -758,26 +760,26 @@ const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" 
"dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" + "bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" + "penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" + - "ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" + - "ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" + - "ignObjectforeignobjectformactionautocompleteerrorformenctype" + - "mustmatchallengeformmethodformnovalidatetimeformtargethgroup" + - "osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" + - "ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" + - "inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" + - "extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" + - "enterondragexitemreferrerpolicyondragleaveondragoverondragst" + - "articleondropzonemptiedondurationchangeonendedonerroronfocus" + - "paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" + - "spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" + - "onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" + - "usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" + - "seoveronmouseupromptonmousewheelonofflineononlineonpagehides" + - "classectionbluronpageshowbronpastepublicontenteditableonpaus" + - "emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" + - "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" + - "violationauxclickonseekedonseekingonselectedonshowidth6onsor" + - "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" + - "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" + - "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" + - "arysupsvgsystemplateworkertypewrap" + "ntrolsectionblurcoordshapecrossoriginslotranslatefacenterfie" + + "ldsetfigcaptionafterprintegrityfigurequiredforeignObjectfore" + + "ignobjectformactionautocompleteerrorformenctypemustmatchalle" + + "ngeformmethodformnovalidatetimeformtargethiddenoscripthigh3h" + + 
"reflanghttp-equivideonclickiframeimageimglyph4isindexismappl" + + "etitemtypemarqueematheadersmallowfullscreenmaxlength5minleng" + + "th6mtextareadonlymultiplemutedoncloseamlessortedoncontextmen" + + "uitemidoncopyoncuechangeoncutondblclickondragendondragentero" + + "ndragexitemreferrerpolicyondragleaveondragoverondragstarticl" + + "eondropzonemptiedondurationchangeonendedonerroronfocusourceo" + + "nhashchangeoninputmodeloninvalidonkeydownloadonkeypresspacer" + + "onkeyupreloadonlanguagechangeonloadeddatalistingonloadedmeta" + + "databindexonloadendonloadstartonmessageerroronmousedownonmou" + + "seenteronmouseleaveonmousemoveonmouseoutputonmouseoveronmous" + + "eupromptonmousewheelonofflineononlineonpagehidesclassearch2o" + + "npageshowbronpastepublicontenteditableonpausemaponplayingonp" + + "opstateonprogresspellcheckedonratechangeonrejectionhandledon" + + "resetonresizesrcdocodeferonscrollonsecuritypolicyviolationau" + + "xclickonseekedonseekingonselectedonshowidthgrouposteronsorta" + + "bleonstalledonstorageonsubmitemscopedonsuspendontoggleonunha" + + "ndledrejectionbeforeprintonunloadonvolumechangeonwaitingonwh" + + "eeloptimumanifestrongoptionbeforeunloaddressrclangsrcsetstyl" + + "esummarysupsvgsystemplateworkertypewrap" diff --git a/go-controller/vendor/golang.org/x/net/html/doc.go b/go-controller/vendor/golang.org/x/net/html/doc.go index 3a7e5ab176..885c4c5936 100644 --- a/go-controller/vendor/golang.org/x/net/html/doc.go +++ b/go-controller/vendor/golang.org/x/net/html/doc.go @@ -78,16 +78,11 @@ example, to process each anchor node in depth-first order: if err != nil { // ... } - var f func(*html.Node) - f = func(n *html.Node) { + for n := range doc.Descendants() { if n.Type == html.ElementNode && n.Data == "a" { // Do something with n... 
} - for c := n.FirstChild; c != nil; c = c.NextSibling { - f(c) - } } - f(doc) The relevant specifications include: https://html.spec.whatwg.org/multipage/syntax.html and diff --git a/go-controller/vendor/golang.org/x/net/html/doctype.go b/go-controller/vendor/golang.org/x/net/html/doctype.go index c484e5a94f..bca3ae9a0c 100644 --- a/go-controller/vendor/golang.org/x/net/html/doctype.go +++ b/go-controller/vendor/golang.org/x/net/html/doctype.go @@ -87,7 +87,7 @@ func parseDoctype(s string) (n *Node, quirks bool) { } } if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && - strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { + strings.EqualFold(lastAttr.Val, "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd") { quirks = true } } diff --git a/go-controller/vendor/golang.org/x/net/html/foreign.go b/go-controller/vendor/golang.org/x/net/html/foreign.go index 9da9e9dc42..e8515d8e88 100644 --- a/go-controller/vendor/golang.org/x/net/html/foreign.go +++ b/go-controller/vendor/golang.org/x/net/html/foreign.go @@ -40,8 +40,7 @@ func htmlIntegrationPoint(n *Node) bool { if n.Data == "annotation-xml" { for _, a := range n.Attr { if a.Key == "encoding" { - val := strings.ToLower(a.Val) - if val == "text/html" || val == "application/xhtml+xml" { + if strings.EqualFold(a.Val, "text/html") || strings.EqualFold(a.Val, "application/xhtml+xml") { return true } } diff --git a/go-controller/vendor/golang.org/x/net/html/iter.go b/go-controller/vendor/golang.org/x/net/html/iter.go new file mode 100644 index 0000000000..54be8fd30f --- /dev/null +++ b/go-controller/vendor/golang.org/x/net/html/iter.go @@ -0,0 +1,56 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.23 + +package html + +import "iter" + +// Ancestors returns an iterator over the ancestors of n, starting with n.Parent. 
+// +// Mutating a Node or its parents while iterating may have unexpected results. +func (n *Node) Ancestors() iter.Seq[*Node] { + _ = n.Parent // eager nil check + + return func(yield func(*Node) bool) { + for p := n.Parent; p != nil && yield(p); p = p.Parent { + } + } +} + +// ChildNodes returns an iterator over the immediate children of n, +// starting with n.FirstChild. +// +// Mutating a Node or its children while iterating may have unexpected results. +func (n *Node) ChildNodes() iter.Seq[*Node] { + _ = n.FirstChild // eager nil check + + return func(yield func(*Node) bool) { + for c := n.FirstChild; c != nil && yield(c); c = c.NextSibling { + } + } + +} + +// Descendants returns an iterator over all nodes recursively beneath +// n, excluding n itself. Nodes are visited in depth-first preorder. +// +// Mutating a Node or its descendants while iterating may have unexpected results. +func (n *Node) Descendants() iter.Seq[*Node] { + _ = n.FirstChild // eager nil check + + return func(yield func(*Node) bool) { + n.descendants(yield) + } +} + +func (n *Node) descendants(yield func(*Node) bool) bool { + for c := range n.ChildNodes() { + if !yield(c) || !c.descendants(yield) { + return false + } + } + return true +} diff --git a/go-controller/vendor/golang.org/x/net/html/node.go b/go-controller/vendor/golang.org/x/net/html/node.go index 1350eef22c..77741a1950 100644 --- a/go-controller/vendor/golang.org/x/net/html/node.go +++ b/go-controller/vendor/golang.org/x/net/html/node.go @@ -38,6 +38,10 @@ var scopeMarker = Node{Type: scopeMarkerNode} // that it looks like "a". - if z.err == nil && z.buf[z.raw.end-2] == '/' { + // Look for a self-closing token (e.g.
). + // + // Originally, we did this by just checking that the last character of the + // tag (ignoring the closing bracket) was a solidus (/) character, but this + // is not always accurate. + // + // We need to be careful that we don't misinterpret a non-self-closing tag + // as self-closing, as can happen if the tag contains unquoted attribute + // values (i.e.

). + // + // To avoid this, we check that the last non-bracket character of the tag + // (z.raw.end-2) isn't the same character as the last non-quote character of + // the last attribute of the tag (z.pendingAttr[1].end-1), if the tag has + // attributes. + nAttrs := len(z.attr) + if z.err == nil && z.buf[z.raw.end-2] == '/' && (nAttrs == 0 || z.raw.end-2 != z.attr[nAttrs-1][1].end-1) { return SelfClosingTagToken } return StartTagToken diff --git a/go-controller/vendor/golang.org/x/net/http2/client_conn_pool.go b/go-controller/vendor/golang.org/x/net/http2/client_conn_pool.go index 780968d6c1..e81b73e6a7 100644 --- a/go-controller/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/go-controller/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -8,8 +8,8 @@ package http2 import ( "context" - "crypto/tls" "errors" + "net" "net/http" "sync" ) @@ -158,7 +158,7 @@ func (c *dialCall) dial(ctx context.Context, addr string) { // This code decides which ones live or die. // The return value used is whether c was used. // c is never closed. 
-func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { +func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c net.Conn) (used bool, err error) { p.mu.Lock() for _, cc := range p.conns[key] { if cc.CanTakeNewRequest() { @@ -194,8 +194,8 @@ type addConnCall struct { err error } -func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { - cc, err := t.NewClientConn(tc) +func (c *addConnCall) run(t *Transport, key string, nc net.Conn) { + cc, err := t.NewClientConn(nc) p := c.p p.mu.Lock() diff --git a/go-controller/vendor/golang.org/x/net/http2/config.go b/go-controller/vendor/golang.org/x/net/http2/config.go index de58dfb8dc..ca645d9a1a 100644 --- a/go-controller/vendor/golang.org/x/net/http2/config.go +++ b/go-controller/vendor/golang.org/x/net/http2/config.go @@ -60,7 +60,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config { return conf } -// configFromServer merges configuration settings from h2 and h2.t1.HTTP2 +// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2 // (the net/http Transport). func configFromTransport(h2 *Transport) http2Config { conf := http2Config{ diff --git a/go-controller/vendor/golang.org/x/net/http2/config_go124.go b/go-controller/vendor/golang.org/x/net/http2/config_go124.go index e3784123c8..5b516c55ff 100644 --- a/go-controller/vendor/golang.org/x/net/http2/config_go124.go +++ b/go-controller/vendor/golang.org/x/net/http2/config_go124.go @@ -13,7 +13,7 @@ func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { fillNetHTTPConfig(conf, srv.HTTP2) } -// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2. +// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2. 
func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { fillNetHTTPConfig(conf, tr.HTTP2) } diff --git a/go-controller/vendor/golang.org/x/net/http2/frame.go b/go-controller/vendor/golang.org/x/net/http2/frame.go index 105c3b279c..97bd8b06f7 100644 --- a/go-controller/vendor/golang.org/x/net/http2/frame.go +++ b/go-controller/vendor/golang.org/x/net/http2/frame.go @@ -225,6 +225,11 @@ var fhBytes = sync.Pool{ }, } +func invalidHTTP1LookingFrameHeader() FrameHeader { + fh, _ := readFrameHeader(make([]byte, frameHeaderLen), strings.NewReader("HTTP/1.1 ")) + return fh +} + // ReadFrameHeader reads 9 bytes from r and returns a FrameHeader. // Most users should use Framer.ReadFrame instead. func ReadFrameHeader(r io.Reader) (FrameHeader, error) { @@ -503,10 +508,16 @@ func (fr *Framer) ReadFrame() (Frame, error) { return nil, err } if fh.Length > fr.maxReadSize { + if fh == invalidHTTP1LookingFrameHeader() { + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + } return nil, ErrFrameTooLarge } payload := fr.getReadBuf(fh.Length) if _, err := io.ReadFull(fr.r, payload); err != nil { + if fh == invalidHTTP1LookingFrameHeader() { + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + } return nil, err } f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload) @@ -1490,7 +1501,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { pf := mh.PseudoFields() for i, hf := range pf { switch hf.Name { - case ":method", ":path", ":scheme", ":authority": + case ":method", ":path", ":scheme", ":authority", ":protocol": isRequest = true case ":status": isResponse = true @@ -1498,7 +1509,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { return pseudoHeaderError(hf.Name) } // Check for duplicates. - // This would be a bad algorithm, but N is 4. 
+ // This would be a bad algorithm, but N is 5. // And this doesn't allocate. for _, hf2 := range pf[:i] { if hf.Name == hf2.Name { diff --git a/go-controller/vendor/golang.org/x/net/http2/http2.go b/go-controller/vendor/golang.org/x/net/http2/http2.go index 7688c356b7..6c18ea230b 100644 --- a/go-controller/vendor/golang.org/x/net/http2/http2.go +++ b/go-controller/vendor/golang.org/x/net/http2/http2.go @@ -38,6 +38,15 @@ var ( logFrameWrites bool logFrameReads bool inTests bool + + // Enabling extended CONNECT by causes browsers to attempt to use + // WebSockets-over-HTTP/2. This results in problems when the server's websocket + // package doesn't support extended CONNECT. + // + // Disable extended CONNECT by default for now. + // + // Issue #71128. + disableExtendedConnectProtocol = true ) func init() { @@ -50,6 +59,9 @@ func init() { logFrameWrites = true logFrameReads = true } + if strings.Contains(e, "http2xconnect=1") { + disableExtendedConnectProtocol = false + } } const ( @@ -141,6 +153,10 @@ func (s Setting) Valid() error { if s.Val < 16384 || s.Val > 1<<24-1 { return ConnectionError(ErrCodeProtocol) } + case SettingEnableConnectProtocol: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } } return nil } @@ -150,21 +166,23 @@ func (s Setting) Valid() error { type SettingID uint16 const ( - SettingHeaderTableSize SettingID = 0x1 - SettingEnablePush SettingID = 0x2 - SettingMaxConcurrentStreams SettingID = 0x3 - SettingInitialWindowSize SettingID = 0x4 - SettingMaxFrameSize SettingID = 0x5 - SettingMaxHeaderListSize SettingID = 0x6 + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 + SettingEnableConnectProtocol SettingID = 0x8 ) var settingName = map[SettingID]string{ - SettingHeaderTableSize: "HEADER_TABLE_SIZE", - SettingEnablePush: 
"ENABLE_PUSH", - SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", - SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", - SettingMaxFrameSize: "MAX_FRAME_SIZE", - SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingEnableConnectProtocol: "ENABLE_CONNECT_PROTOCOL", } func (s SettingID) String() string { @@ -397,23 +415,6 @@ func (s *sorter) SortStrings(ss []string) { s.v = save } -// validPseudoPath reports whether v is a valid :path pseudo-header -// value. It must be either: -// -// - a non-empty string starting with '/' -// - the string '*', for OPTIONS requests. -// -// For now this is only used a quick check for deciding when to clean -// up Opaque URLs before sending requests from the Transport. -// See golang.org/issue/16847 -// -// We used to enforce that the path also didn't start with "//", but -// Google's GFE accepts such paths and Chrome sends them, so ignore -// that part of the spec. See golang.org/issue/19103. -func validPseudoPath(v string) bool { - return (len(v) > 0 && v[0] == '/') || v == "*" -} - // incomparable is a zero-width, non-comparable type. Adding it to a struct // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). 
diff --git a/go-controller/vendor/golang.org/x/net/http2/server.go b/go-controller/vendor/golang.org/x/net/http2/server.go index 617b4a4762..51fca38f61 100644 --- a/go-controller/vendor/golang.org/x/net/http2/server.go +++ b/go-controller/vendor/golang.org/x/net/http2/server.go @@ -50,6 +50,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" + "golang.org/x/net/internal/httpcommon" ) const ( @@ -306,7 +307,7 @@ func ConfigureServer(s *http.Server, conf *Server) error { if s.TLSNextProto == nil { s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} } - protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler := func(hs *http.Server, c net.Conn, h http.Handler, sawClientPreface bool) { if testHookOnConn != nil { testHookOnConn() } @@ -323,12 +324,31 @@ func ConfigureServer(s *http.Server, conf *Server) error { ctx = bc.BaseContext() } conf.ServeConn(c, &ServeConnOpts{ - Context: ctx, - Handler: h, - BaseConfig: hs, + Context: ctx, + Handler: h, + BaseConfig: hs, + SawClientPreface: sawClientPreface, }) } - s.TLSNextProto[NextProtoTLS] = protoHandler + s.TLSNextProto[NextProtoTLS] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler(hs, c, h, false) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + // + // A connection passed in this method has already had the HTTP/2 preface read from it. 
+ s.TLSNextProto[nextProtoUnencryptedHTTP2] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + if lg := hs.ErrorLog; lg != nil { + lg.Print(err) + } else { + log.Print(err) + } + go c.Close() + return + } + protoHandler(hs, nc, h, true) + } return nil } @@ -793,8 +813,7 @@ const maxCachedCanonicalHeadersKeysSize = 2048 func (sc *serverConn) canonicalHeader(v string) string { sc.serveG.check() - buildCommonHeaderMapsOnce() - cv, ok := commonCanonHeader[v] + cv, ok := httpcommon.CachedCanonicalHeader(v) if ok { return cv } @@ -913,14 +932,18 @@ func (sc *serverConn) serve(conf http2Config) { sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) } + settings := writeSettings{ + {SettingMaxFrameSize, conf.MaxReadFrameSize}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, + } + if !disableExtendedConnectProtocol { + settings = append(settings, Setting{SettingEnableConnectProtocol, 1}) + } sc.writeFrame(FrameWriteRequest{ - write: writeSettings{ - {SettingMaxFrameSize, conf.MaxReadFrameSize}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, - {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, - }, + write: settings, }) sc.unackedSettings++ @@ -1045,7 +1068,10 @@ func (sc *serverConn) serve(conf http2Config) { func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { if sc.pingSent { - sc.vlogf("timeout waiting for PING response") + sc.logf("timeout waiting for PING response") + if f := sc.countErrorFunc; f != nil { + f("conn_close_lost_ping") + } sc.conn.Close() return } @@ -1782,6 +1808,9 @@ func (sc *serverConn) processSetting(s Setting) error { 
sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 case SettingMaxHeaderListSize: sc.peerMaxHeaderListSize = s.Val + case SettingEnableConnectProtocol: + // Receipt of this parameter by a server does not + // have any impact default: // Unknown setting: "An endpoint that receives a SETTINGS // frame with any unknown or unsupported identifier MUST @@ -2207,19 +2236,25 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { sc.serveG.check() - rp := requestParam{ - method: f.PseudoValue("method"), - scheme: f.PseudoValue("scheme"), - authority: f.PseudoValue("authority"), - path: f.PseudoValue("path"), + rp := httpcommon.ServerRequestParam{ + Method: f.PseudoValue("method"), + Scheme: f.PseudoValue("scheme"), + Authority: f.PseudoValue("authority"), + Path: f.PseudoValue("path"), + Protocol: f.PseudoValue("protocol"), + } + + // extended connect is disabled, so we should not see :protocol + if disableExtendedConnectProtocol && rp.Protocol != "" { + return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } - isConnect := rp.method == "CONNECT" + isConnect := rp.Method == "CONNECT" if isConnect { - if rp.path != "" || rp.scheme != "" || rp.authority == "" { + if rp.Protocol == "" && (rp.Path != "" || rp.Scheme != "" || rp.Authority == "") { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } - } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { + } else if rp.Method == "" || rp.Path == "" || (rp.Scheme != "https" && rp.Scheme != "http") { // See 8.1.2.6 Malformed Requests and Responses: // // Malformed requests or responses that are detected @@ -2233,12 +2268,16 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return nil, nil, sc.countError("bad_path_method", 
streamError(f.StreamID, ErrCodeProtocol)) } - rp.header = make(http.Header) + header := make(http.Header) + rp.Header = header for _, hf := range f.RegularFields() { - rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + header.Add(sc.canonicalHeader(hf.Name), hf.Value) } - if rp.authority == "" { - rp.authority = rp.header.Get("Host") + if rp.Authority == "" { + rp.Authority = header.Get("Host") + } + if rp.Protocol != "" { + header.Set(":protocol", rp.Protocol) } rw, req, err := sc.newWriterAndRequestNoBody(st, rp) @@ -2247,7 +2286,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res } bodyOpen := !f.StreamEnded() if bodyOpen { - if vv, ok := rp.header["Content-Length"]; ok { + if vv, ok := rp.Header["Content-Length"]; ok { if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil { req.ContentLength = int64(cl) } else { @@ -2263,83 +2302,38 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return rw, req, nil } -type requestParam struct { - method string - scheme, authority, path string - header http.Header -} - -func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp httpcommon.ServerRequestParam) (*responseWriter, *http.Request, error) { sc.serveG.check() var tlsState *tls.ConnectionState // nil if not scheme https - if rp.scheme == "https" { + if rp.Scheme == "https" { tlsState = sc.tlsState } - needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue") - if needsContinue { - rp.header.Del("Expect") - } - // Merge Cookie headers into one "; "-delimited value. 
- if cookies := rp.header["Cookie"]; len(cookies) > 1 { - rp.header.Set("Cookie", strings.Join(cookies, "; ")) - } - - // Setup Trailers - var trailer http.Header - for _, v := range rp.header["Trailer"] { - for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(textproto.TrimString(key)) - switch key { - case "Transfer-Encoding", "Trailer", "Content-Length": - // Bogus. (copy of http1 rules) - // Ignore. - default: - if trailer == nil { - trailer = make(http.Header) - } - trailer[key] = nil - } - } - } - delete(rp.header, "Trailer") - - var url_ *url.URL - var requestURI string - if rp.method == "CONNECT" { - url_ = &url.URL{Host: rp.authority} - requestURI = rp.authority // mimic HTTP/1 server behavior - } else { - var err error - url_, err = url.ParseRequestURI(rp.path) - if err != nil { - return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol)) - } - requestURI = rp.path + res := httpcommon.NewServerRequest(rp) + if res.InvalidReason != "" { + return nil, nil, sc.countError(res.InvalidReason, streamError(st.id, ErrCodeProtocol)) } body := &requestBody{ conn: sc, stream: st, - needsContinue: needsContinue, + needsContinue: res.NeedsContinue, } - req := &http.Request{ - Method: rp.method, - URL: url_, + req := (&http.Request{ + Method: rp.Method, + URL: res.URL, RemoteAddr: sc.remoteAddrStr, - Header: rp.header, - RequestURI: requestURI, + Header: rp.Header, + RequestURI: res.RequestURI, Proto: "HTTP/2.0", ProtoMajor: 2, ProtoMinor: 0, TLS: tlsState, - Host: rp.authority, + Host: rp.Authority, Body: body, - Trailer: trailer, - } - req = req.WithContext(st.ctx) - + Trailer: res.Trailer, + }).WithContext(st.ctx) rw := sc.newResponseWriter(st, req) return rw, req, nil } @@ -2880,6 +2874,11 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { return nil } +func (w *responseWriter) EnableFullDuplex() error { + // We always support full duplex responses, so this is a no-op. 
+ return nil +} + func (w *responseWriter) Flush() { w.FlushError() } @@ -3229,12 +3228,12 @@ func (sc *serverConn) startPush(msg *startPushRequest) { // we start in "half closed (remote)" for simplicity. // See further comments at the definition of stateHalfClosedRemote. promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) - rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ - method: msg.method, - scheme: msg.url.Scheme, - authority: msg.url.Host, - path: msg.url.RequestURI(), - header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + rw, req, err := sc.newWriterAndRequestNoBody(promised, httpcommon.ServerRequestParam{ + Method: msg.method, + Scheme: msg.url.Scheme, + Authority: msg.url.Host, + Path: msg.url.RequestURI(), + Header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE }) if err != nil { // Should not happen, since we've already validated msg.url. 
diff --git a/go-controller/vendor/golang.org/x/net/http2/transport.go b/go-controller/vendor/golang.org/x/net/http2/transport.go index 0c5f64aa8b..f26356b9cd 100644 --- a/go-controller/vendor/golang.org/x/net/http2/transport.go +++ b/go-controller/vendor/golang.org/x/net/http2/transport.go @@ -25,7 +25,6 @@ import ( "net/http" "net/http/httptrace" "net/textproto" - "sort" "strconv" "strings" "sync" @@ -35,6 +34,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" "golang.org/x/net/idna" + "golang.org/x/net/internal/httpcommon" ) const ( @@ -202,6 +202,20 @@ func (t *Transport) markNewGoroutine() { } } +func (t *Transport) now() time.Time { + if t != nil && t.transportTestHooks != nil { + return t.transportTestHooks.group.Now() + } + return time.Now() +} + +func (t *Transport) timeSince(when time.Time) time.Duration { + if t != nil && t.transportTestHooks != nil { + return t.now().Sub(when) + } + return time.Since(when) +} + // newTimer creates a new time.Timer, or a synthetic timer in tests. 
func (t *Transport) newTimer(d time.Duration) timer { if t.transportTestHooks != nil { @@ -281,8 +295,8 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") } - upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { - addr := authorityAddr("https", authority) + upgradeFn := func(scheme, authority string, c net.Conn) http.RoundTripper { + addr := authorityAddr(scheme, authority) if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { go c.Close() return erringRoundTripper{err} @@ -293,18 +307,37 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { // was unknown) go c.Close() } + if scheme == "http" { + return (*unencryptedTransport)(t2) + } return t2 } - if m := t1.TLSNextProto; len(m) == 0 { - t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ - "h2": upgradeFn, + if t1.TLSNextProto == nil { + t1.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper) + } + t1.TLSNextProto[NextProtoTLS] = func(authority string, c *tls.Conn) http.RoundTripper { + return upgradeFn("https", authority, c) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + t1.TLSNextProto[nextProtoUnencryptedHTTP2] = func(authority string, c *tls.Conn) http.RoundTripper { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + go c.Close() + return erringRoundTripper{err} } - } else { - m["h2"] = upgradeFn + return upgradeFn("http", authority, nc) } return t2, nil } +// unencryptedTransport is a Transport with a RoundTrip method that +// always permits http:// URLs. 
+type unencryptedTransport Transport + +func (t *unencryptedTransport) RoundTrip(req *http.Request) (*http.Response, error) { + return (*Transport)(t).RoundTripOpt(req, RoundTripOpt{allowHTTP: true}) +} + func (t *Transport) connPool() ClientConnPool { t.connPoolOnce.Do(t.initConnPool) return t.connPoolOrDef @@ -324,7 +357,7 @@ type ClientConn struct { t *Transport tconn net.Conn // usually *tls.Conn, except specialized impls tlsState *tls.ConnectionState // nil only for specialized impls - reused uint32 // whether conn is being reused; atomic + atomicReused uint32 // whether conn is being reused; atomic singleUse bool // whether being used for a single http.Request getConnCalled bool // used by clientConnPool @@ -335,25 +368,27 @@ type ClientConn struct { idleTimeout time.Duration // or 0 for never idleTimer timer - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow outflow // our conn-level flow control quota (cs.outflow is per stream) - inflow inflow // peer's conn-level flow control - doNotReuse bool // whether conn is marked to not be reused for any future requests - closing bool - closed bool - seenSettings bool // true if we've seen a settings frame, false otherwise - wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip - nextStreamID uint32 - pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams - pings map[[8]byte]chan struct{} // in flight ping data to notification channel - br *bufio.Reader - lastActive time.Time - lastIdle time.Time // time last idle + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow outflow // our 
conn-level flow control quota (cs.outflow is per stream) + inflow inflow // peer's conn-level flow control + doNotReuse bool // whether conn is marked to not be reused for any future requests + closing bool + closed bool + closedOnIdle bool // true if conn was closed for idleness + seenSettings bool // true if we've seen a settings frame, false otherwise + seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + br *bufio.Reader + lastActive time.Time + lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) maxFrameSize uint32 maxConcurrentStreams uint32 @@ -363,6 +398,25 @@ type ClientConn struct { initialStreamRecvWindowSize int32 readIdleTimeout time.Duration pingTimeout time.Duration + extendedConnectAllowed bool + + // rstStreamPingsBlocked works around an unfortunate gRPC behavior. + // gRPC strictly limits the number of PING frames that it will receive. + // The default is two pings per two hours, but the limit resets every time + // the gRPC endpoint sends a HEADERS or DATA frame. See golang/go#70575. + // + // rstStreamPingsBlocked is set after receiving a response to a PING frame + // bundled with an RST_STREAM (see pendingResets below), and cleared after + // receiving a HEADERS or DATA frame. 
+ rstStreamPingsBlocked bool + + // pendingResets is the number of RST_STREAM frames we have sent to the peer, + // without confirming that the peer has received them. When we send a RST_STREAM, + // we bundle it with a PING frame, unless a PING is already in flight. We count + // the reset stream against the connection's concurrency limit until we get + // a PING response. This limits the number of requests we'll try to send to a + // completely unresponsive connection. + pendingResets int // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. @@ -420,12 +474,12 @@ type clientStream struct { sentHeaders bool // owned by clientConnReadLoop: - firstByte bool // got the first response byte - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) - num1xx uint8 // number of 1xx responses seen - readClosed bool // peer sent an END_STREAM flag - readAborted bool // read loop reset the stream + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + readClosed bool // peer sent an END_STREAM flag + readAborted bool // read loop reset the stream + totalHeaderSize int64 // total size of 1xx headers seen trailer http.Header // accumulated trailers resTrailer *http.Header // client's Response.Trailer @@ -530,6 +584,8 @@ type RoundTripOpt struct { // no cached connection is available, RoundTripOpt // will return ErrNoCachedConn. OnlyCachedConn bool + + allowHTTP bool // allow http:// URLs } func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { @@ -562,7 +618,14 @@ func authorityAddr(scheme string, authority string) (addr string) { // RoundTripOpt is like RoundTrip, but takes options. 
func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + switch req.URL.Scheme { + case "https": + // Always okay. + case "http": + if !t.AllowHTTP && !opt.allowHTTP { + return nil, errors.New("http2: unencrypted HTTP/2 not enabled") + } + default: return nil, errors.New("http2: unsupported scheme") } @@ -573,7 +636,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) return nil, err } - reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) + reused := !atomic.CompareAndSwapUint32(&cc.atomicReused, 0, 1) traceGotConn(req, cc, reused) res, err := cc.RoundTrip(req) if err != nil && retry <= 6 { @@ -598,6 +661,22 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res } } } + if err == errClientConnNotEstablished { + // This ClientConn was created recently, + // this is the first request to use it, + // and the connection is closed and not usable. + // + // In this state, cc.idleTimer will remove the conn from the pool + // when it fires. Stop the timer and remove it here so future requests + // won't try to use this connection. + // + // If the timer has already fired and we're racing it, the redundant + // call to MarkDead is harmless. 
+ if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + t.connPool().MarkDead(cc) + } if err != nil { t.vlogf("RoundTrip failure: %v", err) return nil, err @@ -616,9 +695,10 @@ func (t *Transport) CloseIdleConnections() { } var ( - errClientConnClosed = errors.New("http2: client conn is closed") - errClientConnUnusable = errors.New("http2: client conn not usable") - errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + errClientConnNotEstablished = errors.New("http2: client conn could not be established") + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") ) // shouldRetryRequest is called by RoundTrip when a request fails to get @@ -752,11 +832,13 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. streams: make(map[uint32]*clientStream), singleUse: singleUse, + seenSettingsChan: make(chan struct{}), wantSettingsAck: true, readIdleTimeout: conf.SendPingTimeout, pingTimeout: conf.PingTimeout, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), + lastActive: t.now(), } var group synctestGroupInterface if t.transportTestHooks != nil { @@ -960,7 +1042,7 @@ func (cc *ClientConn) State() ClientConnState { return ClientConnState{ Closed: cc.closed, Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil, - StreamsActive: len(cc.streams), + StreamsActive: len(cc.streams) + cc.pendingResets, StreamsReserved: cc.streamsReserved, StreamsPending: cc.pendingRequests, LastIdle: cc.lastIdle, @@ -992,16 +1074,40 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { // writing it. 
maxConcurrentOkay = true } else { - maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams) + // We can take a new request if the total of + // - active streams; + // - reservation slots for new streams; and + // - streams for which we have sent a RST_STREAM and a PING, + // but received no subsequent frame + // is less than the concurrency limit. + maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) } st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && !cc.doNotReuse && int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && !cc.tooIdleLocked() + + // If this connection has never been used for a request and is closed, + // then let it take a request (which will fail). + // If the conn was closed for idleness, we're racing the idle timer; + // don't try to use the conn. (Issue #70515.) + // + // This avoids a situation where an error early in a connection's lifetime + // goes unreported. + if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed && !cc.closedOnIdle { + st.canTakeNewRequest = true + } + return } +// currentRequestCountLocked reports the number of concurrency slots currently in use, +// including active streams, reserved slots, and reset streams waiting for acknowledgement. +func (cc *ClientConn) currentRequestCountLocked() int { + return len(cc.streams) + cc.streamsReserved + cc.pendingResets +} + func (cc *ClientConn) canTakeNewRequestLocked() bool { st := cc.idleStateLocked() return st.canTakeNewRequest @@ -1014,7 +1120,7 @@ func (cc *ClientConn) tooIdleLocked() bool { // times are compared based on their wall time. We don't want // to reuse a connection that's been sitting idle during // VM/laptop suspend if monotonic time was also frozen. 
- return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout + return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout } // onIdleTimeout is called from a time.AfterFunc goroutine. It will @@ -1052,6 +1158,7 @@ func (cc *ClientConn) closeIfIdle() { return } cc.closed = true + cc.closedOnIdle = true nextID := cc.nextStreamID // TODO: do clients send GOAWAY too? maybe? Just Close: cc.mu.Unlock() @@ -1168,23 +1275,6 @@ func (cc *ClientConn) closeForLostPing() { // exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. var errRequestCanceled = errors.New("net/http: request canceled") -func commaSeparatedTrailers(req *http.Request) (string, error) { - keys := make([]string, 0, len(req.Trailer)) - for k := range req.Trailer { - k = canonicalHeader(k) - switch k { - case "Transfer-Encoding", "Trailer", "Content-Length": - return "", fmt.Errorf("invalid Trailer key %q", k) - } - keys = append(keys, k) - } - if len(keys) > 0 { - sort.Strings(keys) - return strings.Join(keys, ","), nil - } - return "", nil -} - func (cc *ClientConn) responseHeaderTimeout() time.Duration { if cc.t.t1 != nil { return cc.t.t1.ResponseHeaderTimeout @@ -1196,22 +1286,6 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration { return 0 } -// checkConnHeaders checks whether req has any invalid connection-level headers. -// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. -// Certain headers are special-cased as okay but not transmitted later. 
-func checkConnHeaders(req *http.Request) error { - if v := req.Header.Get("Upgrade"); v != "" { - return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) - } - if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { - return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) - } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { - return fmt.Errorf("http2: invalid Connection request header: %q", vv) - } - return nil -} - // actualContentLength returns a sanitized version of // req.ContentLength, where 0 actually means zero (not unknown) and -1 // means unknown. @@ -1257,25 +1331,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) donec: make(chan struct{}), } - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - !cs.isHead { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: https://zlib.net/zlib_faq.html#faq39 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. 
See https://golang.org/issue/8923 - cs.requestedGzip = true - } + cs.requestedGzip = httpcommon.IsRequestGzip(req.Method, req.Header, cc.t.disableCompression()) go cs.doRequest(req, streamf) @@ -1376,6 +1432,8 @@ func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream) cs.cleanupWriteRequest(err) } +var errExtendedConnectNotSupported = errors.New("net/http: extended connect not supported by peer") + // writeRequest sends a request. // // It returns nil after the request is written, the response read, @@ -1387,8 +1445,11 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre cc := cs.cc ctx := cs.ctx - if err := checkConnHeaders(req); err != nil { - return err + // wait for setting frames to be received, a server can change this value later, + // but we just wait for the first settings frame + var isExtendedConnect bool + if req.Method == "CONNECT" && req.Header.Get(":protocol") != "" { + isExtendedConnect = true } // Acquire the new-request lock by writing to reqHeaderMu. 
@@ -1397,6 +1458,18 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } + if isExtendedConnect { + select { + case <-cs.reqCancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() + case <-cc.seenSettingsChan: + if !cc.extendedConnectAllowed { + return errExtendedConnectNotSupported + } + } + } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1535,26 +1608,39 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error { // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is // sent by writeRequestBody below, along with any Trailers, // again in form HEADERS{1}, CONTINUATION{0,}) - trailers, err := commaSeparatedTrailers(req) - if err != nil { - return err - } - hasTrailers := trailers != "" - contentLen := actualContentLength(req) - hasBody := contentLen != 0 - hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen) + cc.hbuf.Reset() + res, err := encodeRequestHeaders(req, cs.requestedGzip, cc.peerMaxHeaderListSize, func(name, value string) { + cc.writeHeader(name, value) + }) if err != nil { - return err + return fmt.Errorf("http2: %w", err) } + hdrs := cc.hbuf.Bytes() // Write the request. 
- endStream := !hasBody && !hasTrailers + endStream := !res.HasBody && !res.HasTrailers cs.sentHeaders = true err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) traceWroteHeaders(cs.trace) return err } +func encodeRequestHeaders(req *http.Request, addGzipHeader bool, peerMaxHeaderListSize uint64, headerf func(name, value string)) (httpcommon.EncodeHeadersResult, error) { + return httpcommon.EncodeHeaders(req.Context(), httpcommon.EncodeHeadersParam{ + Request: httpcommon.Request{ + Header: req.Header, + Trailer: req.Trailer, + URL: req.URL, + Host: req.Host, + Method: req.Method, + ActualContentLength: actualContentLength(req), + }, + AddGzipHeader: addGzipHeader, + PeerMaxHeaderListSize: peerMaxHeaderListSize, + DefaultUserAgent: defaultUserAgent, + }, headerf) +} + // cleanupWriteRequest performs post-request tasks. // // If err (the result of writeRequest) is non-nil and the stream is not closed, @@ -1578,6 +1664,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) { cs.reqBodyClosed = make(chan struct{}) } bodyClosed := cs.reqBodyClosed + closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil cc.mu.Unlock() if mustCloseBody { cs.reqBody.Close() @@ -1602,16 +1689,44 @@ func (cs *clientStream) cleanupWriteRequest(err error) { if cs.sentHeaders { if se, ok := err.(StreamError); ok { if se.Cause != errFromPeer { - cc.writeStreamReset(cs.ID, se.Code, err) + cc.writeStreamReset(cs.ID, se.Code, false, err) } } else { - cc.writeStreamReset(cs.ID, ErrCodeCancel, err) + // We're cancelling an in-flight request. + // + // This could be due to the server becoming unresponsive. + // To avoid sending too many requests on a dead connection, + // we let the request continue to consume a concurrency slot + // until we can confirm the server is still responding. + // We do this by sending a PING frame along with the RST_STREAM + // (unless a ping is already in flight). 
+ // + // For simplicity, we don't bother tracking the PING payload: + // We reset cc.pendingResets any time we receive a PING ACK. + // + // We skip this if the conn is going to be closed on idle, + // because it's short lived and will probably be closed before + // we get the ping response. + ping := false + if !closeOnIdle { + cc.mu.Lock() + // rstStreamPingsBlocked works around a gRPC behavior: + // see comment on the field for details. + if !cc.rstStreamPingsBlocked { + if cc.pendingResets == 0 { + ping = true + } + cc.pendingResets++ + } + cc.mu.Unlock() + } + cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err) } } cs.bufPipe.CloseWithError(err) // no-op if already closed } else { if cs.sentHeaders && !cs.sentEndStream { - cc.writeStreamReset(cs.ID, ErrCodeNo, nil) + cc.writeStreamReset(cs.ID, ErrCodeNo, false, nil) } cs.bufPipe.CloseWithError(errRequestCanceled) } @@ -1633,12 +1748,17 @@ func (cs *clientStream) cleanupWriteRequest(err error) { // Must hold cc.mu. func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { for { - cc.lastActive = time.Now() + if cc.closed && cc.nextStreamID == 1 && cc.streamsReserved == 0 { + // This is the very first request sent to this connection. + // Return a fatal error which aborts the retry loop. 
+ return errClientConnNotEstablished + } + cc.lastActive = cc.t.now() if cc.closed || !cc.canTakeNewRequestLocked() { return errClientConnUnusable } cc.lastIdle = time.Time{} - if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) { + if cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) { return nil } cc.pendingRequests++ @@ -1908,214 +2028,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) } } -func validateHeaders(hdrs http.Header) string { - for k, vv := range hdrs { - if !httpguts.ValidHeaderFieldName(k) { - return fmt.Sprintf("name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, - // because it may be sensitive. - return fmt.Sprintf("value for header %q", k) - } - } - } - return "" -} - -var errNilRequestURL = errors.New("http2: Request.URI is nil") - -// requires cc.wmu be held. -func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { - cc.hbuf.Reset() - if req.URL == nil { - return nil, errNilRequestURL - } - - host := req.Host - if host == "" { - host = req.URL.Host - } - host, err := httpguts.PunycodeHostPort(host) - if err != nil { - return nil, err - } - if !httpguts.ValidHostHeader(host) { - return nil, errors.New("http2: invalid Host header") - } - - var path string - if req.Method != "CONNECT" { - path = req.URL.RequestURI() - if !validPseudoPath(path) { - orig := path - path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) - if !validPseudoPath(path) { - if req.URL.Opaque != "" { - return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) - } else { - return nil, fmt.Errorf("invalid request :path %q", orig) - } - } - } - } - - // Check for any invalid headers+trailers and return an error before we - // potentially pollute our hpack state. 
(We want to be able to - // continue to reuse the hpack encoder for future requests) - if err := validateHeaders(req.Header); err != "" { - return nil, fmt.Errorf("invalid HTTP header %s", err) - } - if err := validateHeaders(req.Trailer); err != "" { - return nil, fmt.Errorf("invalid HTTP trailer %s", err) - } - - enumerateHeaders := func(f func(name, value string)) { - // 8.1.2.3 Request Pseudo-Header Fields - // The :path pseudo-header field includes the path and query parts of the - // target URI (the path-absolute production and optionally a '?' character - // followed by the query production, see Sections 3.3 and 3.4 of - // [RFC3986]). - f(":authority", host) - m := req.Method - if m == "" { - m = http.MethodGet - } - f(":method", m) - if req.Method != "CONNECT" { - f(":path", path) - f(":scheme", req.URL.Scheme) - } - if trailers != "" { - f("trailer", trailers) - } - - var didUA bool - for k, vv := range req.Header { - if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") { - // Host is :authority, already sent. - // Content-Length is automatic, set below. - continue - } else if asciiEqualFold(k, "connection") || - asciiEqualFold(k, "proxy-connection") || - asciiEqualFold(k, "transfer-encoding") || - asciiEqualFold(k, "upgrade") || - asciiEqualFold(k, "keep-alive") { - // Per 8.1.2.2 Connection-Specific Header - // Fields, don't send connection-specific - // fields. We have already checked if any - // are error-worthy so just ignore the rest. - continue - } else if asciiEqualFold(k, "user-agent") { - // Match Go's http1 behavior: at most one - // User-Agent. If set to nil or empty string, - // then omit it. Otherwise if not mentioned, - // include the default (below). 
- didUA = true - if len(vv) < 1 { - continue - } - vv = vv[:1] - if vv[0] == "" { - continue - } - } else if asciiEqualFold(k, "cookie") { - // Per 8.1.2.5 To allow for better compression efficiency, the - // Cookie header field MAY be split into separate header fields, - // each with one or more cookie-pairs. - for _, v := range vv { - for { - p := strings.IndexByte(v, ';') - if p < 0 { - break - } - f("cookie", v[:p]) - p++ - // strip space after semicolon if any. - for p+1 <= len(v) && v[p] == ' ' { - p++ - } - v = v[p:] - } - if len(v) > 0 { - f("cookie", v) - } - } - continue - } - - for _, v := range vv { - f(k, v) - } - } - if shouldSendReqContentLength(req.Method, contentLength) { - f("content-length", strconv.FormatInt(contentLength, 10)) - } - if addGzipHeader { - f("accept-encoding", "gzip") - } - if !didUA { - f("user-agent", defaultUserAgent) - } - } - - // Do a first pass over the headers counting bytes to ensure - // we don't exceed cc.peerMaxHeaderListSize. This is done as a - // separate pass before encoding the headers to prevent - // modifying the hpack state. - hlSize := uint64(0) - enumerateHeaders(func(name, value string) { - hf := hpack.HeaderField{Name: name, Value: value} - hlSize += uint64(hf.Size()) - }) - - if hlSize > cc.peerMaxHeaderListSize { - return nil, errRequestHeaderListSize - } - - trace := httptrace.ContextClientTrace(req.Context()) - traceHeaders := traceHasWroteHeaderField(trace) - - // Header list size is ok. Write the headers. - enumerateHeaders(func(name, value string) { - name, ascii := lowerHeader(name) - if !ascii { - // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header - // field names have to be ASCII characters (just as in HTTP/1.x). - return - } - cc.writeHeader(name, value) - if traceHeaders { - traceWroteHeaderField(trace, name, value) - } - }) - - return cc.hbuf.Bytes(), nil -} - -// shouldSendReqContentLength reports whether the http2.Transport should send -// a "content-length" request header. 
This logic is basically a copy of the net/http -// transferWriter.shouldSendContentLength. -// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). -// -1 means unknown. -func shouldSendReqContentLength(method string, contentLength int64) bool { - if contentLength > 0 { - return true - } - if contentLength < 0 { - return false - } - // For zero bodies, whether we send a content-length depends on the method. - // It also kinda doesn't matter for http2 either way, with END_STREAM. - switch method { - case "POST", "PUT", "PATCH": - return true - default: - return false - } -} - // requires cc.wmu be held. func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { cc.hbuf.Reset() @@ -2132,7 +2044,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { } for k, vv := range trailer { - lowKey, ascii := lowerHeader(k) + lowKey, ascii := httpcommon.LowerHeader(k) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). @@ -2180,10 +2092,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) { if len(cc.streams) != slen-1 { panic("forgetting unknown stream id") } - cc.lastActive = time.Now() + cc.lastActive = cc.t.now() if len(cc.streams) == 0 && cc.idleTimer != nil { cc.idleTimer.Reset(cc.idleTimeout) - cc.lastIdle = time.Now() + cc.lastIdle = cc.t.now() } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. @@ -2243,7 +2155,6 @@ func isEOFOrNetReadError(err error) bool { func (rl *clientConnReadLoop) cleanup() { cc := rl.cc - cc.t.connPool().MarkDead(cc) defer cc.closeConn() defer close(cc.readerDone) @@ -2267,6 +2178,27 @@ func (rl *clientConnReadLoop) cleanup() { } cc.closed = true + // If the connection has never been used, and has been open for only a short time, + // leave it in the connection pool for a little while. 
+ // + // This avoids a situation where new connections are constantly created, + // added to the pool, fail, and are removed from the pool, without any error + // being surfaced to the user. + unusedWaitTime := 5 * time.Second + if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout { + unusedWaitTime = cc.idleTimeout + } + idleTime := cc.t.now().Sub(cc.lastActive) + if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle { + cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { + cc.t.connPool().MarkDead(cc) + }) + } else { + cc.mu.Unlock() // avoid any deadlocks in MarkDead + cc.t.connPool().MarkDead(cc) + cc.mu.Lock() + } + for _, cs := range cc.streams { select { case <-cs.peerClosed: @@ -2278,6 +2210,13 @@ func (rl *clientConnReadLoop) cleanup() { } cc.cond.Broadcast() cc.mu.Unlock() + + if !cc.seenSettings { + // If we have a pending request that wants extended CONNECT, + // let it continue and fail with the connection error. + cc.extendedConnectAllowed = true + close(cc.seenSettingsChan) + } } // countReadFrameError calls Transport.CountError with a string @@ -2324,7 +2263,7 @@ func (rl *clientConnReadLoop) run() error { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(StreamError); ok { - if cs := rl.streamByID(se.StreamID); cs != nil { + if cs := rl.streamByID(se.StreamID, notHeaderOrDataFrame); cs != nil { if se.Cause == nil { se.Cause = cc.fr.errDetail } @@ -2376,7 +2315,7 @@ func (rl *clientConnReadLoop) run() error { } func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) if cs == nil { // We'd get here if we canceled a request while the // server had its response still in flight. 
So if this @@ -2464,7 +2403,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra Status: status + " " + http.StatusText(statusCode), } for _, hf := range regularFields { - key := canonicalHeader(hf.Name) + key := httpcommon.CanonicalHeader(hf.Name) if key == "Trailer" { t := res.Trailer if t == nil { @@ -2472,7 +2411,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra res.Trailer = t } foreachHeaderElement(hf.Value, func(v string) { - t[canonicalHeader(v)] = nil + t[httpcommon.CanonicalHeader(v)] = nil }) } else { vv := header[key] @@ -2494,15 +2433,34 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra if f.StreamEnded() { return nil, errors.New("1xx informational response with END_STREAM flag") } - cs.num1xx++ - const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http - if cs.num1xx > max1xxResponses { - return nil, errors.New("http2: too many 1xx informational responses") - } if fn := cs.get1xxTraceFunc(); fn != nil { + // If the 1xx response is being delivered to the user, + // then they're responsible for limiting the number + // of responses. if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { return nil, err } + } else { + // If the user didn't examine the 1xx response, then we + // limit the size of all 1xx headers. + // + // This differs a bit from the HTTP/1 implementation, which + // limits the size of all 1xx headers plus the final response. + // Use the larger limit of MaxHeaderListSize and + // net/http.Transport.MaxResponseHeaderBytes. 
+ limit := int64(cs.cc.t.maxHeaderListSize()) + if t1 := cs.cc.t.t1; t1 != nil && t1.MaxResponseHeaderBytes > limit { + limit = t1.MaxResponseHeaderBytes + } + for _, h := range f.Fields { + cs.totalHeaderSize += int64(h.Size()) + } + if cs.totalHeaderSize > limit { + if VerboseLogs { + log.Printf("http2: 1xx informational responses too large") + } + return nil, errors.New("header list too large") + } } if statusCode == 100 { traceGot100Continue(cs.trace) @@ -2577,7 +2535,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr trailer := make(http.Header) for _, hf := range f.RegularFields() { - key := canonicalHeader(hf.Name) + key := httpcommon.CanonicalHeader(hf.Name) trailer[key] = append(trailer[key], hf.Value) } cs.trailer = trailer @@ -2686,7 +2644,7 @@ func (b transportResponseBody) Close() error { func (rl *clientConnReadLoop) processData(f *DataFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) data := f.Data() if cs == nil { cc.mu.Lock() @@ -2821,9 +2779,22 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { cs.abortStream(err) } -func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream { +// Constants passed to streamByID for documentation purposes. +const ( + headerOrDataFrame = true + notHeaderOrDataFrame = false +) + +// streamByID returns the stream with the given id, or nil if no stream has that id. +// If headerOrData is true, it clears rst.StreamPingsBlocked. +func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientStream { rl.cc.mu.Lock() defer rl.cc.mu.Unlock() + if headerOrData { + // Work around an unfortunate gRPC behavior. + // See comment on ClientConn.rstStreamPingsBlocked for details. 
+ rl.cc.rstStreamPingsBlocked = false + } cs := rl.cc.streams[id] if cs != nil && !cs.readAborted { return cs @@ -2917,6 +2888,21 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { case SettingHeaderTableSize: cc.henc.SetMaxDynamicTableSize(s.Val) cc.peerMaxHeaderTableSize = s.Val + case SettingEnableConnectProtocol: + if err := s.Valid(); err != nil { + return err + } + // If the peer wants to send us SETTINGS_ENABLE_CONNECT_PROTOCOL, + // we require that it do so in the first SETTINGS frame. + // + // When we attempt to use extended CONNECT, we wait for the first + // SETTINGS frame to see if the server supports it. If we let the + // server enable the feature with a later SETTINGS frame, then + // users will see inconsistent results depending on whether we've + // seen that frame or not. + if !cc.seenSettings { + cc.extendedConnectAllowed = s.Val == 1 + } default: cc.vlogf("Unhandled Setting: %v", s) } @@ -2934,6 +2920,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { // connection can establish to our default. 
cc.maxConcurrentStreams = defaultMaxConcurrentStreams } + close(cc.seenSettingsChan) cc.seenSettings = true } @@ -2942,7 +2929,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if f.StreamID != 0 && cs == nil { return nil } @@ -2971,7 +2958,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { } func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if cs == nil { // TODO: return error if server tries to RST_STREAM an idle stream return nil @@ -3046,6 +3033,12 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error { close(c) delete(cc.pings, f.Data) } + if cc.pendingResets > 0 { + // See clientStream.cleanupWriteRequest. + cc.pendingResets = 0 + cc.rstStreamPingsBlocked = true + cc.cond.Broadcast() + } return nil } cc := rl.cc @@ -3068,20 +3061,27 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { return ConnectionError(ErrCodeProtocol) } -func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { +// writeStreamReset sends a RST_STREAM frame. +// When ping is true, it also sends a PING frame with a random payload. +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, err error) { // TODO: map err to more interesting error codes, once the // HTTP community comes up with some. But currently for // RST_STREAM there's no equivalent to GOAWAY frame's debug // data, and the error codes are all pretty vague ("cancel"). 
cc.wmu.Lock() cc.fr.WriteRSTStream(streamID, code) + if ping { + var payload [8]byte + rand.Read(payload[:]) + cc.fr.WritePing(false, payload) + } cc.bw.Flush() cc.wmu.Unlock() } var ( errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") - errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") + errRequestHeaderListSize = httpcommon.ErrRequestHeaderListSize ) func (cc *ClientConn) logf(format string, args ...interface{}) { @@ -3228,7 +3228,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { cc.mu.Lock() ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Since(cc.lastActive) + ci.IdleTime = cc.t.timeSince(cc.lastActive) } cc.mu.Unlock() @@ -3265,16 +3265,6 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) { } } -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { - return trace != nil && trace.WroteHeaderField != nil -} - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { - if trace != nil && trace.WroteHeaderField != nil { - trace.WroteHeaderField(k, []string{v}) - } -} - func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { if trace != nil { return trace.Got1xxResponse diff --git a/go-controller/vendor/golang.org/x/net/http2/unencrypted.go b/go-controller/vendor/golang.org/x/net/http2/unencrypted.go new file mode 100644 index 0000000000..b2de211613 --- /dev/null +++ b/go-controller/vendor/golang.org/x/net/http2/unencrypted.go @@ -0,0 +1,32 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "crypto/tls" + "errors" + "net" +) + +const nextProtoUnencryptedHTTP2 = "unencrypted_http2" + +// unencryptedNetConnFromTLSConn retrieves a net.Conn wrapped in a *tls.Conn. 
+// +// TLSNextProto functions accept a *tls.Conn. +// +// When passing an unencrypted HTTP/2 connection to a TLSNextProto function, +// we pass a *tls.Conn with an underlying net.Conn containing the unencrypted connection. +// To be extra careful about mistakes (accidentally dropping TLS encryption in a place +// where we want it), the tls.Conn contains a net.Conn with an UnencryptedNetConn method +// that returns the actual connection we want to use. +func unencryptedNetConnFromTLSConn(tc *tls.Conn) (net.Conn, error) { + conner, ok := tc.NetConn().(interface { + UnencryptedNetConn() net.Conn + }) + if !ok { + return nil, errors.New("http2: TLS conn unexpectedly found in unencrypted handoff") + } + return conner.UnencryptedNetConn(), nil +} diff --git a/go-controller/vendor/golang.org/x/net/http2/write.go b/go-controller/vendor/golang.org/x/net/http2/write.go index 6ff6bee7e9..fdb35b9477 100644 --- a/go-controller/vendor/golang.org/x/net/http2/write.go +++ b/go-controller/vendor/golang.org/x/net/http2/write.go @@ -13,6 +13,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" + "golang.org/x/net/internal/httpcommon" ) // writeFramer is implemented by any type that is used to write frames. @@ -351,7 +352,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { } for _, k := range keys { vv := h[k] - k, ascii := lowerHeader(k) + k, ascii := httpcommon.LowerHeader(k) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). diff --git a/go-controller/vendor/golang.org/x/net/internal/httpcommon/ascii.go b/go-controller/vendor/golang.org/x/net/internal/httpcommon/ascii.go new file mode 100644 index 0000000000..ed14da5afc --- /dev/null +++ b/go-controller/vendor/golang.org/x/net/internal/httpcommon/ascii.go @@ -0,0 +1,53 @@ +// Copyright 2025 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httpcommon + +import "strings" + +// The HTTP protocols are defined in terms of ASCII, not Unicode. This file +// contains helper functions which may use Unicode-aware functions which would +// otherwise be unsafe and could introduce vulnerabilities if used improperly. + +// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t +// are equal, ASCII-case-insensitively. +func asciiEqualFold(s, t string) bool { + if len(s) != len(t) { + return false + } + for i := 0; i < len(s); i++ { + if lower(s[i]) != lower(t[i]) { + return false + } + } + return true +} + +// lower returns the ASCII lowercase version of b. +func lower(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// isASCIIPrint returns whether s is ASCII and printable according to +// https://tools.ietf.org/html/rfc20#section-4.2. +func isASCIIPrint(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] < ' ' || s[i] > '~' { + return false + } + } + return true +} + +// asciiToLower returns the lowercase version of s if s is ASCII and printable, +// and whether or not it was. +func asciiToLower(s string) (lower string, ok bool) { + if !isASCIIPrint(s) { + return "", false + } + return strings.ToLower(s), true +} diff --git a/go-controller/vendor/golang.org/x/net/http2/headermap.go b/go-controller/vendor/golang.org/x/net/internal/httpcommon/headermap.go similarity index 74% rename from go-controller/vendor/golang.org/x/net/http2/headermap.go rename to go-controller/vendor/golang.org/x/net/internal/httpcommon/headermap.go index 149b3dd20e..92483d8e41 100644 --- a/go-controller/vendor/golang.org/x/net/http2/headermap.go +++ b/go-controller/vendor/golang.org/x/net/internal/httpcommon/headermap.go @@ -1,11 +1,11 @@ -// Copyright 2014 The Go Authors. All rights reserved. +// Copyright 2025 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package http2 +package httpcommon import ( - "net/http" + "net/textproto" "sync" ) @@ -82,13 +82,15 @@ func buildCommonHeaderMaps() { commonLowerHeader = make(map[string]string, len(common)) commonCanonHeader = make(map[string]string, len(common)) for _, v := range common { - chk := http.CanonicalHeaderKey(v) + chk := textproto.CanonicalMIMEHeaderKey(v) commonLowerHeader[chk] = v commonCanonHeader[v] = chk } } -func lowerHeader(v string) (lower string, ascii bool) { +// LowerHeader returns the lowercase form of a header name, +// used on the wire for HTTP/2 and HTTP/3 requests. +func LowerHeader(v string) (lower string, ascii bool) { buildCommonHeaderMapsOnce() if s, ok := commonLowerHeader[v]; ok { return s, true @@ -96,10 +98,18 @@ func lowerHeader(v string) (lower string, ascii bool) { return asciiToLower(v) } -func canonicalHeader(v string) string { +// CanonicalHeader canonicalizes a header name. (For example, "host" becomes "Host".) +func CanonicalHeader(v string) string { buildCommonHeaderMapsOnce() if s, ok := commonCanonHeader[v]; ok { return s } - return http.CanonicalHeaderKey(v) + return textproto.CanonicalMIMEHeaderKey(v) +} + +// CachedCanonicalHeader returns the canonical form of a well-known header name. +func CachedCanonicalHeader(v string) (string, bool) { + buildCommonHeaderMapsOnce() + s, ok := commonCanonHeader[v] + return s, ok } diff --git a/go-controller/vendor/golang.org/x/net/internal/httpcommon/request.go b/go-controller/vendor/golang.org/x/net/internal/httpcommon/request.go new file mode 100644 index 0000000000..4b70553179 --- /dev/null +++ b/go-controller/vendor/golang.org/x/net/internal/httpcommon/request.go @@ -0,0 +1,467 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package httpcommon + +import ( + "context" + "errors" + "fmt" + "net/http/httptrace" + "net/textproto" + "net/url" + "sort" + "strconv" + "strings" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" +) + +var ( + ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit") +) + +// Request is a subset of http.Request. +// It'd be simpler to pass an *http.Request, of course, but we can't depend on net/http +// without creating a dependency cycle. +type Request struct { + URL *url.URL + Method string + Host string + Header map[string][]string + Trailer map[string][]string + ActualContentLength int64 // 0 means 0, -1 means unknown +} + +// EncodeHeadersParam is parameters to EncodeHeaders. +type EncodeHeadersParam struct { + Request Request + + // AddGzipHeader indicates that an "accept-encoding: gzip" header should be + // added to the request. + AddGzipHeader bool + + // PeerMaxHeaderListSize, when non-zero, is the peer's MAX_HEADER_LIST_SIZE setting. + PeerMaxHeaderListSize uint64 + + // DefaultUserAgent is the User-Agent header to send when the request + // neither contains a User-Agent nor disables it. + DefaultUserAgent string +} + +// EncodeHeadersParam is the result of EncodeHeaders. +type EncodeHeadersResult struct { + HasBody bool + HasTrailers bool +} + +// EncodeHeaders constructs request headers common to HTTP/2 and HTTP/3. +// It validates a request and calls headerf with each pseudo-header and header +// for the request. +// The headerf function is called with the validated, canonicalized header name. +func EncodeHeaders(ctx context.Context, param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) { + req := param.Request + + // Check for invalid connection-level headers. 
+ if err := checkConnHeaders(req.Header); err != nil { + return res, err + } + + if req.URL == nil { + return res, errors.New("Request.URL is nil") + } + + host := req.Host + if host == "" { + host = req.URL.Host + } + host, err := httpguts.PunycodeHostPort(host) + if err != nil { + return res, err + } + if !httpguts.ValidHostHeader(host) { + return res, errors.New("invalid Host header") + } + + // isNormalConnect is true if this is a non-extended CONNECT request. + isNormalConnect := false + var protocol string + if vv := req.Header[":protocol"]; len(vv) > 0 { + protocol = vv[0] + } + if req.Method == "CONNECT" && protocol == "" { + isNormalConnect = true + } else if protocol != "" && req.Method != "CONNECT" { + return res, errors.New("invalid :protocol header in non-CONNECT request") + } + + // Validate the path, except for non-extended CONNECT requests which have no path. + var path string + if !isNormalConnect { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return res, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return res, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers+trailers and return an error before we + // potentially pollute our hpack state. 
(We want to be able to + // continue to reuse the hpack encoder for future requests) + if err := validateHeaders(req.Header); err != "" { + return res, fmt.Errorf("invalid HTTP header %s", err) + } + if err := validateHeaders(req.Trailer); err != "" { + return res, fmt.Errorf("invalid HTTP trailer %s", err) + } + + trailers, err := commaSeparatedTrailers(req.Trailer) + if err != nil { + return res, err + } + + enumerateHeaders := func(f func(name, value string)) { + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production, see Sections 3.3 and 3.4 of + // [RFC3986]). + f(":authority", host) + m := req.Method + if m == "" { + m = "GET" + } + f(":method", m) + if !isNormalConnect { + f(":path", path) + f(":scheme", req.URL.Scheme) + } + if protocol != "" { + f(":protocol", protocol) + } + if trailers != "" { + f("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") { + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + } else if asciiEqualFold(k, "connection") || + asciiEqualFold(k, "proxy-connection") || + asciiEqualFold(k, "transfer-encoding") || + asciiEqualFold(k, "upgrade") || + asciiEqualFold(k, "keep-alive") { + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + } else if asciiEqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). 
+ didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + } else if asciiEqualFold(k, "cookie") { + // Per 8.1.2.5 To allow for better compression efficiency, the + // Cookie header field MAY be split into separate header fields, + // each with one or more cookie-pairs. + for _, v := range vv { + for { + p := strings.IndexByte(v, ';') + if p < 0 { + break + } + f("cookie", v[:p]) + p++ + // strip space after semicolon if any. + for p+1 <= len(v) && v[p] == ' ' { + p++ + } + v = v[p:] + } + if len(v) > 0 { + f("cookie", v) + } + } + continue + } else if k == ":protocol" { + // :protocol pseudo-header was already sent above. + continue + } + + for _, v := range vv { + f(k, v) + } + } + if shouldSendReqContentLength(req.Method, req.ActualContentLength) { + f("content-length", strconv.FormatInt(req.ActualContentLength, 10)) + } + if param.AddGzipHeader { + f("accept-encoding", "gzip") + } + if !didUA { + f("user-agent", param.DefaultUserAgent) + } + } + + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. + if param.PeerMaxHeaderListSize > 0 { + hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + if hlSize > param.PeerMaxHeaderListSize { + return res, ErrRequestHeaderListSize + } + } + + trace := httptrace.ContextClientTrace(ctx) + + // Header list size is ok. Write the headers. + enumerateHeaders(func(name, value string) { + name, ascii := LowerHeader(name) + if !ascii { + // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header + // field names have to be ASCII characters (just as in HTTP/1.x). 
+ return + } + + headerf(name, value) + + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(name, []string{value}) + } + }) + + res.HasBody = req.ActualContentLength != 0 + res.HasTrailers = trailers != "" + return res, nil +} + +// IsRequestGzip reports whether we should add an Accept-Encoding: gzip header +// for a request. +func IsRequestGzip(method string, header map[string][]string, disableCompression bool) bool { + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + if !disableCompression && + len(header["Accept-Encoding"]) == 0 && + len(header["Range"]) == 0 && + method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: https://zlib.net/zlib_faq.html#faq39 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + return true + } + return false +} + +// checkConnHeaders checks whether req has any invalid connection-level headers. +// +// https://www.rfc-editor.org/rfc/rfc9114.html#section-4.2-3 +// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.2.2-1 +// +// Certain headers are special-cased as okay but not transmitted later. +// For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding. 
+func checkConnHeaders(h map[string][]string) error { + if vv := h["Upgrade"]; len(vv) > 0 && (vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("invalid Upgrade request header: %q", vv) + } + if vv := h["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv) + } + if vv := h["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { + return fmt.Errorf("invalid Connection request header: %q", vv) + } + return nil +} + +func commaSeparatedTrailers(trailer map[string][]string) (string, error) { + keys := make([]string, 0, len(trailer)) + for k := range trailer { + k = CanonicalHeader(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return "", fmt.Errorf("invalid Trailer key %q", k) + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + return strings.Join(keys, ","), nil + } + return "", nil +} + +// validPseudoPath reports whether v is a valid :path pseudo-header +// value. It must be either: +// +// - a non-empty string starting with '/' +// - the string '*', for OPTIONS requests. +// +// For now this is only used a quick check for deciding when to clean +// up Opaque URLs before sending requests from the Transport. +// See golang.org/issue/16847 +// +// We used to enforce that the path also didn't start with "//", but +// Google's GFE accepts such paths and Chrome sends them, so ignore +// that part of the spec. See golang.org/issue/19103. 
+func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/') || v == "*" +} + +func validateHeaders(hdrs map[string][]string) string { + for k, vv := range hdrs { + if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { + return fmt.Sprintf("name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, + // because it may be sensitive. + return fmt.Sprintf("value for header %q", k) + } + } + } + return "" +} + +// shouldSendReqContentLength reports whether we should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// ServerRequestParam is parameters to NewServerRequest. +type ServerRequestParam struct { + Method string + Scheme, Authority, Path string + Protocol string + Header map[string][]string +} + +// ServerRequestResult is the result of NewServerRequest. +type ServerRequestResult struct { + // Various http.Request fields. + URL *url.URL + RequestURI string + Trailer map[string][]string + + NeedsContinue bool // client provided an "Expect: 100-continue" header + + // If the request should be rejected, this is a short string suitable for passing + // to the http2 package's CountError function. + // It might be a bit odd to return errors this way rather than returing an error, + // but this ensures we don't forget to include a CountError reason. 
+ InvalidReason string +} + +func NewServerRequest(rp ServerRequestParam) ServerRequestResult { + needsContinue := httpguts.HeaderValuesContainsToken(rp.Header["Expect"], "100-continue") + if needsContinue { + delete(rp.Header, "Expect") + } + // Merge Cookie headers into one "; "-delimited value. + if cookies := rp.Header["Cookie"]; len(cookies) > 1 { + rp.Header["Cookie"] = []string{strings.Join(cookies, "; ")} + } + + // Setup Trailers + var trailer map[string][]string + for _, v := range rp.Header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = textproto.CanonicalMIMEHeaderKey(textproto.TrimString(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(map[string][]string) + } + trailer[key] = nil + } + } + } + delete(rp.Header, "Trailer") + + // "':authority' MUST NOT include the deprecated userinfo subcomponent + // for "http" or "https" schemed URIs." 
+ // https://www.rfc-editor.org/rfc/rfc9113.html#section-8.3.1-2.3.8 + if strings.IndexByte(rp.Authority, '@') != -1 && (rp.Scheme == "http" || rp.Scheme == "https") { + return ServerRequestResult{ + InvalidReason: "userinfo_in_authority", + } + } + + var url_ *url.URL + var requestURI string + if rp.Method == "CONNECT" && rp.Protocol == "" { + url_ = &url.URL{Host: rp.Authority} + requestURI = rp.Authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(rp.Path) + if err != nil { + return ServerRequestResult{ + InvalidReason: "bad_path", + } + } + requestURI = rp.Path + } + + return ServerRequestResult{ + URL: url_, + NeedsContinue: needsContinue, + RequestURI: requestURI, + Trailer: trailer, + } +} diff --git a/go-controller/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go b/go-controller/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go index cebde7634f..3c9576e2d8 100644 --- a/go-controller/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go +++ b/go-controller/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go @@ -4,27 +4,27 @@ package socket type iovec struct { - Base *byte - Len uint64 + Base *byte + Len uint64 } type msghdr struct { - Name *byte - Namelen uint32 - Iov *iovec - Iovlen uint32 - Control *byte - Controllen uint32 - Flags int32 + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 } type cmsghdr struct { - Len uint32 - Level int32 - Type int32 + Len uint32 + Level int32 + Type int32 } const ( - sizeofIovec = 0x10 - sizeofMsghdr = 0x30 + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 ) diff --git a/go-controller/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go b/go-controller/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go index cebde7634f..3c9576e2d8 100644 --- a/go-controller/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go +++ 
b/go-controller/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go @@ -4,27 +4,27 @@ package socket type iovec struct { - Base *byte - Len uint64 + Base *byte + Len uint64 } type msghdr struct { - Name *byte - Namelen uint32 - Iov *iovec - Iovlen uint32 - Control *byte - Controllen uint32 - Flags int32 + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 } type cmsghdr struct { - Len uint32 - Level int32 - Type int32 + Len uint32 + Level int32 + Type int32 } const ( - sizeofIovec = 0x10 - sizeofMsghdr = 0x30 + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 ) diff --git a/go-controller/vendor/golang.org/x/net/proxy/per_host.go b/go-controller/vendor/golang.org/x/net/proxy/per_host.go index d7d4b8b6e3..32bdf435ec 100644 --- a/go-controller/vendor/golang.org/x/net/proxy/per_host.go +++ b/go-controller/vendor/golang.org/x/net/proxy/per_host.go @@ -7,6 +7,7 @@ package proxy import ( "context" "net" + "net/netip" "strings" ) @@ -57,7 +58,8 @@ func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net. } func (p *PerHost) dialerForRequest(host string) Dialer { - if ip := net.ParseIP(host); ip != nil { + if nip, err := netip.ParseAddr(host); err == nil { + ip := net.IP(nip.AsSlice()) for _, net := range p.bypassNetworks { if net.Contains(ip) { return p.bypass @@ -108,8 +110,8 @@ func (p *PerHost) AddFromString(s string) { } continue } - if ip := net.ParseIP(host); ip != nil { - p.AddIP(ip) + if nip, err := netip.ParseAddr(host); err == nil { + p.AddIP(net.IP(nip.AsSlice())) continue } if strings.HasPrefix(host, "*.") { diff --git a/go-controller/vendor/golang.org/x/net/websocket/websocket.go b/go-controller/vendor/golang.org/x/net/websocket/websocket.go index ac76165ceb..3448d20395 100644 --- a/go-controller/vendor/golang.org/x/net/websocket/websocket.go +++ b/go-controller/vendor/golang.org/x/net/websocket/websocket.go @@ -6,9 +6,10 @@ // as specified in RFC 6455. 
// // This package currently lacks some features found in an alternative -// and more actively maintained WebSocket package: +// and more actively maintained WebSocket packages: // -// https://pkg.go.dev/github.com/coder/websocket +// - [github.com/gorilla/websocket] +// - [github.com/coder/websocket] package websocket // import "golang.org/x/net/websocket" import ( diff --git a/go-controller/vendor/golang.org/x/sync/errgroup/errgroup.go b/go-controller/vendor/golang.org/x/sync/errgroup/errgroup.go index 948a3ee63d..a4ea5d14f1 100644 --- a/go-controller/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/go-controller/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -46,7 +46,7 @@ func (g *Group) done() { // returns a non-nil error or the first time Wait returns, whichever occurs // first. func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := withCancelCause(ctx) + ctx, cancel := context.WithCancelCause(ctx) return &Group{cancel: cancel}, ctx } @@ -118,6 +118,7 @@ func (g *Group) TryGo(f func() error) bool { // SetLimit limits the number of active goroutines in this group to at most n. // A negative value indicates no limit. +// A limit of zero will prevent any new goroutines from being added. // // Any subsequent call to the Go method will block until it can add an active // goroutine without exceeding the configured limit. diff --git a/go-controller/vendor/golang.org/x/sync/errgroup/go120.go b/go-controller/vendor/golang.org/x/sync/errgroup/go120.go deleted file mode 100644 index f93c740b63..0000000000 --- a/go-controller/vendor/golang.org/x/sync/errgroup/go120.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - return context.WithCancelCause(parent) -} diff --git a/go-controller/vendor/golang.org/x/sync/errgroup/pre_go120.go b/go-controller/vendor/golang.org/x/sync/errgroup/pre_go120.go deleted file mode 100644 index 88ce33434e..0000000000 --- a/go-controller/vendor/golang.org/x/sync/errgroup/pre_go120.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - ctx, cancel := context.WithCancel(parent) - return ctx, func(error) { cancel() } -} diff --git a/go-controller/vendor/golang.org/x/sys/unix/auxv.go b/go-controller/vendor/golang.org/x/sys/unix/auxv.go new file mode 100644 index 0000000000..37a82528f5 --- /dev/null +++ b/go-controller/vendor/golang.org/x/sys/unix/auxv.go @@ -0,0 +1,36 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import ( + "syscall" + "unsafe" +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs. +// The returned slice is always a fresh copy, owned by the caller. +// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed, +// which happens in some locked-down environments and build modes. 
+func Auxv() ([][2]uintptr, error) { + vec := runtime_getAuxv() + vecLen := len(vec) + + if vecLen == 0 { + return nil, syscall.ENOENT + } + + if vecLen%2 != 0 { + return nil, syscall.EINVAL + } + + result := make([]uintptr, vecLen) + copy(result, vec) + return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil +} diff --git a/go-controller/vendor/golang.org/x/sys/unix/auxv_unsupported.go b/go-controller/vendor/golang.org/x/sys/unix/auxv_unsupported.go new file mode 100644 index 0000000000..1200487f2e --- /dev/null +++ b/go-controller/vendor/golang.org/x/sys/unix/auxv_unsupported.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import "syscall" + +func Auxv() ([][2]uintptr, error) { + return nil, syscall.ENOTSUP +} diff --git a/go-controller/vendor/golang.org/x/sys/unix/ioctl_linux.go b/go-controller/vendor/golang.org/x/sys/unix/ioctl_linux.go index dbe680eab8..7ca4fa12aa 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/go-controller/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -58,6 +58,102 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { return &value, err } +// IoctlGetEthtoolTsInfo fetches ethtool timestamping and PHC +// association for the network device specified by ifname. +func IoctlGetEthtoolTsInfo(fd int, ifname string) (*EthtoolTsInfo, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := EthtoolTsInfo{Cmd: ETHTOOL_GET_TS_INFO} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd) + return &value, err +} + +// IoctlGetHwTstamp retrieves the hardware timestamping configuration +// for the network device specified by ifname. 
+func IoctlGetHwTstamp(fd int, ifname string) (*HwTstampConfig, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := HwTstampConfig{} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCGHWTSTAMP, &ifrd) + return &value, err +} + +// IoctlSetHwTstamp updates the hardware timestamping configuration for +// the network device specified by ifname. +func IoctlSetHwTstamp(fd int, ifname string, cfg *HwTstampConfig) error { + ifr, err := NewIfreq(ifname) + if err != nil { + return err + } + ifrd := ifr.withData(unsafe.Pointer(cfg)) + return ioctlIfreqData(fd, SIOCSHWTSTAMP, &ifrd) +} + +// FdToClockID derives the clock ID from the file descriptor number +// - see clock_gettime(3), FD_TO_CLOCKID macros. The resulting ID is +// suitable for system calls like ClockGettime. +func FdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) } + +// IoctlPtpClockGetcaps returns the description of a given PTP device. +func IoctlPtpClockGetcaps(fd int) (*PtpClockCaps, error) { + var value PtpClockCaps + err := ioctlPtr(fd, PTP_CLOCK_GETCAPS2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetPrecise returns a description of the clock +// offset compared to the system clock. +func IoctlPtpSysOffsetPrecise(fd int) (*PtpSysOffsetPrecise, error) { + var value PtpSysOffsetPrecise + err := ioctlPtr(fd, PTP_SYS_OFFSET_PRECISE2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetExtended returns an extended description of the +// clock offset compared to the system clock. The samples parameter +// specifies the desired number of measurements. +func IoctlPtpSysOffsetExtended(fd int, samples uint) (*PtpSysOffsetExtended, error) { + value := PtpSysOffsetExtended{Samples: uint32(samples)} + err := ioctlPtr(fd, PTP_SYS_OFFSET_EXTENDED2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinGetfunc returns the configuration of the specified +// I/O pin on given PTP device. 
+func IoctlPtpPinGetfunc(fd int, index uint) (*PtpPinDesc, error) { + value := PtpPinDesc{Index: uint32(index)} + err := ioctlPtr(fd, PTP_PIN_GETFUNC2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinSetfunc updates configuration of the specified PTP +// I/O pin. +func IoctlPtpPinSetfunc(fd int, pd *PtpPinDesc) error { + return ioctlPtr(fd, PTP_PIN_SETFUNC2, unsafe.Pointer(pd)) +} + +// IoctlPtpPeroutRequest configures the periodic output mode of the +// PTP I/O pins. +func IoctlPtpPeroutRequest(fd int, r *PtpPeroutRequest) error { + return ioctlPtr(fd, PTP_PEROUT_REQUEST2, unsafe.Pointer(r)) +} + +// IoctlPtpExttsRequest configures the external timestamping mode +// of the PTP I/O pins. +func IoctlPtpExttsRequest(fd int, r *PtpExttsRequest) error { + return ioctlPtr(fd, PTP_EXTTS_REQUEST2, unsafe.Pointer(r)) +} + // IoctlGetWatchdogInfo fetches information about a watchdog device from the // Linux watchdog API. For more information, see: // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. diff --git a/go-controller/vendor/golang.org/x/sys/unix/mkerrors.sh b/go-controller/vendor/golang.org/x/sys/unix/mkerrors.sh index ac54ecaba0..6ab02b6c31 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/go-controller/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -158,6 +158,16 @@ includes_Linux=' #endif #define _GNU_SOURCE +// See the description in unix/linux/types.go +#if defined(__ARM_EABI__) || \ + (defined(__mips__) && (_MIPS_SIM == _ABIO32)) || \ + (defined(__powerpc__) && (!defined(__powerpc64__))) +# ifdef _TIME_BITS +# undef _TIME_BITS +# endif +# define _TIME_BITS 32 +#endif + // is broken on powerpc64, as it fails to include definitions of // these structures. We just include them copied from . 
#if defined(__powerpc__) @@ -256,6 +266,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -527,6 +538,7 @@ ccflags="$@" $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || + $2 ~ /^PTP_/ || $2 ~ /^RAW_PAYLOAD_/ || $2 ~ /^[US]F_/ || $2 ~ /^TP_STATUS_/ || diff --git a/go-controller/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/go-controller/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 97cb916f2c..be8c002070 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/go-controller/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -246,6 +246,18 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return sendfile(outfd, infd, offset, count) } +func Dup3(oldfd, newfd, flags int) error { + if oldfd == newfd || flags&^O_CLOEXEC != 0 { + return EINVAL + } + how := F_DUP2FD + if flags&O_CLOEXEC != 0 { + how = F_DUP2FD_CLOEXEC + } + _, err := fcntl(oldfd, how, newfd) + return err +} + /* * Exposed directly */ diff --git a/go-controller/vendor/golang.org/x/sys/unix/syscall_linux.go b/go-controller/vendor/golang.org/x/sys/unix/syscall_linux.go index f08abd434f..230a94549a 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/go-controller/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1860,6 +1860,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys ClockAdjtime(clockid int32, buf *Timex) (state int, err error) //sys ClockGetres(clockid int32, res *Timespec) (err error) //sys ClockGettime(clockid int32, time *Timespec) (err error) +//sys ClockSettime(clockid int32, time *Timespec) (err error) //sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) //sys Close(fd int) (err error) 
//sys CloseRange(first uint, last uint, flags uint) (err error) diff --git a/go-controller/vendor/golang.org/x/sys/unix/syscall_solaris.go b/go-controller/vendor/golang.org/x/sys/unix/syscall_solaris.go index 21974af064..abc3955477 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/go-controller/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -1102,3 +1102,90 @@ func (s *Strioctl) SetInt(i int) { func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) { return ioctlPtrRet(fd, req, unsafe.Pointer(s)) } + +// Ucred Helpers +// See ucred(3c) and getpeerucred(3c) + +//sys getpeerucred(fd uintptr, ucred *uintptr) (err error) +//sys ucredFree(ucred uintptr) = ucred_free +//sys ucredGet(pid int) (ucred uintptr, err error) = ucred_get +//sys ucredGeteuid(ucred uintptr) (uid int) = ucred_geteuid +//sys ucredGetegid(ucred uintptr) (gid int) = ucred_getegid +//sys ucredGetruid(ucred uintptr) (uid int) = ucred_getruid +//sys ucredGetrgid(ucred uintptr) (gid int) = ucred_getrgid +//sys ucredGetsuid(ucred uintptr) (uid int) = ucred_getsuid +//sys ucredGetsgid(ucred uintptr) (gid int) = ucred_getsgid +//sys ucredGetpid(ucred uintptr) (pid int) = ucred_getpid + +// Ucred is an opaque struct that holds user credentials. +type Ucred struct { + ucred uintptr +} + +// We need to ensure that ucredFree is called on the underlying ucred +// when the Ucred is garbage collected. 
+func ucredFinalizer(u *Ucred) { + ucredFree(u.ucred) +} + +func GetPeerUcred(fd uintptr) (*Ucred, error) { + var ucred uintptr + err := getpeerucred(fd, &ucred) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func UcredGet(pid int) (*Ucred, error) { + ucred, err := ucredGet(pid) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func (u *Ucred) Geteuid() int { + defer runtime.KeepAlive(u) + return ucredGeteuid(u.ucred) +} + +func (u *Ucred) Getruid() int { + defer runtime.KeepAlive(u) + return ucredGetruid(u.ucred) +} + +func (u *Ucred) Getsuid() int { + defer runtime.KeepAlive(u) + return ucredGetsuid(u.ucred) +} + +func (u *Ucred) Getegid() int { + defer runtime.KeepAlive(u) + return ucredGetegid(u.ucred) +} + +func (u *Ucred) Getrgid() int { + defer runtime.KeepAlive(u) + return ucredGetrgid(u.ucred) +} + +func (u *Ucred) Getsgid() int { + defer runtime.KeepAlive(u) + return ucredGetsgid(u.ucred) +} + +func (u *Ucred) Getpid() int { + defer runtime.KeepAlive(u) + return ucredGetpid(u.ucred) +} diff --git a/go-controller/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/go-controller/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 312ae6ac1d..7bf5c04bb0 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/go-controller/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -768,6 +768,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), 
err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + //sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A //sysnb Getgid() (gid int) //sysnb Getpid() (pid int) @@ -816,10 +825,10 @@ func Lstat(path string, stat *Stat_t) (err error) { // for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/ func isSpecialPath(path []byte) (v bool) { var special = [4][8]byte{ - [8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, - [8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, - [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, - [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} + {'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, + {'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, + {'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, + {'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} var i, j int for i = 0; i < len(special); i++ { @@ -3115,3 +3124,90 @@ func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) { //sys Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT //sys Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT //sys Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT + +func fcntlAsIs(fd uintptr, cmd int, arg uintptr) (val int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), arg) + runtime.ExitSyscall() + val = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +func Fcntl(fd uintptr, cmd int, op interface{}) (ret int, err error) { + switch op.(type) { + case *Flock_t: + err = FcntlFlock(fd, cmd, op.(*Flock_t)) + if err != nil { + ret = -1 + } + return + case int: + return FcntlInt(fd, cmd, op.(int)) + case *F_cnvrt: + return fcntlAsIs(fd, cmd, uintptr(unsafe.Pointer(op.(*F_cnvrt)))) + case unsafe.Pointer: + return fcntlAsIs(fd, cmd, uintptr(op.(unsafe.Pointer))) + default: + return -1, EINVAL + } + return +} + +func Sendfile(outfd int, infd int, offset *int64, count 
int) (written int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + return sendfile(outfd, infd, offset, count) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + // TODO: use LE call instead if the call is implemented + originalOffset, err := Seek(infd, 0, SEEK_CUR) + if err != nil { + return -1, err + } + //start reading data from in_fd + if offset != nil { + _, err := Seek(infd, *offset, SEEK_SET) + if err != nil { + return -1, err + } + } + + buf := make([]byte, count) + readBuf := make([]byte, 0) + var n int = 0 + for i := 0; i < count; i += n { + n, err := Read(infd, buf) + if n == 0 { + if err != nil { + return -1, err + } else { // EOF + break + } + } + readBuf = append(readBuf, buf...) + buf = buf[0:0] + } + + n2, err := Write(outfd, readBuf) + if err != nil { + return -1, err + } + + //When sendfile() returns, this variable will be set to the + // offset of the byte following the last byte that was read. 
+ if offset != nil { + *offset = *offset + int64(n) + // If offset is not NULL, then sendfile() does not modify the file + // offset of in_fd + _, err := Seek(infd, originalOffset, SEEK_SET) + if err != nil { + return -1, err + } + } + return n2, nil +} diff --git a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux.go index de3b462489..4f432bfe8f 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -321,6 +321,9 @@ const ( AUDIT_INTEGRITY_STATUS = 0x70a AUDIT_IPC = 0x517 AUDIT_IPC_SET_PERM = 0x51f + AUDIT_IPE_ACCESS = 0x58c + AUDIT_IPE_CONFIG_CHANGE = 0x58d + AUDIT_IPE_POLICY_LOAD = 0x58e AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 @@ -489,6 +492,7 @@ const ( BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 BPF_F_STRICT_ALIGNMENT = 0x1 @@ -1166,6 +1170,7 @@ const ( EXTA = 0xe EXTB = 0xf F2FS_SUPER_MAGIC = 0xf2f52010 + FALLOC_FL_ALLOCATE_RANGE = 0x0 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -1240,6 +1245,7 @@ const ( FAN_REPORT_DFID_NAME = 0xc00 FAN_REPORT_DFID_NAME_TARGET = 0x1e00 FAN_REPORT_DIR_FID = 0x400 + FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 @@ -1325,8 +1331,10 @@ const ( FUSE_SUPER_MAGIC = 0x65735546 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 + F_CREATED_QUERY = 0x404 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 + F_DUPFD_QUERY = 0x403 F_EXLCK = 0x4 F_GETFD = 0x1 F_GETFL = 0x3 @@ -1546,6 +1554,7 @@ const ( IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e IPPROTO_SCTP = 0x84 + IPPROTO_SMC = 0x100 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 @@ -1618,6 +1627,8 @@ const ( IPV6_UNICAST_IF = 0x4c IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a + IPV6_VERSION = 0x60 + 
IPV6_VERSION_MASK = 0xf0 IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 IP_ADD_SOURCE_MEMBERSHIP = 0x27 @@ -1799,6 +1810,8 @@ const ( LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 + LANDLOCK_SCOPE_SIGNAL = 0x2 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef LINUX_REBOOT_CMD_HALT = 0xcdef0123 @@ -1860,6 +1873,7 @@ const ( MADV_UNMERGEABLE = 0xd MADV_WILLNEED = 0x3 MADV_WIPEONFORK = 0x12 + MAP_DROPPABLE = 0x8 MAP_FILE = 0x0 MAP_FIXED = 0x10 MAP_FIXED_NOREPLACE = 0x100000 @@ -1924,6 +1938,7 @@ const ( MNT_FORCE = 0x1 MNT_ID_REQ_SIZE_VER0 = 0x18 MNT_ID_REQ_SIZE_VER1 = 0x20 + MNT_NS_INFO_SIZE_VER0 = 0x10 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -1959,6 +1974,7 @@ const ( MSG_PEEK = 0x2 MSG_PROXY = 0x10 MSG_RST = 0x1000 + MSG_SOCK_DEVMEM = 0x2000000 MSG_SYN = 0x400 MSG_TRUNC = 0x20 MSG_TRYHARD = 0x4 @@ -2075,6 +2091,7 @@ const ( NFC_ATR_REQ_MAXSIZE = 0x40 NFC_ATR_RES_GB_MAXSIZE = 0x2f NFC_ATR_RES_MAXSIZE = 0x40 + NFC_ATS_MAXSIZE = 0x14 NFC_COMM_ACTIVE = 0x0 NFC_COMM_PASSIVE = 0x1 NFC_DEVICE_NAME_MAXSIZE = 0x8 @@ -2155,6 +2172,7 @@ const ( NFNL_SUBSYS_QUEUE = 0x3 NFNL_SUBSYS_ULOG = 0x4 NFS_SUPER_MAGIC = 0x6969 + NFT_BITWISE_BOOL = 0x0 NFT_CHAIN_FLAGS = 0x7 NFT_CHAIN_MAXNAMELEN = 0x100 NFT_CT_MAX = 0x17 @@ -2483,6 +2501,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SHADOW_STACK_STATUS = 0x4a PR_GET_SPECULATION_CTRL = 0x34 PR_GET_TAGGED_ADDR_CTRL = 0x38 PR_GET_THP_DISABLE = 0x2a @@ -2491,6 +2510,7 @@ const ( PR_GET_TIMING = 0xd PR_GET_TSC = 0x19 PR_GET_UNALIGN = 0x5 + PR_LOCK_SHADOW_STACK_STATUS = 0x4c PR_MCE_KILL = 0x21 PR_MCE_KILL_CLEAR = 0x0 PR_MCE_KILL_DEFAULT = 0x2 @@ -2517,6 +2537,8 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_PMLEN_MASK = 0x7f000000 + PR_PMLEN_SHIFT = 
0x18 PR_PPC_DEXCR_CTRL_CLEAR = 0x4 PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10 PR_PPC_DEXCR_CTRL_EDITABLE = 0x1 @@ -2584,6 +2606,7 @@ const ( PR_SET_PTRACER = 0x59616d61 PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SHADOW_STACK_STATUS = 0x4b PR_SET_SPECULATION_CTRL = 0x35 PR_SET_SYSCALL_USER_DISPATCH = 0x3b PR_SET_TAGGED_ADDR_CTRL = 0x37 @@ -2594,6 +2617,9 @@ const ( PR_SET_UNALIGN = 0x6 PR_SET_VMA = 0x53564d41 PR_SET_VMA_ANON_NAME = 0x0 + PR_SHADOW_STACK_ENABLE = 0x1 + PR_SHADOW_STACK_PUSH = 0x4 + PR_SHADOW_STACK_WRITE = 0x2 PR_SME_GET_VL = 0x40 PR_SME_SET_VL = 0x3f PR_SME_SET_VL_ONEXEC = 0x40000 @@ -2625,6 +2651,28 @@ const ( PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 PSTOREFS_MAGIC = 0x6165676c + PTP_CLK_MAGIC = '=' + PTP_ENABLE_FEATURE = 0x1 + PTP_EXTTS_EDGES = 0x6 + PTP_EXTTS_EVENT_VALID = 0x1 + PTP_EXTTS_V1_VALID_FLAGS = 0x7 + PTP_EXTTS_VALID_FLAGS = 0x1f + PTP_EXT_OFFSET = 0x10 + PTP_FALLING_EDGE = 0x4 + PTP_MAX_SAMPLES = 0x19 + PTP_PEROUT_DUTY_CYCLE = 0x2 + PTP_PEROUT_ONE_SHOT = 0x1 + PTP_PEROUT_PHASE = 0x4 + PTP_PEROUT_V1_VALID_FLAGS = 0x0 + PTP_PEROUT_VALID_FLAGS = 0x7 + PTP_PIN_GETFUNC = 0xc0603d06 + PTP_PIN_GETFUNC2 = 0xc0603d0f + PTP_RISING_EDGE = 0x2 + PTP_STRICT_FLAGS = 0x8 + PTP_SYS_OFFSET_EXTENDED = 0xc4c03d09 + PTP_SYS_OFFSET_EXTENDED2 = 0xc4c03d12 + PTP_SYS_OFFSET_PRECISE = 0xc0403d08 + PTP_SYS_OFFSET_PRECISE2 = 0xc0403d11 PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 @@ -2881,7 +2929,6 @@ const ( RTM_NEWNEXTHOP = 0x68 RTM_NEWNEXTHOPBUCKET = 0x74 RTM_NEWNSID = 0x58 - RTM_NEWNVLAN = 0x70 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 RTM_NEWROUTE = 0x18 @@ -2890,6 +2937,7 @@ const ( RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c RTM_NEWTUNNEL = 0x78 + RTM_NEWVLAN = 0x70 RTM_NR_FAMILIES = 0x1b RTM_NR_MSGTYPES = 0x6c RTM_SETDCB = 0x4f @@ -2948,6 +2996,7 @@ const ( RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 SCHED_DEADLINE = 0x6 + SCHED_EXT = 0x7 SCHED_FIFO = 0x1 SCHED_FLAG_ALL = 0x7f SCHED_FLAG_DL_OVERRUN = 0x4 diff --git 
a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 8aa6d77c01..75207613c7 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -109,12 +109,15 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -237,6 +240,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 PTRACE_GET_THREAD_AREA = 0x19 @@ -283,10 +300,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -321,6 +341,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git 
a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index da428f4253..c68acda535 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -109,12 +109,15 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -237,6 +240,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_ARCH_PRCTL = 0x1e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 @@ -284,10 +301,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -322,6 +342,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git 
a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index bf45bfec78..a8c607ab86 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -108,12 +108,15 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -234,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETCRUNCHREGS = 0x19 PTRACE_GETFDPIC = 0x1f PTRACE_GETFDPIC_EXEC = 0x0 @@ -289,10 +306,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -327,6 +347,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git 
a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 71c67162b7..18563dd8d3 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -109,15 +109,19 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + GCS_MAGIC = 0x47435300 HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -205,6 +209,7 @@ const ( PERF_EVENT_IOC_SET_BPF = 0x40042408 PERF_EVENT_IOC_SET_FILTER = 0x40082406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + POE_MAGIC = 0x504f4530 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 PPPIOCBRIDGECHAN = 0x40047435 @@ -240,6 +245,20 @@ const ( PROT_BTI = 0x10 PROT_MTE = 0x20 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_PEEKMTETAGS = 0x21 PTRACE_POKEMTETAGS = 0x22 PTRACE_SYSEMU = 0x1f @@ -280,10 +299,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 
0x40182103 @@ -318,6 +340,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 9476628fa0..22912cdaa9 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -109,12 +109,15 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -238,6 +241,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 RLIMIT_AS = 0x9 @@ -276,10 +293,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -314,6 +334,9 @@ const ( 
SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index b9e85f3cf0..29344eb37a 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -108,12 +108,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -234,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -282,10 +299,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -320,6 +340,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 
0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index a48b68a764..20d51fb96a 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -108,12 +108,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -234,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -282,10 +299,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -320,6 +340,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 
SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ea00e8522a..321b60902a 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -108,12 +108,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -234,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -282,10 +299,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -320,6 +340,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 
SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 91c6468717..9bacdf1e27 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -108,12 +108,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -234,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -282,10 +299,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -320,6 +340,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 
SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 8cbf38d639..c224272615 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -108,12 +108,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -237,6 +240,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 @@ -337,10 +354,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -375,6 +395,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + 
SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index a2df734191..6270c8ee13 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -108,12 +108,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -237,6 +240,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 @@ -341,10 +358,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -379,6 +399,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 
0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 2479137923..9966c1941f 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -108,12 +108,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -237,6 +240,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 @@ -341,10 +358,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -379,6 +399,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 
0x5 SO_ERROR = 0x4 diff --git a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index d265f146ee..848e5fcc42 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -108,12 +108,15 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -234,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFDPIC = 0x21 PTRACE_GETFDPIC_EXEC = 0x0 PTRACE_GETFDPIC_INTERP = 0x1 @@ -273,10 +290,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -311,6 +331,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git 
a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 3f2d644396..669b2adb80 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -108,12 +108,15 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -234,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_DISABLE_TE = 0x5010 PTRACE_ENABLE_TE = 0x5009 PTRACE_GET_LAST_BREAK = 0x5006 @@ -345,10 +362,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -383,6 +403,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git 
a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 5d8b727a1c..4834e57514 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -112,12 +112,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x400000 IN_NONBLOCK = 0x4000 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -239,6 +242,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPAREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETFPREGS64 = 0x19 @@ -336,10 +353,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x58 + SCM_DEVMEM_LINEAR = 0x57 SCM_TIMESTAMPING = 0x23 SCM_TIMESTAMPING_OPT_STATS = 0x38 SCM_TIMESTAMPING_PKTINFO = 0x3c SCM_TIMESTAMPNS = 0x21 + SCM_TS_OPT_ID = 0x5a SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -422,6 +442,9 @@ const ( SO_CNX_ADVICE = 0x37 SO_COOKIE = 0x3b SO_DETACH_REUSEPORT_BPF = 0x47 + SO_DEVMEM_DMABUF = 0x58 + SO_DEVMEM_DONTNEED = 0x59 + SO_DEVMEM_LINEAR = 0x57 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git 
a/go-controller/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/go-controller/vendor/golang.org/x/sys/unix/zsyscall_linux.go index af30da5578..5cc1e8eb2f 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -592,6 +592,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockSettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_SETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) if e1 != 0 { diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/go-controller/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 829b87feb8..c6545413c4 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -141,6 +141,16 @@ import ( //go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so" //go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" //go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so" +//go:cgo_import_dynamic libc_getpeerucred getpeerucred "libc.so" +//go:cgo_import_dynamic libc_ucred_get ucred_get "libc.so" +//go:cgo_import_dynamic libc_ucred_geteuid ucred_geteuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getegid ucred_getegid "libc.so" +//go:cgo_import_dynamic libc_ucred_getruid ucred_getruid "libc.so" +//go:cgo_import_dynamic libc_ucred_getrgid ucred_getrgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsuid 
ucred_getsuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsgid ucred_getsgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getpid ucred_getpid "libc.so" +//go:cgo_import_dynamic libc_ucred_free ucred_free "libc.so" //go:cgo_import_dynamic libc_port_create port_create "libc.so" //go:cgo_import_dynamic libc_port_associate port_associate "libc.so" //go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so" @@ -280,6 +290,16 @@ import ( //go:linkname procgetpeername libc_getpeername //go:linkname procsetsockopt libc_setsockopt //go:linkname procrecvfrom libc_recvfrom +//go:linkname procgetpeerucred libc_getpeerucred +//go:linkname procucred_get libc_ucred_get +//go:linkname procucred_geteuid libc_ucred_geteuid +//go:linkname procucred_getegid libc_ucred_getegid +//go:linkname procucred_getruid libc_ucred_getruid +//go:linkname procucred_getrgid libc_ucred_getrgid +//go:linkname procucred_getsuid libc_ucred_getsuid +//go:linkname procucred_getsgid libc_ucred_getsgid +//go:linkname procucred_getpid libc_ucred_getpid +//go:linkname procucred_free libc_ucred_free //go:linkname procport_create libc_port_create //go:linkname procport_associate libc_port_associate //go:linkname procport_dissociate libc_port_dissociate @@ -420,6 +440,16 @@ var ( procgetpeername, procsetsockopt, procrecvfrom, + procgetpeerucred, + procucred_get, + procucred_geteuid, + procucred_getegid, + procucred_getruid, + procucred_getrgid, + procucred_getsuid, + procucred_getsgid, + procucred_getpid, + procucred_free, procport_create, procport_associate, procport_dissociate, @@ -2029,6 +2059,90 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getpeerucred(fd uintptr, ucred *uintptr) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetpeerucred)), 2, uintptr(fd), uintptr(unsafe.Pointer(ucred)), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGet(pid int) (ucred uintptr, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procucred_get)), 1, uintptr(pid), 0, 0, 0, 0, 0) + ucred = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGeteuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_geteuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetegid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getegid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetruid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getruid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetrgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getrgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetpid(ucred uintptr) (pid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getpid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + pid = 
int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredFree(ucred uintptr) { + sysvicall6(uintptr(unsafe.Pointer(&procucred_free)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func port_create() (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0) n = int(r0) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 524b0820cb..c79aaff306 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -458,4 +458,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f485dbf456..5eb450695e 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -381,4 +381,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 70b35bf3b0..05e5029744 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -422,4 +422,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff 
--git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 1893e2fe88..38c53ec51b 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -325,4 +325,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 16a4017da0..31d2e71a18 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -321,4 +321,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 7e567f1eff..f4184a336b 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -442,4 +442,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 SYS_MSEAL = 4462 + SYS_SETXATTRAT = 4463 + SYS_GETXATTRAT = 4464 + SYS_LISTXATTRAT = 4465 + SYS_REMOVEXATTRAT = 4466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 38ae55e5ef..05b9962278 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -372,4 +372,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 
SYS_MSEAL = 5462 + SYS_SETXATTRAT = 5463 + SYS_GETXATTRAT = 5464 + SYS_LISTXATTRAT = 5465 + SYS_REMOVEXATTRAT = 5466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 55e92e60a8..43a256e9e6 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -372,4 +372,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 SYS_MSEAL = 5462 + SYS_SETXATTRAT = 5463 + SYS_GETXATTRAT = 5464 + SYS_LISTXATTRAT = 5465 + SYS_REMOVEXATTRAT = 5466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 60658d6a02..eea5ddfc22 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -442,4 +442,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 SYS_MSEAL = 4462 + SYS_SETXATTRAT = 4463 + SYS_GETXATTRAT = 4464 + SYS_LISTXATTRAT = 4465 + SYS_REMOVEXATTRAT = 4466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index e203e8a7ed..0d777bfbb1 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -449,4 +449,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 5944b97d54..b446365025 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ 
b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -421,4 +421,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index c66d416dad..0c7d21c188 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -421,4 +421,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index a5459e766f..8405391698 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -326,4 +326,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 01d86825bb..fcf1b790d6 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -387,4 +387,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go 
b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 7b703e77cd..52d15b5f9d 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -400,4 +400,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/go-controller/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index d003c3d437..17c53bd9b3 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -462,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -480,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -512,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -557,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } 
+type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 diff --git a/go-controller/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/go-controller/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 0d45a941aa..2392226a74 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -462,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -480,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -512,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -557,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } +type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits 
uint32 + Rmx RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 diff --git a/go-controller/vendor/golang.org/x/sys/unix/ztypes_linux.go b/go-controller/vendor/golang.org/x/sys/unix/ztypes_linux.go index 3a69e45496..a46abe6472 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/go-controller/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1752,12 +1752,6 @@ const ( IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 - NETKIT_NEXT = -0x1 - NETKIT_PASS = 0x0 - NETKIT_DROP = 0x2 - NETKIT_REDIRECT = 0x7 - NETKIT_L2 = 0x0 - NETKIT_L3 = 0x1 IFLA_NETKIT_UNSPEC = 0x0 IFLA_NETKIT_PEER_INFO = 0x1 IFLA_NETKIT_PRIMARY = 0x2 @@ -1796,6 +1790,7 @@ const ( IFLA_VXLAN_DF = 0x1d IFLA_VXLAN_VNIFILTER = 0x1e IFLA_VXLAN_LOCALBYPASS = 0x1f + IFLA_VXLAN_LABEL_POLICY = 0x20 IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1825,6 +1820,8 @@ const ( IFLA_GTP_ROLE = 0x4 IFLA_GTP_CREATE_SOCKETS = 0x5 IFLA_GTP_RESTART_COUNT = 0x6 + IFLA_GTP_LOCAL = 0x7 + IFLA_GTP_LOCAL6 = 0x8 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1857,6 +1854,7 @@ const ( IFLA_BOND_AD_LACP_ACTIVE = 0x1d IFLA_BOND_MISSED_MAX = 0x1e IFLA_BOND_NS_IP6_TARGET = 0x1f + IFLA_BOND_COUPLED_CONTROL = 0x20 IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1925,6 +1923,7 @@ const ( IFLA_HSR_SEQ_NR = 0x5 IFLA_HSR_VERSION = 0x6 IFLA_HSR_PROTOCOL = 0x7 + IFLA_HSR_INTERLINK = 0x8 IFLA_STATS_UNSPEC = 0x0 IFLA_STATS_LINK_64 = 0x1 IFLA_STATS_LINK_XSTATS = 0x2 @@ -1977,6 +1976,15 @@ const ( IFLA_DSA_MASTER = 0x1 ) +const ( + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 +) + const ( NF_INET_PRE_ROUTING = 0x0 NF_INET_LOCAL_IN = 0x1 @@ -2586,8 +2594,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x10000 - SOF_TIMESTAMPING_MASK = 0x1ffff + 
SOF_TIMESTAMPING_LAST = 0x20000 + SOF_TIMESTAMPING_MASK = 0x3ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3533,7 +3541,7 @@ type Nhmsg struct { type NexthopGrp struct { Id uint32 Weight uint8 - Resvd1 uint8 + High uint8 Resvd2 uint16 } @@ -3794,7 +3802,7 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2c + ETHTOOL_MSG_USER_MAX = 0x2d ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3834,7 +3842,7 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2c + ETHTOOL_MSG_KERNEL_MAX = 0x2e ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 ETHTOOL_FLAG_OMIT_REPLY = 0x2 ETHTOOL_FLAG_STATS = 0x4 @@ -3842,7 +3850,7 @@ const ( ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 ETHTOOL_A_HEADER_FLAGS = 0x3 - ETHTOOL_A_HEADER_MAX = 0x3 + ETHTOOL_A_HEADER_MAX = 0x4 ETHTOOL_A_BITSET_BIT_UNSPEC = 0x0 ETHTOOL_A_BITSET_BIT_INDEX = 0x1 ETHTOOL_A_BITSET_BIT_NAME = 0x2 @@ -4023,11 +4031,11 @@ const ( ETHTOOL_A_CABLE_RESULT_UNSPEC = 0x0 ETHTOOL_A_CABLE_RESULT_PAIR = 0x1 ETHTOOL_A_CABLE_RESULT_CODE = 0x2 - ETHTOOL_A_CABLE_RESULT_MAX = 0x2 + ETHTOOL_A_CABLE_RESULT_MAX = 0x3 ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC = 0x0 ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR = 0x1 ETHTOOL_A_CABLE_FAULT_LENGTH_CM = 0x2 - ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x2 + ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x3 ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED = 0x1 ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED = 0x2 @@ -4110,6 +4118,107 @@ type EthtoolDrvinfo struct { Regdump_len uint32 } +type EthtoolTsInfo struct { + Cmd uint32 + So_timestamping uint32 + Phc_index int32 + Tx_types uint32 + Tx_reserved [3]uint32 + Rx_filters uint32 + Rx_reserved [3]uint32 +} + +type HwTstampConfig struct { + Flags int32 + Tx_type int32 + Rx_filter int32 +} + +const ( + HWTSTAMP_FILTER_NONE = 0x0 + 
HWTSTAMP_FILTER_ALL = 0x1 + HWTSTAMP_FILTER_SOME = 0x2 + HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 0x3 + HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 0x6 + HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 0x9 + HWTSTAMP_FILTER_PTP_V2_EVENT = 0xc +) + +const ( + HWTSTAMP_TX_OFF = 0x0 + HWTSTAMP_TX_ON = 0x1 + HWTSTAMP_TX_ONESTEP_SYNC = 0x2 +) + +type ( + PtpClockCaps struct { + Max_adj int32 + N_alarm int32 + N_ext_ts int32 + N_per_out int32 + Pps int32 + N_pins int32 + Cross_timestamping int32 + Adjust_phase int32 + Max_phase_adj int32 + Rsv [11]int32 + } + PtpClockTime struct { + Sec int64 + Nsec uint32 + Reserved uint32 + } + PtpExttsEvent struct { + T PtpClockTime + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpExttsRequest struct { + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpPeroutRequest struct { + StartOrPhase PtpClockTime + Period PtpClockTime + Index uint32 + Flags uint32 + On PtpClockTime + } + PtpPinDesc struct { + Name [64]byte + Index uint32 + Func uint32 + Chan uint32 + Rsv [5]uint32 + } + PtpSysOffset struct { + Samples uint32 + Rsv [3]uint32 + Ts [51]PtpClockTime + } + PtpSysOffsetExtended struct { + Samples uint32 + Clockid int32 + Rsv [2]uint32 + Ts [25][3]PtpClockTime + } + PtpSysOffsetPrecise struct { + Device PtpClockTime + Realtime PtpClockTime + Monoraw PtpClockTime + Rsv [4]uint32 + } +) + +const ( + PTP_PF_NONE = 0x0 + PTP_PF_EXTTS = 0x1 + PTP_PF_PEROUT = 0x2 + PTP_PF_PHYSYNC = 0x3 +) + type ( HIDRawReportDescriptor struct { Size uint32 @@ -4291,6 +4400,7 @@ const ( type LandlockRulesetAttr struct { Access_fs uint64 Access_net uint64 + Scoped uint64 } type LandlockPathBeneathAttr struct { @@ -4637,7 +4747,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14c + NL80211_ATTR_MAX = 0x14d NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5409,7 +5519,7 @@ const ( NL80211_MNTR_FLAG_CONTROL = 0x3 
NL80211_MNTR_FLAG_COOK_FRAMES = 0x5 NL80211_MNTR_FLAG_FCSFAIL = 0x1 - NL80211_MNTR_FLAG_MAX = 0x6 + NL80211_MNTR_FLAG_MAX = 0x7 NL80211_MNTR_FLAG_OTHER_BSS = 0x4 NL80211_MNTR_FLAG_PLCPFAIL = 0x2 NL80211_MPATH_FLAG_ACTIVE = 0x1 @@ -6064,3 +6174,5 @@ type SockDiagReq struct { Family uint8 Protocol uint8 } + +const RTM_NEWNVLAN = 0x70 diff --git a/go-controller/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/go-controller/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index d9a13af468..2e5d5a4435 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/go-controller/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -377,6 +377,12 @@ type Flock_t struct { Pid int32 } +type F_cnvrt struct { + Cvtcmd int32 + Pccsid int16 + Fccsid int16 +} + type Termios struct { Cflag uint32 Iflag uint32 diff --git a/go-controller/vendor/golang.org/x/sys/windows/dll_windows.go b/go-controller/vendor/golang.org/x/sys/windows/dll_windows.go index 4e613cf633..3ca814f54d 100644 --- a/go-controller/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/go-controller/vendor/golang.org/x/sys/windows/dll_windows.go @@ -43,8 +43,8 @@ type DLL struct { // LoadDLL loads DLL file into memory. // // Warning: using LoadDLL without an absolute path name is subject to -// DLL preloading attacks. To safely load a system DLL, use LazyDLL -// with System set to true, or use LoadLibraryEx directly. +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL], +// or use [LoadLibraryEx] directly. func LoadDLL(name string) (dll *DLL, err error) { namep, err := UTF16PtrFromString(name) if err != nil { @@ -271,6 +271,9 @@ func (d *LazyDLL) NewProc(name string) *LazyProc { } // NewLazyDLL creates new LazyDLL associated with DLL file. +// +// Warning: using NewLazyDLL without an absolute path name is subject to +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL]. 
func NewLazyDLL(name string) *LazyDLL { return &LazyDLL{Name: name} } @@ -410,7 +413,3 @@ func loadLibraryEx(name string, system bool) (*DLL, error) { } return &DLL{Name: name, Handle: h}, nil } - -type errString string - -func (s errString) Error() string { return string(s) } diff --git a/go-controller/vendor/golang.org/x/sys/windows/syscall_windows.go b/go-controller/vendor/golang.org/x/sys/windows/syscall_windows.go index 5cee9a3143..4a32543868 100644 --- a/go-controller/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/go-controller/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -168,6 +168,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) //sys DisconnectNamedPipe(pipe Handle) (err error) +//sys GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) +//sys GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState @@ -725,20 +727,12 @@ func DurationSinceBoot() time.Duration { } func Ftruncate(fd Handle, length int64) (err error) { - curoffset, e := Seek(fd, 0, 1) - if e != nil { - return e - } - defer Seek(fd, curoffset, 0) - _, e = Seek(fd, length, 0) - if e != nil { - return e + type _FILE_END_OF_FILE_INFO struct { + 
EndOfFile int64 } - e = SetEndOfFile(fd) - if e != nil { - return e - } - return nil + var info _FILE_END_OF_FILE_INFO + info.EndOfFile = length + return SetFileInformationByHandle(fd, FileEndOfFileInfo, (*byte)(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info))) } func Gettimeofday(tv *Timeval) (err error) { @@ -894,6 +888,11 @@ const socket_error = uintptr(^uint32(0)) //sys GetACP() (acp uint32) = kernel32.GetACP //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar //sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx +//sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex +//sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry +//sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange +//sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange +//sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2 // For testing: clients can set this flag to force // creation of IPv6 sockets to return EAFNOSUPPORT. @@ -1685,13 +1684,16 @@ func (s NTStatus) Error() string { // do not use NTUnicodeString, and instead UTF16PtrFromString should be used for // the more common *uint16 string type. 
func NewNTUnicodeString(s string) (*NTUnicodeString, error) { - var u NTUnicodeString - s16, err := UTF16PtrFromString(s) + s16, err := UTF16FromString(s) if err != nil { return nil, err } - RtlInitUnicodeString(&u, s16) - return &u, nil + n := uint16(len(s16) * 2) + return &NTUnicodeString{ + Length: n - 2, // subtract 2 bytes for the NULL terminator + MaximumLength: n, + Buffer: &s16[0], + }, nil } // Slice returns a uint16 slice that aliases the data in the NTUnicodeString. diff --git a/go-controller/vendor/golang.org/x/sys/windows/types_windows.go b/go-controller/vendor/golang.org/x/sys/windows/types_windows.go index 7b97a154c9..9d138de5fe 100644 --- a/go-controller/vendor/golang.org/x/sys/windows/types_windows.go +++ b/go-controller/vendor/golang.org/x/sys/windows/types_windows.go @@ -176,6 +176,7 @@ const ( WAIT_FAILED = 0xFFFFFFFF // Access rights for process. + PROCESS_ALL_ACCESS = 0xFFFF PROCESS_CREATE_PROCESS = 0x0080 PROCESS_CREATE_THREAD = 0x0002 PROCESS_DUP_HANDLE = 0x0040 @@ -2203,6 +2204,132 @@ const ( IfOperStatusLowerLayerDown = 7 ) +const ( + IF_MAX_PHYS_ADDRESS_LENGTH = 32 + IF_MAX_STRING_SIZE = 256 +) + +// MIB_IF_ENTRY_LEVEL enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/nf-netioapi-getifentry2ex. +const ( + MibIfEntryNormal = 0 + MibIfEntryNormalWithoutStatistics = 2 +) + +// MIB_NOTIFICATION_TYPE enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ne-netioapi-mib_notification_type. +const ( + MibParameterNotification = 0 + MibAddInstance = 1 + MibDeleteInstance = 2 + MibInitialNotification = 3 +) + +// MibIfRow2 stores information about a particular interface. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_if_row2. 
+type MibIfRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + InterfaceGuid GUID + Alias [IF_MAX_STRING_SIZE + 1]uint16 + Description [IF_MAX_STRING_SIZE + 1]uint16 + PhysicalAddressLength uint32 + PhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + PermanentPhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + Mtu uint32 + Type uint32 + TunnelType uint32 + MediaType uint32 + PhysicalMediumType uint32 + AccessType uint32 + DirectionType uint32 + InterfaceAndOperStatusFlags uint8 + OperStatus uint32 + AdminStatus uint32 + MediaConnectState uint32 + NetworkGuid GUID + ConnectionType uint32 + TransmitLinkSpeed uint64 + ReceiveLinkSpeed uint64 + InOctets uint64 + InUcastPkts uint64 + InNUcastPkts uint64 + InDiscards uint64 + InErrors uint64 + InUnknownProtos uint64 + InUcastOctets uint64 + InMulticastOctets uint64 + InBroadcastOctets uint64 + OutOctets uint64 + OutUcastPkts uint64 + OutNUcastPkts uint64 + OutDiscards uint64 + OutErrors uint64 + OutUcastOctets uint64 + OutMulticastOctets uint64 + OutBroadcastOctets uint64 + OutQLen uint64 +} + +// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row. +type MibUnicastIpAddressRow struct { + Address RawSockaddrInet6 // SOCKADDR_INET union + InterfaceLuid uint64 + InterfaceIndex uint32 + PrefixOrigin uint32 + SuffixOrigin uint32 + ValidLifetime uint32 + PreferredLifetime uint32 + OnLinkPrefixLength uint8 + SkipAsSource uint8 + DadState uint32 + ScopeId uint32 + CreationTimeStamp Filetime +} + +const ScopeLevelCount = 16 + +// MIB_IPINTERFACE_ROW stores interface management information for a particular IP address family on a network interface. +// See https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipinterface_row. 
+type MibIpInterfaceRow struct { + Family uint16 + InterfaceLuid uint64 + InterfaceIndex uint32 + MaxReassemblySize uint32 + InterfaceIdentifier uint64 + MinRouterAdvertisementInterval uint32 + MaxRouterAdvertisementInterval uint32 + AdvertisingEnabled uint8 + ForwardingEnabled uint8 + WeakHostSend uint8 + WeakHostReceive uint8 + UseAutomaticMetric uint8 + UseNeighborUnreachabilityDetection uint8 + ManagedAddressConfigurationSupported uint8 + OtherStatefulConfigurationSupported uint8 + AdvertiseDefaultRoute uint8 + RouterDiscoveryBehavior uint32 + DadTransmits uint32 + BaseReachableTime uint32 + RetransmitTime uint32 + PathMtuDiscoveryTimeout uint32 + LinkLocalAddressBehavior uint32 + LinkLocalAddressTimeout uint32 + ZoneIndices [ScopeLevelCount]uint32 + SitePrefixLength uint32 + Metric uint32 + NlMtu uint32 + Connected uint8 + SupportsWakeUpPatterns uint8 + SupportsNeighborDiscovery uint8 + SupportsRouterDiscovery uint8 + ReachableTime uint32 + TransmitOffload uint32 + ReceiveOffload uint32 + DisableDefaultRoutes uint8 +} + // Console related constants used for the mode parameter to SetConsoleMode. See // https://docs.microsoft.com/en-us/windows/console/setconsolemode for details. 
diff --git a/go-controller/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/go-controller/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 4c2e1bdc01..01c0716c2c 100644 --- a/go-controller/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/go-controller/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -181,10 +181,15 @@ var ( procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") + procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex") + procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry") + procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange") + procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange") procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") @@ -275,8 +280,10 @@ var ( procGetMaximumProcessorCount = modkernel32.NewProc("GetMaximumProcessorCount") procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") + procGetNamedPipeClientProcessId = modkernel32.NewProc("GetNamedPipeClientProcessId") procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId") procGetOverlappedResult = 
modkernel32.NewProc("GetOverlappedResult") procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") procGetProcAddress = modkernel32.NewProc("GetProcAddress") @@ -1606,6 +1613,14 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si return } +func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { + r0, _, _ := syscall.Syscall(procCancelMibChangeNotify2.Addr(), 1, uintptr(notificationHandle), 0, 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) if r0 != 0 { @@ -1638,6 +1653,46 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { return } +func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { + r0, _, _ := syscall.Syscall(procGetIfEntry2Ex.Addr(), 2, uintptr(level), uintptr(unsafe.Pointer(row)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { + r0, _, _ := syscall.Syscall(procGetUnicastIpAddressEntry.Addr(), 1, uintptr(unsafe.Pointer(row)), 0, 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.Syscall6(procNotifyIpInterfaceChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyUnicastIpAddressChange(family uint16, callback uintptr, 
callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.Syscall6(procNotifyUnicastIpAddressChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func AddDllDirectory(path *uint16) (cookie uintptr, err error) { r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) cookie = uintptr(r0) @@ -2393,6 +2448,14 @@ func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err er return } +func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetNamedPipeClientProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) if r1 == 0 { @@ -2409,6 +2472,14 @@ func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint3 return } +func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetNamedPipeServerProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { 
var _p0 uint32 if wait { diff --git a/go-controller/vendor/golang.org/x/term/README.md b/go-controller/vendor/golang.org/x/term/README.md index d03d0aefef..05ff623f94 100644 --- a/go-controller/vendor/golang.org/x/term/README.md +++ b/go-controller/vendor/golang.org/x/term/README.md @@ -4,16 +4,13 @@ This repository provides Go terminal and console support packages. -## Download/Install - -The easiest way to install is to run `go get -u golang.org/x/term`. You can -also manually git clone the repository to `$GOPATH/src/golang.org/x/term`. - ## Report Issues / Send Patches This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/term. The main issue tracker for the term repository is located at -https://github.com/golang/go/issues. Prefix your issue with "x/term:" in the +https://go.dev/issues. Prefix your issue with "x/term:" in the subject line, so it is easy to find. 
diff --git a/go-controller/vendor/golang.org/x/text/language/parse.go b/go-controller/vendor/golang.org/x/text/language/parse.go index 4d57222e77..053336e286 100644 --- a/go-controller/vendor/golang.org/x/text/language/parse.go +++ b/go-controller/vendor/golang.org/x/text/language/parse.go @@ -59,7 +59,7 @@ func (c CanonType) Parse(s string) (t Tag, err error) { if changed { tt.RemakeString() } - return makeTag(tt), err + return makeTag(tt), nil } // Compose creates a Tag from individual parts, which may be of type Tag, Base, diff --git a/go-controller/vendor/modules.txt b/go-controller/vendor/modules.txt index 9ee1b0e2b4..a0ecf2cb4a 100644 --- a/go-controller/vendor/modules.txt +++ b/go-controller/vendor/modules.txt @@ -473,8 +473,8 @@ go.opencensus.io/internal go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/tracestate -# golang.org/x/crypto v0.28.0 -## explicit; go 1.20 +# golang.org/x/crypto v0.36.0 +## explicit; go 1.23.0 golang.org/x/crypto/cryptobyte golang.org/x/crypto/cryptobyte/asn1 golang.org/x/crypto/ed25519 @@ -482,8 +482,8 @@ golang.org/x/crypto/ed25519 ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/maps -# golang.org/x/net v0.30.0 -## explicit; go 1.18 +# golang.org/x/net v0.38.0 +## explicit; go 1.23.0 golang.org/x/net/bpf golang.org/x/net/context golang.org/x/net/html @@ -494,6 +494,7 @@ golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/icmp golang.org/x/net/idna +golang.org/x/net/internal/httpcommon golang.org/x/net/internal/iana golang.org/x/net/internal/socket golang.org/x/net/internal/socks @@ -507,21 +508,21 @@ golang.org/x/net/websocket ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.8.0 -## explicit; go 1.18 +# golang.org/x/sync v0.12.0 +## explicit; go 1.23.0 golang.org/x/sync/errgroup -# golang.org/x/sys v0.26.0 -## explicit; go 1.18 +# golang.org/x/sys v0.31.0 +## explicit; go 1.23.0 golang.org/x/sys/plan9 
golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry golang.org/x/sys/windows/svc -# golang.org/x/term v0.25.0 -## explicit; go 1.18 +# golang.org/x/term v0.30.0 +## explicit; go 1.23.0 golang.org/x/term -# golang.org/x/text v0.19.0 -## explicit; go 1.18 +# golang.org/x/text v0.23.0 +## explicit; go 1.23.0 golang.org/x/text/encoding golang.org/x/text/encoding/charmap golang.org/x/text/encoding/htmlindex @@ -1173,7 +1174,7 @@ k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/kubernetes v1.32.3 +# k8s.io/kubernetes v1.32.6 ## explicit; go 1.23.0 k8s.io/kubernetes/pkg/apis/core k8s.io/kubernetes/pkg/probe diff --git a/test/conformance/go.mod b/test/conformance/go.mod index 65883ef719..b3763a3068 100644 --- a/test/conformance/go.mod +++ b/test/conformance/go.mod @@ -1,6 +1,6 @@ module github.com/ovn-org/ovn-kubernetes/test/conformance -go 1.21 +go 1.23.0 require ( gopkg.in/yaml.v3 v3.0.1 @@ -38,11 +38,11 @@ require ( github.com/rogpeppe/go-internal v1.11.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/testify v1.8.4 // indirect - golang.org/x/net v0.23.0 // indirect + golang.org/x/net v0.38.0 // indirect golang.org/x/oauth2 v0.12.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/term v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.33.0 // indirect diff --git a/test/conformance/go.sum b/test/conformance/go.sum index 14a3443c7f..1e5b55a8e9 100644 --- a/test/conformance/go.sum +++ b/test/conformance/go.sum @@ -110,8 +110,8 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -120,23 +120,23 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/test/e2e/go.mod b/test/e2e/go.mod index d1d514d1f9..95ac4ff6ae 100644 --- a/test/e2e/go.mod +++ b/test/e2e/go.mod @@ -12,12 +12,12 @@ require ( 
github.com/onsi/ginkgo/v2 v2.22.0 github.com/onsi/gomega v1.36.1 github.com/pkg/errors v0.9.1 - golang.org/x/sync v0.11.0 + golang.org/x/sync v0.12.0 k8s.io/api v0.32.3 k8s.io/apimachinery v0.32.3 k8s.io/client-go v0.32.3 k8s.io/klog v1.0.0 - k8s.io/kubernetes v1.32.3 + k8s.io/kubernetes v1.32.6 k8s.io/pod-security-admission v0.32.3 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 ) @@ -145,13 +145,13 @@ require ( go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.35.0 // indirect + golang.org/x/crypto v0.36.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/net v0.30.0 // indirect + golang.org/x/net v0.38.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sys v0.30.0 // indirect - golang.org/x/term v0.29.0 // indirect - golang.org/x/text v0.22.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.26.0 // indirect google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect @@ -192,7 +192,7 @@ require ( require ( github.com/containernetworking/plugins v1.2.0 github.com/coreos/butane v0.18.0 - github.com/docker/docker v26.1.4+incompatible + github.com/docker/docker v26.1.5+incompatible github.com/google/goexpect v0.0.0-20210430020637-ab937bf7fd6f github.com/onsi/ginkgo v1.16.5 github.com/openshift-kni/k8sreporter v1.0.6 diff --git a/test/e2e/go.sum b/test/e2e/go.sum index 239bd56b7a..6838af0973 100644 --- a/test/e2e/go.sum +++ b/test/e2e/go.sum @@ -122,8 +122,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= 
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.4+incompatible h1:vuTpXDuoga+Z38m1OZHzl7NKisKWaWlhjQk7IDPSLsU= -github.com/docker/docker v26.1.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.5+incompatible h1:NEAxTwEjxV6VbBMBoGG3zPqbiJosIApZjxlbrG9q3/g= +github.com/docker/docker v26.1.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -581,8 +581,8 @@ golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= -golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -665,8 +665,8 @@ golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod 
h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -690,8 +690,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= -golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -752,15 +752,15 @@ golang.org/x/sys v0.1.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= -golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -771,8 +771,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text 
v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= -golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1043,8 +1043,8 @@ k8s.io/kubectl v0.32.3 h1:VMi584rbboso+yjfv0d8uBHwwxbC438LKq+dXd5tOAI= k8s.io/kubectl v0.32.3/go.mod h1:6Euv2aso5GKzo/UVMacV6C7miuyevpfI91SvBvV9Zdg= k8s.io/kubelet v0.32.3 h1:B9HzW4yB67flx8tN2FYuDwZvxnmK3v5EjxxFvOYjmc8= k8s.io/kubelet v0.32.3/go.mod h1:yyAQSCKC+tjSlaFw4HQG7Jein+vo+GeKBGdXdQGvL1U= -k8s.io/kubernetes v1.32.3 h1:2A58BlNME8NwsMawmnM6InYo3Jf35Nw5G79q46kXwoA= -k8s.io/kubernetes v1.32.3/go.mod h1:GvhiBeolvSRzBpFlgM0z/Bbu3Oxs9w3P6XfEgYaMi8k= +k8s.io/kubernetes v1.32.6 h1:tp1gRjOqZjaoFBek5PN6eSmODdS1QRrH5UKiFP8ZByg= +k8s.io/kubernetes v1.32.6/go.mod h1:REY0Gok66BTTrbGyZaFMNKO9JhxvgBDW9B7aksWRFoY= k8s.io/mount-utils v0.32.3 h1:ZPXXHblfBhYP89OnaozpFg9Ojl6HhDfxBLcdWNkaxW8= k8s.io/mount-utils v0.32.3/go.mod h1:Kun5c2svjAPx0nnvJKYQWhfeNW+O0EpzHgRhDcYoSY0= k8s.io/pod-security-admission v0.32.3 h1:scV0PQc3PdD6sXOMHukPZOCzGCGZeVN5z999gHBpkOc= From 116ba5222c543f9ea666d09127bce9ed2f8fd1c1 Mon Sep 17 00:00:00 2001 From: Alin Gabriel Serdean Date: Thu, 12 Jun 2025 14:14:52 +0000 Subject: [PATCH 044/181] ovnkube.sh: Add new overwriting options for the gateway options and kubernetes node name This commit adds: a) options to change ovn_gateway_opts and ovn_gateway_router_subnet by a container inside the same POD. 
the idea is that an init container can do an IP allocation, write the output to a file,
@@ -1356,6 +1367,7 @@ ovn-master() { ${network_qos_enabled_flag} \ ${ovn_enable_dnsnameresolver_flag} \ ${nohostsubnet_label_option} \ + ${ovn_stateless_netpol_enable_flag} \ ${ovn_disable_requestedchassis_flag} \ --cluster-subnets ${net_cidr} --k8s-service-cidr=${svc_cidr} \ --gateway-mode=${ovn_gateway_mode} ${ovn_gateway_opts} \ @@ -1626,6 +1638,13 @@ ovnkube-controller() { fi echo "ovn_observ_enable_flag=${ovn_observ_enable_flag}" + + ovn_stateless_netpol_enable_flag= + if [[ ${ovn_stateless_netpol_enable} == "true" ]]; then + ovn_stateless_netpol_enable_flag="--enable-stateless-netpol" + fi + echo "ovn_stateless_netpol_enable_flag: ${ovn_stateless_netpol_enable_flag}" + echo "=============== ovnkube-controller ========== MASTER ONLY" /usr/bin/ovnkube --init-ovnkube-controller ${K8S_NODE} \ ${anp_enabled_flag} \ @@ -2054,6 +2073,11 @@ ovnkube-controller-with-node() { fi echo "ovn_observ_enable_flag=${ovn_observ_enable_flag}" + ovn_stateless_netpol_enable_flag= + if [[ ${ovn_stateless_netpol_enable} == "true" ]]; then + ovn_stateless_netpol_enable_flag="--enable-stateless-netpol" + fi + echo "=============== ovnkube-controller-with-node --init-ovnkube-controller-with-node==========" /usr/bin/ovnkube --init-ovnkube-controller ${K8S_NODE} --init-node ${K8S_NODE} \ ${anp_enabled_flag} \ @@ -2399,8 +2423,13 @@ ovn-node() { wait_for_event ovs_ready fi - echo "=============== ovn-node - (wait for ready_to_start_node)" - wait_for_event ready_to_start_node + if [[ ${ovnkube_node_mode} != "dpu-host" ]] && [[ ${ovn_enable_interconnect} != "true" ]]; then + # ready_to_start_node checks for the NB/SB readiness state. 
+ # This is not available on the DPU host when interconnect is enabled, + # because the DBs will run locally on the DPU + echo "=============== ovn-node - (wait for ready_to_start_node)" + wait_for_event ready_to_start_node + fi echo "ovn_nbdb ${ovn_nbdb} ovn_sbdb ${ovn_sbdb} ovn_nbdb_conn ${ovn_nbdb_conn}" @@ -2578,12 +2607,6 @@ ovn-node() { fi if [[ ${ovnkube_node_mode} == "dpu" ]]; then - # in the case of dpu mode we want the host K8s Node Name and not the DPU K8s Node Name - K8S_NODE=$(ovs-vsctl --if-exists get Open_vSwitch . external_ids:host-k8s-nodename | tr -d '\"') - if [[ ${K8S_NODE} == "" ]]; then - echo "Couldn't get the required Host K8s Nodename. Exiting..." - exit 1 - fi if [[ ${ovn_gateway_opts} == "" ]]; then # get the gateway interface gw_iface=$(ovs-vsctl --if-exists get Open_vSwitch . external_ids:ovn-gw-interface | tr -d \") From 05f8d8f001974c683b1686e1a24fc8646753be67 Mon Sep 17 00:00:00 2001 From: Alin Gabriel Serdean Date: Mon, 16 Jun 2025 16:51:11 +0000 Subject: [PATCH 045/181] Add short doc update with the ovn-ic components on the DPU Signed-off-by: Alin Gabriel Serdean --- dist/images/ovnkube.sh | 19 +++++----- docs/features/hardware-offload/dpu-support.md | 36 +++++++++++++++++++ 2 files changed, 46 insertions(+), 9 deletions(-) diff --git a/dist/images/ovnkube.sh b/dist/images/ovnkube.sh index bbe7f9d929..85b8eeab14 100755 --- a/dist/images/ovnkube.sh +++ b/dist/images/ovnkube.sh @@ -324,15 +324,14 @@ ovn_nohostsubnet_label=${OVN_NOHOSTSUBNET_LABEL:-""} # should be set to true when dpu nodes are in the cluster ovn_disable_requestedchassis=${OVN_DISABLE_REQUESTEDCHASSIS:-false} -# external_ids:host-k8s-nodename is set on an Open_vSwitch enabled system if the ovnkube pod -# should function on behalf of a different host than external_ids:host +# external_ids:host-k8s-nodename is set on an Open_vSwitch enabled system if the ovnkube stack +# should function on behalf of a different host than external_ids:hostname. 
This includes +# all the components that belond in an ovnkube stack (i.e. NB DB, SB DB, ovnkube etc) # overwrite the K8S_NODE env var with the one found within the OVS metadata in this case -if [[ ${ovnkube_node_mode} == "dpu" ]]; then - K8S_NODE=$(ovs-vsctl --if-exists get Open_vSwitch . external_ids:host-k8s-nodename | tr -d '\"') - if [[ ${K8S_NODE} == "" ]]; then - echo "Trying to run in DPU mode and couldn't get the required Host K8s Nodename. Exiting..." - exit 1 - fi +ovn_k8s_node=$(ovs-vsctl --if-exists get Open_vSwitch . external_ids:host-k8s-nodename | tr -d '\"') +if [[ ! -z $ovn_k8s_node ]]; then + echo "host-k8s-nodename is set, overriding K8S_NODE with $ovn_k8s_node" + K8S_NODE=$ovn_k8s_node fi # Determine the ovn rundir. @@ -2423,10 +2422,12 @@ ovn-node() { wait_for_event ovs_ready fi - if [[ ${ovnkube_node_mode} != "dpu-host" ]] && [[ ${ovn_enable_interconnect} != "true" ]]; then + if [[ ${ovnkube_node_mode} == "dpu-host" ]] && [[ ${ovn_enable_interconnect} == "true" ]]; then # ready_to_start_node checks for the NB/SB readiness state. # This is not available on the DPU host when interconnect is enabled, # because the DBs will run locally on the DPU + echo "skipping ready_to_start_node on DPU Host and when interconnect is true" + else echo "=============== ovn-node - (wait for ready_to_start_node)" wait_for_event ready_to_start_node fi diff --git a/docs/features/hardware-offload/dpu-support.md b/docs/features/hardware-offload/dpu-support.md index 6c098de727..bc9d731a39 100644 --- a/docs/features/hardware-offload/dpu-support.md +++ b/docs/features/hardware-offload/dpu-support.md @@ -17,3 +17,39 @@ on the embedded CPU. Any vendor that manufactures a DPU which supports the above model should work with current design. Design document can be found [here](https://docs.google.com/document/d/11IoMKiohK7hIyIE36FJmwJv46DEBx52a4fqvrpCBBcg/edit?usp=sharing). 
+ +## OVN Kubernetes in a DPU-Accelerated Environment + +The **ovn-kubernetes** deployment will have two parts one on the host and another on the DPU side. + + +These aforementioned parts are expected to be deployed also on two different Kubernetes clusters, one for the host and another for the DPUs. + + +### Host Cluster +--- + +#### OVN Kubernetes control plane related component +- ovn-cluster-manager + +#### OVN Kubernetes components on a Standard Host (Non-DPU) +- local-nb-ovsdb +- local-sb-ovsdb +- run-ovn-northd +- ovnkube-controller-with-node +- ovn-controller +- ovs-metrics + +#### OVN Kubernetes component on a DPU-Enabled Host +- ovn-node + +### DPU Cluster +--- + +#### OVN Kubernetes components +- local-nb-ovsdb +- local-sb-ovsdb +- run-ovn-northd +- ovnkube-controller-with-node +- ovn-controller +- ovs-metrics From ee962e5cc31cd90db5bf63dafc0d41f39900ed20 Mon Sep 17 00:00:00 2001 From: Geo Turcsanyi Date: Thu, 26 Jun 2025 22:02:48 +0200 Subject: [PATCH 046/181] update documentation on deploying OVN K8s with KIND Signed-off-by: Geo Turcsanyi --- .../launching-ovn-kubernetes-on-kind.md | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/docs/installation/launching-ovn-kubernetes-on-kind.md b/docs/installation/launching-ovn-kubernetes-on-kind.md index c3a49ddde7..5c61f3a9cd 100644 --- a/docs/installation/launching-ovn-kubernetes-on-kind.md +++ b/docs/installation/launching-ovn-kubernetes-on-kind.md @@ -18,15 +18,16 @@ KIND (Kubernetes in Docker) deployment of OVN kubernetes is a fast and easy mean - jq - openssl - openvswitch +- Go 1.23.0 or above -**NOTE :** In certain operating systems such as CentOS 8.x, pip2 and pip3 binaries are installed instead of pip. In such situations create a softlink for "pip" that points to "pip2". +**NOTE :** In certain operating systems such as CentOS 8.x, pip2 and pip3 binaries are installed instead of pip. In such situations create a softlink for "pip" that points to "pip2". 
For OVN kubernetes KIND deployment, use the `kind.sh` script. First Download and build the OVN-Kubernetes repo: ``` -git clone github.com/ovn-org/ovn-kubernetes; +git clone https://github.com/ovn-kubernetes/ovn-kubernetes.git; cd ovn-kubernetes ``` The `kind.sh` script builds OVN-Kubernetes into a container image. To verify @@ -53,6 +54,13 @@ $ ./kind.sh $ popd ``` +**NOTE:** If you run into issues with installing jinjanate on Ubuntu due to [PEP-0668](https://peps.python.org/pep-0668/) you can work around via: +``` +sudo apt-get install pipx +pipx install jinjanator[yaml] +pipx ensurepath +``` + ### Run the KIND deployment with podman To verify local changes, the steps are mostly the same as with docker, except the `fedora` make target: @@ -80,12 +88,14 @@ To deploy KIND however, you need to start it as root and then copy root's kube c $ pushd contrib $ sudo ./kind.sh -ep podman $ sudo cp /root/ovn.conf ~/.kube/kind-config -$ sudo chown $(id -u):$(id -g) ~/.kube/kind-config +$ sudo chown $(id -u):$(id -g) -R ~/.kube $ export KUBECONFIG=~/.kube/kind-config $ popd ``` -This will launch a KIND deployment. By default the cluster is named `ovn`. +**NOTE:** If you installed go via the official path on Linux and have encountered the "go: command not found" issue, you can preserve your environment when doing sudo: `sudo --preserve-env=PATH ./kind.sh -ep podman` + +This will launch a KIND deployment. By default, the cluster is named `ovn`. ``` $ kubectl get nodes From f1a31ed31a6444418b49f21f0d4902dd951f7258 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Wed, 7 May 2025 13:21:36 -0400 Subject: [PATCH 047/181] Add static FDB entry to OVS for shared MAC The FDB lookup is only used for non-destined shared MAC traffic. When OVN or the host send a packet that hits a NORMAL action it will initate MAC learning and can drive up the CPU of OVS. We still need NORMAL action to account for sending to unknown ports like localnet ports, but we do not want to learn the shared MAC. 
Therefore create a static entry binding it to the LOCAL port. Signed-off-by: Tim Rozet --- go-controller/pkg/node/gateway.go | 5 +++++ go-controller/pkg/node/gateway_init_linux_test.go | 9 +++++++++ go-controller/pkg/node/gateway_udn_test.go | 3 +++ go-controller/pkg/util/ovs.go | 13 +++++++++++++ 4 files changed, 30 insertions(+) diff --git a/go-controller/pkg/node/gateway.go b/go-controller/pkg/node/gateway.go index 1b4544f89b..38a7ad2910 100644 --- a/go-controller/pkg/node/gateway.go +++ b/go-controller/pkg/node/gateway.go @@ -424,6 +424,11 @@ func gatewayInitInternal(nodeName, gwIntf, egressGatewayIntf string, gwNextHops } } + // Set static FDB entry for LOCAL port + if err := util.SetStaticFDBEntry(gatewayBridge.bridgeName, gatewayBridge.bridgeName, gatewayBridge.macAddress); err != nil { + return nil, nil, err + } + l3GwConfig := util.L3GatewayConfig{ Mode: config.Gateway.Mode, ChassisID: chassisID, diff --git a/go-controller/pkg/node/gateway_init_linux_test.go b/go-controller/pkg/node/gateway_init_linux_test.go index 0f6eab05ce..8bc38dcbf7 100644 --- a/go-controller/pkg/node/gateway_init_linux_test.go +++ b/go-controller/pkg/node/gateway_init_linux_test.go @@ -195,6 +195,9 @@ func shareGatewayInterfaceTest(app *cli.App, testNS ns.NetNS, Cmd: "ovs-vsctl --timeout=15 --if-exists get Open_vSwitch . other_config:hw-offload", Output: fmt.Sprintf("%t", hwOffload), }) + fexec.AddFakeCmdsNoOutputNoError([]string{ + "ovs-appctl --timeout=15 fdb/add breth0 breth0 0 " + eth0MAC, + }) fexec.AddFakeCmd(&ovntest.ExpectedCmd{ Cmd: "ovs-vsctl --timeout=15 get Interface patch-breth0_node1-to-br-int ofport", Output: "5", @@ -633,6 +636,9 @@ func shareGatewayInterfaceDPUTest(app *cli.App, testNS ns.NetNS, Cmd: "ovs-vsctl --timeout=15 --if-exists get Open_vSwitch . 
other_config:hw-offload", Output: "false", }) + fexec.AddFakeCmdsNoOutputNoError([]string{ + fmt.Sprintf("ovs-appctl --timeout=15 fdb/add %s %s 0 %s", brphys, brphys, hostMAC), + }) // GetDPUHostInterface fexec.AddFakeCmd(&ovntest.ExpectedCmd{ Cmd: "ovs-vsctl --timeout=15 list-ports " + brphys, @@ -1086,6 +1092,9 @@ OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0` Cmd: "ovs-vsctl --timeout=15 --if-exists get Open_vSwitch . other_config:hw-offload", Output: "false", }) + fexec.AddFakeCmdsNoOutputNoError([]string{ + "ovs-appctl --timeout=15 fdb/add breth0 breth0 0 " + eth0MAC, + }) fexec.AddFakeCmd(&ovntest.ExpectedCmd{ Cmd: "ovs-vsctl --timeout=15 get Interface patch-breth0_node1-to-br-int ofport", Output: "5", diff --git a/go-controller/pkg/node/gateway_udn_test.go b/go-controller/pkg/node/gateway_udn_test.go index 8c38c7ec5b..4f3e4efa67 100644 --- a/go-controller/pkg/node/gateway_udn_test.go +++ b/go-controller/pkg/node/gateway_udn_test.go @@ -171,6 +171,9 @@ func setUpGatewayFakeOVSCommands(fexec *ovntest.FakeExec) { Cmd: "ovs-vsctl --timeout=15 --if-exists get Open_vSwitch . 
other_config:hw-offload", Output: "false", }) + fexec.AddFakeCmdsNoOutputNoError([]string{ + "ovs-appctl --timeout=15 fdb/add breth0 breth0 0 00:00:00:55:66:99", + }) fexec.AddFakeCmd(&ovntest.ExpectedCmd{ Cmd: "ovs-vsctl --timeout=15 get Interface patch-breth0_worker1-to-br-int ofport", Output: "5", diff --git a/go-controller/pkg/util/ovs.go b/go-controller/pkg/util/ovs.go index ff21e828db..e6322fd710 100644 --- a/go-controller/pkg/util/ovs.go +++ b/go-controller/pkg/util/ovs.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/json" "fmt" + "net" "regexp" "runtime" "strings" @@ -819,6 +820,18 @@ func DetectCheckPktLengthSupport(bridge string) (bool, error) { return false, nil } +// SetStaticFDBEntry programs a static MAC entry into the OVS FIB and disables MAC learning for this entry +func SetStaticFDBEntry(bridge, port string, mac net.HardwareAddr) error { + // Assume default VLAN for local port + vlan := "0" + stdout, stderr, err := RunOVSAppctl("fdb/add", bridge, port, vlan, mac.String()) + if err != nil { + return fmt.Errorf("failed to add FDB entry to OVS for LOCAL port, "+ + "stdout: %q, stderr: %q, error: %v", stdout, stderr, err) + } + return nil +} + // IsOvsHwOffloadEnabled checks if OvS Hardware Offload is enabled. func IsOvsHwOffloadEnabled() (bool, error) { stdout, stderr, err := RunOVSVsctl("--if-exists", "get", From 813e2800dd840c8d678c5bb79f116c5ebda2ec0d Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Thu, 26 Jun 2025 18:27:38 -0400 Subject: [PATCH 048/181] Fixes FDB learning Commit f978967 caused a regression in performance. As the below issue describes, the egress traffic from OVN will now use NORMAL action, which will cause an FDB lookup and then FLOOD if not found. This always ends up being the case because the reply ARP packet from the physical port is flooded to the patch port and the LOCAL port. This causes an increase in CPU and unnecessarily flooding packets. 
We need layer 2 packets destined to the shared gateway mac to go to both the host and OVN. This is so both can receive ARP replies, etc. However, we also need the FDB entry in OVS to get updated, for our new functionality with using the NORMAL action. To fix this, add a static FDB entry for LOCAL, then modify the layer 2 flooding flow actions from "output:patch,LOCAL" to "output:patch,NORMAL". Since the FDB entry is bound in the table to LOCAL, it is effectively forwarding the packets the same as before, but with the added bonus of FDB learning on ingress. Fixes: #5318 Signed-off-by: Tim Rozet --- go-controller/pkg/node/gateway_shared_intf.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index d763089082..0a1dbdd0d3 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -1897,7 +1897,7 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin for _, netConfig := range bridge.patchedNetConfigs() { actions += "output:" + netConfig.ofPortPatch + "," } - actions += strip_vlan + "output:" + ofPortHost + actions += strip_vlan + "NORMAL" dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=10, table=0, in_port=%s, %s dl_dst=%s, actions=%s", defaultOpenFlowCookie, ofPortPhys, match_vlan, bridgeMacAddress, actions)) From 098a3aa7728b45a739ab569587d47e105a54d486 Mon Sep 17 00:00:00 2001 From: Patryk Diak Date: Thu, 20 Mar 2025 10:07:11 +0100 Subject: [PATCH 049/181] Fix UDN nftables mark chain cleanup There is no need to flush the chain before removing. Additionaly handle the case where the chain was already removed. 
Signed-off-by: Patryk Diak --- go-controller/pkg/node/gateway_udn.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/go-controller/pkg/node/gateway_udn.go b/go-controller/pkg/node/gateway_udn.go index 7ab5b50cc9..ba299e60fd 100644 --- a/go-controller/pkg/node/gateway_udn.go +++ b/go-controller/pkg/node/gateway_udn.go @@ -310,7 +310,9 @@ func (udng *UserDefinedNetworkGateway) delMarkChain() error { chain := &knftables.Chain{ Name: GetUDNMarkChain(fmt.Sprintf("0x%x", udng.pktMark)), } - tx.Flush(chain) + // Delete would return an error if we tried to delete a chain that didn't exist, so + // we do an Add first (which is a no-op if the chain already exists) and then Delete. + tx.Add(chain) tx.Delete(chain) return nft.Run(context.TODO(), tx) } From 3735ec2d3d539999465018e36d2f6c26905d9203 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Fri, 27 Jun 2025 12:53:05 -0400 Subject: [PATCH 050/181] Remove physical port from l2 flow This allows a localnet VM arp reply to go to OVN, rather than a lookup that only hits the LOCAL port in the fdb table. Signed-off-by: Tim Rozet --- go-controller/pkg/node/gateway_shared_intf.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index 0a1dbdd0d3..00c96cef1a 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -1899,8 +1899,8 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin } actions += strip_vlan + "NORMAL" dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=10, table=0, in_port=%s, %s dl_dst=%s, actions=%s", - defaultOpenFlowCookie, ofPortPhys, match_vlan, bridgeMacAddress, actions)) + fmt.Sprintf("cookie=%s, priority=10, table=0, %s dl_dst=%s, actions=%s", + defaultOpenFlowCookie, match_vlan, bridgeMacAddress, actions)) } // table 0, check packets coming from OVN have the correct mac address. 
Low priority flows that are a catch all From 0b38e623e52d6425ce639f420f7671fe0ec15371 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Mon, 30 Jun 2025 13:50:32 +0200 Subject: [PATCH 051/181] [svc controller] Stop handlers on shutdown. Before UDN services controller was only stopped together with the whole watchFactory, so there was no need to explicitly stop added event handlers. With UDN we create and delete this controller per UDN, so an explicit handler is required. Otherwise it will cause a memory leak. Signed-off-by: Nadia Pinaeva --- .../services/services_controller.go | 40 +++++++++++++++---- 1 file changed, 33 insertions(+), 7 deletions(-) diff --git a/go-controller/pkg/ovn/controller/services/services_controller.go b/go-controller/pkg/ovn/controller/services/services_controller.go index e03ad40b5c..b97802c85a 100644 --- a/go-controller/pkg/ovn/controller/services/services_controller.go +++ b/go-controller/pkg/ovn/controller/services/services_controller.go @@ -168,6 +168,11 @@ type Controller struct { useTemplates bool netInfo util.NetInfo + + // handlers stored for shutdown + nodeHandler cache.ResourceEventHandlerRegistration + svcHandler cache.ResourceEventHandlerRegistration + endpointHandler cache.ResourceEventHandlerRegistration } // Run will not return until stopCh is closed. 
workers determines how many @@ -180,15 +185,15 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}, wg *sync.WaitGroup // wait until we're told to stop <-stopCh - klog.Infof("Shutting down controller %s for network=%s", controllerName, c.netInfo.GetNetworkName()) - c.queue.ShutDown() + c.Cleanup() }() c.useLBGroups = useLBGroups c.useTemplates = useTemplates klog.Infof("Starting controller %s for network=%s", controllerName, c.netInfo.GetNetworkName()) - nodeHandler, err := c.nodeTracker.Start(c.nodeInformer) + var err error + c.nodeHandler, err = c.nodeTracker.Start(c.nodeInformer) if err != nil { return err } @@ -197,12 +202,12 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}, wg *sync.WaitGroup c.startupDoneLock.Lock() c.startupDone = false c.startupDoneLock.Unlock() - if !util.WaitForHandlerSyncWithTimeout(nodeControllerName, stopCh, types.HandlerSyncTimeout, nodeHandler.HasSynced) { + if !util.WaitForHandlerSyncWithTimeout(nodeControllerName, stopCh, types.HandlerSyncTimeout, c.nodeHandler.HasSynced) { return fmt.Errorf("error syncing node tracker handler") } klog.Infof("Setting up event handlers for services for network=%s", c.netInfo.GetNetworkName()) - svcHandler, err := c.serviceInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{ + c.svcHandler, err = c.serviceInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{ AddFunc: c.onServiceAdd, UpdateFunc: c.onServiceUpdate, DeleteFunc: c.onServiceDelete, @@ -212,7 +217,7 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}, wg *sync.WaitGroup } klog.Infof("Setting up event handlers for endpoint slices for network=%s", c.netInfo.GetNetworkName()) - endpointHandler, err := c.endpointSliceInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace( + c.endpointHandler, err = 
c.endpointSliceInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace( // Filter out endpointslices that don't belong to this network (i.e. keep only kube-generated endpointslices if // on default network, keep only mirrored endpointslices for this network if on UDN) util.GetEndpointSlicesEventHandlerForNetwork( @@ -227,7 +232,7 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}, wg *sync.WaitGroup } klog.Infof("Waiting for service and endpoint handlers to sync for network=%s", c.netInfo.GetNetworkName()) - if !util.WaitForHandlerSyncWithTimeout(controllerName, stopCh, types.HandlerSyncTimeout, svcHandler.HasSynced, endpointHandler.HasSynced) { + if !util.WaitForHandlerSyncWithTimeout(controllerName, stopCh, types.HandlerSyncTimeout, c.svcHandler.HasSynced, c.endpointHandler.HasSynced) { return fmt.Errorf("error syncing service and endpoint handlers") } @@ -255,6 +260,27 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}, wg *sync.WaitGroup return nil } +func (c *Controller) Cleanup() { + klog.Infof("Shutting down controller %s for network=%s", controllerName, c.netInfo.GetNetworkName()) + c.queue.ShutDown() + + if c.nodeHandler != nil { + if err := c.nodeInformer.Informer().RemoveEventHandler(c.nodeHandler); err != nil { + klog.Errorf("Failed to remove node handler for network %s: %v", c.netInfo.GetNetworkName(), err) + } + } + if c.svcHandler != nil { + if err := c.serviceInformer.Informer().RemoveEventHandler(c.svcHandler); err != nil { + klog.Errorf("Failed to remove service handler for network %s: %v", c.netInfo.GetNetworkName(), err) + } + } + if c.endpointHandler != nil { + if err := c.endpointSliceInformer.Informer().RemoveEventHandler(c.endpointHandler); err != nil { + klog.Errorf("Failed to remove endpoint handler for network %s: %v", c.netInfo.GetNetworkName(), err) + } + } +} + // worker runs a worker thread that just dequeues items, processes them, and // marks them done. 
You may run as many of these in parallel as you wish; the // workqueue guarantees that they will not end up processing the same service From fa83c31b5ad6506f587434dae0cd55d7c872447f Mon Sep 17 00:00:00 2001 From: Flavio Fernandes Date: Sun, 29 Jun 2025 18:08:16 -0400 Subject: [PATCH 052/181] Add network QoS guide to docs navigation This commit adds a user guide doc for the OKEP-4380: Network QoS Support https://github.com/ovn-kubernetes/ovn-kubernetes/blob/master/docs/okeps/okep-4380-network-qos.md Signed-off-by: Flavio Fernandes --- docs/features/network-qos-guide.md | 329 +++++++++++++++++++++++++++++ mkdocs.yml | 4 +- 2 files changed, 332 insertions(+), 1 deletion(-) create mode 100644 docs/features/network-qos-guide.md diff --git a/docs/features/network-qos-guide.md b/docs/features/network-qos-guide.md new file mode 100644 index 0000000000..586368fb32 --- /dev/null +++ b/docs/features/network-qos-guide.md @@ -0,0 +1,329 @@ +# Guide to Using Network QoS + +## Contents + +1. [Overview](#1-overview) +2. [Create a Secondary Network (NAD)](#2-create-a-secondary-network) +3. [Define a NetworkQoS Policy](#3-define-a-networkqos-policy) +4. [Create Sample Pods and Verify the Configuration](#4-create-sample-pods-and-verify-the-configuration) +5. [Explain the NetworkQoS Object](#5-explain-the-networkqos-object) + +## **1 Overview** + +Differentiated Services Code Point (DSCP) marking and egress bandwidth metering let you prioritize or police specific traffic flows. The new **NetworkQoS** Custom Resource Definition (CRD) in [ovn-kubernetes](https://github.com/ovn-kubernetes/ovn-kubernetes/blob/master/dist/templates/k8s.ovn.org_networkqoses.yaml.j2) makes both features available to Kubernetes users on **all** pod interfaces—primary or secondary—without touching pod manifests. + +This guide provides a step-by-step example of how to use this feature. Before you begin, ensure that you have a Kubernetes cluster configured with the ovn-kubernetes CNI. 
Since the examples use network attachments, you must run the cluster with multiple network support enabled. In a kind cluster, you would use the following flags: + +```bash +cd contrib +./kind-helm.sh -nqe -mne ; # --enable-network-qos --enable-multi-network +``` + +## **2 Create a Secondary Network** + +File: nad.yaml + +```yaml +apiVersion: k8s.cni.cncf.io/v1 +kind: NetworkAttachmentDefinition +metadata: + name: ovn-stream + namespace: default + labels: # label needed for NetworkQoS selector + nad-type: ovn-kubernetes-nqos +spec: + config: |2 + { + "cniVersion": "1.0.0", + "name": "ovn-stream", + "type": "ovn-k8s-cni-overlay", + "topology": "layer3", + "subnets": "10.245.0.0/16/24", + "mtu": 1300, + "master": "eth1", + "netAttachDefName": "default/ovn-stream" + } +``` +*Why the label?* `NetworkQoS` uses a label selector to find matching NADs. Without at least one label, the selector cannot match. + +## **3 Define a NetworkQoS Policy** + +File: nqos.yaml + +```yaml +apiVersion: k8s.ovn.org/v1alpha1 +kind: NetworkQoS +metadata: + name: qos-external + namespace: default +spec: + networkSelectors: + - networkSelectionType: NetworkAttachmentDefinitions + networkAttachmentDefinitionSelector: + namespaceSelector: {} # any namespace + networkSelector: + matchLabels: + nad-type: ovn-kubernetes-nqos + podSelector: + matchLabels: + nqos-app: bw-limited + priority: 10 # higher value wins in a tie-break + egress: + - dscp: 20 + bandwidth: + burst: 100 # kilobits + rate: 20000 # kbps + classifier: + to: + - ipBlock: + cidr: 0.0.0.0/0 + except: + - 10.11.12.13/32 + - 172.16.0.0/12 + - 192.168.0.0/16 +``` +A full CRD template lives [here](https://github.com/ovn-kubernetes/ovn-kubernetes/blob/master/dist/templates/k8s.ovn.org_networkqoses.yaml.j2). + +The `egress` field is a list, allowing you to define multiple markings and bandwidth limits based on different classifiers. 
+ +Note that this configuration will apply to the NAD of pods based on the network selector, and only on pods that have the label `nqos-app: bw-limited`. + +```bash +$ kubectl create -f nad.yaml && \ + kubectl create -f nqos.yaml + +networkattachmentdefinition.k8s.cni.cncf.io/ovn-stream created +networkqos.k8s.ovn.org/qos-external created +``` +At this point, the output from `kubectl get networkqoses` will look like this: + +```bash +$ kubectl api-resources -owide | head -1 ; \ + kubectl api-resources -owide | grep NetworkQoS +NAME SHORTNAMES APIVERSION NAMESPACED KIND VERBS CATEGORIES +networkqoses k8s.ovn.org/v1alpha1 true NetworkQoS delete,deletecollection,get,list,patch,create,update,watch + +$ kubectl get networkqoses qos-external -n default -owide +NAME STATUS +qos-external NetworkQoS Destinations applied +``` + +## **4 Create Sample Pods and Verify the Configuration** + +### **4.1 Launch Test Pods** + +To test this, let's create a pod using a helper function that allows us to add labels to it. + +File: create_pod.source + +```bash +create_pod() { + local pod_name=${1:-pod0} + local node_name=${2:-ovn-worker} + local extra_labels=${3:-} + + NAMESPACE=$(kubectl config view --minify --output 'jsonpath={..namespace}') + NAMESPACE=${NAMESPACE:-default} + + if ! kubectl get pod "$pod_name" -n "$NAMESPACE" &>/dev/null; then + echo "Creating pod $pod_name in namespace $NAMESPACE..." 
+ + # Prepare labels block + labels_block=" name: $pod_name" + if [[ -n "$extra_labels" ]]; then + # Convert JSON string to YAML-compatible lines + while IFS="=" read -r k v; do + labels_block+=" + $k: $v" + done < <(echo "$extra_labels" | jq -r 'to_entries|map("\(.key)=\(.value)")|.[]') + fi + + # Generate the manifest + cat </dev/null 2>&1 & +# pod1 to pod2 +nohup kubectl exec -i pod1 -- ping -c 3600 -q $DST_IP_POD2 >/dev/null 2>&1 & + +sudo dnf install -y --quiet tcpdump ; # Install tcpdump, if needed + +IPNS=$(docker inspect --format '{{ '{{' }} .State.Pid }}' ovn-worker) +sudo nsenter -t ${IPNS} -n tcpdump -envvi eth0 geneve +``` + +``` +tcpdump: listening on eth0, link-type EN10MB (Ethernet), capture size 262144 bytes + +**Pod0 to Pod2**: Notice that since pod0 does not have the label to match against NetworkQoS, its TOS is 0. However, pod2's response is DSCP marked (tos 0x50), since pod2 matches the NetworkQoS criteria with the label `nqos-app: bw-limited`. + +12:46:30.755551 02:42:ac:12:00:06 > 02:42:ac:12:00:05, ethertype IPv4 (0x0800), length 156: (tos 0x0, ttl 64, id 26896, offset 0, flags [DF], proto UDP (17), length 142) + 172.18.0.6.38210 > 172.18.0.5.geneve: [bad udp cksum 0x58bb -> 0xc87d!] Geneve, Flags [C], vni 0x12, proto TEB (0x6558), options [class Open Virtual Networking (OVN) (0x102) type 0x80(C) len 8 data 00090006] + 0a:58:0a:f5:02:01 > 0a:58:0a:f5:02:03, ethertype IPv4 (0x0800), length 98: (tos 0x0, ttl 63, id 61037, offset 0, flags [DF], proto ICMP (1), length 84) + 10.245.4.4 > 10.245.2.3: ICMP echo request, id 14, seq 44, length 64 + +— + +12:46:30.755694 02:42:ac:12:00:05 > 02:42:ac:12:00:06, ethertype IPv4 (0x0800), length 156: (tos 0x50, ttl 64, id 46220, offset 0, flags [DF], proto UDP (17), length 142) + 172.18.0.5.38210 > 172.18.0.6.geneve: [bad udp cksum 0x58bb -> 0xc47d!] 
Geneve, Flags [C], vni 0x12, proto TEB (0x6558), options [class Open Virtual Networking (OVN) (0x102) type 0x80(C) len 8 data 0004000a] + 0a:58:0a:f5:04:01 > 0a:58:0a:f5:04:04, ethertype IPv4 (0x0800), length 98: (tos 0x50, ttl 63, id 45002, offset 0, flags [none], proto ICMP (1), length 84) + 10.245.2.3 > 10.245.4.4: ICMP echo reply, id 14, seq 44, length 64 + +—--------- + +**Pod1 to Pod2**: Traffic is marked both ways (both pods have the matching label) + +12:46:30.497289 02:42:ac:12:00:06 > 02:42:ac:12:00:05, ethertype IPv4 (0x0800), length 156: (tos 0x50, ttl 64, id 26752, offset 0, flags [DF], proto UDP (17), length 142) + 172.18.0.6.7856 > 172.18.0.5.geneve: [bad udp cksum 0x58bb -> 0x3f10!] Geneve, Flags [C], vni 0x12, proto TEB (0x6558), options [class Open Virtual Networking (OVN) (0x102) type 0x80(C) len 8 data 00090006] + 0a:58:0a:f5:02:01 > 0a:58:0a:f5:02:03, ethertype IPv4 (0x0800), length 98: (tos 0x50, ttl 63, id 21760, offset 0, flags [DF], proto ICMP (1), length 84) + 10.245.4.3 > 10.245.2.3: ICMP echo request, id 14, seq 56, length 64 + +— + +12:46:30.497381 02:42:ac:12:00:05 > 02:42:ac:12:00:06, ethertype IPv4 (0x0800), length 156: (tos 0x50, ttl 64, id 46019, offset 0, flags [DF], proto UDP (17), length 142) + 172.18.0.5.7856 > 172.18.0.6.geneve: [bad udp cksum 0x58bb -> 0x3b11!] Geneve, Flags [C], vni 0x12, proto TEB (0x6558), options [class Open Virtual Networking (OVN) (0x102) type 0x80(C) len 8 data 0004000a] + 0a:58:0a:f5:04:01 > 0a:58:0a:f5:04:03, ethertype IPv4 (0x0800), length 98: (tos 0x50, ttl 63, id 3850, offset 0, flags [none], proto ICMP (1), length 84) + 10.245.2.3 > 10.245.4.3: ICMP echo reply, id 14, seq 56, length 64 +``` + +## **5 Explain the NetworkQoS Object** + +Below is an *abbreviated* map of the CRD schema returned by `kubectl explain networkqos --recursive` (v1alpha1). Use this as a quick reference. 
For the definitive specification, always consult the `kubectl explain` output or the CRD YAML in the ovn-kubernetes repository. + +### **5.1 Top‑level `spec` keys** + +| Field | Type | Required | Purpose | +| ----- | ----- | ----- | ----- | +| **podSelector** | `LabelSelector` | No | Selects pods whose traffic will be evaluated by the QoS rules. If empty, all pods in the namespace are selected. | +| **networkSelectors[]** | list `NetworkSelector` | No | Restricts the rule to traffic on specific networks. If absent, the rule matches any interface. *(See §5.2)* | +| **priority** | `int` | **Yes** | Higher number → chosen first when multiple `NetworkQoS` objects match the same packet. | +| **egress[]** | list `EgressRule` | **Yes** | One or more marking / policing rules. Evaluated in the order listed. *(See §5.3)* | + +Note the square-bracket notation (`[]`) for **both** `egress` and `networkSelectors`—each is an array in the CRD. + +--- + +### **5.2 Inside a `networkSelectors[]` entry** + +Each list element tells the controller **where** the pods' egress traffic must flow in order to apply the rule. Exactly **one** selector type must be set. + +| Key | Required | Description | +| :---- | :---- | :---- | +| `networkSelectionType` | **Yes** | Enum that declares which selector below is populated. Common values: `NetworkAttachmentDefinitions`, `DefaultNetwork`, `SecondaryUserDefinedNetworks`, … | +| `networkAttachmentDefinitionSelector` | conditional | When `networkSelectionType=NetworkAttachmentDefinitions`. Selects NADs by **namespaceSelector** (required) *and* **networkSelector** (required). Both are ordinary `LabelSelectors`. | +| `secondaryUserDefinedNetworkSelector` | conditional | Used when `networkSelectionType=SecondaryUserDefinedNetworks`. Similar structure: required **namespaceSelector** & **networkSelector**. 
| +| `clusterUserDefinedNetworkSelector`, `primaryUserDefinedNetworkSelector` | conditional | Additional selector styles, each with required sub‑selectors as per the CRD. | + +**Typical usage** – `networkSelectionType: NetworkAttachmentDefinitions` + `networkAttachmentDefinitionSelector`. + +--- + +### **5.3 Inside an `egress[]` rule** + +| Field | Type | Required | Description | +| :---- | :---- | :---- | :---- | +| `dscp` | `int` (0 – 63) | **Yes** | DSCP value to stamp on the **inner** IP header. This value determines the traffic priority. | +| `bandwidth.rate` | `int` (kbps) | No | Sustained rate for the token-bucket policer (in kilobits per second). | +| `bandwidth.burst` | `int` (kilobits) | No | Maximum burst size that can accrue (in kilobits). | +| `classifier.to` / `classifier.from` | list `TrafficSelector` | No | CIDRs the packet destination (or source) must match. Each entry is an `ipBlock` supporting an `except` list. | +| `classifier.ports[]` | list | No | List of `{protocol, port}` tuples the packet must match; protocol is `TCP`, `UDP`, or `SCTP`. | + +If **all** specified classifier conditions match, the packet gets the DSCP mark and/or bandwidth policer defined above. This allows for fine-grained control over which traffic flows receive QoS treatment. 
diff --git a/mkdocs.yml b/mkdocs.yml index 87edfc2ba5..9fd08b2c08 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -126,7 +126,9 @@ nav: - MultiNetworkPolicies: features/multiple-networks/multi-network-policies.md - MultiNetworkRails: features/multiple-networks/multi-vtep.md - Multicast: features/multicast.md - - NetworkQoS: features/network-qos.md + - NetworkQoS: + - Overview: features/network-qos.md + - Usage Guide: features/network-qos-guide.md - LiveMigration: features/live-migration.md - HybridOverlay: features/hybrid-overlay.md - Hardware Acceleration: From 5ea894c86885e777983f95ac1245eff4805bbe64 Mon Sep 17 00:00:00 2001 From: Or Mergi Date: Thu, 15 May 2025 17:01:48 +0300 Subject: [PATCH 053/181] contrib, kind.sh: Fix local registry when using podman On podman push, it defaults to a secure connection. In our case the local registry uses an insecure connection, resulting in podman push failures and making it impossible to work with the local registry when podman is installed. Set podman to skip the secure connection check when pushing OVN-K images to the local registry. Signed-off-by: Or Mergi --- contrib/kind.sh | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/contrib/kind.sh b/contrib/kind.sh index 8c3f6eca6d..145abc3c72 100755 --- a/contrib/kind.sh +++ b/contrib/kind.sh @@ -822,6 +822,12 @@ set_ovn_image() { } build_ovn_image() { + local push_args="" + if [ "$OCI_BIN" == "podman" ]; then + # docker doesn't perform a tls check by default, only podman does, hence we need to disable it for podman. 
+ push_args="--tls-verify=false" + fi + if [ "$OVN_IMAGE" == local ]; then set_ovn_image @@ -834,14 +840,14 @@ build_ovn_image() { # store in local registry if [ "$KIND_LOCAL_REGISTRY" == true ];then echo "Pushing built image to local $OCI_BIN registry" - $OCI_BIN push "${OVN_IMAGE}" + $OCI_BIN push "$push_args" "$OVN_IMAGE" fi # We should push to local registry if image is not remote elif [ "${OVN_IMAGE}" != "" -a "${KIND_LOCAL_REGISTRY}" == true ] && (echo "$OVN_IMAGE" | grep / -vq); then local local_registry_ovn_image="localhost:5000/${OVN_IMAGE}" $OCI_BIN tag "$OVN_IMAGE" $local_registry_ovn_image OVN_IMAGE=$local_registry_ovn_image - $OCI_BIN push $OVN_IMAGE + $OCI_BIN push "$push_args" "$OVN_IMAGE" fi } From a1d47314593388f86851b32eacbde99f8cc069a0 Mon Sep 17 00:00:00 2001 From: Or Mergi Date: Thu, 15 May 2025 18:01:39 +0300 Subject: [PATCH 054/181] contrib,kind: Use skopeo to get the actual ovnkube-image digest When working with a local registry, the automation inspects the built ovnkube-image digest (SHA) and passes it to the daemonset manifest, in order to ensure the latest built image is deployed. Some container runtimes may not retain the same digest, resulting in having one image digest in the local runtime image and a different one on the local registry. To avoid that and get the actual image digest that exists in the local registry, use skopeo to inspect the image and get the actual digest. This change introduces a new dependency for the project. Signed-off-by: Or Mergi --- contrib/kind.sh | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/contrib/kind.sh b/contrib/kind.sh index 145abc3c72..fda6036d43 100755 --- a/contrib/kind.sh +++ b/contrib/kind.sh @@ -504,6 +504,11 @@ check_dependencies() { echo "Dependency not met: Neither docker nor podman found" exit 1 fi + + if command_exists podman && ! command_exists skopeo; then + echo "Dependency not met: skopeo not installed. 
Run the following command to install it: 'sudo dnf install skopeo'" + exit 1 + fi } OPENSSL="" @@ -854,8 +859,14 @@ build_ovn_image() { create_ovn_kube_manifests() { local ovnkube_image=${OVN_IMAGE} if [ "$KIND_LOCAL_REGISTRY" == true ];then - # When updating with local registry we have to reference the sha - ovnkube_image=$($OCI_BIN inspect --format='{{index .RepoDigests 0}}' $OVN_IMAGE) + # When updating with local registry we have to reference the image digest (SHA) + # Check the image digest in the local registry because it might be different than the digest in the local container runtime + if [ "$OCI_BIN" == "podman" ]; then + # due to differences in how podman and docker persist images, for podman use skopeo to get the image and digest. + ovnkube_image=$(skopeo inspect --format "{{.Name}}@{{.Digest}}" --tls-verify=false "docker://$OVN_IMAGE") + else + ovnkube_image=$($OCI_BIN inspect --format='{{index .RepoDigests 0}}' $OVN_IMAGE) + fi fi pushd ${DIR}/../dist/images if [ "$OVN_ENABLE_INTERCONNECT" == true ]; then From 8a70c81d7bf7f1a91a8c0c2081f50a0f0b218272 Mon Sep 17 00:00:00 2001 From: Martin Kennelly Date: Mon, 16 Jun 2025 10:49:32 +0100 Subject: [PATCH 055/181] EIP OVN controller: stop pod config flap func was refactored erroneously when network comparison was refactored. The if comparison went from: !cachedNetwork.Equals(ni) to: util.AreNetworksCompatible(cachedNetwork, ni) Disruption can be seen for brief periods of time. 
Signed-off-by: Martin Kennelly --- go-controller/pkg/ovn/egressip.go | 2 +- go-controller/pkg/ovn/egressip_test.go | 178 +++++++++++++++++++++++++ 2 files changed, 179 insertions(+), 1 deletion(-) diff --git a/go-controller/pkg/ovn/egressip.go b/go-controller/pkg/ovn/egressip.go index e79b9b29c5..08b52dd281 100644 --- a/go-controller/pkg/ovn/egressip.go +++ b/go-controller/pkg/ovn/egressip.go @@ -1083,7 +1083,7 @@ func (e *EgressIPController) deletePodEgressIPAssignments(ni util.NetInfo, name func (e *EgressIPController) deletePreviousNetworkPodEgressIPAssignments(ni util.NetInfo, name string, statusesToRemove []egressipv1.EgressIPStatusItem, pod *corev1.Pod) { cachedNetwork := e.getNetworkFromPodAssignment(getPodKey(pod)) if cachedNetwork != nil { - if util.AreNetworksCompatible(cachedNetwork, ni) { + if !util.AreNetworksCompatible(cachedNetwork, ni) { if err := e.deletePodEgressIPAssignments(cachedNetwork, name, statusesToRemove, pod); err != nil { // no error is returned because high probability network is deleted klog.Errorf("Failed to delete EgressIP %s assignment for pod %s/%s attached to network %s: %v", diff --git a/go-controller/pkg/ovn/egressip_test.go b/go-controller/pkg/ovn/egressip_test.go index b0e5ad142a..43ec170acb 100644 --- a/go-controller/pkg/ovn/egressip_test.go +++ b/go-controller/pkg/ovn/egressip_test.go @@ -3410,6 +3410,184 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" ) }) + ginkgo.Context("IPv4 on pod UPDATE", func() { + ginkgo.It("does not reconfigure or remove existing pod config if no change", func() { + config.OVNKubernetesFeature.EnableInterconnect = true + app.Action = func(*cli.Context) error { + egressPod := *newPodWithLabels(eipNamespace, podName, node1Name, podV4IP, egressPodLabel) + egressNamespace := newNamespace(eipNamespace) + nodeIPv4 := "192.168.126.210/24" + egressIP := net.ParseIP("192.168.126.211") + _, nodeSubnetV4, _ := net.ParseCIDR(v4Node1Subnet) + _, nodeSubnetV6, _ := 
net.ParseCIDR(v6Node1Subnet) + + annotations := map[string]string{ + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}", nodeIPv4, ""), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\"}", v4Node1Subnet, v6Node1Subnet), + "k8s.ovn.org/node-transit-switch-port-ifaddr": "{\"ipv4\":\"100.88.0.2/16\", \"ipv6\": \"fd97::2/64\"}", + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", nodeIPv4), + "k8s.ovn.org/zone-name": node1Name, + } + node := getNodeObj(node1Name, annotations, map[string]string{}) // add node to avoid errori-ing out on transit switch IP fetch + fakeOvn.startWithDBSetup( + libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + &nbdb.LogicalRouterPort{ + UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1Name + "-UUID", + Name: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1Name, + Networks: []string{nodeLogicalRouterIfAddrV6, nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: types.OVNClusterRouter, + UUID: types.OVNClusterRouter + "-UUID", + }, + &nbdb.LogicalRouter{ + Name: types.GWRouterPrefix + node1Name, + UUID: types.GWRouterPrefix + node1Name + "-UUID", + Ports: []string{types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1Name + "-UUID"}, + Options: map[string]string{"dynamic_neigh_routers": "false"}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node.Name + "-UUID", + Name: "k8s-" + node.Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(nodeSubnetV4).IP.String(), + "fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(nodeSubnetV6).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node.Name + "-UUID", + Name: node.Name, + Ports: []string{"k8s-" + node.Name + "-UUID"}, + }, + }, + }, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, + }, + &corev1.PodList{ + Items: []corev1.Pod{egressPod}, + }, + &corev1.NodeList{ + Items: []corev1.Node{node}, + }, + ) + + eIP := 
egressipv1.EgressIP{ + ObjectMeta: newEgressIPMeta(egressIPName), + Spec: egressipv1.EgressIPSpec{ + EgressIPs: []string{ + egressIP.String(), + }, + NamespaceSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "name": egressNamespace.Name, + }, + }, + PodSelector: metav1.LabelSelector{ + MatchLabels: egressPodLabel, + }, + }, + } + i, n, _ := net.ParseCIDR(podV4IP + "/23") + n.IP = i + fakeOvn.controller.logicalPortCache.add(&egressPod, "", types.DefaultNetworkName, "", nil, []*net.IPNet{n}) + err := fakeOvn.controller.WatchEgressIPNamespaces() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIPPods() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIP() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + fakeOvn.controller.eIPC.nodeZoneState.Store(nodeName, true) + _, err = fakeOvn.fakeClient.EgressIPClient.K8sV1().EgressIPs().Create(context.TODO(), &eIP, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + fakeOvn.patchEgressIPObj(node1Name, egressIPName, egressIP.String()) + gomega.Eventually(getEgressIPStatusLen(eIP.Name)).Should(gomega.Equal(1)) + + expectedDatabaseState := []libovsdbtest.TestData{ + getReRoutePolicy(egressPod.Status.PodIP, "4", "reroute-UUID", nodeLogicalRouterIPv4, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, + types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), + getEIPSNAT(podV4IP, egressPod.Namespace, egressPod.Name, egressIP.String(), "k8s-node1", DefaultNetworkControllerName), + &nbdb.LogicalRouter{ + Name: types.OVNClusterRouter, + UUID: types.OVNClusterRouter + "-UUID", + Policies: []string{"reroute-UUID"}, + }, + &nbdb.LogicalRouterPort{ + UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1Name + "-UUID", + Name: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1Name, + Networks: 
[]string{nodeLogicalRouterIfAddrV6, nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: types.GWRouterPrefix + node1Name, + UUID: types.GWRouterPrefix + node1Name + "-UUID", + Ports: []string{types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1Name + "-UUID"}, + Nat: []string{"egressip-nat-UUID"}, + Options: map[string]string{"dynamic_neigh_routers": "false"}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node.Name + "-UUID", + Name: "k8s-" + node.Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(nodeSubnetV4).IP.String(), + "fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(nodeSubnetV6).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node.Name + "-UUID", + Name: node.Name, + Ports: []string{"k8s-" + node.Name + "-UUID"}, + }, + } + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) + // async, create no-op updates that trigger reconcile for the selected pod async but update should continue to select the pod and not alter pod config + // meanwhile we watch the ovn dbs and ensure they do not alter for the given pods eip config + // therefore spawn a go routine to update the k8 constructs that will trigger reconcile of the pods, and, we want to ensure nothing is reconfigured. + errCh := make(chan error, 2) + go func() { + ns, err := fakeOvn.fakeClient.KubeClient.CoreV1().Namespaces().Get(context.TODO(), egressNamespace.Name, metav1.GetOptions{}) + if err != nil { + errCh <- err + return + } + // add new namespace label. Does not affect pod selection for EIP + ns = ns.DeepCopy() + ns.Labels["newlabel"] = "noop" + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{}) + if err != nil { + errCh <- err + return + } + // add new pod label. 
Does not affect pod selection for EIP + pod, err := fakeOvn.fakeClient.KubeClient.CoreV1().Pods(egressNamespace.Name).Get(context.TODO(), egressPod.Name, metav1.GetOptions{}) + if err != nil { + errCh <- err + return + } + pod = pod.DeepCopy() + pod.Labels["newlabel"] = "noop" + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(egressNamespace.Name).Update(context.TODO(), pod, metav1.UpdateOptions{}) + if err != nil { + errCh <- err + } + close(errCh) + }() + ginkgo.By("ensure OVN DB config for EIP remains consistent") + // ensure the DBs are unaltered + gomega.Consistently(fakeOvn.nbClient, 500*time.Millisecond, 1*time.Millisecond).WithTimeout(5 * time.Second).Should(libovsdbtest.HaveData(expectedDatabaseState)) + ginkgo.By("check for errors from goroutine updating namespace and pods") + select { + case err := <-errCh: + if err != nil { + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "must successfully update namespace and pods") + } + case <-time.After(100 * time.Millisecond): + // Updates completed successfully + } + return nil + } + err := app.Run([]string{app.Name}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + }) + ginkgo.Context("IPv6 on pod UPDATE", func() { ginkgo.DescribeTable("should remove OVN pod egress setup when EgressIP stops matching pod label", From db87df1d763800ede8a1cbf3f19fc068a4c0d1d4 Mon Sep 17 00:00:00 2001 From: Martin Kennelly Date: Thu, 26 Jun 2025 09:14:47 +0100 Subject: [PATCH 056/181] Layer 2 EIP: remove stale LRP if pod is remote For layer 2 support for EIP we always add a LRP to the GW router to provide load balancing (EIP HA) and pkt marking to support SNAT. For layer 2 connected pods selected by an EIP, and on the egress node, the controller may not delete GW LRP if the pod is remote. 
Signed-off-by: Martin Kennelly --- go-controller/pkg/ovn/egressip.go | 9 +- go-controller/pkg/ovn/egressip_udn_l2_test.go | 486 ++++++++++++++++++ 2 files changed, 494 insertions(+), 1 deletion(-) diff --git a/go-controller/pkg/ovn/egressip.go b/go-controller/pkg/ovn/egressip.go index 08b52dd281..d53ba5e633 100644 --- a/go-controller/pkg/ovn/egressip.go +++ b/go-controller/pkg/ovn/egressip.go @@ -2451,11 +2451,18 @@ func (e *EgressIPController) deletePodEgressIPAssignment(ni util.NetInfo, egress return err } var ops []ovsdb.Operation - if !loadedPodNode || isLocalZonePod { // node is deleted (we can't determine zone so we always try and nuke OR pod is local to zone) + // For CDN only, add SNATs to support external GW feature + if ni.IsDefault() && (!loadedPodNode || isLocalZonePod) { ops, err = e.addExternalGWPodSNATOps(ni, nil, pod.Namespace, pod.Name, status) if err != nil { return err } + } + // Following cases will ensure removal of a pod LRP + // Case 1 - node where pod is hosted is not known + // Case 2 - pod is within the local zone + // case 3 - a local zone node is egress node and pod is attached to layer 2. For layer2, there is always an LRP attached to the egress Node GW router + if !loadedPodNode || isLocalZonePod || (isLocalZoneEgressNode && ni.IsSecondary() && ni.TopologyType() == types.Layer2Topology) { ops, err = e.deleteReroutePolicyOps(ni, ops, status, egressIPName, nextHopIP, routerName, pod.Namespace, pod.Name) if errors.Is(err, libovsdbclient.ErrNotFound) { // if the gateway router join IP setup is already gone, then don't count it as error. 
diff --git a/go-controller/pkg/ovn/egressip_udn_l2_test.go b/go-controller/pkg/ovn/egressip_udn_l2_test.go index 23a930b2ef..c9080d6b71 100644 --- a/go-controller/pkg/ovn/egressip_udn_l2_test.go +++ b/go-controller/pkg/ovn/egressip_udn_l2_test.go @@ -2558,4 +2558,490 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) }) + + ginkgo.Context("Pod delete", func() { + ginkgo.It("should delete UDN and CDN config", func() { + // create a single EIP IP selecting multiple pods both local and remote. + // Delete pods and ensure OVN DB is as expected + app.Action = func(ctx *cli.Context) error { + // Node 1 is local, Node 2 is remote + egressIP := "192.168.126.101" + node1IPv4 := "192.168.126.202" + node1IPv4CIDR := node1IPv4 + "/24" + node2IPv4 := "192.168.126.51" + node2IPv4CIDR := node2IPv4 + "/24" + _, node1CDNSubnet, _ := net.ParseCIDR(v4Node1Subnet) + _, node1UDNSubnet, _ := net.ParseCIDR(v4Node1Net1) + nadName := util.GetNADName(eipNamespace2, nadName1) + egressCDNNamespace := newNamespaceWithLabels(eipNamespace, egressPodLabel) + egressUDNNamespace := newUDNNamespaceWithLabels(eipNamespace2, egressPodLabel) + egressPodCDNLocal := *newPodWithLabels(eipNamespace, podName, node1Name, podV4IP, egressPodLabel) + egressPodUDNLocal := *newPodWithLabels(eipNamespace2, podName2, node1Name, v4Pod1IPNode1Net1, egressPodLabel) + egressPodCDNRemote := *newPodWithLabels(eipNamespace, podName3, node2Name, podV4IP2, egressPodLabel) + setPrimaryNetworkAnnot(&egressPodCDNRemote, ovntypes.DefaultNetworkName, fmt.Sprintf("%s%s", podV4IP2, util.GetIPFullMaskString(podV4IP2))) + egressPodUDNRemote := *newPodWithLabels(eipNamespace2, podName4, node2Name, v4Pod2IPNode2Net1, egressPodLabel) + setPrimaryNetworkAnnot(&egressPodUDNRemote, nadName, fmt.Sprintf("%s%s", v4Pod2IPNode2Net1, util.GetIPFullMaskString(v4Pod2IPNode2Net1))) + netconf := ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{ + Name: 
networkName1, + Type: "ovn-k8s-cni-overlay", + }, + Role: ovntypes.NetworkRolePrimary, + Topology: ovntypes.Layer2Topology, + NADName: nadName, + Subnets: v4Net1, + } + nad, err := newNetworkAttachmentDefinition( + eipNamespace2, + nadName1, + netconf, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: secondaryNetworkID} + netInfo, err := util.NewNetInfo(&netconf) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + node1Annotations := map[string]string{ + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}", node1IPv4CIDR, ""), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", v4Node1Subnet, networkName1, v4Node1Net1), + "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node1Tsp), + "k8s.ovn.org/zone-name": node1Name, + "k8s.ovn.org/node-chassis-id": "473ca66d-d800-472f-b289-1ab81ae7f21c", + "k8s.ovn.org/remote-zone-migrated": node1Name, + "k8s.ovn.org/node-gateway-router-lrp-ifaddrs": fmt.Sprintf("{\"default\":{\"ipv4\":\"%s\"},\"%s\":{\"ipv4\":\"%s\"}}", node1DefaultRtoJIPCIDR, networkName1, node1Network1RtoSIPCIDR), + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), + util.OvnNodeL3GatewayConfig: fmt.Sprintf(`{"%s":{"mode":"local","mac-address":"7e:57:f8:f0:3c:49", "ip-address":"%s", "next-hop":"%s", "next-hops": ["%s"]}, +"default":{"mode":"local","mac-address":"7e:57:f8:f0:3c:49", "ip-address":"192.168.126.12/24", "next-hop": "192.168.126.1", "next-hops": ["192.168.126.1"]}}`, networkName1, v4Net1, gwIP, gwIP), + } + labels := map[string]string{ + "k8s.ovn.org/egress-assignable": "", + } + node1 := getNodeObj(node1Name, node1Annotations, labels) + node2Annotations := map[string]string{ + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}", node2IPv4CIDR, ""), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\",\"%s\":\"%s\"}", 
v4Node2Subnet, networkName1, v4Node2Net1), + "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node2Tsp), + "k8s.ovn.org/zone-name": node2Name, + "k8s.ovn.org/node-chassis-id": "473ca66d-d800-472f-b289-1ab81ae7f21c", + "k8s.ovn.org/remote-zone-migrated": node2Name, + "k8s.ovn.org/node-gateway-router-lrp-ifaddrs": fmt.Sprintf("{\"default\":{\"ipv4\":\"%s\"},\"%s\":{\"ipv4\":\"%s\"}}", node2DefaultRtoJIPCIDR, networkName1, node2Network1RtoSIPCIDR), + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), + util.OvnNodeL3GatewayConfig: fmt.Sprintf(`{"%s":{"mode":"local","mac-address":"7e:57:f8:f0:3c:49", "ip-address":"%s", "next-hop":"%s", "next-hops": ["%s"]}, +"default":{"mode":"local","mac-address":"7e:57:f8:f0:3c:49", "ip-address":"192.168.126.12/24", "next-hop": "192.168.126.1", "next-hops": ["192.168.126.1"]}}`, networkName1, v4Net1, gwIP, gwIP), + } + node2 := getNodeObj(node2Name, node2Annotations, nil) + oneNodeStatus := []egressipv1.EgressIPStatusItem{ + { + Node: node1Name, + EgressIP: egressIP, + }, + } + eIP := egressipv1.EgressIP{ + ObjectMeta: newEgressIPMetaWithMark(egressIPName, eIP1Mark), + Spec: egressipv1.EgressIPSpec{ + EgressIPs: []string{egressIP}, + PodSelector: metav1.LabelSelector{ + MatchLabels: egressPodLabel, + }, + NamespaceSelector: metav1.LabelSelector{ + MatchLabels: egressPodLabel, + }, + }, + Status: egressipv1.EgressIPStatus{ + Items: oneNodeStatus, + }, + } + + initialDB := []libovsdbtest.TestData{ + //CDN start + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name, + Networks: []string{node1DefaultRtoJIPCIDR}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, + UUID: ovntypes.GWRouterPrefix + node1.Name + 
"-UUID", + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID"}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node1Name + "-UUID", + Name: "k8s-" + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1CDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node1Name + "-UUID", + Name: node1Name, + Ports: []string{"k8s-" + node1Name + "-UUID"}, + }, + // UDN start + &nbdb.LogicalRouterPort{ + UUID: ovntypes.RouterToSwitchPrefix + networkName1_ + layer2SwitchName + "-UUID", + Name: ovntypes.RouterToSwitchPrefix + networkName1_ + layer2SwitchName, + Networks: []string{node1Network1RtoSIPCIDR}, + }, + &nbdb.LogicalRouter{ + UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedGWRouterName(node1.Name), + Ports: []string{ovntypes.RouterToSwitchPrefix + networkName1_ + layer2SwitchName + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: networkName1, ovntypes.TopologyExternalID: ovntypes.Layer2Topology}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + networkName1_ + node1Name + "-UUID", + Name: "k8s-" + networkName1_ + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedSwitchName(node1.Name), + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: networkName1, ovntypes.TopologyExternalID: ovntypes.Layer2Topology}, + }, + } + fakeOvn.startWithDBSetup( + libovsdbtest.TestSetup{ + NBData: initialDB, + }, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2}, + }, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressCDNNamespace, *egressUDNNamespace}, + }, + &corev1.PodList{ + Items: []corev1.Pod{egressPodCDNLocal, egressPodUDNLocal, 
egressPodCDNRemote, egressPodUDNRemote}, + }, + &nadv1.NetworkAttachmentDefinitionList{ + Items: []nadv1.NetworkAttachmentDefinition{*nad}, + }, + &egressipv1.EgressIPList{ + Items: []egressipv1.EgressIP{}, + }, + ) + asf := addressset.NewOvnAddressSetFactory(fakeOvn.nbClient, true, false) + // watch EgressIP depends on UDN enabled svcs address set being available + c := udnenabledsvc.NewController(fakeOvn.nbClient, asf, fakeOvn.controller.watchFactory.ServiceCoreInformer(), []string{}) + go func() { + gomega.Expect(c.Run(ctx.Done())).Should(gomega.Succeed()) + }() + // Add CDN pod IPs to CDN cache + iCDN, nCDN, _ := net.ParseCIDR(podV4IP + "/23") + nCDN.IP = iCDN + fakeOvn.controller.logicalPortCache.add(&egressPodCDNLocal, "", ovntypes.DefaultNetworkName, "", nil, []*net.IPNet{nCDN}) + fakeOvn.controller.zone = node1Name + fakeOvn.eIPController.zone = node1Name + secConInfo, ok := fakeOvn.secondaryControllers[networkName1] + gomega.Expect(ok).To(gomega.BeTrue()) + err = fakeOvn.eIPController.SyncLocalNodeZonesCache() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.networkManager.Start() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer fakeOvn.networkManager.Stop() + err = fakeOvn.controller.WatchEgressNodes() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIPNamespaces() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIPPods() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIP() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // Add pod IPs to UDN cache + iUDN, nUDN, _ := net.ParseCIDR(v4Pod1IPNode1Net1 + "/23") + nUDN.IP = iUDN + secConInfo.bnc.logicalPortCache.add(&egressPodUDNLocal, "", util.GetNADName(nad.Namespace, nad.Name), "", nil, []*net.IPNet{nUDN}) + _, err = fakeOvn.fakeClient.EgressIPClient.K8sV1().EgressIPs().Create(context.TODO(), &eIP, metav1.CreateOptions{}) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASCDNv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}, ovntypes.DefaultNetworkName, DefaultNetworkControllerName) + egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4, node2IPv4}) + egressIPServedPodsASUDNv4, _ := buildEgressIPServedPodsAddressSetsForController([]string{v4Pod1IPNode1Net1}, netInfo.GetNetworkName(), DefaultNetworkControllerName) + gomega.Eventually(c.IsAddressSetAvailable).Should(gomega.BeTrue()) + dbIDs := udnenabledsvc.GetAddressSetDBIDs() + udnEnabledSvcV4, _ := addressset.GetTestDbAddrSets(dbIDs, []string{}) + node1LRP := "k8s-node1" + expectedDatabaseState := []libovsdbtest.TestData{ + // CDN + getReRouteStaticRoute(v4ClusterSubnet, node1DefaultRtoJIP), + getReRoutePolicy(podV4IP, "4", "reroute-UUID", []string{node1DefaultRtoJIP}, + getEgressIPLRPReRouteDbIDs(eIP.Name, egressPodCDNLocal.Namespace, egressPodCDNLocal.Name, IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs()), + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4ClusterSubnet, v4ClusterSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4ClusterSubnet, config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, 
+ UUID: ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID"}, + Nat: []string{"egressip-nat-UUID", "egressip-nat2-UUID"}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + Policies: []string{"default-no-reroute-UUID", "no-reroute-service-UUID", + "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic", "reroute-UUID"}, + StaticRoutes: []string{"reroute-static-route-UUID"}, + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name, + Networks: []string{node1DefaultRtoJIPCIDR}, + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", + egressIPServedPodsASCDNv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": ovntypes.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node1Name + "-UUID", + Name: "k8s-" + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1CDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node1Name + "-UUID", + Name: node1Name, + Ports: []string{"k8s-" + node1Name + "-UUID"}, + QOSRules: []string{"default-QoS-UUID"}, + }, + &nbdb.NAT{ + UUID: "egressip-nat-UUID", + LogicalIP: podV4IP2, + ExternalIP: egressIP, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPodCDNRemote.Namespace, egressPodCDNRemote.Name, IPFamilyValueV4, DefaultNetworkControllerName).GetExternalIDs(), + 
Type: nbdb.NATTypeSNAT, + LogicalPort: &node1LRP, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.NAT{ + UUID: "egressip-nat2-UUID", + LogicalIP: podV4IP, + ExternalIP: egressIP, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPodCDNLocal.Namespace, egressPodCDNLocal.Name, IPFamilyValueV4, DefaultNetworkControllerName).GetExternalIDs(), + Type: nbdb.NATTypeSNAT, + LogicalPort: &node1LRP, + Options: map[string]string{ + "stateless": "false", + }, + }, + getNoReRouteReplyTrafficPolicy(ovntypes.DefaultNetworkName, DefaultNetworkControllerName), + getDefaultQoSRule(false, ovntypes.DefaultNetworkName, DefaultNetworkControllerName), + egressSVCServedPodsASv4, + egressIPServedPodsASCDNv4, + egressNodeIPsASv4, + + // UDN + getReRoutePolicyForController(egressIPName, eipNamespace2, podName2, v4Pod1IPNode1Net1, eIP1Mark, IPFamilyValueV4, []string{gwIP}, netInfo.GetNetworkName(), DefaultNetworkControllerName), + getReRoutePolicyForController(egressIPName, eipNamespace2, podName4, v4Pod2IPNode2Net1, eIP1Mark, IPFamilyValueV4, []string{gwIP}, netInfo.GetNetworkName(), DefaultNetworkControllerName), + getNoReRoutePolicyForUDNEnabledSvc(false, netInfo.GetNetworkName(), DefaultNetworkControllerName, egressIPServedPodsASUDNv4.Name, egressSVCServedPodsASv4.Name, udnEnabledSvcV4.Name), + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Net1, v4Net1), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Net1, config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-no-reroute-service-UUID", + ExternalIDs: 
getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", + egressIPServedPodsASUDNv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": ovntypes.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.RouterToSwitchPrefix + networkName1_ + layer2SwitchName + "-UUID", + Name: ovntypes.RouterToSwitchPrefix + networkName1_ + layer2SwitchName, + Networks: []string{node1Network1RtoSIPCIDR}, + }, + &nbdb.LogicalRouter{ + UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedGWRouterName(node1.Name), + Ports: []string{ovntypes.RouterToSwitchPrefix + networkName1_ + layer2SwitchName + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer2Topology}, + Policies: []string{"udn-default-no-reroute-node-UUID", "udn-default-no-reroute-UUID", + "udn-no-reroute-service-UUID", "udn-enabled-svc-no-reroute-UUID", + fmt.Sprintf("%s-no-reroute-reply-traffic", netInfo.GetNetworkName()), + getReRoutePolicyUUID(eipNamespace2, podName2, IPFamilyValueV4, netInfo.GetNetworkName()), + getReRoutePolicyUUID(eipNamespace2, podName4, IPFamilyValueV4, netInfo.GetNetworkName()), + }, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + networkName1_ + node1Name + "-UUID", + Name: "k8s-" + networkName1_ + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + 
UUID: networkName1_ + layer2SwitchName + "-UUID", + Name: networkName1_ + layer2SwitchName, + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer2Topology}, + QOSRules: []string{fmt.Sprintf("%s-QoS-UUID", netInfo.GetNetworkName())}, + }, + getNoReRouteReplyTrafficPolicyForController(netInfo.GetNetworkName(), DefaultNetworkControllerName), + getDefaultQoSRule(false, netInfo.GetNetworkName(), DefaultNetworkControllerName), + egressIPServedPodsASUDNv4, + udnEnabledSvcV4, + } + ginkgo.By("ensure expected equals actual") + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) + ginkgo.By("deleting all EgressIP seelected pods") + deletePod(egressPodCDNLocal.Namespace, egressPodCDNLocal.Name, fakeOvn.fakeClient.KubeClient) + deletePod(egressPodCDNRemote.Namespace, egressPodCDNRemote.Name, fakeOvn.fakeClient.KubeClient) + deletePod(egressPodUDNLocal.Namespace, egressPodUDNLocal.Name, fakeOvn.fakeClient.KubeClient) + deletePod(egressPodUDNRemote.Namespace, egressPodUDNRemote.Name, fakeOvn.fakeClient.KubeClient) + + ginkgo.By("ensure OVN config is removed for the deleted pods") + egressIPServedPodsASCDNv4, _ = buildEgressIPServedPodsAddressSets([]string{}, ovntypes.DefaultNetworkName, DefaultNetworkControllerName) + egressIPServedPodsASUDNv4, _ = buildEgressIPServedPodsAddressSetsForController([]string{}, netInfo.GetNetworkName(), DefaultNetworkControllerName) + expectedDatabaseState = []libovsdbtest.TestData{ + // CDN + getReRouteStaticRoute(v4ClusterSubnet, node1DefaultRtoJIP), + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4ClusterSubnet, v4ClusterSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, 
ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4ClusterSubnet, config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, + UUID: ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Ports: []string{ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID"}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + Policies: []string{"default-no-reroute-UUID", "no-reroute-service-UUID", + "default-no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, + StaticRoutes: []string{"reroute-static-route-UUID"}, + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name, + Networks: []string{node1DefaultRtoJIPCIDR}, + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", + egressIPServedPodsASCDNv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": ovntypes.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, ovntypes.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node1Name + "-UUID", + Name: "k8s-" + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + 
util.GetNodeManagementIfAddr(node1CDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node1Name + "-UUID", + Name: node1Name, + Ports: []string{"k8s-" + node1Name + "-UUID"}, + QOSRules: []string{"default-QoS-UUID"}, + }, + getNoReRouteReplyTrafficPolicy(ovntypes.DefaultNetworkName, DefaultNetworkControllerName), + getDefaultQoSRule(false, ovntypes.DefaultNetworkName, DefaultNetworkControllerName), + egressSVCServedPodsASv4, + egressIPServedPodsASCDNv4, + egressNodeIPsASv4, + + // UDN + getNoReRoutePolicyForUDNEnabledSvc(false, netInfo.GetNetworkName(), DefaultNetworkControllerName, egressIPServedPodsASUDNv4.Name, egressSVCServedPodsASv4.Name, udnEnabledSvcV4.Name), + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Net1, v4Net1), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == %s && ip4.dst == %s", v4Net1, config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: ovntypes.DefaultNoRereoutePriority, + Match: fmt.Sprintf("(ip4.src == $%s || ip4.src == $%s) && ip4.dst == $%s", + egressIPServedPodsASUDNv4.Name, egressSVCServedPodsASv4.Name, egressNodeIPsASv4.Name), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "udn-default-no-reroute-node-UUID", + Options: map[string]string{"pkt_mark": ovntypes.EgressIPNodeConnectionMark}, + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, netInfo.GetNetworkName(), 
DefaultNetworkControllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.RouterToSwitchPrefix + networkName1_ + layer2SwitchName + "-UUID", + Name: ovntypes.RouterToSwitchPrefix + networkName1_ + layer2SwitchName, + Networks: []string{node1Network1RtoSIPCIDR}, + }, + &nbdb.LogicalRouter{ + UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", + Name: netInfo.GetNetworkScopedGWRouterName(node1.Name), + Ports: []string{ovntypes.RouterToSwitchPrefix + networkName1_ + layer2SwitchName + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer2Topology}, + Policies: []string{"udn-default-no-reroute-node-UUID", "udn-default-no-reroute-UUID", + "udn-no-reroute-service-UUID", "udn-enabled-svc-no-reroute-UUID", + fmt.Sprintf("%s-no-reroute-reply-traffic", netInfo.GetNetworkName()), + }, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + networkName1_ + node1Name + "-UUID", + Name: "k8s-" + networkName1_ + node1Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: networkName1_ + layer2SwitchName + "-UUID", + Name: networkName1_ + layer2SwitchName, + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer2Topology}, + QOSRules: []string{fmt.Sprintf("%s-QoS-UUID", netInfo.GetNetworkName())}, + }, + getNoReRouteReplyTrafficPolicyForController(netInfo.GetNetworkName(), DefaultNetworkControllerName), + getDefaultQoSRule(false, netInfo.GetNetworkName(), DefaultNetworkControllerName), + egressIPServedPodsASUDNv4, + udnEnabledSvcV4, + } + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) + return nil + } + err := app.Run([]string{app.Name}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + }) }) 
From c58c193ed3f6c8e951361e8c8e1e0b63e8d26ab4 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Tue, 1 Jul 2025 16:13:56 +0200 Subject: [PATCH 057/181] Disable Layer2 IGMP test as it is broken now. Signed-off-by: Nadia Pinaeva --- test/e2e/network_segmentation.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/test/e2e/network_segmentation.go b/test/e2e/network_segmentation.go index a3105f2ab0..cc8216379d 100644 --- a/test/e2e/network_segmentation.go +++ b/test/e2e/network_segmentation.go @@ -875,12 +875,13 @@ var _ = Describe("Network Segmentation", func() { cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }), - ginkgo.Entry("with primary layer2 UDN", networkAttachmentConfigParams{ - name: nadName, - topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), - role: "primary", - }), + // TODO: this test is broken, see https://github.com/ovn-kubernetes/ovn-kubernetes/issues/5309 + //ginkgo.Entry("with primary layer2 UDN", networkAttachmentConfigParams{ + // name: nadName, + // topology: "layer2", + // cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + // role: "primary", + //}), ) }) }) From 1ea27391de74c09fff98f2515b46ec381fbcf955 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Caama=C3=B1o=20Ruiz?= Date: Wed, 18 Jun 2025 11:32:01 +0000 Subject: [PATCH 058/181] Revert "Add the IP rule for a UDN only when it is advertised to the default VRF" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit deff5e64ccc6069437bf7abf08f61522f73501a1. Breaks traffic flows to KAPI, DNS on VRF-Lite scenarios. Requires and SNAT that is being worked on [1]. 1. 
https://issues.redhat.com/browse/OCPBUGS-56506?focusedId=27440592&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-27440592 Signed-off-by: Jaime Caamaño Ruiz --- go-controller/pkg/node/gateway_udn.go | 194 ++++++++------------ go-controller/pkg/node/gateway_udn_test.go | 200 +-------------------- 2 files changed, 79 insertions(+), 315 deletions(-) diff --git a/go-controller/pkg/node/gateway_udn.go b/go-controller/pkg/node/gateway_udn.go index 7ab5b50cc9..3e2ff143c9 100644 --- a/go-controller/pkg/node/gateway_udn.go +++ b/go-controller/pkg/node/gateway_udn.go @@ -90,10 +90,6 @@ type UserDefinedNetworkGateway struct { // gwInterfaceIndex holds the link index of gateway interface gwInterfaceIndex int - - // save BGP state at the start of reconciliation loop run to handle it consistently throughout the run - isNetworkAdvertisedToDefaultVRF bool - isNetworkAdvertised bool } // UTILS Needed for UDN (also leveraged for default netInfo) in bridgeConfiguration @@ -371,18 +367,18 @@ func (udng *UserDefinedNetworkGateway) AddNetwork() error { return fmt.Errorf("could not add VRF %s routes for network %s, err: %v", vrfDeviceName, udng.GetNetworkName(), err) } - udng.updateAdvertisementStatus() + isNetworkAdvertised := util.IsPodNetworkAdvertisedAtNode(udng.NetInfo, udng.node.Name) // create the iprules for this network - if err = udng.updateUDNVRFIPRules(); err != nil { + if err = udng.updateUDNVRFIPRules(isNetworkAdvertised); err != nil { return fmt.Errorf("failed to update IP rules for network %s: %w", udng.GetNetworkName(), err) } - if err = udng.updateAdvertisedUDNIsolationRules(); err != nil { + if err = udng.updateAdvertisedUDNIsolationRules(isNetworkAdvertised); err != nil { return fmt.Errorf("failed to update isolation rules for network %s: %w", udng.GetNetworkName(), err) } - if err := udng.updateUDNVRFIPRoute(); err != nil { + if err := udng.updateUDNVRFIPRoute(isNetworkAdvertised); err != nil { return fmt.Errorf("failed to update ip 
routes for network %s: %w", udng.GetNetworkName(), err) } @@ -460,16 +456,18 @@ func (udng *UserDefinedNetworkGateway) DelNetwork() error { } } - err := udng.deleteAdvertisedUDNIsolationRules() - if err != nil { - return fmt.Errorf("failed to remove advertised UDN isolation rules for network %s: %w", udng.GetNetworkName(), err) + if util.IsPodNetworkAdvertisedAtNode(udng.NetInfo, udng.node.Name) { + err := udng.updateAdvertisedUDNIsolationRules(false) + if err != nil { + return fmt.Errorf("failed to remove advertised UDN isolation rules for network %s: %w", udng.GetNetworkName(), err) + } } if err := udng.delMarkChain(); err != nil { return err } // delete the management port interface for this network - err = udng.deleteUDNManagementPort() + err := udng.deleteUDNManagementPort() if err != nil { return err } @@ -627,7 +625,8 @@ func (udng *UserDefinedNetworkGateway) computeRoutesForUDN(mpLink netlink.Link) // Route2: Add default route: default via 172.18.0.1 dev breth0 mtu 1400 // necessary for UDN CNI and host-networked pods default traffic to go to node's gatewayIP - defaultRoute, err := udng.getDefaultRouteWithAdvertisedCheck() + isNetworkAdvertised := util.IsPodNetworkAdvertisedAtNode(udng.NetInfo, udng.node.Name) + defaultRoute, err := udng.getDefaultRoute(isNetworkAdvertised) if err != nil { return nil, fmt.Errorf("unable to add default route for network %s, err: %v", udng.GetNetworkName(), err) } @@ -728,7 +727,15 @@ func (udng *UserDefinedNetworkGateway) computeRoutesForUDN(mpLink netlink.Link) return retVal, nil } -func (udng *UserDefinedNetworkGateway) getDefaultRoute() ([]netlink.Route, error) { +func (udng *UserDefinedNetworkGateway) getDefaultRoute(isNetworkAdvertised bool) ([]netlink.Route, error) { + vrfs := udng.GetPodNetworkAdvertisedOnNodeVRFs(udng.node.Name) + // If the network is advertised on a non default VRF then we should only consider routes received from external BGP + // device and not send any traffic based on default route similar to 
one present in default VRF. This is more important + // for VRF-Lite usecase where we need traffic to leave from vlan device instead of default gateway interface. + if isNetworkAdvertised && !slices.Contains(vrfs, types.DefaultNetworkName) { + return nil, nil + } + networkMTU := udng.NetInfo.MTU() if networkMTU == 0 { networkMTU = config.Default.MTU @@ -753,16 +760,6 @@ func (udng *UserDefinedNetworkGateway) getDefaultRoute() ([]netlink.Route, error return retVal, nil } -func (udng *UserDefinedNetworkGateway) getDefaultRouteWithAdvertisedCheck() ([]netlink.Route, error) { - // If the network is advertised on a non default VRF then we should only consider routes received from external BGP - // device and not send any traffic based on default route similar to one present in default VRF. This is more important - // for VRF-Lite usecase where we need traffic to leave from vlan device instead of default gateway interface. - if udng.isNetworkAdvertised && !udng.isNetworkAdvertisedToDefaultVRF { - return nil, nil - } - return udng.getDefaultRoute() -} - // getV4MasqueradeIP returns the V4 management port masqueradeIP for this network func (udng *UserDefinedNetworkGateway) getV4MasqueradeIP() (*net.IPNet, error) { if !config.IPv4Mode { @@ -795,15 +792,12 @@ func (udng *UserDefinedNetworkGateway) getV6MasqueradeIP() (*net.IPNet, error) { // 2000: from all to 169.254.0.12 lookup 1007 // 2000: from all fwmark 0x1002 lookup 1009 // 2000: from all to 169.254.0.14 lookup 1009 -// If the network is advertised to the default VRF, an example of the rules we set for a network is: +// If the network is advertised, an example of the rules we set for a network is: // 2000: from all fwmark 0x1001 lookup 1007 // 2000: from all to 10.132.0.0/14 lookup 1007 // 2000: from all fwmark 0x1001 lookup 1009 // 2000: from all to 10.134.0.0/14 lookup 1009 -// If the network is advertised ot a non-default VRF, an example of the rules we set for a network is: -// 2000: from all fwmark 0x1001 lookup 
1007 -// 2000: from all fwmark 0x1001 lookup 1009 -func (udng *UserDefinedNetworkGateway) constructUDNVRFIPRules() ([]netlink.Rule, []netlink.Rule, error) { +func (udng *UserDefinedNetworkGateway) constructUDNVRFIPRules(isNetworkAdvertised bool) ([]netlink.Rule, []netlink.Rule, error) { var addIPRules []netlink.Rule var delIPRules []netlink.Rule var masqIPRules []netlink.Rule @@ -836,18 +830,12 @@ func (udng *UserDefinedNetworkGateway) constructUDNVRFIPRules() ([]netlink.Rule, } } switch { - case udng.isNetworkAdvertisedToDefaultVRF: - // the network is advertised to the default VRF - delIPRules = append(delIPRules, masqIPRules...) - addIPRules = append(addIPRules, subnetIPRules...) - case udng.isNetworkAdvertised: - // the network is advertised to a non-default VRF - delIPRules = append(delIPRules, masqIPRules...) + case !isNetworkAdvertised: + addIPRules = append(addIPRules, masqIPRules...) delIPRules = append(delIPRules, subnetIPRules...) default: - // the network is not advertised - delIPRules = append(delIPRules, subnetIPRules...) - addIPRules = append(addIPRules, masqIPRules...) + addIPRules = append(addIPRules, subnetIPRules...) + delIPRules = append(delIPRules, masqIPRules...) 
} return addIPRules, delIPRules, nil } @@ -945,20 +933,19 @@ func (udng *UserDefinedNetworkGateway) doReconcile() error { return fmt.Errorf("openflow manager with default bridge configuration has not been provided for network %s", udng.GetNetworkName()) } - udng.updateAdvertisementStatus() - // update bridge configuration + isNetworkAdvertised := util.IsPodNetworkAdvertisedAtNode(udng.NetInfo, udng.node.Name) netConfig := udng.openflowManager.defaultBridge.getNetworkBridgeConfig(udng.GetNetworkName()) if netConfig == nil { return fmt.Errorf("missing bridge configuration for network %s", udng.GetNetworkName()) } - netConfig.advertised.Store(udng.isNetworkAdvertised) + netConfig.advertised.Store(isNetworkAdvertised) - if err := udng.updateUDNVRFIPRules(); err != nil { + if err := udng.updateUDNVRFIPRules(isNetworkAdvertised); err != nil { return fmt.Errorf("error while updating ip rule for UDN %s: %s", udng.GetNetworkName(), err) } - if err := udng.updateUDNVRFIPRoute(); err != nil { + if err := udng.updateUDNVRFIPRoute(isNetworkAdvertised); err != nil { return fmt.Errorf("error while updating ip route for UDN %s: %s", udng.GetNetworkName(), err) } @@ -972,16 +959,16 @@ func (udng *UserDefinedNetworkGateway) doReconcile() error { // let's sync these flows immediately udng.openflowManager.requestFlowSync() - if err := udng.updateAdvertisedUDNIsolationRules(); err != nil { + if err := udng.updateAdvertisedUDNIsolationRules(isNetworkAdvertised); err != nil { return fmt.Errorf("error while updating advertised UDN isolation rules for network %s: %w", udng.GetNetworkName(), err) } return nil } // updateUDNVRFIPRules updates IP rules for a network depending on whether the -// network is advertised to the default VRF or not -func (udng *UserDefinedNetworkGateway) updateUDNVRFIPRules() error { - addIPRules, deleteIPRules, err := udng.constructUDNVRFIPRules() +// network is advertised or not +func (udng *UserDefinedNetworkGateway) updateUDNVRFIPRules(isNetworkAdvertised bool) 
error { + addIPRules, deleteIPRules, err := udng.constructUDNVRFIPRules(isNetworkAdvertised) if err != nil { return fmt.Errorf("unable to get iprules for network %s, err: %v", udng.GetNetworkName(), err) } @@ -1000,40 +987,30 @@ func (udng *UserDefinedNetworkGateway) updateUDNVRFIPRules() error { } // Add or remove default route from a vrf device based on the network is -// advertised on its own network or the default network -func (udng *UserDefinedNetworkGateway) updateUDNVRFIPRoute() error { - vrfName := util.GetNetworkVRFName(udng.NetInfo) - - switch { - case udng.isNetworkAdvertised && !udng.isNetworkAdvertisedToDefaultVRF: - // Remove default route for networks advertised to non-default VRF +// advertised on its own network or default network +func (udng *UserDefinedNetworkGateway) updateUDNVRFIPRoute(isNetworkAdvertised bool) error { + vrfs := udng.GetPodNetworkAdvertisedOnNodeVRFs(udng.node.Name) + if isNetworkAdvertised && !slices.Contains(vrfs, types.DefaultNetworkName) { if err := udng.removeDefaultRouteFromVRF(); err != nil { - return fmt.Errorf("failed to remove default route from VRF %s for network %s: %v", - vrfName, udng.GetNetworkName(), err) + return fmt.Errorf("error while removing default route from VRF %s corresponding to network %s: %s", + util.GetNetworkVRFName(udng.NetInfo), udng.GetNetworkName(), err) } - - default: - // Add default route for networks that are either: - // - not advertised - // - advertised to default VRF - defaultRoute, err := udng.getDefaultRouteWithAdvertisedCheck() + } else if !isNetworkAdvertised || slices.Contains(vrfs, types.DefaultNetworkName) { + defaultRoute, err := udng.getDefaultRoute(isNetworkAdvertised) if err != nil { - return fmt.Errorf("failed to get default route for network %s: %v", - udng.GetNetworkName(), err) + return fmt.Errorf("unable to get default route for network %s, err: %v", udng.GetNetworkName(), err) } - - if err = udng.vrfManager.AddVRFRoutes(vrfName, defaultRoute); err != nil { - return 
fmt.Errorf("failed to add default route to VRF %s for network %s: %v", - vrfName, udng.GetNetworkName(), err) + if err = udng.vrfManager.AddVRFRoutes(util.GetNetworkVRFName(udng.NetInfo), defaultRoute); err != nil { + return fmt.Errorf("error while adding default route to VRF %s corresponding to network %s, err: %v", + util.GetNetworkVRFName(udng.NetInfo), udng.GetNetworkName(), err) } } - return nil } func (udng *UserDefinedNetworkGateway) removeDefaultRouteFromVRF() error { vrfDeviceName := util.GetNetworkVRFName(udng.NetInfo) - defaultRoute, err := udng.getDefaultRoute() + defaultRoute, err := udng.getDefaultRoute(false) if err != nil { return fmt.Errorf("unable to get default route for network %s, err: %v", udng.GetNetworkName(), err) } @@ -1062,22 +1039,39 @@ func (udng *UserDefinedNetworkGateway) removeDefaultRouteFromVRF() error { // comment "advertised UDNs V4 subnets" // elements = { 10.10.0.0/16 comment "cluster_udn_l3network" } // } -func (udng *UserDefinedNetworkGateway) updateAdvertisedUDNIsolationRules() error { - switch { - case udng.isNetworkAdvertised: - return udng.addAdvertisedUDNIsolationRules() - default: - return udng.deleteAdvertisedUDNIsolationRules() - } -} - -func (udng *UserDefinedNetworkGateway) addAdvertisedUDNIsolationRules() error { +func (udng *UserDefinedNetworkGateway) updateAdvertisedUDNIsolationRules(isNetworkAdvertised bool) error { nft, err := nodenft.GetNFTablesHelper() if err != nil { return fmt.Errorf("failed to get nftables helper: %v", err) } tx := nft.NewTransaction() + if !isNetworkAdvertised { + existingV4, err := nft.ListElements(context.TODO(), "set", nftablesAdvertisedUDNsSetV4) + if err != nil { + if !knftables.IsNotFound(err) { + return fmt.Errorf("could not list existing items in %s set: %w", nftablesAdvertisedUDNsSetV4, err) + } + } + existingV6, err := nft.ListElements(context.TODO(), "set", nftablesAdvertisedUDNsSetV6) + if err != nil { + if !knftables.IsNotFound(err) { + return fmt.Errorf("could not list 
existing items in %s set: %w", nftablesAdvertisedUDNsSetV6, err) + } + } + + for _, elem := range append(existingV4, existingV6...) { + if elem.Comment != nil && *elem.Comment == udng.GetNetworkName() { + tx.Delete(elem) + } + } + + if tx.NumOperations() == 0 { + return nil + } + return nft.Run(context.TODO(), tx) + } + for _, udnNet := range udng.Subnets() { set := nftablesAdvertisedUDNsSetV4 if utilnet.IsIPv6CIDR(udnNet.CIDR) { @@ -1096,41 +1090,3 @@ func (udng *UserDefinedNetworkGateway) addAdvertisedUDNIsolationRules() error { } return nft.Run(context.TODO(), tx) } - -func (udng *UserDefinedNetworkGateway) deleteAdvertisedUDNIsolationRules() error { - nft, err := nodenft.GetNFTablesHelper() - if err != nil { - return fmt.Errorf("failed to get nftables helper: %v", err) - } - tx := nft.NewTransaction() - - existingV4, err := nft.ListElements(context.TODO(), "set", nftablesAdvertisedUDNsSetV4) - if err != nil { - if !knftables.IsNotFound(err) { - return fmt.Errorf("could not list existing items in %s set: %w", nftablesAdvertisedUDNsSetV4, err) - } - } - existingV6, err := nft.ListElements(context.TODO(), "set", nftablesAdvertisedUDNsSetV6) - if err != nil { - if !knftables.IsNotFound(err) { - return fmt.Errorf("could not list existing items in %s set: %w", nftablesAdvertisedUDNsSetV6, err) - } - } - - for _, elem := range append(existingV4, existingV6...) 
{ - if elem.Comment != nil && *elem.Comment == udng.GetNetworkName() { - tx.Delete(elem) - } - } - - if tx.NumOperations() == 0 { - return nil - } - return nft.Run(context.TODO(), tx) -} - -func (udng *UserDefinedNetworkGateway) updateAdvertisementStatus() { - vrfs := udng.GetPodNetworkAdvertisedOnNodeVRFs(udng.node.Name) - udng.isNetworkAdvertised = len(vrfs) > 0 - udng.isNetworkAdvertisedToDefaultVRF = slices.Contains(vrfs, types.DefaultNetworkName) -} diff --git a/go-controller/pkg/node/gateway_udn_test.go b/go-controller/pkg/node/gateway_udn_test.go index 8c38c7ec5b..9f66247599 100644 --- a/go-controller/pkg/node/gateway_udn_test.go +++ b/go-controller/pkg/node/gateway_udn_test.go @@ -1754,7 +1754,7 @@ func TestConstructUDNVRFIPRules(t *testing.T) { }) g.Expect(err).NotTo(HaveOccurred()) udnGateway.vrfTableId = test.vrftableID - rules, delRules, err := udnGateway.constructUDNVRFIPRules() + rules, delRules, err := udnGateway.constructUDNVRFIPRules(false) g.Expect(err).ToNot(HaveOccurred()) for i, rule := range rules { g.Expect(rule.Priority).To(Equal(test.expectedRules[i].priority)) @@ -1776,7 +1776,7 @@ func TestConstructUDNVRFIPRules(t *testing.T) { } } -func TestConstructUDNVRFIPRulesPodNetworkAdvertisedToTheDefaultNetwork(t *testing.T) { +func TestConstructUDNVRFIPRulesPodNetworkAdvertised(t *testing.T) { type testRule struct { priority int family int @@ -1941,198 +1941,7 @@ func TestConstructUDNVRFIPRulesPodNetworkAdvertisedToTheDefaultNetwork(t *testin }) g.Expect(err).NotTo(HaveOccurred()) udnGateway.vrfTableId = test.vrftableID - udnGateway.isNetworkAdvertised = true - udnGateway.isNetworkAdvertisedToDefaultVRF = true - rules, delRules, err := udnGateway.constructUDNVRFIPRules() - g.Expect(err).ToNot(HaveOccurred()) - for i, rule := range rules { - g.Expect(rule.Priority).To(Equal(test.expectedRules[i].priority)) - g.Expect(rule.Table).To(Equal(test.expectedRules[i].table)) - g.Expect(rule.Family).To(Equal(test.expectedRules[i].family)) - if rule.Dst != 
nil { - g.Expect(*rule.Dst).To(Equal(test.expectedRules[i].dst)) - } else { - g.Expect(rule.Mark).To(Equal(test.expectedRules[i].mark)) - } - } - for i, rule := range delRules { - g.Expect(rule.Priority).To(Equal(test.deleteRules[i].priority)) - g.Expect(rule.Table).To(Equal(test.deleteRules[i].table)) - g.Expect(rule.Family).To(Equal(test.deleteRules[i].family)) - g.Expect(*rule.Dst).To(Equal(test.deleteRules[i].dst)) - } - }) - } -} - -func TestConstructUDNVRFIPRulesPodNetworkAdvertisedToNoneDefaultNetwork(t *testing.T) { - type testRule struct { - priority int - family int - table int - mark uint32 - dst net.IPNet - } - type testConfig struct { - desc string - vrftableID int - v4mode bool - v6mode bool - expectedRules []testRule - deleteRules []testRule - } - - tests := []testConfig{ - { - desc: "v4 rule test", - vrftableID: 1007, - expectedRules: []testRule{ - { - priority: UDNMasqueradeIPRulePriority, - family: netlink.FAMILY_V4, - table: 1007, - mark: 0x1003, - }, - }, - deleteRules: []testRule{ - { - priority: UDNMasqueradeIPRulePriority, - family: netlink.FAMILY_V4, - table: 1007, - dst: *util.GetIPNetFullMaskFromIP(ovntest.MustParseIP("169.254.0.16")), - }, - { - priority: UDNMasqueradeIPRulePriority, - family: netlink.FAMILY_V4, - table: 1007, - dst: *ovntest.MustParseIPNet("100.128.0.0/16"), - }, - }, - v4mode: true, - }, - { - desc: "v6 rule test", - vrftableID: 1009, - expectedRules: []testRule{ - { - priority: UDNMasqueradeIPRulePriority, - family: netlink.FAMILY_V6, - table: 1009, - mark: 0x1003, - }, - }, - deleteRules: []testRule{ - { - priority: UDNMasqueradeIPRulePriority, - family: netlink.FAMILY_V6, - table: 1009, - dst: *util.GetIPNetFullMaskFromIP(ovntest.MustParseIP("fd69::10")), - }, - { - priority: UDNMasqueradeIPRulePriority, - family: netlink.FAMILY_V6, - table: 1009, - dst: *ovntest.MustParseIPNet("ae70::/60"), - }, - }, - v6mode: true, - }, - { - desc: "dualstack rule test", - vrftableID: 1010, - expectedRules: []testRule{ - { - 
priority: UDNMasqueradeIPRulePriority, - family: netlink.FAMILY_V4, - table: 1010, - mark: 0x1003, - }, - { - priority: UDNMasqueradeIPRulePriority, - family: netlink.FAMILY_V6, - table: 1010, - mark: 0x1003, - }, - }, - deleteRules: []testRule{ - { - priority: UDNMasqueradeIPRulePriority, - family: netlink.FAMILY_V4, - table: 1010, - dst: *util.GetIPNetFullMaskFromIP(ovntest.MustParseIP("169.254.0.16")), - }, - { - priority: UDNMasqueradeIPRulePriority, - family: netlink.FAMILY_V6, - table: 1010, - dst: *util.GetIPNetFullMaskFromIP(ovntest.MustParseIP("fd69::10")), - }, - { - priority: UDNMasqueradeIPRulePriority, - family: netlink.FAMILY_V4, - table: 1010, - dst: *ovntest.MustParseIPNet("100.128.0.0/16"), - }, - { - priority: UDNMasqueradeIPRulePriority, - family: netlink.FAMILY_V6, - table: 1010, - dst: *ovntest.MustParseIPNet("ae70::/60"), - }, - }, - v4mode: true, - v6mode: true, - }, - } - config.Gateway.V6MasqueradeSubnet = "fd69::/112" - config.Gateway.V4MasqueradeSubnet = "169.254.0.0/16" - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - g := NewWithT(t) - node := &corev1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: nodeName, - }, - } - config.IPv4Mode = test.v4mode - config.IPv6Mode = test.v6mode - cidr := "" - if config.IPv4Mode { - cidr = "100.128.0.0/16/24" - } - if config.IPv4Mode && config.IPv6Mode { - cidr += ",ae70::/60" - } else if config.IPv6Mode { - cidr = "ae70::/60" - } - nad := ovntest.GenerateNAD("bluenet", "rednad", "greenamespace", - types.Layer3Topology, cidr, types.NetworkRolePrimary) - ovntest.AnnotateNADWithNetworkID("3", nad) - netInfo, err := util.ParseNADInfo(nad) - g.Expect(err).ToNot(HaveOccurred()) - mutableNetInfo := util.NewMutableNetInfo(netInfo) - mutableNetInfo.SetPodNetworkAdvertisedVRFs(map[string][]string{node.Name: {"bluenet"}}) - ofm := getDummyOpenflowManager() - // create dummy gateway interface(Need to run this test as root) - err = netlink.LinkAdd(&netlink.Dummy{ - LinkAttrs: 
netlink.LinkAttrs{ - Name: "breth0", - }, - }) - g.Expect(err).NotTo(HaveOccurred()) - udnGateway, err := NewUserDefinedNetworkGateway(mutableNetInfo, node, nil, nil, nil, nil, &gateway{openflowManager: ofm}) - g.Expect(err).NotTo(HaveOccurred()) - // delete dummy gateway interface after creating UDN gateway(Need to run this test as root) - err = netlink.LinkDel(&netlink.Dummy{ - LinkAttrs: netlink.LinkAttrs{ - Name: "breth0", - }, - }) - g.Expect(err).NotTo(HaveOccurred()) - udnGateway.vrfTableId = test.vrftableID - udnGateway.isNetworkAdvertised = true - udnGateway.isNetworkAdvertisedToDefaultVRF = false - rules, delRules, err := udnGateway.constructUDNVRFIPRules() + rules, delRules, err := udnGateway.constructUDNVRFIPRules(true) g.Expect(err).ToNot(HaveOccurred()) for i, rule := range rules { g.Expect(rule.Priority).To(Equal(test.expectedRules[i].priority)) @@ -2263,8 +2072,7 @@ func TestUserDefinedNetworkGateway_updateAdvertisedUDNIsolationRules(t *testing. udng := &UserDefinedNetworkGateway{ NetInfo: netInfo, } - udng.isNetworkAdvertised = tt.isNetworkAdvertised - err = udng.updateAdvertisedUDNIsolationRules() + err = udng.updateAdvertisedUDNIsolationRules(tt.isNetworkAdvertised) g.Expect(err).NotTo(HaveOccurred()) v4Elems, err := nft.ListElements(context.TODO(), "set", nftablesAdvertisedUDNsSetV4) From b0b32b37f701f0ec6d6746b67d8b8b4f797b4a86 Mon Sep 17 00:00:00 2001 From: Or Mergi Date: Thu, 3 Jul 2025 16:11:36 +0300 Subject: [PATCH 059/181] kind: Rm push_args variable quotes When using Docker, push image command fails because the push_args var is interpreted as empty string, Docker reject it as invalid variable and fails with the following error: $ docker push '' localhost:5000/ovn-daemonset-fedora:latest docker: 'docker push' requires 1 argument Remove the push_args wrapping quotes. 
Signed-off-by: Or Mergi --- contrib/kind.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/kind.sh b/contrib/kind.sh index 5ec980bd95..3d8bd0f30e 100755 --- a/contrib/kind.sh +++ b/contrib/kind.sh @@ -845,14 +845,14 @@ build_ovn_image() { # store in local registry if [ "$KIND_LOCAL_REGISTRY" == true ];then echo "Pushing built image to local $OCI_BIN registry" - $OCI_BIN push "$push_args" "$OVN_IMAGE" + $OCI_BIN push $push_args "$OVN_IMAGE" fi # We should push to local registry if image is not remote elif [ "${OVN_IMAGE}" != "" -a "${KIND_LOCAL_REGISTRY}" == true ] && (echo "$OVN_IMAGE" | grep / -vq); then local local_registry_ovn_image="localhost:5000/${OVN_IMAGE}" $OCI_BIN tag "$OVN_IMAGE" $local_registry_ovn_image OVN_IMAGE=$local_registry_ovn_image - $OCI_BIN push "$push_args" "$OVN_IMAGE" + $OCI_BIN push $push_args "$OVN_IMAGE" fi } From 2d2e4454d7debe348161063d1e9e48dddaa8a621 Mon Sep 17 00:00:00 2001 From: Periyasamy Palanisamy Date: Fri, 13 Jun 2025 09:51:14 +0200 Subject: [PATCH 060/181] Reconcile namespace for network change Since CanServeNamespace filters out namespace events for namespaces unknown to be served by this primary network, we need to reconcile namespaces once the network is reconfigured to serve a namespace. Hence this commit reconciles those namespaces and also reconciles each network policy if it contains only peer namespace selector. 
Signed-off-by: Periyasamy Palanisamy --- .../pkg/ovn/base_network_controller.go | 72 ++++++++++---- .../pkg/ovn/base_network_controller_policy.go | 96 +++++++++++++++++-- go-controller/pkg/ovn/gress_policy.go | 7 ++ test/e2e/network_segmentation_policy.go | 86 +++++++++++++++-- 4 files changed, 228 insertions(+), 33 deletions(-) diff --git a/go-controller/pkg/ovn/base_network_controller.go b/go-controller/pkg/ovn/base_network_controller.go index db56f42cb9..bdb026752a 100644 --- a/go-controller/pkg/ovn/base_network_controller.go +++ b/go-controller/pkg/ovn/base_network_controller.go @@ -191,8 +191,7 @@ type BaseNetworkController struct { func (oc *BaseNetworkController) reconcile(netInfo util.NetInfo, setNodeFailed func(string)) error { // gather some information first - var err error - var retryNodes []*corev1.Node + var reconcileNodes []string oc.localZoneNodes.Range(func(key, _ any) bool { nodeName := key.(string) wasAdvertised := util.IsPodNetworkAdvertisedAtNode(oc, nodeName) @@ -201,41 +200,57 @@ func (oc *BaseNetworkController) reconcile(netInfo util.NetInfo, setNodeFailed f // noop return true } - var node *corev1.Node - node, err = oc.watchFactory.GetNode(nodeName) - if err != nil { - return false - } - retryNodes = append(retryNodes, node) + reconcileNodes = append(reconcileNodes, nodeName) return true }) - if err != nil { - return fmt.Errorf("failed to reconcile network %s: %w", oc.GetNetworkName(), err) - } reconcileRoutes := oc.routeImportManager != nil && oc.routeImportManager.NeedsReconciliation(netInfo) reconcilePendingPods := !oc.IsDefault() && !oc.ReconcilableNetInfo.EqualNADs(netInfo.GetNADs()...) + reconcileNamespaces := sets.NewString() + if oc.IsPrimaryNetwork() { + // since CanServeNamespace filters out namespace events for namespaces unknown + // to be served by this primary network, we need to reconcile namespaces once + // the network is reconfigured to serve a namespace. 
+ reconcileNamespaces = sets.NewString(netInfo.GetNADNamespaces()...).Difference( + sets.NewString(oc.GetNADNamespaces()...)) + } // set the new NetInfo, point of no return - err = util.ReconcileNetInfo(oc.ReconcilableNetInfo, netInfo) + err := util.ReconcileNetInfo(oc.ReconcilableNetInfo, netInfo) if err != nil { return fmt.Errorf("failed to reconcile network information for network %s: %v", oc.GetNetworkName(), err) } + oc.doReconcile(reconcileRoutes, reconcilePendingPods, reconcileNodes, setNodeFailed, reconcileNamespaces.List()) + + return nil +} + +// doReconcile performs the reconciliation after the controller NetInfo has already being +// updated with the changes. What needs to be reconciled should already be known and +// provided on the arguments of the method. This method returns no error and logs them +// instead since once the controller NetInfo has been updated there is no point in retrying. +func (oc *BaseNetworkController) doReconcile(reconcileRoutes, reconcilePendingPods bool, + reconcileNodes []string, setNodeFailed func(string), reconcileNamespaces []string) { if reconcileRoutes { - err = oc.routeImportManager.ReconcileNetwork(oc.GetNetworkName()) + err := oc.routeImportManager.ReconcileNetwork(oc.GetNetworkName()) if err != nil { klog.Errorf("Failed to reconcile network %s on route import controller: %v", oc.GetNetworkName(), err) } } - for _, node := range retryNodes { - setNodeFailed(node.Name) + for _, nodeName := range reconcileNodes { + setNodeFailed(nodeName) + node, err := oc.watchFactory.GetNode(nodeName) + if err != nil { + klog.Infof("Failed to get node %s for reconciling network %s: %v", nodeName, oc.GetNetworkName(), err) + continue + } err = oc.retryNodes.AddRetryObjWithAddNoBackoff(node) if err != nil { - klog.Errorf("Failed to retry node %s for network %s: %v", node.Name, oc.GetNetworkName(), err) + klog.Errorf("Failed to retry node %s for network %s: %v", nodeName, oc.GetNetworkName(), err) } } - if len(retryNodes) > 0 { + if 
len(reconcileNodes) > 0 { oc.retryNodes.RequestRetryObjs() } @@ -245,7 +260,28 @@ func (oc *BaseNetworkController) reconcile(netInfo util.NetInfo, setNodeFailed f } } - return nil + namespaceAdded := false + for _, ns := range reconcileNamespaces { + namespace, err := oc.watchFactory.GetNamespace(ns) + if err != nil { + klog.Infof("Failed to get namespace %s for reconciling network %s: %v", ns, oc.GetNetworkName(), err) + continue + } + err = oc.retryNamespaces.AddRetryObjWithAddNoBackoff(namespace) + if err != nil { + klog.Infof("Failed to retry namespace %s for network %s: %v", ns, oc.GetNetworkName(), err) + continue + } + namespaceAdded = true + } + if namespaceAdded { + oc.retryNamespaces.RequestRetryObjs() + } + + err := oc.requeuePeerNamespaces(reconcileNamespaces) + if err != nil { + klog.Infof("Failed to retry network policy peer namespaces for network %s: %v", oc.GetNetworkName(), err) + } } // BaseSecondaryNetworkController structure holds per-network fields and network specific diff --git a/go-controller/pkg/ovn/base_network_controller_policy.go b/go-controller/pkg/ovn/base_network_controller_policy.go index f4c10bfacf..4d9fd61781 100644 --- a/go-controller/pkg/ovn/base_network_controller_policy.go +++ b/go-controller/pkg/ovn/base_network_controller_policy.go @@ -10,6 +10,7 @@ import ( corev1 "k8s.io/api/core/v1" knet "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" utilnet "k8s.io/utils/net" @@ -23,6 +24,7 @@ import ( libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/retry" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" utilerrors 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/errors" @@ -162,6 +164,8 @@ type networkPolicy struct { localPodHandler *factory.Handler // peer namespace handlers nsHandlerList []*factory.Handler + // peer namespace reconcilers + reconcilePeerNamespaces []*reconcilePeerNamespaces // peerAddressSets stores PodSelectorAddressSet keys for peers that this network policy was successfully added to. // Required for cleanup. peerAddressSets []string @@ -186,17 +190,23 @@ type networkPolicy struct { cancelableContext *util.CancelableContext } +type reconcilePeerNamespaces struct { + retryNamespaces *retry.RetryFramework + namespaceSelector *metav1.LabelSelector +} + func NewNetworkPolicy(policy *knet.NetworkPolicy) *networkPolicy { policyTypeIngress, policyTypeEgress := getPolicyType(policy) np := &networkPolicy{ - name: policy.Name, - namespace: policy.Namespace, - ingressPolicies: make([]*gressPolicy, 0), - egressPolicies: make([]*gressPolicy, 0), - isIngress: policyTypeIngress, - isEgress: policyTypeEgress, - nsHandlerList: make([]*factory.Handler, 0), - localPods: sync.Map{}, + name: policy.Name, + namespace: policy.Namespace, + ingressPolicies: make([]*gressPolicy, 0), + egressPolicies: make([]*gressPolicy, 0), + isIngress: policyTypeIngress, + isEgress: policyTypeEgress, + nsHandlerList: make([]*factory.Handler, 0), + reconcilePeerNamespaces: make([]*reconcilePeerNamespaces, 0), + localPods: sync.Map{}, } return np } @@ -1490,6 +1500,63 @@ func (bnc *BaseNetworkController) peerNamespaceUpdate(np *networkPolicy, gp *gre return err } +// requeuePeerNamespaces enqueues the namespace into network policy peer namespace +// retry framework object(s) which need to be retried immediately with add event. 
+func (bnc *BaseNetworkController) requeuePeerNamespaces(namespaces []string) error { + npKeys := bnc.networkPolicies.GetKeys() + var errors []error + for _, npKey := range npKeys { + err := bnc.networkPolicies.DoWithLock(npKey, func(npKey string) error { + np, ok := bnc.networkPolicies.Load(npKey) + if !ok { + return nil + } + np.RLock() + defer np.RUnlock() + var errors []error + for _, reconcilePeerNamespace := range np.reconcilePeerNamespaces { + namespaceAdded := false + for _, ns := range namespaces { + namespace, err := bnc.watchFactory.GetNamespace(ns) + if err != nil { + errors = append(errors, fmt.Errorf("failed to retrieve peer namespace %s for network policy %s on network %s: %w", + ns, npKey, bnc.GetNetworkName(), err)) + continue + } + namespaceLabels := labels.Set(namespace.Labels) + peerNamespaceSelector, err := metav1.LabelSelectorAsSelector(reconcilePeerNamespace.namespaceSelector) + if err != nil { + errors = append(errors, fmt.Errorf("failed to parse peer namespace %s selector for network policy %s on network %s: %w", + ns, npKey, bnc.GetNetworkName(), err)) + continue + } + // Filter out namespace when it's labels not matching with network policy peer namespace + // selector. + if !peerNamespaceSelector.Matches(namespaceLabels) { + continue + } + err = reconcilePeerNamespace.retryNamespaces.AddRetryObjWithAddNoBackoff(namespace) + if err != nil { + errors = append(errors, fmt.Errorf("failed to retry peer namespace %s for network policy %s on network %s: %w", + ns, npKey, bnc.GetNetworkName(), err)) + continue + } + namespaceAdded = true + } + if namespaceAdded { + reconcilePeerNamespace.retryNamespaces.RequestRetryObjs() + } + } + return utilerrors.Join(errors...) + }) + if err != nil { + errors = append(errors, fmt.Errorf("failed to retry peer namespaces for network policy %s on network %s: %w", + npKey, bnc.GetNetworkName(), err)) + } + } + return utilerrors.Join(errors...) 
+} + // addPeerNamespaceHandler starts a watcher for PeerNamespaceSelectorType. // Sync function and Add event for every existing namespace will be executed sequentially first, and an error will be // returned if something fails. @@ -1522,7 +1589,17 @@ func (bnc *BaseNetworkController) addPeerNamespaceHandler( klog.Errorf("WatchResource failed for addPeerNamespaceHandler: %v", err) return err } - + // Add peer namespace retry framework object into np.retryPeerNamespaces list so that + // when a new peer namespace is newly created later under UDN network, it gets reconciled + // and address set is created for the namespace. so we must reconcile it for network policy + // as well to update gress policy ACL with matching peer namespace address set. + if util.IsNetworkSegmentationSupportEnabled() && bnc.IsPrimaryNetwork() { + np.Lock() + np.reconcilePeerNamespaces = append(np.reconcilePeerNamespaces, + &reconcilePeerNamespaces{retryNamespaces: retryPeerNamespaces, + namespaceSelector: namespaceSelector}) + np.Unlock() + } np.nsHandlerList = append(np.nsHandlerList, namespaceHandler) return nil } @@ -1540,6 +1617,7 @@ func (bnc *BaseNetworkController) shutdownHandlers(np *networkPolicy) { for _, handler := range np.nsHandlerList { bnc.watchFactory.RemoveNamespaceHandler(handler) } + np.reconcilePeerNamespaces = make([]*reconcilePeerNamespaces, 0) np.nsHandlerList = make([]*factory.Handler, 0) } diff --git a/go-controller/pkg/ovn/gress_policy.go b/go-controller/pkg/ovn/gress_policy.go index c8445e6ed5..bc55cfb689 100644 --- a/go-controller/pkg/ovn/gress_policy.go +++ b/go-controller/pkg/ovn/gress_policy.go @@ -209,6 +209,10 @@ func (gp *gressPolicy) addNamespaceAddressSet(name string, asf addressset.Addres return false, fmt.Errorf("cannot add peer namespace %s: failed to get address set: %v", name, err) } v4HashName, v6HashName := as.GetASHashNames() + if v4HashName == "" && v6HashName == "" { + // This would happen when a namespace is not yet reconciled with UDN 
network. + return false, fmt.Errorf("cannot add peer namespace %s: address set has empty hashed name", name) + } v4HashName = "$" + v4HashName v6HashName = "$" + v6HashName @@ -234,6 +238,9 @@ func (gp *gressPolicy) addNamespaceAddressSet(name string, asf addressset.Addres func (gp *gressPolicy) delNamespaceAddressSet(name string) bool { dbIDs := getNamespaceAddrSetDbIDs(name, gp.controllerName) v4HashName, v6HashName := addressset.GetHashNamesForAS(dbIDs) + if v4HashName == "" && v6HashName == "" { + return false + } v4HashName = "$" + v4HashName v6HashName = "$" + v6HashName diff --git a/test/e2e/network_segmentation_policy.go b/test/e2e/network_segmentation_policy.go index 30bc1dc0a5..8abc3d6791 100644 --- a/test/e2e/network_segmentation_policy.go +++ b/test/e2e/network_segmentation_policy.go @@ -35,6 +35,8 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.Networ randomStringLength = 5 nameSpaceYellowSuffix = "yellow" namespaceBlueSuffix = "blue" + namespaceRedSuffix = "red" + namespaceOrangeSuffix = "orange" ) var ( @@ -57,7 +59,10 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.Networ namespaceYellow := getNamespaceName(f, nameSpaceYellowSuffix) namespaceBlue := getNamespaceName(f, namespaceBlueSuffix) - for _, namespace := range []string{namespaceYellow, namespaceBlue} { + namespaceRed := getNamespaceName(f, namespaceRedSuffix) + namespaceOrange := getNamespaceName(f, namespaceOrangeSuffix) + for _, namespace := range []string{namespaceYellow, namespaceBlue, + namespaceRed, namespaceOrange} { ginkgo.By("Creating namespace " + namespace) ns, err := cs.CoreV1().Namespaces().Create(context.Background(), &v1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -180,11 +185,13 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.Networ namespaceYellow := getNamespaceName(f, nameSpaceYellowSuffix) namespaceBlue := getNamespaceName(f, namespaceBlueSuffix) + namespaceRed := getNamespaceName(f, 
namespaceRedSuffix) + namespaceOrange := getNamespaceName(f, namespaceOrangeSuffix) nad := networkAttachmentConfigParams{ topology: topology, cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), - // Both yellow and blue namespaces are going to served by green network. + // The yellow, blue and red namespaces are going to served by green network. // Use random suffix for the network name to avoid race between tests. networkName: fmt.Sprintf("%s-%s", "green", rand.String(randomStringLength)), role: "primary", @@ -258,8 +265,8 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.Networ return reachServerPodFromClient(cs, denyServerPodConfig, clientPodConfig, denyServerPodIP, port) }, 1*time.Minute, 6*time.Second).ShouldNot(gomega.Succeed()) - ginkgo.By("creating a \"allow-traffic-to-pod\" network policy") - _, err = allowTrafficToPodFromNamespacePolicy(f, namespaceYellow, namespaceBlue, "allow-traffic-to-pod", allowServerPodLabel) + ginkgo.By("creating a \"allow-traffic-to-pod\" network policy for blue and red namespace") + _, err = allowTrafficToPodFromNamespacePolicy(f, namespaceYellow, namespaceBlue, namespaceRed, "allow-traffic-to-pod", allowServerPodLabel) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("asserting the *client* pod can contact the allow server pod exposed endpoint") @@ -272,6 +279,72 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.Networ return reachServerPodFromClient(cs, denyServerPodConfig, clientPodConfig, denyServerPodIP, port) }, 1*time.Minute, 6*time.Second).ShouldNot(gomega.Succeed()) + // Create client pod in red namespace and check network policy is working. 
+ ginkgo.By("creating client pod in red namespace and check if it is in pending state until NAD is created") + clientPodConfig.namespace = namespaceRed + podSpec := generatePodSpec(clientPodConfig) + _, err = cs.CoreV1().Pods(clientPodConfig.namespace).Create( + context.Background(), + podSpec, + metav1.CreateOptions{}, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Consistently(func() v1.PodPhase { + updatedPod, err := cs.CoreV1().Pods(clientPodConfig.namespace).Get(context.Background(), + clientPodConfig.name, metav1.GetOptions{}) + if err != nil { + return v1.PodFailed + } + return updatedPod.Status.Phase + }, 1*time.Minute, 6*time.Second).Should(gomega.Equal(v1.PodPending)) + + ginkgo.By("creating NAD for red and orange namespaces and check pod moves into running state") + for _, namespace := range []string{namespaceRed, namespaceOrange} { + ginkgo.By("creating the attachment configuration for " + netConfName + " in namespace " + namespace) + netConfig := newNetworkAttachmentConfig(nad) + netConfig.namespace = namespace + netConfig.name = netConfName + + _, err := nadClient.NetworkAttachmentDefinitions(namespace).Create( + context.Background(), + generateNAD(netConfig), + metav1.CreateOptions{}, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + gomega.Eventually(func() v1.PodPhase { + updatedPod, err := cs.CoreV1().Pods(clientPodConfig.namespace).Get(context.Background(), + clientPodConfig.name, metav1.GetOptions{}) + if err != nil { + return v1.PodFailed + } + return updatedPod.Status.Phase + }, 1*time.Minute, 6*time.Second).Should(gomega.Equal(v1.PodRunning)) + + ginkgo.By("asserting the *red client* pod can contact the allow server pod exposed endpoint") + gomega.Eventually(func() error { + return reachServerPodFromClient(cs, allowServerPodConfig, clientPodConfig, allowServerPodIP, port) + }, 1*time.Minute, 6*time.Second).Should(gomega.Succeed()) + + ginkgo.By("asserting the *red client* pod can not contact deny server pod exposed 
endpoint") + gomega.Eventually(func() error { + return reachServerPodFromClient(cs, denyServerPodConfig, clientPodConfig, denyServerPodIP, port) + }, 1*time.Minute, 6*time.Second).ShouldNot(gomega.Succeed()) + + // Create client pod in orange namespace now and check network policy is working. + ginkgo.By("creating client pod in orange namespace") + clientPodConfig.namespace = namespaceOrange + runUDNPod(cs, namespaceOrange, clientPodConfig, nil) + + ginkgo.By("asserting the *orange client* pod can not contact the allow server pod exposed endpoint") + gomega.Eventually(func() error { + return reachServerPodFromClient(cs, allowServerPodConfig, clientPodConfig, allowServerPodIP, port) + }, 1*time.Minute, 6*time.Second).ShouldNot(gomega.Succeed()) + + ginkgo.By("asserting the *orange client* pod can not contact deny server pod exposed endpoint") + gomega.Eventually(func() error { + return reachServerPodFromClient(cs, denyServerPodConfig, clientPodConfig, denyServerPodIP, port) + }, 1*time.Minute, 6*time.Second).ShouldNot(gomega.Succeed()) }, ginkgo.Entry( "in L2 primary UDN", @@ -328,7 +401,7 @@ func getNamespaceName(f *framework.Framework, nsSuffix string) string { return fmt.Sprintf("%s-%s", f.Namespace.Name, nsSuffix) } -func allowTrafficToPodFromNamespacePolicy(f *framework.Framework, namespace, fromNamespace, policyName string, podLabel map[string]string) (*knet.NetworkPolicy, error) { +func allowTrafficToPodFromNamespacePolicy(f *framework.Framework, namespace, fromNamespace1, fromNamespace2, policyName string, podLabel map[string]string) (*knet.NetworkPolicy, error) { policy := &knet.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: policyName, @@ -337,7 +410,8 @@ func allowTrafficToPodFromNamespacePolicy(f *framework.Framework, namespace, fro PodSelector: metav1.LabelSelector{MatchLabels: podLabel}, PolicyTypes: []knet.PolicyType{knet.PolicyTypeIngress}, Ingress: []knet.NetworkPolicyIngressRule{{From: []knet.NetworkPolicyPeer{ - {NamespaceSelector: 
&metav1.LabelSelector{MatchLabels: map[string]string{"kubernetes.io/metadata.name": fromNamespace}}}}}}, + {NamespaceSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"kubernetes.io/metadata.name": fromNamespace1}}}, + {NamespaceSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"kubernetes.io/metadata.name": fromNamespace2}}}}}}, }, } return f.ClientSet.NetworkingV1().NetworkPolicies(namespace).Create(context.TODO(), policy, metav1.CreateOptions{}) From 96db6fd1f211477012f989d4e87f9eb3d3b6e1f4 Mon Sep 17 00:00:00 2001 From: Periyasamy Palanisamy Date: Wed, 18 Jun 2025 14:49:55 +0200 Subject: [PATCH 061/181] Use Handler FilterFunc to filter out np peer namespace This commits exports FilterFunc from handler and uses it while reconciling network policy for UDN peer namespaces. Signed-off-by: Periyasamy Palanisamy --- go-controller/pkg/factory/handler.go | 4 + .../pkg/ovn/base_network_controller_policy.go | 77 ++++++++----------- 2 files changed, 38 insertions(+), 43 deletions(-) diff --git a/go-controller/pkg/factory/handler.go b/go-controller/pkg/factory/handler.go index 1e87f7309b..50563b3278 100644 --- a/go-controller/pkg/factory/handler.go +++ b/go-controller/pkg/factory/handler.go @@ -76,6 +76,10 @@ func (h *Handler) OnDelete(obj interface{}) { } } +func (h *Handler) FilterFunc(obj interface{}) bool { + return h.base.FilterFunc(obj) +} + func (h *Handler) kill() bool { return atomic.CompareAndSwapUint32(&h.tombstone, handlerAlive, handlerDead) } diff --git a/go-controller/pkg/ovn/base_network_controller_policy.go b/go-controller/pkg/ovn/base_network_controller_policy.go index 4d9fd61781..1bb39137cf 100644 --- a/go-controller/pkg/ovn/base_network_controller_policy.go +++ b/go-controller/pkg/ovn/base_network_controller_policy.go @@ -10,7 +10,6 @@ import ( corev1 "k8s.io/api/core/v1" knet "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" 
"k8s.io/klog/v2" utilnet "k8s.io/utils/net" @@ -162,10 +161,8 @@ type networkPolicy struct { // network policy owns only 1 local pod handler localPodHandler *factory.Handler - // peer namespace handlers - nsHandlerList []*factory.Handler // peer namespace reconcilers - reconcilePeerNamespaces []*reconcilePeerNamespaces + reconcilePeerNamespaces []*peerNamespacesRetry // peerAddressSets stores PodSelectorAddressSet keys for peers that this network policy was successfully added to. // Required for cleanup. peerAddressSets []string @@ -190,9 +187,9 @@ type networkPolicy struct { cancelableContext *util.CancelableContext } -type reconcilePeerNamespaces struct { - retryNamespaces *retry.RetryFramework - namespaceSelector *metav1.LabelSelector +type peerNamespacesRetry struct { + retryFramework *retry.RetryFramework + handler *factory.Handler } func NewNetworkPolicy(policy *knet.NetworkPolicy) *networkPolicy { @@ -204,8 +201,7 @@ func NewNetworkPolicy(policy *knet.NetworkPolicy) *networkPolicy { egressPolicies: make([]*gressPolicy, 0), isIngress: policyTypeIngress, isEgress: policyTypeEgress, - nsHandlerList: make([]*factory.Handler, 0), - reconcilePeerNamespaces: make([]*reconcilePeerNamespaces, 0), + reconcilePeerNamespaces: make([]*peerNamespacesRetry, 0), localPods: sync.Map{}, } return np @@ -1503,8 +1499,18 @@ func (bnc *BaseNetworkController) peerNamespaceUpdate(np *networkPolicy, gp *gre // requeuePeerNamespaces enqueues the namespace into network policy peer namespace // retry framework object(s) which need to be retried immediately with add event. 
func (bnc *BaseNetworkController) requeuePeerNamespaces(namespaces []string) error { - npKeys := bnc.networkPolicies.GetKeys() var errors []error + var peerNamespaces []*corev1.Namespace + for _, ns := range namespaces { + namespace, err := bnc.watchFactory.GetNamespace(ns) + if err != nil { + errors = append(errors, fmt.Errorf("failed to retrieve namespace %s for reconciling network %s: %w", + ns, bnc.GetNetworkName(), err)) + continue + } + peerNamespaces = append(peerNamespaces, namespace) + } + npKeys := bnc.networkPolicies.GetKeys() for _, npKey := range npKeys { err := bnc.networkPolicies.DoWithLock(npKey, func(npKey string) error { np, ok := bnc.networkPolicies.Load(npKey) @@ -1516,35 +1522,22 @@ func (bnc *BaseNetworkController) requeuePeerNamespaces(namespaces []string) err var errors []error for _, reconcilePeerNamespace := range np.reconcilePeerNamespaces { namespaceAdded := false - for _, ns := range namespaces { - namespace, err := bnc.watchFactory.GetNamespace(ns) - if err != nil { - errors = append(errors, fmt.Errorf("failed to retrieve peer namespace %s for network policy %s on network %s: %w", - ns, npKey, bnc.GetNetworkName(), err)) - continue - } - namespaceLabels := labels.Set(namespace.Labels) - peerNamespaceSelector, err := metav1.LabelSelectorAsSelector(reconcilePeerNamespace.namespaceSelector) - if err != nil { - errors = append(errors, fmt.Errorf("failed to parse peer namespace %s selector for network policy %s on network %s: %w", - ns, npKey, bnc.GetNetworkName(), err)) - continue - } + for _, namespace := range peerNamespaces { // Filter out namespace when it's labels not matching with network policy peer namespace // selector. 
- if !peerNamespaceSelector.Matches(namespaceLabels) { + if !reconcilePeerNamespace.handler.FilterFunc(namespace) { continue } - err = reconcilePeerNamespace.retryNamespaces.AddRetryObjWithAddNoBackoff(namespace) + err := reconcilePeerNamespace.retryFramework.AddRetryObjWithAddNoBackoff(namespace) if err != nil { errors = append(errors, fmt.Errorf("failed to retry peer namespace %s for network policy %s on network %s: %w", - ns, npKey, bnc.GetNetworkName(), err)) + namespace.Name, npKey, bnc.GetNetworkName(), err)) continue } namespaceAdded = true } if namespaceAdded { - reconcilePeerNamespace.retryNamespaces.RequestRetryObjs() + reconcilePeerNamespace.retryFramework.RequestRetryObjs() } } return utilerrors.Join(errors...) @@ -1589,18 +1582,17 @@ func (bnc *BaseNetworkController) addPeerNamespaceHandler( klog.Errorf("WatchResource failed for addPeerNamespaceHandler: %v", err) return err } - // Add peer namespace retry framework object into np.retryPeerNamespaces list so that - // when a new peer namespace is newly created later under UDN network, it gets reconciled - // and address set is created for the namespace. so we must reconcile it for network policy + + // Add peer namespace retry framework object into np.reconcilePeerNamespaces so that when + // a new peer namespace is newly created later under UDN network, it gets reconciled and + // address set is created for the namespace. so we must reconcile it for network policy // as well to update gress policy ACL with matching peer namespace address set. 
- if util.IsNetworkSegmentationSupportEnabled() && bnc.IsPrimaryNetwork() { - np.Lock() - np.reconcilePeerNamespaces = append(np.reconcilePeerNamespaces, - &reconcilePeerNamespaces{retryNamespaces: retryPeerNamespaces, - namespaceSelector: namespaceSelector}) - np.Unlock() - } - np.nsHandlerList = append(np.nsHandlerList, namespaceHandler) + np.Lock() + np.reconcilePeerNamespaces = append(np.reconcilePeerNamespaces, + &peerNamespacesRetry{retryFramework: retryPeerNamespaces, + handler: namespaceHandler}) + np.Unlock() + return nil } @@ -1614,11 +1606,10 @@ func (bnc *BaseNetworkController) shutdownHandlers(np *networkPolicy) { bnc.watchFactory.RemovePodHandler(np.localPodHandler) np.localPodHandler = nil } - for _, handler := range np.nsHandlerList { - bnc.watchFactory.RemoveNamespaceHandler(handler) + for _, retry := range np.reconcilePeerNamespaces { + bnc.watchFactory.RemoveNamespaceHandler(retry.handler) } - np.reconcilePeerNamespaces = make([]*reconcilePeerNamespaces, 0) - np.nsHandlerList = make([]*factory.Handler, 0) + np.reconcilePeerNamespaces = make([]*peerNamespacesRetry, 0) } // The following 2 functions should return the same key for network policy based on k8s on internal networkPolicy object From f792af555c4b7b7b161a753083ed48fb1a3bff7c Mon Sep 17 00:00:00 2001 From: Periyasamy Palanisamy Date: Tue, 24 Jun 2025 13:27:36 +0200 Subject: [PATCH 062/181] Use namespace reconcilation loop for syncing network policies This commit makes network reconcilation loop to sync only namespace object and network policies sync to happen from namespace reconcilation loop. 
Signed-off-by: Periyasamy Palanisamy --- .../pkg/ovn/base_network_controller.go | 9 ++- .../pkg/ovn/base_network_controller_policy.go | 55 ++++++++----------- .../ovn/base_network_controller_secondary.go | 10 +++- test/e2e/network_segmentation_policy.go | 2 + 4 files changed, 37 insertions(+), 39 deletions(-) diff --git a/go-controller/pkg/ovn/base_network_controller.go b/go-controller/pkg/ovn/base_network_controller.go index bdb026752a..51c5c62dec 100644 --- a/go-controller/pkg/ovn/base_network_controller.go +++ b/go-controller/pkg/ovn/base_network_controller.go @@ -260,6 +260,10 @@ func (oc *BaseNetworkController) doReconcile(reconcileRoutes, reconcilePendingPo } } + // reconciles namespaces that were added to the network, this will trigger namespace add event and + // network controller creates the address set for the namespace. + // To update gress policy ACLs with peer namespace address set, invoke requeuePeerNamespace method after + // address set is created for the namespace. namespaceAdded := false for _, ns := range reconcileNamespaces { namespace, err := oc.watchFactory.GetNamespace(ns) @@ -277,11 +281,6 @@ func (oc *BaseNetworkController) doReconcile(reconcileRoutes, reconcilePendingPo if namespaceAdded { oc.retryNamespaces.RequestRetryObjs() } - - err := oc.requeuePeerNamespaces(reconcileNamespaces) - if err != nil { - klog.Infof("Failed to retry network policy peer namespaces for network %s: %v", oc.GetNetworkName(), err) - } } // BaseSecondaryNetworkController structure holds per-network fields and network specific diff --git a/go-controller/pkg/ovn/base_network_controller_policy.go b/go-controller/pkg/ovn/base_network_controller_policy.go index 1bb39137cf..e0acdafbdc 100644 --- a/go-controller/pkg/ovn/base_network_controller_policy.go +++ b/go-controller/pkg/ovn/base_network_controller_policy.go @@ -1496,20 +1496,10 @@ func (bnc *BaseNetworkController) peerNamespaceUpdate(np *networkPolicy, gp *gre return err } -// requeuePeerNamespaces enqueues the 
namespace into network policy peer namespace +// requeuePeerNamespace enqueues the namespace into network policy peer namespace // retry framework object(s) which need to be retried immediately with add event. -func (bnc *BaseNetworkController) requeuePeerNamespaces(namespaces []string) error { +func (bnc *BaseNetworkController) requeuePeerNamespace(namespace *corev1.Namespace) error { var errors []error - var peerNamespaces []*corev1.Namespace - for _, ns := range namespaces { - namespace, err := bnc.watchFactory.GetNamespace(ns) - if err != nil { - errors = append(errors, fmt.Errorf("failed to retrieve namespace %s for reconciling network %s: %w", - ns, bnc.GetNetworkName(), err)) - continue - } - peerNamespaces = append(peerNamespaces, namespace) - } npKeys := bnc.networkPolicies.GetKeys() for _, npKey := range npKeys { err := bnc.networkPolicies.DoWithLock(npKey, func(npKey string) error { @@ -1519,26 +1509,23 @@ func (bnc *BaseNetworkController) requeuePeerNamespaces(namespaces []string) err } np.RLock() defer np.RUnlock() + if np.deleted { + return nil + } var errors []error for _, reconcilePeerNamespace := range np.reconcilePeerNamespaces { - namespaceAdded := false - for _, namespace := range peerNamespaces { - // Filter out namespace when it's labels not matching with network policy peer namespace - // selector. - if !reconcilePeerNamespace.handler.FilterFunc(namespace) { - continue - } - err := reconcilePeerNamespace.retryFramework.AddRetryObjWithAddNoBackoff(namespace) - if err != nil { - errors = append(errors, fmt.Errorf("failed to retry peer namespace %s for network policy %s on network %s: %w", - namespace.Name, npKey, bnc.GetNetworkName(), err)) - continue - } - namespaceAdded = true + // Filter out namespace when it's labels not matching with network policy peer namespace + // selector. 
+ if !reconcilePeerNamespace.handler.FilterFunc(namespace) { + continue } - if namespaceAdded { - reconcilePeerNamespace.retryFramework.RequestRetryObjs() + err := reconcilePeerNamespace.retryFramework.AddRetryObjWithAddNoBackoff(namespace) + if err != nil { + errors = append(errors, fmt.Errorf("failed to retry peer namespace %s for network policy %s on network %s: %w", + namespace.Name, npKey, bnc.GetNetworkName(), err)) + continue } + reconcilePeerNamespace.retryFramework.RequestRetryObjs() } return utilerrors.Join(errors...) }) @@ -1587,11 +1574,13 @@ func (bnc *BaseNetworkController) addPeerNamespaceHandler( // a new peer namespace is newly created later under UDN network, it gets reconciled and // address set is created for the namespace. so we must reconcile it for network policy // as well to update gress policy ACL with matching peer namespace address set. - np.Lock() - np.reconcilePeerNamespaces = append(np.reconcilePeerNamespaces, - &peerNamespacesRetry{retryFramework: retryPeerNamespaces, - handler: namespaceHandler}) - np.Unlock() + if bnc.IsPrimaryNetwork() { + np.Lock() + np.reconcilePeerNamespaces = append(np.reconcilePeerNamespaces, + &peerNamespacesRetry{retryFramework: retryPeerNamespaces, + handler: namespaceHandler}) + np.Unlock() + } return nil } diff --git a/go-controller/pkg/ovn/base_network_controller_secondary.go b/go-controller/pkg/ovn/base_network_controller_secondary.go index cef46aaa6e..1b5cfdd5ac 100644 --- a/go-controller/pkg/ovn/base_network_controller_secondary.go +++ b/go-controller/pkg/ovn/base_network_controller_secondary.go @@ -679,7 +679,15 @@ func (bsnc *BaseSecondaryNetworkController) AddNamespaceForSecondaryNetwork(ns * if err != nil { return fmt.Errorf("failed to ensure namespace locked: %v", err) } - defer nsUnlock() + nsUnlock() + // Enqueue the UDN namespace into network policy controller if it needs to be + // processed by network policy peer namespace handlers. 
+ if bsnc.IsPrimaryNetwork() { + err = bsnc.requeuePeerNamespace(ns) + if err != nil { + return fmt.Errorf("failed to requeue peer namespace %s: %v", ns.Name, err) + } + } return nil } diff --git a/test/e2e/network_segmentation_policy.go b/test/e2e/network_segmentation_policy.go index 8abc3d6791..f00dd63bec 100644 --- a/test/e2e/network_segmentation_policy.go +++ b/test/e2e/network_segmentation_policy.go @@ -298,6 +298,8 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.Networ return updatedPod.Status.Phase }, 1*time.Minute, 6*time.Second).Should(gomega.Equal(v1.PodPending)) + // The pod won't run and the namespace address set won't be created until the NAD for the network is added + // to the namespace and we test here that once that happens the policy is reconciled to account for it. ginkgo.By("creating NAD for red and orange namespaces and check pod moves into running state") for _, namespace := range []string{namespaceRed, namespaceOrange} { ginkgo.By("creating the attachment configuration for " + netConfName + " in namespace " + namespace) From 0b513c6319e3258309ffbba0e67ce7a5d427b17e Mon Sep 17 00:00:00 2001 From: Dave Tucker Date: Fri, 4 Jul 2025 15:47:11 +0100 Subject: [PATCH 063/181] chore: bump libovsdb to v0.8.0 The diff between v0.7.0 and v0.8.0 is simply a rename from ovn-org/libovsdb to ovn-kubernetes/libovsdb. 
Signed-off-by: Dave Tucker --- docs/developer-guide/developer.md | 3 ++- go-controller/.golangci.yml | 7 +++--- go-controller/cmd/ovnkube/ovnkube.go | 2 +- go-controller/go.mod | 2 +- go-controller/go.sum | 4 ++-- go-controller/hack/update-modelgen.sh | 4 ++-- .../observability-lib/ovsdb/bridge.go | 2 +- .../ovsdb/flow_sample_collector_set.go | 2 +- .../observability-lib/ovsdb/interface.go | 2 +- .../observability-lib/ovsdb/observ_model.go | 2 +- .../sampledecoder/db_client.go | 4 ++-- .../sampledecoder/sample_decoder.go | 2 +- go-controller/pkg/cni/cni.go | 2 +- go-controller/pkg/cni/cni_test.go | 2 +- go-controller/pkg/cni/cniserver.go | 2 +- go-controller/pkg/cni/cniserver_test.go | 2 +- go-controller/pkg/cni/types.go | 2 +- .../controllermanager/controller_manager.go | 2 +- .../node_controller_manager.go | 2 +- go-controller/pkg/kubevirt/dhcp.go | 2 +- go-controller/pkg/kubevirt/pod.go | 2 +- go-controller/pkg/kubevirt/router.go | 2 +- go-controller/pkg/libovsdb/libovsdb.go | 4 ++-- go-controller/pkg/libovsdb/ops/acl.go | 4 ++-- go-controller/pkg/libovsdb/ops/address_set.go | 6 ++--- go-controller/pkg/libovsdb/ops/chassis.go | 2 +- go-controller/pkg/libovsdb/ops/copp.go | 4 ++-- go-controller/pkg/libovsdb/ops/dhcp.go | 4 ++-- go-controller/pkg/libovsdb/ops/lbgroup.go | 4 ++-- .../pkg/libovsdb/ops/loadbalancer.go | 6 ++--- go-controller/pkg/libovsdb/ops/mac_binding.go | 2 +- go-controller/pkg/libovsdb/ops/meter.go | 4 ++-- go-controller/pkg/libovsdb/ops/model.go | 6 ++--- .../pkg/libovsdb/ops/model_client.go | 6 ++--- .../pkg/libovsdb/ops/model_client_test.go | 6 ++--- go-controller/pkg/libovsdb/ops/nb_global.go | 2 +- go-controller/pkg/libovsdb/ops/ovs/bridge.go | 2 +- .../pkg/libovsdb/ops/ovs/interface.go | 2 +- .../pkg/libovsdb/ops/ovs/openvswitch.go | 2 +- go-controller/pkg/libovsdb/ops/portbinding.go | 2 +- go-controller/pkg/libovsdb/ops/portgroup.go | 4 ++-- go-controller/pkg/libovsdb/ops/qos.go | 4 ++-- go-controller/pkg/libovsdb/ops/router.go | 4 ++-- 
go-controller/pkg/libovsdb/ops/sample.go | 6 ++--- go-controller/pkg/libovsdb/ops/sb_global.go | 2 +- go-controller/pkg/libovsdb/ops/switch.go | 4 ++-- .../pkg/libovsdb/ops/template_var.go | 4 ++-- go-controller/pkg/libovsdb/ops/transact.go | 6 ++--- go-controller/pkg/libovsdb/util/acl.go | 2 +- .../pkg/libovsdb/util/address_set.go | 4 ++-- .../pkg/libovsdb/util/mac_binding.go | 6 ++--- go-controller/pkg/libovsdb/util/metric.go | 2 +- go-controller/pkg/libovsdb/util/nb_global.go | 2 +- go-controller/pkg/libovsdb/util/port.go | 2 +- go-controller/pkg/libovsdb/util/router.go | 2 +- go-controller/pkg/libovsdb/util/switch.go | 2 +- go-controller/pkg/metrics/metrics.go | 2 +- go-controller/pkg/metrics/ovn.go | 2 +- .../pkg/metrics/ovnkube_controller.go | 6 ++--- go-controller/pkg/metrics/ovs.go | 2 +- .../pkg/metrics/recorders/duration.go | 8 +++---- .../pkg/metrics/recorders/duration_test.go | 2 +- go-controller/pkg/nbdb/acl.go | 2 +- go-controller/pkg/nbdb/address_set.go | 2 +- go-controller/pkg/nbdb/bfd.go | 2 +- .../pkg/nbdb/chassis_template_var.go | 2 +- go-controller/pkg/nbdb/connection.go | 2 +- go-controller/pkg/nbdb/copp.go | 2 +- go-controller/pkg/nbdb/dhcp_options.go | 2 +- go-controller/pkg/nbdb/dhcp_relay.go | 2 +- go-controller/pkg/nbdb/dns.go | 2 +- go-controller/pkg/nbdb/forwarding_group.go | 2 +- go-controller/pkg/nbdb/gateway_chassis.go | 2 +- go-controller/pkg/nbdb/ha_chassis.go | 2 +- go-controller/pkg/nbdb/ha_chassis_group.go | 2 +- go-controller/pkg/nbdb/load_balancer.go | 2 +- go-controller/pkg/nbdb/load_balancer_group.go | 2 +- .../pkg/nbdb/load_balancer_health_check.go | 2 +- go-controller/pkg/nbdb/logical_router.go | 2 +- .../pkg/nbdb/logical_router_policy.go | 2 +- go-controller/pkg/nbdb/logical_router_port.go | 2 +- .../pkg/nbdb/logical_router_static_route.go | 2 +- go-controller/pkg/nbdb/logical_switch.go | 2 +- go-controller/pkg/nbdb/logical_switch_port.go | 2 +- go-controller/pkg/nbdb/meter.go | 2 +- go-controller/pkg/nbdb/meter_band.go | 
2 +- go-controller/pkg/nbdb/mirror.go | 2 +- go-controller/pkg/nbdb/model.go | 4 ++-- go-controller/pkg/nbdb/nat.go | 2 +- go-controller/pkg/nbdb/nb_global.go | 2 +- go-controller/pkg/nbdb/port_group.go | 2 +- go-controller/pkg/nbdb/qos.go | 2 +- go-controller/pkg/nbdb/sample.go | 2 +- go-controller/pkg/nbdb/sample_collector.go | 2 +- go-controller/pkg/nbdb/sampling_app.go | 2 +- go-controller/pkg/nbdb/ssl.go | 2 +- go-controller/pkg/nbdb/static_mac_binding.go | 2 +- .../node/default_node_network_controller.go | 2 +- .../pkg/observability/observability.go | 4 ++-- .../pkg/observability/observability_test.go | 2 +- .../pkg/ovn/address_set/address_set.go | 4 ++-- .../pkg/ovn/address_set/address_set_test.go | 4 ++-- .../pkg/ovn/address_set/fake_address_set.go | 2 +- .../pkg/ovn/address_set/mocks/AddressSet.go | 2 +- .../address_set/mocks/AddressSetFactory.go | 2 +- .../pkg/ovn/base_network_controller.go | 4 ++-- .../pkg/ovn/base_network_controller_pods.go | 4 ++-- .../pkg/ovn/base_network_controller_policy.go | 4 ++-- .../ovn/base_network_controller_secondary.go | 4 ++-- .../admin_network_policy.go | 4 ++-- .../admin_network_policy_controller.go | 2 +- .../controller/admin_network_policy/repair.go | 2 +- .../admin_network_policy/status_test.go | 2 +- .../external_controller_policy_test.go | 2 +- .../controller/apbroute/master_controller.go | 2 +- .../ovn/controller/apbroute/network_client.go | 4 ++-- .../egressservice/egressservice_zone.go | 4 ++-- .../egressservice_zone_service.go | 2 +- .../network_qos/network_qos_controller.go | 2 +- .../network_qos/network_qos_ovnnb.go | 4 ++-- .../network_qos/network_qos_test.go | 2 +- .../ovn/controller/services/loadbalancer.go | 2 +- .../pkg/ovn/controller/services/repair.go | 4 ++-- .../services/services_controller.go | 4 ++-- .../services/services_controller_test.go | 2 +- .../controller/services/svc_template_var.go | 4 ++-- .../udnenabledsvc/udn_enabled_svc.go | 2 +- .../udnenabledsvc/udn_enabled_svc_test.go | 2 +- 
.../pkg/ovn/controller/unidling/unidle.go | 8 +++---- .../ovn/controller/unidling/unidle_test.go | 2 +- go-controller/pkg/ovn/copp.go | 2 +- .../pkg/ovn/default_network_controller.go | 2 +- .../pkg/ovn/dns_name_resolver/dns.go | 2 +- .../dns_name_resolver/dns_name_resolver.go | 2 +- .../pkg/ovn/dns_name_resolver/external_dns.go | 2 +- .../dns_name_resolver/external_dns_test.go | 2 +- .../dns_name_resolver/external_dns_tracker.go | 2 +- go-controller/pkg/ovn/egressfirewall.go | 2 +- go-controller/pkg/ovn/egressgw.go | 4 ++-- go-controller/pkg/ovn/egressip.go | 4 ++-- go-controller/pkg/ovn/egressqos.go | 2 +- .../logical_router_policy_sync.go | 4 ++-- .../ovn/external_ids_syncer/nat/nat_sync.go | 4 ++-- go-controller/pkg/ovn/gateway.go | 2 +- go-controller/pkg/ovn/gateway/gateway.go | 2 +- .../ovn/gatewayrouter/policybasedroutes.go | 2 +- go-controller/pkg/ovn/hybrid.go | 2 +- go-controller/pkg/ovn/hybrid_test.go | 2 +- go-controller/pkg/ovn/master.go | 2 +- go-controller/pkg/ovn/master_test.go | 2 +- go-controller/pkg/ovn/multihoming_test.go | 2 +- go-controller/pkg/ovn/namespace.go | 2 +- go-controller/pkg/ovn/ovn.go | 2 +- go-controller/pkg/ovn/ovn_test.go | 2 +- go-controller/pkg/ovn/pods.go | 2 +- go-controller/pkg/ovn/policy_test.go | 2 +- .../pkg/ovn/routeimport/route_import.go | 4 ++-- .../secondary_layer3_network_controller.go | 4 ++-- .../pkg/ovn/topology/topologyfactory.go | 2 +- .../pkg/ovn/topology/topologyfactory_test.go | 2 +- go-controller/pkg/ovn/udn_isolation.go | 4 ++-- .../ovn/zone_interconnect/chassis_handler.go | 2 +- .../zone_interconnect/chassis_handler_test.go | 2 +- .../ovn/zone_interconnect/zone_ic_handler.go | 4 ++-- .../zone_interconnect/zone_ic_handler_test.go | 2 +- go-controller/pkg/sbdb/address_set.go | 2 +- go-controller/pkg/sbdb/bfd.go | 2 +- go-controller/pkg/sbdb/chassis.go | 2 +- go-controller/pkg/sbdb/chassis_private.go | 2 +- .../pkg/sbdb/chassis_template_var.go | 2 +- go-controller/pkg/sbdb/connection.go | 2 +- 
go-controller/pkg/sbdb/controller_event.go | 2 +- go-controller/pkg/sbdb/datapath_binding.go | 2 +- go-controller/pkg/sbdb/dhcp_options.go | 2 +- go-controller/pkg/sbdb/dhcpv6_options.go | 2 +- go-controller/pkg/sbdb/dns.go | 2 +- go-controller/pkg/sbdb/encap.go | 2 +- go-controller/pkg/sbdb/fdb.go | 2 +- go-controller/pkg/sbdb/gateway_chassis.go | 2 +- go-controller/pkg/sbdb/ha_chassis.go | 2 +- go-controller/pkg/sbdb/ha_chassis_group.go | 2 +- go-controller/pkg/sbdb/igmp_group.go | 2 +- go-controller/pkg/sbdb/ip_multicast.go | 2 +- go-controller/pkg/sbdb/load_balancer.go | 2 +- go-controller/pkg/sbdb/logical_dp_group.go | 2 +- go-controller/pkg/sbdb/logical_flow.go | 2 +- go-controller/pkg/sbdb/mac_binding.go | 2 +- go-controller/pkg/sbdb/meter.go | 2 +- go-controller/pkg/sbdb/meter_band.go | 2 +- go-controller/pkg/sbdb/mirror.go | 2 +- go-controller/pkg/sbdb/model.go | 4 ++-- go-controller/pkg/sbdb/multicast_group.go | 2 +- go-controller/pkg/sbdb/port_binding.go | 2 +- go-controller/pkg/sbdb/port_group.go | 2 +- go-controller/pkg/sbdb/rbac_permission.go | 2 +- go-controller/pkg/sbdb/rbac_role.go | 2 +- go-controller/pkg/sbdb/sb_global.go | 2 +- go-controller/pkg/sbdb/service_monitor.go | 2 +- go-controller/pkg/sbdb/ssl.go | 2 +- go-controller/pkg/sbdb/static_mac_binding.go | 2 +- .../pkg/testing/libovsdb/libovsdb.go | 16 ++++++------- .../pkg/testing/libovsdb/matchers.go | 2 +- go-controller/pkg/testing/libovsdb/ops.go | 2 +- go-controller/pkg/util/ovs.go | 12 +++++----- go-controller/pkg/vswitchd/autoattach.go | 2 +- go-controller/pkg/vswitchd/bridge.go | 2 +- go-controller/pkg/vswitchd/controller.go | 2 +- .../pkg/vswitchd/ct_timeout_policy.go | 2 +- go-controller/pkg/vswitchd/ct_zone.go | 2 +- go-controller/pkg/vswitchd/datapath.go | 2 +- .../pkg/vswitchd/flow_sample_collector_set.go | 2 +- go-controller/pkg/vswitchd/flow_table.go | 2 +- go-controller/pkg/vswitchd/interface.go | 2 +- go-controller/pkg/vswitchd/ipfix.go | 2 +- 
go-controller/pkg/vswitchd/manager.go | 2 +- go-controller/pkg/vswitchd/mirror.go | 2 +- go-controller/pkg/vswitchd/model.go | 4 ++-- go-controller/pkg/vswitchd/netflow.go | 2 +- go-controller/pkg/vswitchd/open_vswitch.go | 2 +- go-controller/pkg/vswitchd/port.go | 2 +- go-controller/pkg/vswitchd/qos.go | 2 +- go-controller/pkg/vswitchd/queue.go | 2 +- go-controller/pkg/vswitchd/sflow.go | 2 +- go-controller/pkg/vswitchd/ssl.go | 2 +- .../libovsdb/LICENSE | 0 .../libovsdb/NOTICE | 0 .../libovsdb/cache/cache.go | 8 +++---- .../libovsdb/cache/doc.go | 0 .../libovsdb/cache/uuidset.go | 0 .../libovsdb/client/api.go | 6 ++--- .../libovsdb/client/api_test_model.go | 6 ++--- .../libovsdb/client/client.go | 10 ++++---- .../libovsdb/client/condition.go | 8 +++---- .../libovsdb/client/config.go | 0 .../libovsdb/client/doc.go | 0 .../libovsdb/client/metrics.go | 0 .../libovsdb/client/monitor.go | 4 ++-- .../libovsdb/client/options.go | 0 .../libovsdb/database/database.go | 4 ++-- .../libovsdb/database/doc.go | 0 .../libovsdb/database/inmemory/doc.go | 0 .../libovsdb/database/inmemory/inmemory.go | 10 ++++---- .../libovsdb/database/references.go | 0 .../libovsdb/database/transaction/doc.go | 0 .../libovsdb/database/transaction/errors.go | 2 +- .../database/transaction/transaction.go | 10 ++++---- .../libovsdb/mapper/info.go | 2 +- .../libovsdb/mapper/mapper.go | 2 +- .../libovsdb/model/client.go | 4 ++-- .../libovsdb/model/database.go | 4 ++-- .../libovsdb/model/model.go | 2 +- .../libovsdb/ovsdb/bindings.go | 0 .../libovsdb/ovsdb/condition.go | 0 .../libovsdb/ovsdb/error.go | 0 .../libovsdb/ovsdb/map.go | 0 .../libovsdb/ovsdb/monitor_select.go | 0 .../libovsdb/ovsdb/mutation.go | 0 .../libovsdb/ovsdb/named_uuid.go | 0 .../libovsdb/ovsdb/notation.go | 0 .../libovsdb/ovsdb/row.go | 0 .../libovsdb/ovsdb/rpc.go | 0 .../libovsdb/ovsdb/schema.go | 0 .../libovsdb/ovsdb/serverdb/.gitignore | 0 .../libovsdb/ovsdb/serverdb/database.go | 2 +- .../libovsdb/ovsdb/serverdb/gen.go | 0 
.../libovsdb/ovsdb/serverdb/model.go | 4 ++-- .../libovsdb/ovsdb/set.go | 0 .../libovsdb/ovsdb/update3.go | 0 .../libovsdb/ovsdb/updates.go | 0 .../libovsdb/ovsdb/updates2.go | 0 .../libovsdb/ovsdb/uuid.go | 0 .../libovsdb/server/doc.go | 0 .../libovsdb/server/monitor.go | 4 ++-- .../libovsdb/server/server.go | 6 ++--- .../libovsdb/updates/difference.go | 0 .../libovsdb/updates/doc.go | 0 .../libovsdb/updates/merge.go | 2 +- .../libovsdb/updates/mutate.go | 2 +- .../libovsdb/updates/references.go | 6 ++--- .../libovsdb/updates/updates.go | 6 ++--- go-controller/vendor/modules.txt | 24 +++++++++---------- 281 files changed, 373 insertions(+), 371 deletions(-) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/LICENSE (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/NOTICE (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/cache/cache.go (99%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/cache/doc.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/cache/uuidset.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/client/api.go (99%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/client/api_test_model.go (98%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/client/client.go (99%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/client/condition.go (97%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/client/config.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/client/doc.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/client/metrics.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/client/monitor.go (97%) rename go-controller/vendor/github.com/{ovn-org => 
ovn-kubernetes}/libovsdb/client/options.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/database/database.go (93%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/database/doc.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/database/inmemory/doc.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/database/inmemory/inmemory.go (93%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/database/references.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/database/transaction/doc.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/database/transaction/errors.go (91%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/database/transaction/transaction.go (98%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/mapper/info.go (99%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/mapper/mapper.go (99%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/model/client.go (98%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/model/database.go (97%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/model/model.go (98%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/ovsdb/bindings.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/ovsdb/condition.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/ovsdb/error.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/ovsdb/map.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/ovsdb/monitor_select.go (100%) rename go-controller/vendor/github.com/{ovn-org => 
ovn-kubernetes}/libovsdb/ovsdb/mutation.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/ovsdb/named_uuid.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/ovsdb/notation.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/ovsdb/row.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/ovsdb/rpc.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/ovsdb/schema.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/ovsdb/serverdb/.gitignore (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/ovsdb/serverdb/database.go (98%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/ovsdb/serverdb/gen.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/ovsdb/serverdb/model.go (95%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/ovsdb/set.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/ovsdb/update3.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/ovsdb/updates.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/ovsdb/updates2.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/ovsdb/uuid.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/server/doc.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/server/monitor.go (98%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/server/server.go (98%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/updates/difference.go (100%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/updates/doc.go (100%) rename 
go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/updates/merge.go (98%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/updates/mutate.go (99%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/updates/references.go (99%) rename go-controller/vendor/github.com/{ovn-org => ovn-kubernetes}/libovsdb/updates/updates.go (99%) diff --git a/docs/developer-guide/developer.md b/docs/developer-guide/developer.md index d67bd62a9d..3dc7d0dfb8 100644 --- a/docs/developer-guide/developer.md +++ b/docs/developer-guide/developer.md @@ -5,11 +5,12 @@ This file aims to have information that is useful to the people contributing to ## Generating ovsdb bindings using modelgen In order to generate the latest NBDB and SBDB bindings, we have a tool called `modelgen` -which lives in the libovsdb repo: https://github.com/ovn-org/libovsdb#modelgen. It is a +which lives in the libovsdb repo: https://github.com/ovn-kubernetes/libovsdb#modelgen. It is a [code generator](https://go.dev/blog/generate) that uses `pkg/nbdb/gen.go` and `pkg/sbdb/gen.go` files to auto-generate the models and additional code like deep-copy methods. 
In order to use this tool do the following: + ``` $ cd go-controller/ $ make modelgen diff --git a/go-controller/.golangci.yml b/go-controller/.golangci.yml index 8f60edab95..d381676a37 100644 --- a/go-controller/.golangci.yml +++ b/go-controller/.golangci.yml @@ -33,6 +33,7 @@ linters-settings: - default - prefix(k8s.io,sigs.k8s.io) - prefix(github.com/ovn-org) + - prefix(github.com/ovn-kubernetes) - localmodule - dot @@ -41,7 +42,7 @@ linters-settings: disable: - fieldalignment - shadow - + importas: no-unaliased: true alias: @@ -57,9 +58,9 @@ linters-settings: - pkg: sigs.k8s.io/controller-runtime alias: ctrl # Other frequently used deps - - pkg: github.com/ovn-org/libovsdb/ovsdb + - pkg: github.com/ovn-kubernetes/libovsdb/ovsdb alias: "" - + revive: rules: # TODO: enable recommended (default) revive rules diff --git a/go-controller/cmd/ovnkube/ovnkube.go b/go-controller/cmd/ovnkube/ovnkube.go index 39548a5c21..8021297d14 100644 --- a/go-controller/cmd/ovnkube/ovnkube.go +++ b/go-controller/cmd/ovnkube/ovnkube.go @@ -22,7 +22,7 @@ import ( "k8s.io/klog/v2" kexec "k8s.io/utils/exec" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/clustermanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" diff --git a/go-controller/go.mod b/go-controller/go.mod index 7868b6ca26..f40f5001e2 100644 --- a/go-controller/go.mod +++ b/go-controller/go.mod @@ -38,7 +38,7 @@ require ( github.com/onsi/gomega v1.36.1 github.com/openshift/api v0.0.0-20231120222239-b86761094ee3 github.com/openshift/client-go v0.0.0-20231121143148-910ca30a1a9a - github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20 + github.com/ovn-kubernetes/libovsdb v0.8.0 github.com/prometheus/client_golang v1.19.1 github.com/prometheus/client_model v0.6.1 github.com/safchain/ethtool v0.3.1-0.20231027162144-83e5e0097c91 diff --git a/go-controller/go.sum b/go-controller/go.sum index 3dcc3208b3..50d5e1270d 
100644 --- a/go-controller/go.sum +++ b/go-controller/go.sum @@ -639,8 +639,8 @@ github.com/openshift/client-go v0.0.0-20231121143148-910ca30a1a9a h1:4FVrw8hz0Wb github.com/openshift/client-go v0.0.0-20231121143148-910ca30a1a9a/go.mod h1:arApQobmOjZqtxw44TwnQdUCH+t9DgZ8geYPFqksHws= github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4= github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= -github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20 h1:OoDvzyaK7F/ZANIIFOgb4Haj7mye3Hle0fYZZNdidSs= -github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20/go.mod h1:dJbxEaalQl83nn904K32FaMjlH/qOObZ0bj4ejQ78AI= +github.com/ovn-kubernetes/libovsdb v0.8.0 h1:cWhqWb5rCiS3yTJ6VJ7s85cElE1NWWJ2XksPGLd5WII= +github.com/ovn-kubernetes/libovsdb v0.8.0/go.mod h1:8nqWvM5pjHRbI5K6Uy/yuA5MdhCnGhNFH5fsSjZD8Rc= github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= diff --git a/go-controller/hack/update-modelgen.sh b/go-controller/hack/update-modelgen.sh index 18c6f1a3cd..c17239f669 100755 --- a/go-controller/hack/update-modelgen.sh +++ b/go-controller/hack/update-modelgen.sh @@ -4,12 +4,12 @@ set -o pipefail # generate ovsdb bindings if ! 
( command -v modelgen > /dev/null ); then - echo "modelgen not found, installing github.com/ovn-org/libovsdb/cmd/modelgen" + echo "modelgen not found, installing github.com/ovn-kubernetes/libovsdb/cmd/modelgen" olddir="${PWD}" builddir="$(mktemp -d)" cd "${builddir}" # ensure the hash value is not outdated, if wrong bindings are being generated re-install modelgen - GO111MODULE=on go install github.com/ovn-org/libovsdb/cmd/modelgen@v0.7.0 + GO111MODULE=on go install github.com/ovn-kubernetes/libovsdb/cmd/modelgen@v0.8.0 cd "${olddir}" if [[ "${builddir}" == /tmp/* ]]; then #paranoia rm -rf "${builddir}" diff --git a/go-controller/observability-lib/ovsdb/bridge.go b/go-controller/observability-lib/ovsdb/bridge.go index d0135c4886..d918918bb0 100644 --- a/go-controller/observability-lib/ovsdb/bridge.go +++ b/go-controller/observability-lib/ovsdb/bridge.go @@ -3,7 +3,7 @@ package ovsdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const BridgeTable = "Bridge" diff --git a/go-controller/observability-lib/ovsdb/flow_sample_collector_set.go b/go-controller/observability-lib/ovsdb/flow_sample_collector_set.go index 57a26e805d..b4b67f6055 100644 --- a/go-controller/observability-lib/ovsdb/flow_sample_collector_set.go +++ b/go-controller/observability-lib/ovsdb/flow_sample_collector_set.go @@ -3,7 +3,7 @@ package ovsdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const FlowSampleCollectorSetTable = "Flow_Sample_Collector_Set" diff --git a/go-controller/observability-lib/ovsdb/interface.go b/go-controller/observability-lib/ovsdb/interface.go index e9f350995c..9e59b20738 100644 --- a/go-controller/observability-lib/ovsdb/interface.go +++ b/go-controller/observability-lib/ovsdb/interface.go @@ -3,7 +3,7 @@ package ovsdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const InterfaceTable = "Interface" diff --git 
a/go-controller/observability-lib/ovsdb/observ_model.go b/go-controller/observability-lib/ovsdb/observ_model.go index 22547a3f8c..4667acf5d5 100644 --- a/go-controller/observability-lib/ovsdb/observ_model.go +++ b/go-controller/observability-lib/ovsdb/observ_model.go @@ -1,6 +1,6 @@ package ovsdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" // ObservDatabaseModel returns the DatabaseModel object to be used by observability library. func ObservDatabaseModel() (model.ClientDBModel, error) { diff --git a/go-controller/observability-lib/sampledecoder/db_client.go b/go-controller/observability-lib/sampledecoder/db_client.go index 5587646356..9d65645601 100644 --- a/go-controller/observability-lib/sampledecoder/db_client.go +++ b/go-controller/observability-lib/sampledecoder/db_client.go @@ -10,8 +10,8 @@ import ( "k8s.io/klog/v2/textlogger" - "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/observability-lib/sampledecoder/sample_decoder.go b/go-controller/observability-lib/sampledecoder/sample_decoder.go index 341a0d1c18..d92c03b3e8 100644 --- a/go-controller/observability-lib/sampledecoder/sample_decoder.go +++ b/go-controller/observability-lib/sampledecoder/sample_decoder.go @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/model" "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb" diff --git a/go-controller/pkg/cni/cni.go b/go-controller/pkg/cni/cni.go index e2cc865265..faf800d52e 100644 --- a/go-controller/pkg/cni/cni.go +++ b/go-controller/pkg/cni/cni.go @@ -15,7 +15,7 @@ import ( 
"k8s.io/klog/v2" utilnet "k8s.io/utils/net" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/udn" diff --git a/go-controller/pkg/cni/cni_test.go b/go-controller/pkg/cni/cni_test.go index ed3f5be1f0..778c83c03c 100644 --- a/go-controller/pkg/cni/cni_test.go +++ b/go-controller/pkg/cni/cni_test.go @@ -15,7 +15,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/kubernetes/fake" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" diff --git a/go-controller/pkg/cni/cniserver.go b/go-controller/pkg/cni/cniserver.go index 4ec851bbb7..17b888dd63 100644 --- a/go-controller/pkg/cni/cniserver.go +++ b/go-controller/pkg/cni/cniserver.go @@ -16,7 +16,7 @@ import ( corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/klog/v2" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" diff --git a/go-controller/pkg/cni/cniserver_test.go b/go-controller/pkg/cni/cniserver_test.go index 65c9962e2a..6edfbf49e2 100644 --- a/go-controller/pkg/cni/cniserver_test.go +++ b/go-controller/pkg/cni/cniserver_test.go @@ -23,7 +23,7 @@ import ( "k8s.io/client-go/kubernetes/fake" utiltesting "k8s.io/client-go/util/testing" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" diff --git a/go-controller/pkg/cni/types.go b/go-controller/pkg/cni/types.go index f6c5e10727..7a20787d73 100644 --- a/go-controller/pkg/cni/types.go +++ 
b/go-controller/pkg/cni/types.go @@ -14,7 +14,7 @@ import ( corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/klog/v2" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" diff --git a/go-controller/pkg/controllermanager/controller_manager.go b/go-controller/pkg/controllermanager/controller_manager.go index 6b5f1e9f89..27db274d05 100644 --- a/go-controller/pkg/controllermanager/controller_manager.go +++ b/go-controller/pkg/controllermanager/controller_manager.go @@ -14,7 +14,7 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" diff --git a/go-controller/pkg/controllermanager/node_controller_manager.go b/go-controller/pkg/controllermanager/node_controller_manager.go index d3af2b3b40..aca81d30f7 100644 --- a/go-controller/pkg/controllermanager/node_controller_manager.go +++ b/go-controller/pkg/controllermanager/node_controller_manager.go @@ -13,7 +13,7 @@ import ( "k8s.io/klog/v2" kexec "k8s.io/utils/exec" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" diff --git a/go-controller/pkg/kubevirt/dhcp.go b/go-controller/pkg/kubevirt/dhcp.go index 5e8534bd71..51cb600ebd 100644 --- a/go-controller/pkg/kubevirt/dhcp.go +++ b/go-controller/pkg/kubevirt/dhcp.go @@ -9,7 +9,7 @@ import ( ktypes "k8s.io/apimachinery/pkg/types" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" diff --git a/go-controller/pkg/kubevirt/pod.go b/go-controller/pkg/kubevirt/pod.go index 48cd5ed1c2..b0f43ffcbf 100644 --- a/go-controller/pkg/kubevirt/pod.go +++ b/go-controller/pkg/kubevirt/pod.go @@ -13,7 +13,7 @@ import ( ktypes "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/retry" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" diff --git a/go-controller/pkg/kubevirt/router.go b/go-controller/pkg/kubevirt/router.go index 06a6499e1f..ed4a5dfab2 100644 --- a/go-controller/pkg/kubevirt/router.go +++ b/go-controller/pkg/kubevirt/router.go @@ -8,7 +8,7 @@ import ( corev1 "k8s.io/api/core/v1" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" diff --git a/go-controller/pkg/libovsdb/libovsdb.go b/go-controller/pkg/libovsdb/libovsdb.go index 40bd1298fe..860ec26698 100644 --- a/go-controller/pkg/libovsdb/libovsdb.go +++ b/go-controller/pkg/libovsdb/libovsdb.go @@ -23,8 +23,8 @@ import ( "k8s.io/klog/v2" "k8s.io/klog/v2/textlogger" - "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/ops/acl.go b/go-controller/pkg/libovsdb/ops/acl.go index cd671595b3..f9987fbeb7 100644 --- a/go-controller/pkg/libovsdb/ops/acl.go +++ b/go-controller/pkg/libovsdb/ops/acl.go @@ -4,8 +4,8 @@ import 
( "context" "fmt" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/ops/address_set.go b/go-controller/pkg/libovsdb/ops/address_set.go index c6a8ce16e7..90d251bbb5 100644 --- a/go-controller/pkg/libovsdb/ops/address_set.go +++ b/go-controller/pkg/libovsdb/ops/address_set.go @@ -3,8 +3,8 @@ package ops import ( "context" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" @@ -18,7 +18,7 @@ type addressSetPredicate func(*nbdb.AddressSet) bool // The purpose is to prevent libovsdb interpreting non-nil empty maps/slices // as default and thus being filtered out of the update. The intention is to // use non-nil empty maps/slices to clear them out in the update. 
-// See: https://github.com/ovn-org/libovsdb/issues/226 +// See: https://github.com/ovn-kubernetes/libovsdb/issues/226 func getNonZeroAddressSetMutableFields(as *nbdb.AddressSet) []interface{} { fields := []interface{}{} if as.Addresses != nil { diff --git a/go-controller/pkg/libovsdb/ops/chassis.go b/go-controller/pkg/libovsdb/ops/chassis.go index 0196da3463..83a2d6a3c2 100644 --- a/go-controller/pkg/libovsdb/ops/chassis.go +++ b/go-controller/pkg/libovsdb/ops/chassis.go @@ -5,7 +5,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" diff --git a/go-controller/pkg/libovsdb/ops/copp.go b/go-controller/pkg/libovsdb/ops/copp.go index dac95c5c0e..a0f8697b1b 100644 --- a/go-controller/pkg/libovsdb/ops/copp.go +++ b/go-controller/pkg/libovsdb/ops/copp.go @@ -1,8 +1,8 @@ package ops import ( - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" ) diff --git a/go-controller/pkg/libovsdb/ops/dhcp.go b/go-controller/pkg/libovsdb/ops/dhcp.go index 94ab12800a..cb03fde11c 100644 --- a/go-controller/pkg/libovsdb/ops/dhcp.go +++ b/go-controller/pkg/libovsdb/ops/dhcp.go @@ -1,8 +1,8 @@ package ops import ( - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" ) diff --git a/go-controller/pkg/libovsdb/ops/lbgroup.go b/go-controller/pkg/libovsdb/ops/lbgroup.go index 8ab75c4d15..829cce5003 100644 --- a/go-controller/pkg/libovsdb/ops/lbgroup.go +++ 
b/go-controller/pkg/libovsdb/ops/lbgroup.go @@ -3,8 +3,8 @@ package ops import ( "context" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/ops/loadbalancer.go b/go-controller/pkg/libovsdb/ops/loadbalancer.go index 221e980b0b..b984c99fce 100644 --- a/go-controller/pkg/libovsdb/ops/loadbalancer.go +++ b/go-controller/pkg/libovsdb/ops/loadbalancer.go @@ -3,8 +3,8 @@ package ops import ( "context" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" @@ -16,7 +16,7 @@ import ( // The purpose is to prevent libovsdb interpreting non-nil empty maps/slices // as default and thus being filtered out of the update. The intention is to // use non-nil empty maps/slices to clear them out in the update. 
-// See: https://github.com/ovn-org/libovsdb/issues/226 +// See: https://github.com/ovn-kubernetes/libovsdb/issues/226 func getNonZeroLoadBalancerMutableFields(lb *nbdb.LoadBalancer) []interface{} { fields := []interface{}{} if lb.Name != "" { diff --git a/go-controller/pkg/libovsdb/ops/mac_binding.go b/go-controller/pkg/libovsdb/ops/mac_binding.go index 1f7a76ba8b..0e1fe6718d 100644 --- a/go-controller/pkg/libovsdb/ops/mac_binding.go +++ b/go-controller/pkg/libovsdb/ops/mac_binding.go @@ -1,7 +1,7 @@ package ops import ( - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" ) diff --git a/go-controller/pkg/libovsdb/ops/meter.go b/go-controller/pkg/libovsdb/ops/meter.go index d08b27ecc1..666da32fa8 100644 --- a/go-controller/pkg/libovsdb/ops/meter.go +++ b/go-controller/pkg/libovsdb/ops/meter.go @@ -5,8 +5,8 @@ import ( "k8s.io/apimachinery/pkg/util/sets" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" ) diff --git a/go-controller/pkg/libovsdb/ops/model.go b/go-controller/pkg/libovsdb/ops/model.go index 76b525fa48..f266a16a64 100644 --- a/go-controller/pkg/libovsdb/ops/model.go +++ b/go-controller/pkg/libovsdb/ops/model.go @@ -4,9 +4,9 @@ import ( "fmt" "reflect" - "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" diff --git a/go-controller/pkg/libovsdb/ops/model_client.go b/go-controller/pkg/libovsdb/ops/model_client.go index 
0668c20399..bf72f086d9 100644 --- a/go-controller/pkg/libovsdb/ops/model_client.go +++ b/go-controller/pkg/libovsdb/ops/model_client.go @@ -8,9 +8,9 @@ import ( "k8s.io/klog/v2" - "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" diff --git a/go-controller/pkg/libovsdb/ops/model_client_test.go b/go-controller/pkg/libovsdb/ops/model_client_test.go index 0ab218c631..52471d6408 100644 --- a/go-controller/pkg/libovsdb/ops/model_client_test.go +++ b/go-controller/pkg/libovsdb/ops/model_client_test.go @@ -8,9 +8,9 @@ import ( "github.com/onsi/gomega/types" - "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" diff --git a/go-controller/pkg/libovsdb/ops/nb_global.go b/go-controller/pkg/libovsdb/ops/nb_global.go index 88d962af0d..dc03be511e 100644 --- a/go-controller/pkg/libovsdb/ops/nb_global.go +++ b/go-controller/pkg/libovsdb/ops/nb_global.go @@ -1,7 +1,7 @@ package ops import ( - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" ) diff --git a/go-controller/pkg/libovsdb/ops/ovs/bridge.go b/go-controller/pkg/libovsdb/ops/ovs/bridge.go index d109f2b1b6..aa2deeb673 100644 --- a/go-controller/pkg/libovsdb/ops/ovs/bridge.go +++ b/go-controller/pkg/libovsdb/ops/ovs/bridge.go @@ -3,7 +3,7 @@ package ovs 
import ( "context" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/vswitchd" diff --git a/go-controller/pkg/libovsdb/ops/ovs/interface.go b/go-controller/pkg/libovsdb/ops/ovs/interface.go index 797e7421ca..41259ef5a4 100644 --- a/go-controller/pkg/libovsdb/ops/ovs/interface.go +++ b/go-controller/pkg/libovsdb/ops/ovs/interface.go @@ -3,7 +3,7 @@ package ovs import ( "context" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/vswitchd" diff --git a/go-controller/pkg/libovsdb/ops/ovs/openvswitch.go b/go-controller/pkg/libovsdb/ops/ovs/openvswitch.go index c6dd53a89f..a19df4e3fa 100644 --- a/go-controller/pkg/libovsdb/ops/ovs/openvswitch.go +++ b/go-controller/pkg/libovsdb/ops/ovs/openvswitch.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/vswitchd" diff --git a/go-controller/pkg/libovsdb/ops/portbinding.go b/go-controller/pkg/libovsdb/ops/portbinding.go index 861a63cb95..0267a794c0 100644 --- a/go-controller/pkg/libovsdb/ops/portbinding.go +++ b/go-controller/pkg/libovsdb/ops/portbinding.go @@ -3,7 +3,7 @@ package ops import ( "fmt" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" ) diff --git a/go-controller/pkg/libovsdb/ops/portgroup.go b/go-controller/pkg/libovsdb/ops/portgroup.go index 37a6a782af..8a7cfb27f1 100644 --- a/go-controller/pkg/libovsdb/ops/portgroup.go +++ 
b/go-controller/pkg/libovsdb/ops/portgroup.go @@ -3,8 +3,8 @@ package ops import ( "context" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/ops/qos.go b/go-controller/pkg/libovsdb/ops/qos.go index 21d6a2f7f8..cfc1d0900a 100644 --- a/go-controller/pkg/libovsdb/ops/qos.go +++ b/go-controller/pkg/libovsdb/ops/qos.go @@ -3,8 +3,8 @@ package ops import ( "context" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/ops/router.go b/go-controller/pkg/libovsdb/ops/router.go index df87307918..18b3931a1f 100644 --- a/go-controller/pkg/libovsdb/ops/router.go +++ b/go-controller/pkg/libovsdb/ops/router.go @@ -8,8 +8,8 @@ import ( "k8s.io/apimachinery/pkg/util/sets" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/ops/sample.go b/go-controller/pkg/libovsdb/ops/sample.go index cb0ddbc6bf..8e7799fce3 100644 --- a/go-controller/pkg/libovsdb/ops/sample.go +++ b/go-controller/pkg/libovsdb/ops/sample.go @@ -5,9 +5,9 @@ import ( "golang.org/x/net/context" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/model" - 
"github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/ops/sb_global.go b/go-controller/pkg/libovsdb/ops/sb_global.go index 3fe14bf42d..28ee3ecad2 100644 --- a/go-controller/pkg/libovsdb/ops/sb_global.go +++ b/go-controller/pkg/libovsdb/ops/sb_global.go @@ -1,7 +1,7 @@ package ops import ( - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" ) diff --git a/go-controller/pkg/libovsdb/ops/switch.go b/go-controller/pkg/libovsdb/ops/switch.go index 01f724b45d..4136f96bba 100644 --- a/go-controller/pkg/libovsdb/ops/switch.go +++ b/go-controller/pkg/libovsdb/ops/switch.go @@ -5,8 +5,8 @@ import ( "errors" "fmt" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/ops/template_var.go b/go-controller/pkg/libovsdb/ops/template_var.go index 9c449d5f6b..ff4edc2f92 100644 --- a/go-controller/pkg/libovsdb/ops/template_var.go +++ b/go-controller/pkg/libovsdb/ops/template_var.go @@ -3,8 +3,8 @@ package ops import ( "context" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git 
a/go-controller/pkg/libovsdb/ops/transact.go b/go-controller/pkg/libovsdb/ops/transact.go index 312cfdaffa..37aaf6808d 100644 --- a/go-controller/pkg/libovsdb/ops/transact.go +++ b/go-controller/pkg/libovsdb/ops/transact.go @@ -9,9 +9,9 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" - "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" ) diff --git a/go-controller/pkg/libovsdb/util/acl.go b/go-controller/pkg/libovsdb/util/acl.go index dbb6c2b3e5..7169b57798 100644 --- a/go-controller/pkg/libovsdb/util/acl.go +++ b/go-controller/pkg/libovsdb/util/acl.go @@ -7,7 +7,7 @@ import ( corev1 "k8s.io/api/core/v1" knet "k8s.io/api/networking/v1" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/util/address_set.go b/go-controller/pkg/libovsdb/util/address_set.go index e4328d43a2..085abf2da6 100644 --- a/go-controller/pkg/libovsdb/util/address_set.go +++ b/go-controller/pkg/libovsdb/util/address_set.go @@ -4,8 +4,8 @@ import ( "fmt" "strings" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/util/mac_binding.go b/go-controller/pkg/libovsdb/util/mac_binding.go index e6f11d8347..d6410a9b65 100644 --- a/go-controller/pkg/libovsdb/util/mac_binding.go 
+++ b/go-controller/pkg/libovsdb/util/mac_binding.go @@ -3,9 +3,9 @@ package util import ( "fmt" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" diff --git a/go-controller/pkg/libovsdb/util/metric.go b/go-controller/pkg/libovsdb/util/metric.go index 06c787f5fe..89015f343e 100644 --- a/go-controller/pkg/libovsdb/util/metric.go +++ b/go-controller/pkg/libovsdb/util/metric.go @@ -3,7 +3,7 @@ package util import ( "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/util/nb_global.go b/go-controller/pkg/libovsdb/util/nb_global.go index 57d9c69b97..d1bc11f0a2 100644 --- a/go-controller/pkg/libovsdb/util/nb_global.go +++ b/go-controller/pkg/libovsdb/util/nb_global.go @@ -3,7 +3,7 @@ package util import ( "fmt" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/util/port.go b/go-controller/pkg/libovsdb/util/port.go index 8e5cbe616f..bc82042419 100644 --- a/go-controller/pkg/libovsdb/util/port.go +++ b/go-controller/pkg/libovsdb/util/port.go @@ -5,7 +5,7 @@ import ( "net" "strings" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" libovsdbops 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/util/router.go b/go-controller/pkg/libovsdb/util/router.go index 12ee755d28..b316fea0e3 100644 --- a/go-controller/pkg/libovsdb/util/router.go +++ b/go-controller/pkg/libovsdb/util/router.go @@ -8,7 +8,7 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/libovsdb/util/switch.go b/go-controller/pkg/libovsdb/util/switch.go index c0a3b0ca52..c3b1eb9e02 100644 --- a/go-controller/pkg/libovsdb/util/switch.go +++ b/go-controller/pkg/libovsdb/util/switch.go @@ -9,7 +9,7 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/metrics/metrics.go b/go-controller/pkg/metrics/metrics.go index ea86a65c0d..b8e9d736c7 100644 --- a/go-controller/pkg/metrics/metrics.go +++ b/go-controller/pkg/metrics/metrics.go @@ -24,7 +24,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" diff --git a/go-controller/pkg/metrics/ovn.go b/go-controller/pkg/metrics/ovn.go index 63f057e38f..51510fd7f9 100644 --- a/go-controller/pkg/metrics/ovn.go +++ b/go-controller/pkg/metrics/ovn.go @@ -9,7 +9,7 @@ import ( "k8s.io/klog/v2" - 
libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" ovsops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/ovs" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" diff --git a/go-controller/pkg/metrics/ovnkube_controller.go b/go-controller/pkg/metrics/ovnkube_controller.go index a4cb9fd693..30c846d07c 100644 --- a/go-controller/pkg/metrics/ovnkube_controller.go +++ b/go-controller/pkg/metrics/ovnkube_controller.go @@ -16,9 +16,9 @@ import ( "k8s.io/client-go/util/workqueue" klog "k8s.io/klog/v2" - "github.com/ovn-org/libovsdb/cache" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/cache" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/metrics/ovs.go b/go-controller/pkg/metrics/ovs.go index 455142ae6b..718fa031e7 100644 --- a/go-controller/pkg/metrics/ovs.go +++ b/go-controller/pkg/metrics/ovs.go @@ -14,7 +14,7 @@ import ( "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" ovsops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/ovs" diff --git a/go-controller/pkg/metrics/recorders/duration.go b/go-controller/pkg/metrics/recorders/duration.go index c0ae704e8f..4376283c20 100644 --- a/go-controller/pkg/metrics/recorders/duration.go +++ b/go-controller/pkg/metrics/recorders/duration.go @@ -11,10 +11,10 @@ import ( "k8s.io/klog/v2" - "github.com/ovn-org/libovsdb/cache" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + 
"github.com/ovn-kubernetes/libovsdb/cache" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/metrics/recorders/duration_test.go b/go-controller/pkg/metrics/recorders/duration_test.go index ee436d8d1b..2e725f99c3 100644 --- a/go-controller/pkg/metrics/recorders/duration_test.go +++ b/go-controller/pkg/metrics/recorders/duration_test.go @@ -12,7 +12,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" fakeclientgo "k8s.io/client-go/kubernetes/fake" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" egressfirewallfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/fake" egressipfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/fake" diff --git a/go-controller/pkg/nbdb/acl.go b/go-controller/pkg/nbdb/acl.go index 0c2840c178..5415af620b 100644 --- a/go-controller/pkg/nbdb/acl.go +++ b/go-controller/pkg/nbdb/acl.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ACLTable = "ACL" diff --git a/go-controller/pkg/nbdb/address_set.go b/go-controller/pkg/nbdb/address_set.go index e8a836e2d1..be37eaf40d 100644 --- a/go-controller/pkg/nbdb/address_set.go +++ b/go-controller/pkg/nbdb/address_set.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const AddressSetTable = "Address_Set" diff --git a/go-controller/pkg/nbdb/bfd.go b/go-controller/pkg/nbdb/bfd.go index 46646e81a7..4211ceae80 100644 --- a/go-controller/pkg/nbdb/bfd.go +++ b/go-controller/pkg/nbdb/bfd.go @@ -3,7 +3,7 @@ package nbdb -import 
"github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const BFDTable = "BFD" diff --git a/go-controller/pkg/nbdb/chassis_template_var.go b/go-controller/pkg/nbdb/chassis_template_var.go index 602c3f5223..59c61d07de 100644 --- a/go-controller/pkg/nbdb/chassis_template_var.go +++ b/go-controller/pkg/nbdb/chassis_template_var.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ChassisTemplateVarTable = "Chassis_Template_Var" diff --git a/go-controller/pkg/nbdb/connection.go b/go-controller/pkg/nbdb/connection.go index baf6da344b..da2aa4bca3 100644 --- a/go-controller/pkg/nbdb/connection.go +++ b/go-controller/pkg/nbdb/connection.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ConnectionTable = "Connection" diff --git a/go-controller/pkg/nbdb/copp.go b/go-controller/pkg/nbdb/copp.go index 1e146b657e..54bbc841f6 100644 --- a/go-controller/pkg/nbdb/copp.go +++ b/go-controller/pkg/nbdb/copp.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const CoppTable = "Copp" diff --git a/go-controller/pkg/nbdb/dhcp_options.go b/go-controller/pkg/nbdb/dhcp_options.go index fd68ebee2d..7b58c1fe35 100644 --- a/go-controller/pkg/nbdb/dhcp_options.go +++ b/go-controller/pkg/nbdb/dhcp_options.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const DHCPOptionsTable = "DHCP_Options" diff --git a/go-controller/pkg/nbdb/dhcp_relay.go b/go-controller/pkg/nbdb/dhcp_relay.go index f0e973ab78..5e10f2aff4 100644 --- a/go-controller/pkg/nbdb/dhcp_relay.go +++ b/go-controller/pkg/nbdb/dhcp_relay.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const DHCPRelayTable = 
"DHCP_Relay" diff --git a/go-controller/pkg/nbdb/dns.go b/go-controller/pkg/nbdb/dns.go index 285d5df280..a15b166a80 100644 --- a/go-controller/pkg/nbdb/dns.go +++ b/go-controller/pkg/nbdb/dns.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const DNSTable = "DNS" diff --git a/go-controller/pkg/nbdb/forwarding_group.go b/go-controller/pkg/nbdb/forwarding_group.go index 1a0657559d..82078551d3 100644 --- a/go-controller/pkg/nbdb/forwarding_group.go +++ b/go-controller/pkg/nbdb/forwarding_group.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ForwardingGroupTable = "Forwarding_Group" diff --git a/go-controller/pkg/nbdb/gateway_chassis.go b/go-controller/pkg/nbdb/gateway_chassis.go index 15935847b8..de6925f4c3 100644 --- a/go-controller/pkg/nbdb/gateway_chassis.go +++ b/go-controller/pkg/nbdb/gateway_chassis.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const GatewayChassisTable = "Gateway_Chassis" diff --git a/go-controller/pkg/nbdb/ha_chassis.go b/go-controller/pkg/nbdb/ha_chassis.go index dc09d1ec9d..8c171ddd09 100644 --- a/go-controller/pkg/nbdb/ha_chassis.go +++ b/go-controller/pkg/nbdb/ha_chassis.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const HAChassisTable = "HA_Chassis" diff --git a/go-controller/pkg/nbdb/ha_chassis_group.go b/go-controller/pkg/nbdb/ha_chassis_group.go index bdda95aaf7..6d304fd2e9 100644 --- a/go-controller/pkg/nbdb/ha_chassis_group.go +++ b/go-controller/pkg/nbdb/ha_chassis_group.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const HAChassisGroupTable = "HA_Chassis_Group" diff --git a/go-controller/pkg/nbdb/load_balancer.go 
b/go-controller/pkg/nbdb/load_balancer.go index 03bcd76011..8bddd25f4a 100644 --- a/go-controller/pkg/nbdb/load_balancer.go +++ b/go-controller/pkg/nbdb/load_balancer.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LoadBalancerTable = "Load_Balancer" diff --git a/go-controller/pkg/nbdb/load_balancer_group.go b/go-controller/pkg/nbdb/load_balancer_group.go index 7759249674..8d39f095ab 100644 --- a/go-controller/pkg/nbdb/load_balancer_group.go +++ b/go-controller/pkg/nbdb/load_balancer_group.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LoadBalancerGroupTable = "Load_Balancer_Group" diff --git a/go-controller/pkg/nbdb/load_balancer_health_check.go b/go-controller/pkg/nbdb/load_balancer_health_check.go index c8163fa007..8fc7020364 100644 --- a/go-controller/pkg/nbdb/load_balancer_health_check.go +++ b/go-controller/pkg/nbdb/load_balancer_health_check.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LoadBalancerHealthCheckTable = "Load_Balancer_Health_Check" diff --git a/go-controller/pkg/nbdb/logical_router.go b/go-controller/pkg/nbdb/logical_router.go index 81c5efaf9d..f303af80fa 100644 --- a/go-controller/pkg/nbdb/logical_router.go +++ b/go-controller/pkg/nbdb/logical_router.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LogicalRouterTable = "Logical_Router" diff --git a/go-controller/pkg/nbdb/logical_router_policy.go b/go-controller/pkg/nbdb/logical_router_policy.go index 7272dbb8ad..51b29ea706 100644 --- a/go-controller/pkg/nbdb/logical_router_policy.go +++ b/go-controller/pkg/nbdb/logical_router_policy.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" 
const LogicalRouterPolicyTable = "Logical_Router_Policy" diff --git a/go-controller/pkg/nbdb/logical_router_port.go b/go-controller/pkg/nbdb/logical_router_port.go index d39fe0db42..1d220b82d1 100644 --- a/go-controller/pkg/nbdb/logical_router_port.go +++ b/go-controller/pkg/nbdb/logical_router_port.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LogicalRouterPortTable = "Logical_Router_Port" diff --git a/go-controller/pkg/nbdb/logical_router_static_route.go b/go-controller/pkg/nbdb/logical_router_static_route.go index ce966e5707..205741626c 100644 --- a/go-controller/pkg/nbdb/logical_router_static_route.go +++ b/go-controller/pkg/nbdb/logical_router_static_route.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LogicalRouterStaticRouteTable = "Logical_Router_Static_Route" diff --git a/go-controller/pkg/nbdb/logical_switch.go b/go-controller/pkg/nbdb/logical_switch.go index 50b8214ad3..8a342dd315 100644 --- a/go-controller/pkg/nbdb/logical_switch.go +++ b/go-controller/pkg/nbdb/logical_switch.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LogicalSwitchTable = "Logical_Switch" diff --git a/go-controller/pkg/nbdb/logical_switch_port.go b/go-controller/pkg/nbdb/logical_switch_port.go index c048f76541..b211672bff 100644 --- a/go-controller/pkg/nbdb/logical_switch_port.go +++ b/go-controller/pkg/nbdb/logical_switch_port.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LogicalSwitchPortTable = "Logical_Switch_Port" diff --git a/go-controller/pkg/nbdb/meter.go b/go-controller/pkg/nbdb/meter.go index 09b7e9e6a4..e3a4a713da 100644 --- a/go-controller/pkg/nbdb/meter.go +++ b/go-controller/pkg/nbdb/meter.go @@ -3,7 +3,7 @@ package nbdb -import 
"github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const MeterTable = "Meter" diff --git a/go-controller/pkg/nbdb/meter_band.go b/go-controller/pkg/nbdb/meter_band.go index 4ef0d901ac..1e1e7ad421 100644 --- a/go-controller/pkg/nbdb/meter_band.go +++ b/go-controller/pkg/nbdb/meter_band.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const MeterBandTable = "Meter_Band" diff --git a/go-controller/pkg/nbdb/mirror.go b/go-controller/pkg/nbdb/mirror.go index 57e3b01f6d..352cc238af 100644 --- a/go-controller/pkg/nbdb/mirror.go +++ b/go-controller/pkg/nbdb/mirror.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const MirrorTable = "Mirror" diff --git a/go-controller/pkg/nbdb/model.go b/go-controller/pkg/nbdb/model.go index daabac4530..9fbe25db4f 100644 --- a/go-controller/pkg/nbdb/model.go +++ b/go-controller/pkg/nbdb/model.go @@ -6,8 +6,8 @@ package nbdb import ( "encoding/json" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // FullDatabaseModel returns the DatabaseModel object to be used in libovsdb diff --git a/go-controller/pkg/nbdb/nat.go b/go-controller/pkg/nbdb/nat.go index 4bd1b7ed49..b10bbd25b3 100644 --- a/go-controller/pkg/nbdb/nat.go +++ b/go-controller/pkg/nbdb/nat.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const NATTable = "NAT" diff --git a/go-controller/pkg/nbdb/nb_global.go b/go-controller/pkg/nbdb/nb_global.go index bae9e20f20..3779d259fe 100644 --- a/go-controller/pkg/nbdb/nb_global.go +++ b/go-controller/pkg/nbdb/nb_global.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const 
NBGlobalTable = "NB_Global" diff --git a/go-controller/pkg/nbdb/port_group.go b/go-controller/pkg/nbdb/port_group.go index bf4fa809bc..525f84d90e 100644 --- a/go-controller/pkg/nbdb/port_group.go +++ b/go-controller/pkg/nbdb/port_group.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const PortGroupTable = "Port_Group" diff --git a/go-controller/pkg/nbdb/qos.go b/go-controller/pkg/nbdb/qos.go index d25322b4b2..3303f61c4d 100644 --- a/go-controller/pkg/nbdb/qos.go +++ b/go-controller/pkg/nbdb/qos.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const QoSTable = "QoS" diff --git a/go-controller/pkg/nbdb/sample.go b/go-controller/pkg/nbdb/sample.go index 639393a1e6..d53ef23825 100644 --- a/go-controller/pkg/nbdb/sample.go +++ b/go-controller/pkg/nbdb/sample.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const SampleTable = "Sample" diff --git a/go-controller/pkg/nbdb/sample_collector.go b/go-controller/pkg/nbdb/sample_collector.go index 50f0659040..487465ee0f 100644 --- a/go-controller/pkg/nbdb/sample_collector.go +++ b/go-controller/pkg/nbdb/sample_collector.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const SampleCollectorTable = "Sample_Collector" diff --git a/go-controller/pkg/nbdb/sampling_app.go b/go-controller/pkg/nbdb/sampling_app.go index a152b4237d..cd7458da83 100644 --- a/go-controller/pkg/nbdb/sampling_app.go +++ b/go-controller/pkg/nbdb/sampling_app.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const SamplingAppTable = "Sampling_App" diff --git a/go-controller/pkg/nbdb/ssl.go b/go-controller/pkg/nbdb/ssl.go index ddaba5d322..847ea8c362 100644 --- 
a/go-controller/pkg/nbdb/ssl.go +++ b/go-controller/pkg/nbdb/ssl.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const SSLTable = "SSL" diff --git a/go-controller/pkg/nbdb/static_mac_binding.go b/go-controller/pkg/nbdb/static_mac_binding.go index 15207e6484..c3397e3e70 100644 --- a/go-controller/pkg/nbdb/static_mac_binding.go +++ b/go-controller/pkg/nbdb/static_mac_binding.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const StaticMACBindingTable = "Static_MAC_Binding" diff --git a/go-controller/pkg/node/default_node_network_controller.go b/go-controller/pkg/node/default_node_network_controller.go index f9f3b36ec5..7a75c36984 100644 --- a/go-controller/pkg/node/default_node_network_controller.go +++ b/go-controller/pkg/node/default_node_network_controller.go @@ -28,7 +28,7 @@ import ( utilnet "k8s.io/utils/net" "sigs.k8s.io/knftables" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" honode "github.com/ovn-org/ovn-kubernetes/go-controller/hybrid-overlay/pkg/controller" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni" diff --git a/go-controller/pkg/observability/observability.go b/go-controller/pkg/observability/observability.go index a3ffbb54f3..9348966f13 100644 --- a/go-controller/pkg/observability/observability.go +++ b/go-controller/pkg/observability/observability.go @@ -10,8 +10,8 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/observability/observability_test.go 
b/go-controller/pkg/observability/observability_test.go index cfda506362..a247150093 100644 --- a/go-controller/pkg/observability/observability_test.go +++ b/go-controller/pkg/observability/observability_test.go @@ -4,7 +4,7 @@ import ( "strings" "time" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/ovn/address_set/address_set.go b/go-controller/pkg/ovn/address_set/address_set.go index a0b709eafc..ea5e035e22 100644 --- a/go-controller/pkg/ovn/address_set/address_set.go +++ b/go-controller/pkg/ovn/address_set/address_set.go @@ -7,8 +7,8 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/ovn/address_set/address_set_test.go b/go-controller/pkg/ovn/address_set/address_set_test.go index 40dec33d24..4c1c0af814 100644 --- a/go-controller/pkg/ovn/address_set/address_set_test.go +++ b/go-controller/pkg/ovn/address_set/address_set_test.go @@ -5,8 +5,8 @@ import ( "github.com/onsi/gomega" "github.com/urfave/cli/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/address_set/fake_address_set.go b/go-controller/pkg/ovn/address_set/fake_address_set.go index 
48f56bb616..2f783b3486 100644 --- a/go-controller/pkg/ovn/address_set/fake_address_set.go +++ b/go-controller/pkg/ovn/address_set/fake_address_set.go @@ -11,7 +11,7 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/address_set/mocks/AddressSet.go b/go-controller/pkg/ovn/address_set/mocks/AddressSet.go index f8b9761b26..f5dd89448f 100644 --- a/go-controller/pkg/ovn/address_set/mocks/AddressSet.go +++ b/go-controller/pkg/ovn/address_set/mocks/AddressSet.go @@ -3,7 +3,7 @@ package mocks import ( - ovsdb "github.com/ovn-org/libovsdb/ovsdb" + ovsdb "github.com/ovn-kubernetes/libovsdb/ovsdb" mock "github.com/stretchr/testify/mock" ) diff --git a/go-controller/pkg/ovn/address_set/mocks/AddressSetFactory.go b/go-controller/pkg/ovn/address_set/mocks/AddressSetFactory.go index f76d6f132a..0d18215185 100644 --- a/go-controller/pkg/ovn/address_set/mocks/AddressSetFactory.go +++ b/go-controller/pkg/ovn/address_set/mocks/AddressSetFactory.go @@ -8,7 +8,7 @@ import ( ops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" - ovsdb "github.com/ovn-org/libovsdb/ovsdb" + ovsdb "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // AddressSetFactory is an autogenerated mock type for the AddressSetFactory type diff --git a/go-controller/pkg/ovn/base_network_controller.go b/go-controller/pkg/ovn/base_network_controller.go index 51c5c62dec..02c82b172f 100644 --- a/go-controller/pkg/ovn/base_network_controller.go +++ b/go-controller/pkg/ovn/base_network_controller.go @@ -22,8 +22,8 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + 
"github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/pod" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" diff --git a/go-controller/pkg/ovn/base_network_controller_pods.go b/go-controller/pkg/ovn/base_network_controller_pods.go index c6a105aa2a..1147983e79 100644 --- a/go-controller/pkg/ovn/base_network_controller_pods.go +++ b/go-controller/pkg/ovn/base_network_controller_pods.go @@ -18,8 +18,8 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ipallocator "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/ip" subnetipallocator "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/ip/subnet" diff --git a/go-controller/pkg/ovn/base_network_controller_policy.go b/go-controller/pkg/ovn/base_network_controller_policy.go index e0acdafbdc..bf1a253518 100644 --- a/go-controller/pkg/ovn/base_network_controller_policy.go +++ b/go-controller/pkg/ovn/base_network_controller_policy.go @@ -14,8 +14,8 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" diff --git a/go-controller/pkg/ovn/base_network_controller_secondary.go b/go-controller/pkg/ovn/base_network_controller_secondary.go index 1b5cfdd5ac..f9c6d0b18f 100644 --- a/go-controller/pkg/ovn/base_network_controller_secondary.go +++ b/go-controller/pkg/ovn/base_network_controller_secondary.go @@ -18,8 +18,8 @@ import ( utilnet "k8s.io/utils/net" "k8s.io/utils/ptr" - libovsdbclient 
"github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" diff --git a/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy.go b/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy.go index 1433544a0b..b1d345bcf4 100644 --- a/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy.go +++ b/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy.go @@ -16,8 +16,8 @@ import ( "k8s.io/klog/v2" anpapi "sigs.k8s.io/network-policy-api/apis/v1alpha1" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" diff --git a/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_controller.go b/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_controller.go index 1d230a6a45..080dd22d19 100644 --- a/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_controller.go +++ b/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_controller.go @@ -21,7 +21,7 @@ import ( anpinformer "sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/apis/v1alpha1" anplister "sigs.k8s.io/network-policy-api/pkg/client/listers/apis/v1alpha1" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" libovsdbops 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/controller/admin_network_policy/repair.go b/go-controller/pkg/ovn/controller/admin_network_policy/repair.go index 55bf85e71f..84c0cf2a50 100644 --- a/go-controller/pkg/ovn/controller/admin_network_policy/repair.go +++ b/go-controller/pkg/ovn/controller/admin_network_policy/repair.go @@ -9,7 +9,7 @@ import ( "k8s.io/klog/v2" anpapi "sigs.k8s.io/network-policy-api/apis/v1alpha1" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/ovn/controller/admin_network_policy/status_test.go b/go-controller/pkg/ovn/controller/admin_network_policy/status_test.go index ac63e873e6..6a28fa60d3 100644 --- a/go-controller/pkg/ovn/controller/admin_network_policy/status_test.go +++ b/go-controller/pkg/ovn/controller/admin_network_policy/status_test.go @@ -13,7 +13,7 @@ import ( anpapi "sigs.k8s.io/network-policy-api/apis/v1alpha1" anpfake "sigs.k8s.io/network-policy-api/pkg/client/clientset/versioned/fake" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" diff --git a/go-controller/pkg/ovn/controller/apbroute/external_controller_policy_test.go b/go-controller/pkg/ovn/controller/apbroute/external_controller_policy_test.go index 802f31e7bb..2605fad7bc 100644 --- a/go-controller/pkg/ovn/controller/apbroute/external_controller_policy_test.go +++ b/go-controller/pkg/ovn/controller/apbroute/external_controller_policy_test.go @@ -15,7 +15,7 @@ import ( "k8s.io/client-go/kubernetes/fake" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient 
"github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" adminpolicybasedrouteapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1" diff --git a/go-controller/pkg/ovn/controller/apbroute/master_controller.go b/go-controller/pkg/ovn/controller/apbroute/master_controller.go index 324da524bd..82549c35dd 100644 --- a/go-controller/pkg/ovn/controller/apbroute/master_controller.go +++ b/go-controller/pkg/ovn/controller/apbroute/master_controller.go @@ -14,7 +14,7 @@ import ( corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" adminpolicybasedrouteapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1" adminpolicybasedrouteapply "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1" diff --git a/go-controller/pkg/ovn/controller/apbroute/network_client.go b/go-controller/pkg/ovn/controller/apbroute/network_client.go index 2f3de1da3a..5faca37f55 100644 --- a/go-controller/pkg/ovn/controller/apbroute/network_client.go +++ b/go-controller/pkg/ovn/controller/apbroute/network_client.go @@ -14,8 +14,8 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" adminpolicybasedroutelisters "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/listers/adminpolicybasedroute/v1" diff --git a/go-controller/pkg/ovn/controller/egressservice/egressservice_zone.go b/go-controller/pkg/ovn/controller/egressservice/egressservice_zone.go index 23f9f8f665..b9c68eb594 100644 --- 
a/go-controller/pkg/ovn/controller/egressservice/egressservice_zone.go +++ b/go-controller/pkg/ovn/controller/egressservice/egressservice_zone.go @@ -23,8 +23,8 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" egressserviceapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1" diff --git a/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_service.go b/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_service.go index e19a3cd77f..192204171b 100644 --- a/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_service.go +++ b/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_service.go @@ -13,7 +13,7 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go index 15511e35d8..3a75fc0f30 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go @@ -21,7 +21,7 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" networkqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" networkqosclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned" diff --git 
a/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go index 82eed9b07e..febc4d1953 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go @@ -6,8 +6,8 @@ import ( "slices" "strconv" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go index fd92922479..4d771825ed 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go @@ -15,7 +15,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" nqostype "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" diff --git a/go-controller/pkg/ovn/controller/services/loadbalancer.go b/go-controller/pkg/ovn/controller/services/loadbalancer.go index 8c3d1c9114..025bb80d95 100644 --- a/go-controller/pkg/ovn/controller/services/loadbalancer.go +++ b/go-controller/pkg/ovn/controller/services/loadbalancer.go @@ -10,7 +10,7 @@ import ( "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/apis/core" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git 
a/go-controller/pkg/ovn/controller/services/repair.go b/go-controller/pkg/ovn/controller/services/repair.go index a9d37389fa..169bc64069 100644 --- a/go-controller/pkg/ovn/controller/services/repair.go +++ b/go-controller/pkg/ovn/controller/services/repair.go @@ -11,8 +11,8 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/ovn/controller/services/services_controller.go b/go-controller/pkg/ovn/controller/services/services_controller.go index e03ad40b5c..664af03536 100644 --- a/go-controller/pkg/ovn/controller/services/services_controller.go +++ b/go-controller/pkg/ovn/controller/services/services_controller.go @@ -28,8 +28,8 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" globalconfig "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" diff --git a/go-controller/pkg/ovn/controller/services/services_controller_test.go b/go-controller/pkg/ovn/controller/services/services_controller_test.go index 6937f6beca..777d175628 100644 --- a/go-controller/pkg/ovn/controller/services/services_controller_test.go +++ b/go-controller/pkg/ovn/controller/services/services_controller_test.go @@ -21,7 +21,7 @@ import ( utilnet "k8s.io/utils/net" "k8s.io/utils/ptr" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" ovncnitypes 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" diff --git a/go-controller/pkg/ovn/controller/services/svc_template_var.go b/go-controller/pkg/ovn/controller/services/svc_template_var.go index 2ffcd03cc7..8cf4ee640b 100644 --- a/go-controller/pkg/ovn/controller/services/svc_template_var.go +++ b/go-controller/pkg/ovn/controller/services/svc_template_var.go @@ -8,8 +8,8 @@ import ( corev1 "k8s.io/api/core/v1" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/ovn/controller/udnenabledsvc/udn_enabled_svc.go b/go-controller/pkg/ovn/controller/udnenabledsvc/udn_enabled_svc.go index 59ac681e07..c96bc3a36d 100644 --- a/go-controller/pkg/ovn/controller/udnenabledsvc/udn_enabled_svc.go +++ b/go-controller/pkg/ovn/controller/udnenabledsvc/udn_enabled_svc.go @@ -17,7 +17,7 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/controller/udnenabledsvc/udn_enabled_svc_test.go b/go-controller/pkg/ovn/controller/udnenabledsvc/udn_enabled_svc_test.go index fd0e09545e..7d3cd9e72f 100644 --- a/go-controller/pkg/ovn/controller/udnenabledsvc/udn_enabled_svc_test.go +++ b/go-controller/pkg/ovn/controller/udnenabledsvc/udn_enabled_svc_test.go @@ -15,7 +15,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "github.com/ovn-org/libovsdb/client" + 
"github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/controller/unidling/unidle.go b/go-controller/pkg/ovn/controller/unidling/unidle.go index bcad0edf4c..d3c65e10fa 100644 --- a/go-controller/pkg/ovn/controller/unidling/unidle.go +++ b/go-controller/pkg/ovn/controller/unidling/unidle.go @@ -12,10 +12,10 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/klog/v2" - libovsdbcache "github.com/ovn-org/libovsdb/cache" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbcache "github.com/ovn-kubernetes/libovsdb/cache" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" diff --git a/go-controller/pkg/ovn/controller/unidling/unidle_test.go b/go-controller/pkg/ovn/controller/unidling/unidle_test.go index 039968d696..3317b65c00 100644 --- a/go-controller/pkg/ovn/controller/unidling/unidle_test.go +++ b/go-controller/pkg/ovn/controller/unidling/unidle_test.go @@ -13,7 +13,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" diff --git a/go-controller/pkg/ovn/copp.go b/go-controller/pkg/ovn/copp.go index 4afc0bba76..39f2f092d4 100644 --- a/go-controller/pkg/ovn/copp.go +++ b/go-controller/pkg/ovn/copp.go @@ -3,7 +3,7 @@ package ovn import ( "fmt" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient 
"github.com/ovn-kubernetes/libovsdb/client" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/ovn/default_network_controller.go b/go-controller/pkg/ovn/default_network_controller.go index d6a0231ea6..ed79067e8e 100644 --- a/go-controller/pkg/ovn/default_network_controller.go +++ b/go-controller/pkg/ovn/default_network_controller.go @@ -366,7 +366,7 @@ func (oc *DefaultNetworkController) init() error { } klog.V(5).Infof("Existing number of nodes: %d", len(existingNodes)) - // FIXME: When https://github.com/ovn-org/libovsdb/issues/235 is fixed, + // FIXME: When https://github.com/ovn-kubernetes/libovsdb/issues/235 is fixed, // use IsTableSupported(nbdb.LoadBalancerGroup). if _, _, err := util.RunOVNNbctl("--columns=_uuid", "list", "Load_Balancer_Group"); err != nil { klog.Warningf("Load Balancer Group support enabled, however version of OVN in use does not support Load Balancer Groups.") diff --git a/go-controller/pkg/ovn/dns_name_resolver/dns.go b/go-controller/pkg/ovn/dns_name_resolver/dns.go index f2ae3ddc2e..17b6b76471 100644 --- a/go-controller/pkg/ovn/dns_name_resolver/dns.go +++ b/go-controller/pkg/ovn/dns_name_resolver/dns.go @@ -9,7 +9,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/dns_name_resolver/dns_name_resolver.go b/go-controller/pkg/ovn/dns_name_resolver/dns_name_resolver.go index 0b8ad32f11..b0d29f0d3d 100644 --- a/go-controller/pkg/ovn/dns_name_resolver/dns_name_resolver.go +++ b/go-controller/pkg/ovn/dns_name_resolver/dns_name_resolver.go @@ -1,7 +1,7 @@ package dnsnameresolver import ( - libovsdbclient 
"github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" ) diff --git a/go-controller/pkg/ovn/dns_name_resolver/external_dns.go b/go-controller/pkg/ovn/dns_name_resolver/external_dns.go index 57bff7dec2..cd542c48e1 100644 --- a/go-controller/pkg/ovn/dns_name_resolver/external_dns.go +++ b/go-controller/pkg/ovn/dns_name_resolver/external_dns.go @@ -14,7 +14,7 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/controller" egressfirewalllister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/listers/egressfirewall/v1" diff --git a/go-controller/pkg/ovn/dns_name_resolver/external_dns_test.go b/go-controller/pkg/ovn/dns_name_resolver/external_dns_test.go index 26e4107244..72661bd4ed 100644 --- a/go-controller/pkg/ovn/dns_name_resolver/external_dns_test.go +++ b/go-controller/pkg/ovn/dns_name_resolver/external_dns_test.go @@ -13,7 +13,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" diff --git a/go-controller/pkg/ovn/dns_name_resolver/external_dns_tracker.go b/go-controller/pkg/ovn/dns_name_resolver/external_dns_tracker.go index 8ace7203e2..730bd026af 100644 --- a/go-controller/pkg/ovn/dns_name_resolver/external_dns_tracker.go +++ b/go-controller/pkg/ovn/dns_name_resolver/external_dns_tracker.go @@ -8,7 +8,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient 
"github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/egressfirewall.go b/go-controller/pkg/ovn/egressfirewall.go index 4e49505d04..20e444b3a7 100644 --- a/go-controller/pkg/ovn/egressfirewall.go +++ b/go-controller/pkg/ovn/egressfirewall.go @@ -20,7 +20,7 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/controller" diff --git a/go-controller/pkg/ovn/egressgw.go b/go-controller/pkg/ovn/egressgw.go index 27f9e2b970..1f28955295 100644 --- a/go-controller/pkg/ovn/egressgw.go +++ b/go-controller/pkg/ovn/egressgw.go @@ -17,8 +17,8 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" diff --git a/go-controller/pkg/ovn/egressip.go b/go-controller/pkg/ovn/egressip.go index d53ba5e633..37f87b695e 100644 --- a/go-controller/pkg/ovn/egressip.go +++ b/go-controller/pkg/ovn/egressip.go @@ -26,8 +26,8 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" diff --git a/go-controller/pkg/ovn/egressqos.go b/go-controller/pkg/ovn/egressqos.go index fc6258408e..605b127d03 
100644 --- a/go-controller/pkg/ovn/egressqos.go +++ b/go-controller/pkg/ovn/egressqos.go @@ -24,7 +24,7 @@ import ( utilnet "k8s.io/utils/net" "k8s.io/utils/ptr" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" egressqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1" diff --git a/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync.go b/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync.go index 01cbd40512..cbe1835322 100644 --- a/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync.go +++ b/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync.go @@ -9,8 +9,8 @@ import ( "k8s.io/klog/v2" utilsnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync.go b/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync.go index 617bddd411..712f6fe541 100644 --- a/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync.go +++ b/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync.go @@ -7,8 +7,8 @@ import ( "k8s.io/klog/v2" utilsnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/ovn/gateway.go 
b/go-controller/pkg/ovn/gateway.go index 7c38289737..54005e4301 100644 --- a/go-controller/pkg/ovn/gateway.go +++ b/go-controller/pkg/ovn/gateway.go @@ -15,7 +15,7 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" diff --git a/go-controller/pkg/ovn/gateway/gateway.go b/go-controller/pkg/ovn/gateway/gateway.go index c6e10ab4a9..f716528810 100644 --- a/go-controller/pkg/ovn/gateway/gateway.go +++ b/go-controller/pkg/ovn/gateway/gateway.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/ovn/gatewayrouter/policybasedroutes.go b/go-controller/pkg/ovn/gatewayrouter/policybasedroutes.go index e2866ba946..4f61101282 100644 --- a/go-controller/pkg/ovn/gatewayrouter/policybasedroutes.go +++ b/go-controller/pkg/ovn/gatewayrouter/policybasedroutes.go @@ -10,7 +10,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" utilnet "k8s.io/utils/net" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/ovn/hybrid.go b/go-controller/pkg/ovn/hybrid.go index 7c84dea2aa..41f98075f1 100644 --- a/go-controller/pkg/ovn/hybrid.go +++ b/go-controller/pkg/ovn/hybrid.go @@ -12,7 +12,7 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" hotypes 
"github.com/ovn-org/ovn-kubernetes/go-controller/hybrid-overlay/pkg/types" houtil "github.com/ovn-org/ovn-kubernetes/go-controller/hybrid-overlay/pkg/util" diff --git a/go-controller/pkg/ovn/hybrid_test.go b/go-controller/pkg/ovn/hybrid_test.go index 1663e5a8f8..fab60e2c3b 100644 --- a/go-controller/pkg/ovn/hybrid_test.go +++ b/go-controller/pkg/ovn/hybrid_test.go @@ -20,7 +20,7 @@ import ( "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/record" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" hotypes "github.com/ovn-org/ovn-kubernetes/go-controller/hybrid-overlay/pkg/types" cm "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/clustermanager" diff --git a/go-controller/pkg/ovn/master.go b/go-controller/pkg/ovn/master.go index c2ca98a59e..f85cdb75c3 100644 --- a/go-controller/pkg/ovn/master.go +++ b/go-controller/pkg/ovn/master.go @@ -11,7 +11,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" hotypes "github.com/ovn-org/ovn-kubernetes/go-controller/hybrid-overlay/pkg/types" houtil "github.com/ovn-org/ovn-kubernetes/go-controller/hybrid-overlay/pkg/util" diff --git a/go-controller/pkg/ovn/master_test.go b/go-controller/pkg/ovn/master_test.go index 0c3ba9e7a8..5c1c8b2c4b 100644 --- a/go-controller/pkg/ovn/master_test.go +++ b/go-controller/pkg/ovn/master_test.go @@ -23,7 +23,7 @@ import ( clienttesting "k8s.io/client-go/testing" "k8s.io/client-go/tools/record" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" egressfirewallfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/fake" diff --git a/go-controller/pkg/ovn/multihoming_test.go b/go-controller/pkg/ovn/multihoming_test.go index 
ab3d12425a..a7b69c3fb9 100644 --- a/go-controller/pkg/ovn/multihoming_test.go +++ b/go-controller/pkg/ovn/multihoming_test.go @@ -11,7 +11,7 @@ import ( corev1 "k8s.io/api/core/v1" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" diff --git a/go-controller/pkg/ovn/namespace.go b/go-controller/pkg/ovn/namespace.go index 127c034735..01f189228b 100644 --- a/go-controller/pkg/ovn/namespace.go +++ b/go-controller/pkg/ovn/namespace.go @@ -9,7 +9,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" diff --git a/go-controller/pkg/ovn/ovn.go b/go-controller/pkg/ovn/ovn.go index 692c768d95..c6a53ee34e 100644 --- a/go-controller/pkg/ovn/ovn.go +++ b/go-controller/pkg/ovn/ovn.go @@ -17,7 +17,7 @@ import ( ref "k8s.io/client-go/tools/reference" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kubevirt" diff --git a/go-controller/pkg/ovn/ovn_test.go b/go-controller/pkg/ovn/ovn_test.go index 801777854a..0a1b9e3c8f 100644 --- a/go-controller/pkg/ovn/ovn_test.go +++ b/go-controller/pkg/ovn/ovn_test.go @@ -25,7 +25,7 @@ import ( anpapi "sigs.k8s.io/network-policy-api/apis/v1alpha1" anpfake "sigs.k8s.io/network-policy-api/pkg/client/clientset/versioned/fake" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" diff --git a/go-controller/pkg/ovn/pods.go b/go-controller/pkg/ovn/pods.go index 949d48da55..9f39376d9e 100644 --- a/go-controller/pkg/ovn/pods.go +++ b/go-controller/pkg/ovn/pods.go @@ -13,7 +13,7 @@ import ( ktypes "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" hotypes "github.com/ovn-org/ovn-kubernetes/go-controller/hybrid-overlay/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" diff --git a/go-controller/pkg/ovn/policy_test.go b/go-controller/pkg/ovn/policy_test.go index 657f3d074a..bcfb4898a3 100644 --- a/go-controller/pkg/ovn/policy_test.go +++ b/go-controller/pkg/ovn/policy_test.go @@ -20,7 +20,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/routeimport/route_import.go b/go-controller/pkg/ovn/routeimport/route_import.go index 94da3d34fe..18c372c276 100644 --- a/go-controller/pkg/ovn/routeimport/route_import.go +++ b/go-controller/pkg/ovn/routeimport/route_import.go @@ -15,8 +15,8 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" - "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" controllerutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/controller" nbdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/secondary_layer3_network_controller.go b/go-controller/pkg/ovn/secondary_layer3_network_controller.go index a6c2d500bd..15fdb98aa7 100644 --- 
a/go-controller/pkg/ovn/secondary_layer3_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer3_network_controller.go @@ -12,7 +12,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/pod" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" @@ -684,7 +684,7 @@ func (oc *SecondaryLayer3NetworkController) init() error { } } - // FIXME: When https://github.com/ovn-org/libovsdb/issues/235 is fixed, + // FIXME: When https://github.com/ovn-kubernetes/libovsdb/issues/235 is fixed, // use IsTableSupported(nbdb.LoadBalancerGroup). if _, _, err := util.RunOVNNbctl("--columns=_uuid", "list", "Load_Balancer_Group"); err != nil { klog.Warningf("Load Balancer Group support enabled, however version of OVN in use does not support Load Balancer Groups.") diff --git a/go-controller/pkg/ovn/topology/topologyfactory.go b/go-controller/pkg/ovn/topology/topologyfactory.go index 8781612242..d9a1980cbc 100644 --- a/go-controller/pkg/ovn/topology/topologyfactory.go +++ b/go-controller/pkg/ovn/topology/topologyfactory.go @@ -4,7 +4,7 @@ import ( "fmt" "net" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/ovn/topology/topologyfactory_test.go b/go-controller/pkg/ovn/topology/topologyfactory_test.go index 01b113c97e..af8a036d6f 100644 --- a/go-controller/pkg/ovn/topology/topologyfactory_test.go +++ b/go-controller/pkg/ovn/topology/topologyfactory_test.go @@ -5,7 +5,7 @@ import ( cnitypes "github.com/containernetworking/cni/pkg/types" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" ovncnitypes 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" diff --git a/go-controller/pkg/ovn/udn_isolation.go b/go-controller/pkg/ovn/udn_isolation.go index 6c44489f9c..d9f6ddfde1 100644 --- a/go-controller/pkg/ovn/udn_isolation.go +++ b/go-controller/pkg/ovn/udn_isolation.go @@ -10,8 +10,8 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" diff --git a/go-controller/pkg/ovn/zone_interconnect/chassis_handler.go b/go-controller/pkg/ovn/zone_interconnect/chassis_handler.go index 172cac5e33..b838221892 100644 --- a/go-controller/pkg/ovn/zone_interconnect/chassis_handler.go +++ b/go-controller/pkg/ovn/zone_interconnect/chassis_handler.go @@ -10,7 +10,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/zone_interconnect/chassis_handler_test.go b/go-controller/pkg/ovn/zone_interconnect/chassis_handler_test.go index 05b9fb6b9c..df74e807d1 100644 --- a/go-controller/pkg/ovn/zone_interconnect/chassis_handler_test.go +++ b/go-controller/pkg/ovn/zone_interconnect/chassis_handler_test.go @@ -10,7 +10,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go index f484bc1528..9d088e6659 100644 --- a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go +++ b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go @@ -12,8 +12,8 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler_test.go b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler_test.go index 8af1215714..c0a54a1d61 100644 --- a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler_test.go +++ b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler_test.go @@ -13,7 +13,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" diff --git a/go-controller/pkg/sbdb/address_set.go b/go-controller/pkg/sbdb/address_set.go index b3b1c3c2d8..88b221dedf 100644 --- a/go-controller/pkg/sbdb/address_set.go +++ b/go-controller/pkg/sbdb/address_set.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const AddressSetTable = "Address_Set" diff --git a/go-controller/pkg/sbdb/bfd.go b/go-controller/pkg/sbdb/bfd.go index 
cf27814b51..eb3822e902 100644 --- a/go-controller/pkg/sbdb/bfd.go +++ b/go-controller/pkg/sbdb/bfd.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const BFDTable = "BFD" diff --git a/go-controller/pkg/sbdb/chassis.go b/go-controller/pkg/sbdb/chassis.go index 3526f096f2..3cbffee206 100644 --- a/go-controller/pkg/sbdb/chassis.go +++ b/go-controller/pkg/sbdb/chassis.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ChassisTable = "Chassis" diff --git a/go-controller/pkg/sbdb/chassis_private.go b/go-controller/pkg/sbdb/chassis_private.go index 1e8c3764bd..dc848a1569 100644 --- a/go-controller/pkg/sbdb/chassis_private.go +++ b/go-controller/pkg/sbdb/chassis_private.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ChassisPrivateTable = "Chassis_Private" diff --git a/go-controller/pkg/sbdb/chassis_template_var.go b/go-controller/pkg/sbdb/chassis_template_var.go index 212e772be6..2e8213ade8 100644 --- a/go-controller/pkg/sbdb/chassis_template_var.go +++ b/go-controller/pkg/sbdb/chassis_template_var.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ChassisTemplateVarTable = "Chassis_Template_Var" diff --git a/go-controller/pkg/sbdb/connection.go b/go-controller/pkg/sbdb/connection.go index 8f96f54226..2deb8bd30a 100644 --- a/go-controller/pkg/sbdb/connection.go +++ b/go-controller/pkg/sbdb/connection.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ConnectionTable = "Connection" diff --git a/go-controller/pkg/sbdb/controller_event.go b/go-controller/pkg/sbdb/controller_event.go index 741ffd028a..0233181ca6 100644 --- a/go-controller/pkg/sbdb/controller_event.go +++ 
b/go-controller/pkg/sbdb/controller_event.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ControllerEventTable = "Controller_Event" diff --git a/go-controller/pkg/sbdb/datapath_binding.go b/go-controller/pkg/sbdb/datapath_binding.go index 10247286f7..295660e9c3 100644 --- a/go-controller/pkg/sbdb/datapath_binding.go +++ b/go-controller/pkg/sbdb/datapath_binding.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const DatapathBindingTable = "Datapath_Binding" diff --git a/go-controller/pkg/sbdb/dhcp_options.go b/go-controller/pkg/sbdb/dhcp_options.go index e9ec44ce29..e0bb7627f1 100644 --- a/go-controller/pkg/sbdb/dhcp_options.go +++ b/go-controller/pkg/sbdb/dhcp_options.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const DHCPOptionsTable = "DHCP_Options" diff --git a/go-controller/pkg/sbdb/dhcpv6_options.go b/go-controller/pkg/sbdb/dhcpv6_options.go index 908d1e0ad0..95a2a8d8f4 100644 --- a/go-controller/pkg/sbdb/dhcpv6_options.go +++ b/go-controller/pkg/sbdb/dhcpv6_options.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const DHCPv6OptionsTable = "DHCPv6_Options" diff --git a/go-controller/pkg/sbdb/dns.go b/go-controller/pkg/sbdb/dns.go index 95c0a52d1e..c044f990b0 100644 --- a/go-controller/pkg/sbdb/dns.go +++ b/go-controller/pkg/sbdb/dns.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const DNSTable = "DNS" diff --git a/go-controller/pkg/sbdb/encap.go b/go-controller/pkg/sbdb/encap.go index 9a2f17fba2..4c524a52ca 100644 --- a/go-controller/pkg/sbdb/encap.go +++ b/go-controller/pkg/sbdb/encap.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" 
+import "github.com/ovn-kubernetes/libovsdb/model" const EncapTable = "Encap" diff --git a/go-controller/pkg/sbdb/fdb.go b/go-controller/pkg/sbdb/fdb.go index 8253e7059b..346593ac6f 100644 --- a/go-controller/pkg/sbdb/fdb.go +++ b/go-controller/pkg/sbdb/fdb.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const FDBTable = "FDB" diff --git a/go-controller/pkg/sbdb/gateway_chassis.go b/go-controller/pkg/sbdb/gateway_chassis.go index a84ad7fc47..f08883222d 100644 --- a/go-controller/pkg/sbdb/gateway_chassis.go +++ b/go-controller/pkg/sbdb/gateway_chassis.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const GatewayChassisTable = "Gateway_Chassis" diff --git a/go-controller/pkg/sbdb/ha_chassis.go b/go-controller/pkg/sbdb/ha_chassis.go index b0b3cebbba..b40d7999e3 100644 --- a/go-controller/pkg/sbdb/ha_chassis.go +++ b/go-controller/pkg/sbdb/ha_chassis.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const HAChassisTable = "HA_Chassis" diff --git a/go-controller/pkg/sbdb/ha_chassis_group.go b/go-controller/pkg/sbdb/ha_chassis_group.go index 1cc013c705..72a5622f5b 100644 --- a/go-controller/pkg/sbdb/ha_chassis_group.go +++ b/go-controller/pkg/sbdb/ha_chassis_group.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const HAChassisGroupTable = "HA_Chassis_Group" diff --git a/go-controller/pkg/sbdb/igmp_group.go b/go-controller/pkg/sbdb/igmp_group.go index 73a0bb9437..19381eb855 100644 --- a/go-controller/pkg/sbdb/igmp_group.go +++ b/go-controller/pkg/sbdb/igmp_group.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const IGMPGroupTable = "IGMP_Group" diff --git 
a/go-controller/pkg/sbdb/ip_multicast.go b/go-controller/pkg/sbdb/ip_multicast.go index 493cd342d2..902b7204f1 100644 --- a/go-controller/pkg/sbdb/ip_multicast.go +++ b/go-controller/pkg/sbdb/ip_multicast.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const IPMulticastTable = "IP_Multicast" diff --git a/go-controller/pkg/sbdb/load_balancer.go b/go-controller/pkg/sbdb/load_balancer.go index bc341807e7..7bf4da265a 100644 --- a/go-controller/pkg/sbdb/load_balancer.go +++ b/go-controller/pkg/sbdb/load_balancer.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LoadBalancerTable = "Load_Balancer" diff --git a/go-controller/pkg/sbdb/logical_dp_group.go b/go-controller/pkg/sbdb/logical_dp_group.go index 911de2eed0..86727f4486 100644 --- a/go-controller/pkg/sbdb/logical_dp_group.go +++ b/go-controller/pkg/sbdb/logical_dp_group.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LogicalDPGroupTable = "Logical_DP_Group" diff --git a/go-controller/pkg/sbdb/logical_flow.go b/go-controller/pkg/sbdb/logical_flow.go index 42af1cdf54..da2341990d 100644 --- a/go-controller/pkg/sbdb/logical_flow.go +++ b/go-controller/pkg/sbdb/logical_flow.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LogicalFlowTable = "Logical_Flow" diff --git a/go-controller/pkg/sbdb/mac_binding.go b/go-controller/pkg/sbdb/mac_binding.go index 705431f1d0..9764c6dc35 100644 --- a/go-controller/pkg/sbdb/mac_binding.go +++ b/go-controller/pkg/sbdb/mac_binding.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const MACBindingTable = "MAC_Binding" diff --git a/go-controller/pkg/sbdb/meter.go 
b/go-controller/pkg/sbdb/meter.go index 95c4daec2f..9d86874c0b 100644 --- a/go-controller/pkg/sbdb/meter.go +++ b/go-controller/pkg/sbdb/meter.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const MeterTable = "Meter" diff --git a/go-controller/pkg/sbdb/meter_band.go b/go-controller/pkg/sbdb/meter_band.go index addb01b645..10d3d740f8 100644 --- a/go-controller/pkg/sbdb/meter_band.go +++ b/go-controller/pkg/sbdb/meter_band.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const MeterBandTable = "Meter_Band" diff --git a/go-controller/pkg/sbdb/mirror.go b/go-controller/pkg/sbdb/mirror.go index 69444ea735..b9139214ca 100644 --- a/go-controller/pkg/sbdb/mirror.go +++ b/go-controller/pkg/sbdb/mirror.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const MirrorTable = "Mirror" diff --git a/go-controller/pkg/sbdb/model.go b/go-controller/pkg/sbdb/model.go index bc838fe497..c5420638e5 100644 --- a/go-controller/pkg/sbdb/model.go +++ b/go-controller/pkg/sbdb/model.go @@ -6,8 +6,8 @@ package sbdb import ( "encoding/json" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // FullDatabaseModel returns the DatabaseModel object to be used in libovsdb diff --git a/go-controller/pkg/sbdb/multicast_group.go b/go-controller/pkg/sbdb/multicast_group.go index 1af933ea6c..b8e2a828d9 100644 --- a/go-controller/pkg/sbdb/multicast_group.go +++ b/go-controller/pkg/sbdb/multicast_group.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const MulticastGroupTable = "Multicast_Group" diff --git a/go-controller/pkg/sbdb/port_binding.go b/go-controller/pkg/sbdb/port_binding.go 
index b3d30f843a..48668023fc 100644 --- a/go-controller/pkg/sbdb/port_binding.go +++ b/go-controller/pkg/sbdb/port_binding.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const PortBindingTable = "Port_Binding" diff --git a/go-controller/pkg/sbdb/port_group.go b/go-controller/pkg/sbdb/port_group.go index 358e26b33d..e197ae6e4d 100644 --- a/go-controller/pkg/sbdb/port_group.go +++ b/go-controller/pkg/sbdb/port_group.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const PortGroupTable = "Port_Group" diff --git a/go-controller/pkg/sbdb/rbac_permission.go b/go-controller/pkg/sbdb/rbac_permission.go index 9d760527e9..228c56bfe8 100644 --- a/go-controller/pkg/sbdb/rbac_permission.go +++ b/go-controller/pkg/sbdb/rbac_permission.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const RBACPermissionTable = "RBAC_Permission" diff --git a/go-controller/pkg/sbdb/rbac_role.go b/go-controller/pkg/sbdb/rbac_role.go index ce8798645c..427582d3b8 100644 --- a/go-controller/pkg/sbdb/rbac_role.go +++ b/go-controller/pkg/sbdb/rbac_role.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const RBACRoleTable = "RBAC_Role" diff --git a/go-controller/pkg/sbdb/sb_global.go b/go-controller/pkg/sbdb/sb_global.go index 2374478db7..667fdae3e0 100644 --- a/go-controller/pkg/sbdb/sb_global.go +++ b/go-controller/pkg/sbdb/sb_global.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const SBGlobalTable = "SB_Global" diff --git a/go-controller/pkg/sbdb/service_monitor.go b/go-controller/pkg/sbdb/service_monitor.go index d3e1188680..189f09f659 100644 --- a/go-controller/pkg/sbdb/service_monitor.go +++ 
b/go-controller/pkg/sbdb/service_monitor.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ServiceMonitorTable = "Service_Monitor" diff --git a/go-controller/pkg/sbdb/ssl.go b/go-controller/pkg/sbdb/ssl.go index 3fab5fd1e9..08c8e641cf 100644 --- a/go-controller/pkg/sbdb/ssl.go +++ b/go-controller/pkg/sbdb/ssl.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const SSLTable = "SSL" diff --git a/go-controller/pkg/sbdb/static_mac_binding.go b/go-controller/pkg/sbdb/static_mac_binding.go index 370968f604..8a3c590e31 100644 --- a/go-controller/pkg/sbdb/static_mac_binding.go +++ b/go-controller/pkg/sbdb/static_mac_binding.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const StaticMACBindingTable = "Static_MAC_Binding" diff --git a/go-controller/pkg/testing/libovsdb/libovsdb.go b/go-controller/pkg/testing/libovsdb/libovsdb.go index 8f10bab356..a6836811d1 100644 --- a/go-controller/pkg/testing/libovsdb/libovsdb.go +++ b/go-controller/pkg/testing/libovsdb/libovsdb.go @@ -21,14 +21,14 @@ import ( "k8s.io/apimachinery/pkg/util/wait" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/database" - "github.com/ovn-org/libovsdb/database/inmemory" - "github.com/ovn-org/libovsdb/mapper" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" - "github.com/ovn-org/libovsdb/ovsdb/serverdb" - "github.com/ovn-org/libovsdb/server" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/database" + "github.com/ovn-kubernetes/libovsdb/database/inmemory" + "github.com/ovn-kubernetes/libovsdb/mapper" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb" + 
"github.com/ovn-kubernetes/libovsdb/server" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cryptorand" diff --git a/go-controller/pkg/testing/libovsdb/matchers.go b/go-controller/pkg/testing/libovsdb/matchers.go index 102d8fbc63..1ff3977065 100644 --- a/go-controller/pkg/testing/libovsdb/matchers.go +++ b/go-controller/pkg/testing/libovsdb/matchers.go @@ -9,7 +9,7 @@ import ( gomegaformat "github.com/onsi/gomega/format" gomegatypes "github.com/onsi/gomega/types" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" ) // isSetEqual compares a slice as an unordered set diff --git a/go-controller/pkg/testing/libovsdb/ops.go b/go-controller/pkg/testing/libovsdb/ops.go index 1926bbc3f5..de73c1d154 100644 --- a/go-controller/pkg/testing/libovsdb/ops.go +++ b/go-controller/pkg/testing/libovsdb/ops.go @@ -6,7 +6,7 @@ import ( "fmt" "hash/fnv" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" ) diff --git a/go-controller/pkg/util/ovs.go b/go-controller/pkg/util/ovs.go index ff21e828db..7c1028995a 100644 --- a/go-controller/pkg/util/ovs.go +++ b/go-controller/pkg/util/ovs.go @@ -377,7 +377,7 @@ func RunOVNAppctlWithTimeout(timeout int, args ...string) (string, string, error // Run the ovn-ctl command and retry if "Connection refused" // poll waitng for service to become available -// FIXME: Remove when https://github.com/ovn-org/libovsdb/issues/235 is fixed +// FIXME: Remove when https://github.com/ovn-kubernetes/libovsdb/issues/235 is fixed func runOVNretry(cmdPath string, envVars []string, args ...string) (*bytes.Buffer, *bytes.Buffer, error) { retriesLeft := ovnCmdRetryCount @@ -434,14 +434,14 @@ func getNbOVSDBArgs(command string, args ...string) []string { } // RunOVNNbctlWithTimeout runs command via ovn-nbctl with a 
specific timeout -// FIXME: Remove when https://github.com/ovn-org/libovsdb/issues/235 is fixed +// FIXME: Remove when https://github.com/ovn-kubernetes/libovsdb/issues/235 is fixed func RunOVNNbctlWithTimeout(timeout int, args ...string) (string, string, error) { stdout, stderr, err := RunOVNNbctlRawOutput(timeout, args...) return strings.Trim(strings.TrimSpace(stdout), "\""), stderr, err } // RunOVNNbctlRawOutput returns the output with no trimming or other string manipulation -// FIXME: Remove when https://github.com/ovn-org/libovsdb/issues/235 is fixed +// FIXME: Remove when https://github.com/ovn-kubernetes/libovsdb/issues/235 is fixed func RunOVNNbctlRawOutput(timeout int, args ...string) (string, string, error) { cmdArgs, envVars := getNbctlArgsAndEnv(timeout, args...) stdout, stderr, err := runOVNretry(runner.nbctlPath, envVars, cmdArgs...) @@ -449,13 +449,13 @@ func RunOVNNbctlRawOutput(timeout int, args ...string) (string, string, error) { } // RunOVNNbctl runs a command via ovn-nbctl. -// FIXME: Remove when https://github.com/ovn-org/libovsdb/issues/235 is fixed +// FIXME: Remove when https://github.com/ovn-kubernetes/libovsdb/issues/235 is fixed func RunOVNNbctl(args ...string) (string, string, error) { return RunOVNNbctlWithTimeout(ovsCommandTimeout, args...) } // RunOVNSbctlWithTimeout runs command via ovn-sbctl with a specific timeout -// FIXME: Remove when https://github.com/ovn-org/libovsdb/issues/235 is fixed +// FIXME: Remove when https://github.com/ovn-kubernetes/libovsdb/issues/235 is fixed func RunOVNSbctlWithTimeout(timeout int, args ...string) (string, string, error) { var cmdArgs []string @@ -499,7 +499,7 @@ func RunOVSDBClientOVNNB(command string, args ...string) (string, string, error) } // RunOVNSbctl runs a command via ovn-sbctl. 
-// FIXME: Remove when https://github.com/ovn-org/libovsdb/issues/235 is fixed +// FIXME: Remove when https://github.com/ovn-kubernetes/libovsdb/issues/235 is fixed func RunOVNSbctl(args ...string) (string, string, error) { return RunOVNSbctlWithTimeout(ovsCommandTimeout, args...) } diff --git a/go-controller/pkg/vswitchd/autoattach.go b/go-controller/pkg/vswitchd/autoattach.go index b9655736aa..e54dbba3ae 100644 --- a/go-controller/pkg/vswitchd/autoattach.go +++ b/go-controller/pkg/vswitchd/autoattach.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const AutoAttachTable = "AutoAttach" diff --git a/go-controller/pkg/vswitchd/bridge.go b/go-controller/pkg/vswitchd/bridge.go index 8953faa3f2..14997f995b 100644 --- a/go-controller/pkg/vswitchd/bridge.go +++ b/go-controller/pkg/vswitchd/bridge.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const BridgeTable = "Bridge" diff --git a/go-controller/pkg/vswitchd/controller.go b/go-controller/pkg/vswitchd/controller.go index 1b38c989bf..ff02062eaa 100644 --- a/go-controller/pkg/vswitchd/controller.go +++ b/go-controller/pkg/vswitchd/controller.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ControllerTable = "Controller" diff --git a/go-controller/pkg/vswitchd/ct_timeout_policy.go b/go-controller/pkg/vswitchd/ct_timeout_policy.go index 98bf690498..150db9b2f7 100644 --- a/go-controller/pkg/vswitchd/ct_timeout_policy.go +++ b/go-controller/pkg/vswitchd/ct_timeout_policy.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const CTTimeoutPolicyTable = "CT_Timeout_Policy" diff --git a/go-controller/pkg/vswitchd/ct_zone.go b/go-controller/pkg/vswitchd/ct_zone.go index 4eaba845c4..6868191974 100644 --- 
a/go-controller/pkg/vswitchd/ct_zone.go +++ b/go-controller/pkg/vswitchd/ct_zone.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const CTZoneTable = "CT_Zone" diff --git a/go-controller/pkg/vswitchd/datapath.go b/go-controller/pkg/vswitchd/datapath.go index 71a995f93e..899f5d3531 100644 --- a/go-controller/pkg/vswitchd/datapath.go +++ b/go-controller/pkg/vswitchd/datapath.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const DatapathTable = "Datapath" diff --git a/go-controller/pkg/vswitchd/flow_sample_collector_set.go b/go-controller/pkg/vswitchd/flow_sample_collector_set.go index 2c90f5d438..8c975711a5 100644 --- a/go-controller/pkg/vswitchd/flow_sample_collector_set.go +++ b/go-controller/pkg/vswitchd/flow_sample_collector_set.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const FlowSampleCollectorSetTable = "Flow_Sample_Collector_Set" diff --git a/go-controller/pkg/vswitchd/flow_table.go b/go-controller/pkg/vswitchd/flow_table.go index 42d49d2f58..911b6fbb1d 100644 --- a/go-controller/pkg/vswitchd/flow_table.go +++ b/go-controller/pkg/vswitchd/flow_table.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const FlowTableTable = "Flow_Table" diff --git a/go-controller/pkg/vswitchd/interface.go b/go-controller/pkg/vswitchd/interface.go index e6f67ba9c7..6f89cc5d1a 100644 --- a/go-controller/pkg/vswitchd/interface.go +++ b/go-controller/pkg/vswitchd/interface.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const InterfaceTable = "Interface" diff --git a/go-controller/pkg/vswitchd/ipfix.go b/go-controller/pkg/vswitchd/ipfix.go index 72b5d3915c..8ea91c8fd1 
100644 --- a/go-controller/pkg/vswitchd/ipfix.go +++ b/go-controller/pkg/vswitchd/ipfix.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const IPFIXTable = "IPFIX" diff --git a/go-controller/pkg/vswitchd/manager.go b/go-controller/pkg/vswitchd/manager.go index ff1df96caa..45a9dcb609 100644 --- a/go-controller/pkg/vswitchd/manager.go +++ b/go-controller/pkg/vswitchd/manager.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ManagerTable = "Manager" diff --git a/go-controller/pkg/vswitchd/mirror.go b/go-controller/pkg/vswitchd/mirror.go index 044455d253..2bab171097 100644 --- a/go-controller/pkg/vswitchd/mirror.go +++ b/go-controller/pkg/vswitchd/mirror.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const MirrorTable = "Mirror" diff --git a/go-controller/pkg/vswitchd/model.go b/go-controller/pkg/vswitchd/model.go index c862f04277..20b8d0cc94 100644 --- a/go-controller/pkg/vswitchd/model.go +++ b/go-controller/pkg/vswitchd/model.go @@ -6,8 +6,8 @@ package vswitchd import ( "encoding/json" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // FullDatabaseModel returns the DatabaseModel object to be used in libovsdb diff --git a/go-controller/pkg/vswitchd/netflow.go b/go-controller/pkg/vswitchd/netflow.go index f958587044..d1f05029fd 100644 --- a/go-controller/pkg/vswitchd/netflow.go +++ b/go-controller/pkg/vswitchd/netflow.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const NetFlowTable = "NetFlow" diff --git a/go-controller/pkg/vswitchd/open_vswitch.go b/go-controller/pkg/vswitchd/open_vswitch.go index 
e8ea481d5b..e8a1456fe9 100644 --- a/go-controller/pkg/vswitchd/open_vswitch.go +++ b/go-controller/pkg/vswitchd/open_vswitch.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const OpenvSwitchTable = "Open_vSwitch" diff --git a/go-controller/pkg/vswitchd/port.go b/go-controller/pkg/vswitchd/port.go index cf0ba96153..6aa3350c93 100644 --- a/go-controller/pkg/vswitchd/port.go +++ b/go-controller/pkg/vswitchd/port.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const PortTable = "Port" diff --git a/go-controller/pkg/vswitchd/qos.go b/go-controller/pkg/vswitchd/qos.go index aa1c9dd004..0ac14541d9 100644 --- a/go-controller/pkg/vswitchd/qos.go +++ b/go-controller/pkg/vswitchd/qos.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const QoSTable = "QoS" diff --git a/go-controller/pkg/vswitchd/queue.go b/go-controller/pkg/vswitchd/queue.go index e8615e9cf7..60094eb8c2 100644 --- a/go-controller/pkg/vswitchd/queue.go +++ b/go-controller/pkg/vswitchd/queue.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const QueueTable = "Queue" diff --git a/go-controller/pkg/vswitchd/sflow.go b/go-controller/pkg/vswitchd/sflow.go index fcbcc8569e..58841d7877 100644 --- a/go-controller/pkg/vswitchd/sflow.go +++ b/go-controller/pkg/vswitchd/sflow.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const SFlowTable = "sFlow" diff --git a/go-controller/pkg/vswitchd/ssl.go b/go-controller/pkg/vswitchd/ssl.go index 79c4b1bad4..84dfbd1f33 100644 --- a/go-controller/pkg/vswitchd/ssl.go +++ b/go-controller/pkg/vswitchd/ssl.go @@ -3,7 +3,7 @@ package vswitchd -import 
"github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const SSLTable = "SSL" diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/LICENSE b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/LICENSE similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/LICENSE rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/LICENSE diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/NOTICE b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/NOTICE similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/NOTICE rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/NOTICE diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/cache/cache.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/cache/cache.go similarity index 99% rename from go-controller/vendor/github.com/ovn-org/libovsdb/cache/cache.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/cache/cache.go index 0b1e09e721..ffe871fd3e 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/cache/cache.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/cache/cache.go @@ -15,10 +15,10 @@ import ( "github.com/go-logr/logr" "github.com/go-logr/stdr" - "github.com/ovn-org/libovsdb/mapper" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" - "github.com/ovn-org/libovsdb/updates" + "github.com/ovn-kubernetes/libovsdb/mapper" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/updates" ) const ( diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/cache/doc.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/cache/doc.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/cache/doc.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/cache/doc.go diff --git 
a/go-controller/vendor/github.com/ovn-org/libovsdb/cache/uuidset.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/cache/uuidset.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/cache/uuidset.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/cache/uuidset.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/client/api.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/api.go similarity index 99% rename from go-controller/vendor/github.com/ovn-org/libovsdb/client/api.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/api.go index 4977589442..f6a8d6fb34 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/client/api.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/api.go @@ -7,9 +7,9 @@ import ( "reflect" "github.com/go-logr/logr" - "github.com/ovn-org/libovsdb/cache" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/cache" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // API defines basic operations to interact with the database diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/client/api_test_model.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/api_test_model.go similarity index 98% rename from go-controller/vendor/github.com/ovn-org/libovsdb/client/api_test_model.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/api_test_model.go index 36ea476e08..7a97b6d08c 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/client/api_test_model.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/api_test_model.go @@ -4,9 +4,9 @@ import ( "encoding/json" "testing" - "github.com/ovn-org/libovsdb/cache" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/cache" + 
"github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/stretchr/testify/assert" ) diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/client/client.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/client.go similarity index 99% rename from go-controller/vendor/github.com/ovn-org/libovsdb/client/client.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/client.go index 10ea757ec7..3926ad6ddf 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/client/client.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/client.go @@ -20,11 +20,11 @@ import ( "github.com/cenkalti/rpc2/jsonrpc" "github.com/go-logr/logr" "github.com/go-logr/stdr" - "github.com/ovn-org/libovsdb/cache" - "github.com/ovn-org/libovsdb/mapper" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" - "github.com/ovn-org/libovsdb/ovsdb/serverdb" + "github.com/ovn-kubernetes/libovsdb/cache" + "github.com/ovn-kubernetes/libovsdb/mapper" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb" ) // Constants defined for libovsdb diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/client/condition.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/condition.go similarity index 97% rename from go-controller/vendor/github.com/ovn-org/libovsdb/client/condition.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/condition.go index 1dfabda02e..1269339cea 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/client/condition.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/condition.go @@ -4,10 +4,10 @@ import ( "fmt" "reflect" - "github.com/ovn-org/libovsdb/cache" - "github.com/ovn-org/libovsdb/mapper" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + 
"github.com/ovn-kubernetes/libovsdb/cache" + "github.com/ovn-kubernetes/libovsdb/mapper" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // Conditional is the interface used by the ConditionalAPI to match on cache objects diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/client/config.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/config.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/client/config.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/config.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/client/doc.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/doc.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/client/doc.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/doc.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/client/metrics.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/metrics.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/client/metrics.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/metrics.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/client/monitor.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/monitor.go similarity index 97% rename from go-controller/vendor/github.com/ovn-org/libovsdb/client/monitor.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/monitor.go index 4a0270a87a..767a4cf3d6 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/client/monitor.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/monitor.go @@ -5,8 +5,8 @@ import ( "reflect" "github.com/google/uuid" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/model" + 
"github.com/ovn-kubernetes/libovsdb/ovsdb" ) const emptyUUID = "00000000-0000-0000-0000-000000000000" diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/client/options.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/options.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/client/options.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/options.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/database/database.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/database.go similarity index 93% rename from go-controller/vendor/github.com/ovn-org/libovsdb/database/database.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/database.go index 12f1222f19..9bdb69568b 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/database/database.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/database.go @@ -2,8 +2,8 @@ package database import ( "github.com/google/uuid" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // Database abstracts a database that a server can use to store and transact data diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/database/doc.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/doc.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/database/doc.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/doc.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/database/inmemory/doc.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/inmemory/doc.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/database/inmemory/doc.go rename to 
go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/inmemory/doc.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/database/inmemory/inmemory.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/inmemory/inmemory.go similarity index 93% rename from go-controller/vendor/github.com/ovn-org/libovsdb/database/inmemory/inmemory.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/inmemory/inmemory.go index 6c1dce9e79..763dcd7fd0 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/database/inmemory/inmemory.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/inmemory/inmemory.go @@ -9,11 +9,11 @@ import ( "github.com/go-logr/logr" "github.com/go-logr/stdr" "github.com/google/uuid" - "github.com/ovn-org/libovsdb/cache" - dbase "github.com/ovn-org/libovsdb/database" - "github.com/ovn-org/libovsdb/database/transaction" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/cache" + dbase "github.com/ovn-kubernetes/libovsdb/database" + "github.com/ovn-kubernetes/libovsdb/database/transaction" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) type inMemoryDatabase struct { diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/database/references.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/references.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/database/references.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/references.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/database/transaction/doc.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/transaction/doc.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/database/transaction/doc.go rename to 
go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/transaction/doc.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/database/transaction/errors.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/transaction/errors.go similarity index 91% rename from go-controller/vendor/github.com/ovn-org/libovsdb/database/transaction/errors.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/transaction/errors.go index 35e47c7294..204a7f544a 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/database/transaction/errors.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/transaction/errors.go @@ -3,7 +3,7 @@ package transaction import ( "fmt" - "github.com/ovn-org/libovsdb/cache" + "github.com/ovn-kubernetes/libovsdb/cache" ) func newIndexExistsDetails(err cache.ErrIndexExists) string { diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/database/transaction/transaction.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/transaction/transaction.go similarity index 98% rename from go-controller/vendor/github.com/ovn-org/libovsdb/database/transaction/transaction.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/transaction/transaction.go index 69736d0048..77b8e920c0 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/database/transaction/transaction.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/transaction/transaction.go @@ -7,11 +7,11 @@ import ( "github.com/go-logr/logr" "github.com/google/uuid" - "github.com/ovn-org/libovsdb/cache" - "github.com/ovn-org/libovsdb/database" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" - "github.com/ovn-org/libovsdb/updates" + "github.com/ovn-kubernetes/libovsdb/cache" + "github.com/ovn-kubernetes/libovsdb/database" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" + 
"github.com/ovn-kubernetes/libovsdb/updates" ) type Transaction struct { diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/mapper/info.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/mapper/info.go similarity index 99% rename from go-controller/vendor/github.com/ovn-org/libovsdb/mapper/info.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/mapper/info.go index 8ac436c790..0e24ef25ec 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/mapper/info.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/mapper/info.go @@ -4,7 +4,7 @@ import ( "fmt" "reflect" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // ErrColumnNotFound is an error that can occur when the column does not exist for a table diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/mapper/mapper.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/mapper/mapper.go similarity index 99% rename from go-controller/vendor/github.com/ovn-org/libovsdb/mapper/mapper.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/mapper/mapper.go index 5ca7a412bb..24ce7b3b8c 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/mapper/mapper.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/mapper/mapper.go @@ -4,7 +4,7 @@ import ( "fmt" "reflect" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // Mapper offers functions to interact with libovsdb through user-provided native structs. 
diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/model/client.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/model/client.go similarity index 98% rename from go-controller/vendor/github.com/ovn-org/libovsdb/model/client.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/model/client.go index 5eb686244a..e8a39260e9 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/model/client.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/model/client.go @@ -4,8 +4,8 @@ import ( "fmt" "reflect" - "github.com/ovn-org/libovsdb/mapper" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/mapper" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // ColumnKey addresses a column and optionally a key within a column diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/model/database.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/model/database.go similarity index 97% rename from go-controller/vendor/github.com/ovn-org/libovsdb/model/database.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/model/database.go index 0857d903f3..30ccff67b1 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/model/database.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/model/database.go @@ -4,8 +4,8 @@ import ( "fmt" "reflect" - "github.com/ovn-org/libovsdb/mapper" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/mapper" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // A DatabaseModel represents libovsdb's metadata about the database. 
diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/model/model.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/model/model.go similarity index 98% rename from go-controller/vendor/github.com/ovn-org/libovsdb/model/model.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/model/model.go index c8575f5bf3..249db69921 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/model/model.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/model/model.go @@ -5,7 +5,7 @@ import ( "fmt" "reflect" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // A Model is the base interface used to build Database Models. It is used diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/bindings.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/bindings.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/bindings.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/bindings.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/condition.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/condition.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/condition.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/condition.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/error.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/error.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/error.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/error.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/map.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/map.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/map.go rename to 
go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/map.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/monitor_select.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/monitor_select.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/monitor_select.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/monitor_select.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/mutation.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/mutation.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/mutation.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/mutation.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/named_uuid.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/named_uuid.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/named_uuid.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/named_uuid.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/notation.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/notation.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/notation.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/notation.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/row.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/row.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/row.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/row.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/rpc.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/rpc.go similarity index 100% rename from 
go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/rpc.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/rpc.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/schema.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/schema.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/schema.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/schema.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/.gitignore b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/.gitignore similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/.gitignore rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/.gitignore diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/database.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/database.go similarity index 98% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/database.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/database.go index 274a7164fe..a93ca0d86f 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/database.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/database.go @@ -3,7 +3,7 @@ package serverdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const DatabaseTable = "Database" diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/gen.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/gen.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/gen.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/gen.go diff --git 
a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/model.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/model.go similarity index 95% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/model.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/model.go index 3c117faa26..c0aeeb74c3 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/model.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/model.go @@ -6,8 +6,8 @@ package serverdb import ( "encoding/json" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // FullDatabaseModel returns the DatabaseModel object to be used in libovsdb diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/set.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/set.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/set.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/set.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/update3.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/update3.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/update3.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/update3.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/updates.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/updates.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/updates.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/updates.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/updates2.go 
b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/updates2.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/updates2.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/updates2.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/uuid.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/uuid.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/uuid.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/uuid.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/server/doc.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/server/doc.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/server/doc.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/server/doc.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/server/monitor.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/server/monitor.go similarity index 98% rename from go-controller/vendor/github.com/ovn-org/libovsdb/server/monitor.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/server/monitor.go index 2dedf992b0..305769a212 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/server/monitor.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/server/monitor.go @@ -7,8 +7,8 @@ import ( "github.com/cenkalti/rpc2" "github.com/google/uuid" - "github.com/ovn-org/libovsdb/database" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/database" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // connectionMonitors maps a connection to a map or monitors diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/server/server.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/server/server.go similarity index 98% rename from 
go-controller/vendor/github.com/ovn-org/libovsdb/server/server.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/server/server.go index ec60ea5d20..830560fc36 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/server/server.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/server/server.go @@ -14,9 +14,9 @@ import ( "github.com/go-logr/logr" "github.com/go-logr/stdr" "github.com/google/uuid" - "github.com/ovn-org/libovsdb/database" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/database" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // OvsdbServer is an ovsdb server diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/updates/difference.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/difference.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/updates/difference.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/difference.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/updates/doc.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/doc.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/updates/doc.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/doc.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/updates/merge.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/merge.go similarity index 98% rename from go-controller/vendor/github.com/ovn-org/libovsdb/updates/merge.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/merge.go index 562f226232..82d78239f6 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/updates/merge.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/merge.go @@ -4,7 +4,7 @@ import ( "fmt" "reflect" - 
"github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) func merge(ts *ovsdb.TableSchema, a, b modelUpdate) (modelUpdate, error) { diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/updates/mutate.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/mutate.go similarity index 99% rename from go-controller/vendor/github.com/ovn-org/libovsdb/updates/mutate.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/mutate.go index 1d87737fcd..b91ef85341 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/updates/mutate.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/mutate.go @@ -3,7 +3,7 @@ package updates import ( "reflect" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) func removeFromSlice(a, b reflect.Value) (reflect.Value, bool) { diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/updates/references.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/references.go similarity index 99% rename from go-controller/vendor/github.com/ovn-org/libovsdb/updates/references.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/references.go index 938d02aae9..4d998e0511 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/updates/references.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/references.go @@ -3,9 +3,9 @@ package updates import ( "fmt" - "github.com/ovn-org/libovsdb/database" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/database" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // ReferenceProvider should be implemented by a database that tracks references diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/updates/updates.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/updates.go similarity index 
99% rename from go-controller/vendor/github.com/ovn-org/libovsdb/updates/updates.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/updates.go index 4ff2363a05..00fbcccffa 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/updates/updates.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/updates.go @@ -4,9 +4,9 @@ import ( "fmt" "reflect" - "github.com/ovn-org/libovsdb/mapper" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/mapper" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) type rowUpdate2 = ovsdb.RowUpdate2 diff --git a/go-controller/vendor/modules.txt b/go-controller/vendor/modules.txt index a0ecf2cb4a..5732a53975 100644 --- a/go-controller/vendor/modules.txt +++ b/go-controller/vendor/modules.txt @@ -385,19 +385,19 @@ github.com/openshift/client-go/network/listers/network/v1alpha1 # github.com/openshift/custom-resource-status v1.1.2 ## explicit; go 1.12 github.com/openshift/custom-resource-status/conditions/v1 -# github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20 +# github.com/ovn-kubernetes/libovsdb v0.8.0 ## explicit; go 1.18 -github.com/ovn-org/libovsdb/cache -github.com/ovn-org/libovsdb/client -github.com/ovn-org/libovsdb/database -github.com/ovn-org/libovsdb/database/inmemory -github.com/ovn-org/libovsdb/database/transaction -github.com/ovn-org/libovsdb/mapper -github.com/ovn-org/libovsdb/model -github.com/ovn-org/libovsdb/ovsdb -github.com/ovn-org/libovsdb/ovsdb/serverdb -github.com/ovn-org/libovsdb/server -github.com/ovn-org/libovsdb/updates +github.com/ovn-kubernetes/libovsdb/cache +github.com/ovn-kubernetes/libovsdb/client +github.com/ovn-kubernetes/libovsdb/database +github.com/ovn-kubernetes/libovsdb/database/inmemory +github.com/ovn-kubernetes/libovsdb/database/transaction +github.com/ovn-kubernetes/libovsdb/mapper +github.com/ovn-kubernetes/libovsdb/model 
+github.com/ovn-kubernetes/libovsdb/ovsdb +github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb +github.com/ovn-kubernetes/libovsdb/server +github.com/ovn-kubernetes/libovsdb/updates # github.com/pborman/uuid v1.2.0 ## explicit github.com/pborman/uuid From 0e00ae6d43bd8d976385a47dafb2be364a7ad41d Mon Sep 17 00:00:00 2001 From: Or Mergi Date: Tue, 1 Jul 2025 10:45:02 +0300 Subject: [PATCH 064/181] contrib,kind: Init container runtime binary on cluster deletion On cluster delete operations the container runtime binary (represented by OCI_BIN) is hardcoded. Set OCI_BIN according to env. Signed-off-by: Or Mergi --- contrib/kind.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/contrib/kind.sh b/contrib/kind.sh index 3d8bd0f30e..958c907e68 100755 --- a/contrib/kind.sh +++ b/contrib/kind.sh @@ -42,6 +42,8 @@ function setup_kubectl_bin() { # The root cause is unknown, this also can not be reproduced in Ubuntu 20.04 or # with Fedora32 Cloud, but it does not happen if we clean first the ovn-kubernetes resources. delete() { + OCI_BIN=${KIND_EXPERIMENTAL_PROVIDER:-docker} + if [ "$KIND_INSTALL_METALLB" == true ]; then destroy_metallb fi From 6ec1a4489b479a20306ec6d81f5a4882ab8d52c5 Mon Sep 17 00:00:00 2001 From: Or Mergi Date: Tue, 1 Jul 2025 10:50:56 +0300 Subject: [PATCH 065/181] kind-common, metallb: Avoid hard coding container runtime binary Set OCI_BIN according to env. Some inspect operation that use formatting did not work in podman due to formatting differences comparing to docker. The format string is changes to a form that fits both docker and podman With the new format string, the index keyword is redundant hence removed. 
Signed-off-by: Or Mergi --- contrib/kind-common | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/contrib/kind-common b/contrib/kind-common index e8bfb7be01..2c4b7d445f 100644 --- a/contrib/kind-common +++ b/contrib/kind-common @@ -174,16 +174,16 @@ EOF # Override GOBIN until https://github.com/metallb/metallb/issues/2218 is fixed. GOBIN="" inv dev-env -n ovn -b frr -p bgp -i "${ip_family}" - docker network rm -f clientnet - docker network create --subnet="${METALLB_CLIENT_NET_SUBNET_IPV4}" ${ipv6_network} --driver bridge clientnet - docker network connect clientnet frr + $OCI_BIN network rm -f clientnet + $OCI_BIN network create --subnet="${METALLB_CLIENT_NET_SUBNET_IPV4}" ${ipv6_network} --driver bridge clientnet + $OCI_BIN network connect clientnet frr if [ "$PLATFORM_IPV6_SUPPORT" == true ]; then # Enable IPv6 forwarding in FRR - docker exec frr sysctl -w net.ipv6.conf.all.forwarding=1 + $OCI_BIN exec frr sysctl -w net.ipv6.conf.all.forwarding=1 fi # Note: this image let's us use it also for creating load balancer backends that can send big packets - docker rm -f lbclient - docker run --cap-add NET_ADMIN --user 0 -d --network clientnet --rm --name lbclient quay.io/itssurya/dev-images:metallb-lbservice + $OCI_BIN rm -f lbclient + $OCI_BIN run --cap-add NET_ADMIN --user 0 -d --network clientnet --rm --name lbclient quay.io/itssurya/dev-images:metallb-lbservice popd delete_metallb_dir @@ -197,18 +197,18 @@ EOF kubectl label node "$n" node.kubernetes.io/exclude-from-external-load-balancers- done - kind_network_v4=$(docker inspect -f '{{index .NetworkSettings.Networks "kind" "IPAddress"}}' frr) + kind_network_v4=$($OCI_BIN inspect -f '{{.NetworkSettings.Networks.kind.IPAddress}}' frr) echo "FRR kind network IPv4: ${kind_network_v4}" - kind_network_v6=$(docker inspect -f '{{index .NetworkSettings.Networks "kind" "GlobalIPv6Address"}}' frr) + kind_network_v6=$($OCI_BIN inspect -f 
'{{.NetworkSettings.Networks.kind.GlobalIPv6Address}}' frr) echo "FRR kind network IPv6: ${kind_network_v6}" local client_network_v4 client_network_v6 - client_network_v4=$(docker inspect -f '{{index .NetworkSettings.Networks "clientnet" "IPAddress"}}' frr) + client_network_v4=$($OCI_BIN inspect -f '{{.NetworkSettings.Networks.clientnet.IPAddress}}' frr) echo "FRR client network IPv4: ${client_network_v4}" - client_network_v6=$(docker inspect -f '{{index .NetworkSettings.Networks "clientnet" "GlobalIPv6Address"}}' frr) + client_network_v6=$($OCI_BIN inspect -f '{{.NetworkSettings.Networks.clientnet.GlobalIPv6Address}}' frr) echo "FRR client network IPv6: ${client_network_v6}" local client_subnets - client_subnets=$(docker network inspect clientnet -f '{{range .IPAM.Config}}{{.Subnet}}#{{end}}') + client_subnets=$($OCI_BIN network inspect clientnet -f '{{range .IPAM.Config}}{{.Subnet}}#{{end}}') echo "${client_subnets}" local client_subnets_v4 client_subnets_v6 client_subnets_v4=$(echo "${client_subnets}" | cut -d '#' -f 1) @@ -219,10 +219,10 @@ EOF KIND_NODES=$(kind_get_nodes) for n in ${KIND_NODES}; do if [ "$PLATFORM_IPV4_SUPPORT" == true ]; then - docker exec "${n}" ip route add "${client_subnets_v4}" via "${kind_network_v4}" + $OCI_BIN exec "${n}" ip route add "${client_subnets_v4}" via "${kind_network_v4}" fi if [ "$PLATFORM_IPV6_SUPPORT" == true ]; then - docker exec "${n}" ip -6 route add "${client_subnets_v6}" via "${kind_network_v6}" + $OCI_BIN exec "${n}" ip -6 route add "${client_subnets_v6}" via "${kind_network_v6}" fi done @@ -230,10 +230,10 @@ EOF # one svcVIP (192.168.10.0/fc00:f853:ccd:e799::) is more than enough since at a time we will only # have one load balancer service if [ "$PLATFORM_IPV4_SUPPORT" == true ]; then - docker exec lbclient ip route add 192.168.10.0 via "${client_network_v4}" dev eth0 + $OCI_BIN exec lbclient ip route add 192.168.10.0 via "${client_network_v4}" dev eth0 fi if [ "$PLATFORM_IPV6_SUPPORT" == true ]; then - docker exec 
lbclient ip -6 route add fc00:f853:ccd:e799:: via "${client_network_v6}" dev eth0 + $OCI_BIN exec lbclient ip -6 route add fc00:f853:ccd:e799:: via "${client_network_v6}" dev eth0 fi sleep 30 } @@ -254,14 +254,14 @@ install_plugins() { } destroy_metallb() { - if docker ps --format '{{.Names}}' | grep -Eq '^lbclient$'; then - docker stop lbclient + if $OCI_BIN ps --format '{{.Names}}' | grep -Eq '^lbclient$'; then + $OCI_BIN stop lbclient fi - if docker ps --format '{{.Names}}' | grep -Eq '^frr$'; then - docker stop frr + if $OCI_BIN ps --format '{{.Names}}' | grep -Eq '^frr$'; then + $OCI_BIN stop frr fi - if docker network ls --format '{{.Name}}' | grep -q '^clientnet$'; then - docker network rm clientnet + if $OCI_BIN network ls --format '{{.Name}}' | grep -q '^clientnet$'; then + $OCI_BIN network rm clientnet fi delete_metallb_dir } From 7d1999188349af5648438fb9518d0b6bb3676c75 Mon Sep 17 00:00:00 2001 From: Or Mergi Date: Tue, 1 Jul 2025 10:54:57 +0300 Subject: [PATCH 066/181] kind-common, k8s-frr, bgp: Avoid hard coding container runtime binary Set OCI_BIN according to env. Some inspect operation that use formatting did not work in podman due to formatting differences comparing to docker. The format string is changes to a form that fits both docker and podman. With the new format string, the index keyword is redundant hence removed. 
Signed-off-by: Or Mergi --- contrib/kind-common | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/contrib/kind-common b/contrib/kind-common index 2c4b7d445f..bbb7cda7e1 100644 --- a/contrib/kind-common +++ b/contrib/kind-common @@ -708,7 +708,7 @@ deploy_frr_external_container() { popd || exit 1 if [ "$PLATFORM_IPV6_SUPPORT" == true ]; then # Enable IPv6 forwarding in FRR - docker exec frr sysctl -w net.ipv6.conf.all.forwarding=1 + $OCI_BIN exec frr sysctl -w net.ipv6.conf.all.forwarding=1 fi } @@ -735,40 +735,40 @@ deploy_bgp_external_server() { ip_family="ipv4" ipv6_network="" fi - docker rm -f bgpserver - docker network rm -f bgpnet - docker network create --subnet="${BGP_SERVER_NET_SUBNET_IPV4}" ${ipv6_network} --driver bridge bgpnet - docker network connect bgpnet frr - docker run --cap-add NET_ADMIN --user 0 -d --network bgpnet --rm --name bgpserver -p 8080:8080 registry.k8s.io/e2e-test-images/agnhost:2.45 netexec + $OCI_BIN rm -f bgpserver + $OCI_BIN network rm -f bgpnet + $OCI_BIN network create --subnet="${BGP_SERVER_NET_SUBNET_IPV4}" ${ipv6_network} --driver bridge bgpnet + $OCI_BIN network connect bgpnet frr + $OCI_BIN run --cap-add NET_ADMIN --user 0 -d --network bgpnet --rm --name bgpserver -p 8080:8080 registry.k8s.io/e2e-test-images/agnhost:2.45 netexec # let's make the bgp external server have its default route towards FRR router so that we don't need to add routes during tests back to the pods in the # cluster for return traffic local bgp_network_frr_v4 bgp_network_frr_v6 - bgp_network_frr_v4=$($OCI_BIN inspect -f '{{index .NetworkSettings.Networks "bgpnet" "IPAddress"}}' frr) + bgp_network_frr_v4=$($OCI_BIN inspect -f '{{.NetworkSettings.Networks.bgpnet.IPAddress}}' frr) echo "FRR kind network IPv4: ${bgp_network_frr_v4}" $OCI_BIN exec bgpserver ip route replace default via "$bgp_network_frr_v4" if [ "$PLATFORM_IPV6_SUPPORT" == true ] ; then - bgp_network_frr_v6=$($OCI_BIN inspect -f 
'{{index .NetworkSettings.Networks "bgpnet" "GlobalIPv6Address"}}' frr) + bgp_network_frr_v6=$($OCI_BIN inspect -f '{{.NetworkSettings.Networks.bgpnet.GlobalIPv6Address}}' frr) echo "FRR kind network IPv6: ${bgp_network_frr_v6}" $OCI_BIN exec bgpserver ip -6 route replace default via "$bgp_network_frr_v6" fi # disable the default route to make sure the container only routes accross # directly connected or learnt networks (doing this at the very end since # docker changes the routing table when a new network is connected) - docker exec frr ip route delete default - docker exec frr ip route - docker exec frr ip -6 route delete default - docker exec frr ip -6 route + $OCI_BIN exec frr ip route delete default + $OCI_BIN exec frr ip route + $OCI_BIN exec frr ip -6 route delete default + $OCI_BIN exec frr ip -6 route } destroy_bgp() { - if docker ps --format '{{.Names}}' | grep -Eq '^bgpserver$'; then - docker stop bgpserver + if $OCI_BIN ps --format '{{.Names}}' | grep -Eq '^bgpserver$'; then + $OCI_BIN stop bgpserver fi - if docker ps --format '{{.Names}}' | grep -Eq '^frr$'; then - docker stop frr + if $OCI_BIN ps --format '{{.Names}}' | grep -Eq '^frr$'; then + $OCI_BIN stop frr fi - if docker network ls --format '{{.Name}}' | grep -q '^bgpnet$'; then - docker network rm bgpnet + if $OCI_BIN network ls --format '{{.Name}}' | grep -q '^bgpnet$'; then + $OCI_BIN network rm bgpnet fi } @@ -807,7 +807,7 @@ install_ffr_k8s() { echo "Attempting to reach frr-k8s webhook" kind export kubeconfig --name ovn while true; do -docker exec ovn-control-plane curl -ksS --connect-timeout 0.1 https://$(kubectl get svc -n frr-k8s-system frr-k8s-webhook-service -o jsonpath='{.spec.clusterIP}') +$OCI_BIN exec ovn-control-plane curl -ksS --connect-timeout 0.1 https://$(kubectl get svc -n frr-k8s-system frr-k8s-webhook-service -o jsonpath='{.spec.clusterIP}') [ \$? -eq 0 ] && exit 0 echo "Couldn't reach frr-k8s webhook, trying in 1s..." 
sleep 1s From 44b7719615a65f8cfc474d682be1804dde141509 Mon Sep 17 00:00:00 2001 From: Or Mergi Date: Tue, 1 Jul 2025 23:39:39 +0300 Subject: [PATCH 067/181] e2e: Enable testing BGP using podman When using podman, BGP test suite fails due to checks against the env container runtime which are not compatible with podman: - Inspecting network objects is not compatible due to diffrences in how podman and docker persist network objects - List containers using JSON format To overcome the above, change network inspect operation and container list using format to a form that compatible with bot docker and podman. Signed-off-by: Or Mergi --- test/e2e/containerengine/container_engine.go | 10 ++++++++++ test/e2e/infraprovider/providers/kind/kind.go | 10 ++++++---- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/test/e2e/containerengine/container_engine.go b/test/e2e/containerengine/container_engine.go index 12d96829b2..a9281fbb48 100644 --- a/test/e2e/containerengine/container_engine.go +++ b/test/e2e/containerengine/container_engine.go @@ -12,6 +12,16 @@ func (ce ContainerEngine) String() string { return string(ce) } +func (ce ContainerEngine) NetworkCIDRsFmt() string { + if ce == Podman { + return "{{json .Subnets }}" + } + if ce == Docker { + return "{{json .IPAM.Config }}" + } + return "" +} + const ( Docker ContainerEngine = "docker" Podman ContainerEngine = "podman" diff --git a/test/e2e/infraprovider/providers/kind/kind.go b/test/e2e/infraprovider/providers/kind/kind.go index 9e1fe63e47..f58a5bc746 100644 --- a/test/e2e/infraprovider/providers/kind/kind.go +++ b/test/e2e/infraprovider/providers/kind/kind.go @@ -414,7 +414,6 @@ func (c *contextKind) cleanUp() error { const ( nameFormat = "{{.Name}}" - inspectNetworkIPAMJSON = "{{json .IPAM.Config }}" inspectNetworkIPv4GWKeyStr = "{{ .NetworkSettings.Networks.%s.Gateway }}" inspectNetworkIPv4AddrKeyStr = "{{ .NetworkSettings.Networks.%s.IPAddress }}" inspectNetworkIPv4PrefixKeyStr = "{{ 
.NetworkSettings.Networks.%s.IPPrefixLen }}" @@ -437,7 +436,7 @@ func isNetworkAttachedToContainer(networkName, containerName string) bool { func doesContainerNameExist(name string) bool { // check if it is present before retrieving logs - stdOut, err := exec.Command(containerengine.Get().String(), "ps", "-f", fmt.Sprintf("Name=^%s$", name), "-q").CombinedOutput() + stdOut, err := exec.Command(containerengine.Get().String(), "ps", "-f", fmt.Sprintf("name=^%s$", name), "-q").CombinedOutput() if err != nil { panic(fmt.Sprintf("failed to check if external container (%s) exists: %v (%s)", name, err, stdOut)) } @@ -466,13 +465,16 @@ func getNetwork(networkName string) (containerEngineNetwork, error) { return n, api.NotFound } configs := make([]containerEngineNetworkConfig, 0, 1) - dataBytes, err := exec.Command(containerengine.Get().String(), "network", "inspect", "-f", inspectNetworkIPAMJSON, networkName).CombinedOutput() + + ce := containerengine.Get() + netConfFmt := ce.NetworkCIDRsFmt() + dataBytes, err := exec.Command(ce.String(), "network", "inspect", "-f", netConfFmt, networkName).CombinedOutput() if err != nil { return n, fmt.Errorf("failed to extract network %q data: %v", networkName, err) } dataBytes = []byte(strings.Trim(string(dataBytes), "\n")) if err = json.Unmarshal(dataBytes, &configs); err != nil { - return n, fmt.Errorf("failed to unmarshall network %q configuration using network inspect -f %q: %v", networkName, inspectNetworkIPAMJSON, err) + return n, fmt.Errorf("failed to unmarshall network %q configuration using network inspect -f %q: %v", networkName, netConfFmt, err) } if len(configs) == 0 { return n, fmt.Errorf("failed to find any IPAM configuration for network %s", networkName) From 7588fd3a66c65fc05fdc870b9dd1c7c8de9ba0d3 Mon Sep 17 00:00:00 2001 From: Martin Kennelly Date: Wed, 11 Jun 2025 10:12:19 +0100 Subject: [PATCH 068/181] EgressIP: fix startup sync to add metadata Initial implementations erroneously assumed a CIDR for NATs logicalIP. 
Also, eip controller expects all OVN constructs that support EIP to have this metadata so if we cannot build this metadata then add dummy data so its cleaned up later by EIP controller. This was not caught by unit tests because the unit test also contained the assumption of only logical IP with no mask. It was not caught by upstream CI because we have no reboot tests. Signed-off-by: Martin Kennelly --- .../logical_router_policy_sync.go | 3 +- .../logical_router_policy_sync_test.go | 2 +- .../ovn/external_ids_syncer/nat/nat_sync.go | 22 ++++++--- .../external_ids_syncer/nat/nat_sync_test.go | 48 +++++++++---------- 4 files changed, 42 insertions(+), 33 deletions(-) diff --git a/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync.go b/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync.go index 01cbd40512..8933e78521 100644 --- a/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync.go +++ b/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync.go @@ -101,7 +101,8 @@ func (syncer *LRPSyncer) syncEgressIPReRoutes() error { podInfo, err := cache.getPod(podIP) if err != nil { klog.Infof("Failed to find Logical Switch Port cache entry for pod IP %s: %v", podIP.String(), err) - continue + // pod not found, add dummy metadata that will be cleaned up by EIP controller sync. 
+ podInfo = podNetInfo{namespace: "UNKNOWN", name: "UNKNOWN"} } ipFamily := getIPFamily(isIPv6) lrp.ExternalIDs = getEgressIPLRPReRouteDbIDs(eipName, podInfo.namespace, podInfo.name, ipFamily, defaultNetworkName, syncer.controllerName).GetExternalIDs() diff --git a/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync_test.go b/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync_test.go index efebfb9c31..da0b0d2ff9 100644 --- a/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync_test.go +++ b/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync_test.go @@ -122,7 +122,7 @@ var _ = ginkgo.Describe("OVN Logical Router Syncer", func() { map[string]string{"name": egressIPName}, defaultNetworkControllerName)}, finalLRPs: []*nbdb.LogicalRouterPolicy{getReRouteLRP(podNamespace, podName, v4PodIPStr, 0, v4IPFamilyValue, v4PodNextHops, - map[string]string{"name": egressIPName}, + getEgressIPLRPReRouteDbIDs(egressIPName, "UNKNOWN", "UNKNOWN", v4IPFamilyValue, defaultNetworkName, defaultNetworkControllerName).GetExternalIDs(), defaultNetworkControllerName)}, v4ClusterSubnets: []*net.IPNet{v4PodClusterSubnet}, v4JoinSubnet: v4JoinSubnet, diff --git a/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync.go b/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync.go index 617bddd411..cf9d433cc8 100644 --- a/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync.go +++ b/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync.go @@ -10,6 +10,7 @@ import ( libovsdbclient "github.com/ovn-org/libovsdb/client" "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" @@ -86,10 +87,10 @@ func 
(n *NATSyncer) syncEgressIPNATs() error { klog.Errorf("Expected NAT %s to contain 'name' as a key within its external IDs", nat.UUID) continue } - podIP, _, err := net.ParseCIDR(nat.LogicalIP) - if err != nil { - klog.Errorf("Failed to process logical IP %q of NAT %s", nat.LogicalIP, nat.UUID) - continue + // for egress IP, the logicalIP does not contain a mask. + podIP := net.ParseIP(nat.LogicalIP) + if podIP == nil { + return fmt.Errorf("failed to process logical IP %q of NAT %s", nat.LogicalIP, nat.UUID) } isV6 := utilsnet.IsIPv6(podIP) var ipFamily egressIPFamilyValue @@ -103,15 +104,15 @@ func (n *NATSyncer) syncEgressIPNATs() error { pod, found = v4PodCache.getPodByIP(podIP) } if !found { - klog.Errorf("Failed to find logical switch port that contains IP address %s", podIP.String()) - continue + // set it to unknown and the egress IP controller syncer will take care of removing it. + pod = podNetInfo{namespace: "UNKNOWN", name: "UNKNOWN"} + ipFamily = getFirstSupportIPFamily() } nat.ExternalIDs = getEgressIPNATDbIDs(eIPName, pod.namespace, pod.name, ipFamily, n.controllerName).GetExternalIDs() ops, err = libovsdbops.UpdateNATOps(n.nbClient, ops, nat) if err != nil { klog.Errorf("Failed to generate NAT ops for NAT %s: %v", nat.UUID, err) } - klog.Infof("## martin found %d nats", len(ops)) } _, err = libovsdbops.TransactAndCheck(n.nbClient, ops) @@ -176,3 +177,10 @@ func getEgressIPNATDbIDs(eIPName, podNamespace, podName string, ipFamily egressI libovsdbops.IPFamilyKey: string(ipFamily), }) } + +func getFirstSupportIPFamily() egressIPFamilyValue { + if config.IPv4Mode { + return ipFamilyValueV4 + } + return ipFamilyValueV6 +} diff --git a/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync_test.go b/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync_test.go index 9c0c9fa18d..58d8b54045 100644 --- a/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync_test.go +++ b/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync_test.go @@ -26,22 +26,22 @@ const 
( egressIP = "10.10.10.10" nat1UUID = "nat-1-UUID" nat2UUID = "nat-2-UUID" - pod1V4CIDRStr = "10.128.0.5/32" - pod1V6CIDRStr = "2001:0000:130F:0000:0000:09C0:876A:130B/128" + pod1V4Str = "10.128.0.5" + pod1V6Str = "2001:0000:130F:0000:0000:09C0:876A:130B" pod1Namespace = "ns1" pod1Name = "pod1" - pod2V4CIDRStr = "10.128.0.6/32" - pod2V6CIDRStr = "2001:0000:130F:0000:0000:09C0:876A:130A/128" + pod2V4Str = "10.128.0.6" + pod2V6Str = "2001:0000:130F:0000:0000:09C0:876A:130A" pod2Namespace = "ns1" pod2Name = "pod2" defaultNetworkControllerName = "default-network-controller" ) var ( - pod1V4IPNet = testing.MustParseIPNet(pod1V4CIDRStr) - pod1V6IPNet = testing.MustParseIPNet(pod1V6CIDRStr) - pod2V4IPNet = testing.MustParseIPNet(pod2V4CIDRStr) - pod2V6IPNet = testing.MustParseIPNet(pod2V6CIDRStr) + pod1V4IP = testing.MustParseIP(pod1V4Str) + pod1V6IP = testing.MustParseIP(pod1V6Str) + pod2V4IP = testing.MustParseIP(pod2V4Str) + pod2V6IP = testing.MustParseIP(pod2V6Str) legacyExtIDs = map[string]string{legacyEIPNameExtIDKey: egressIPName} pod1V4ExtIDs = getEgressIPNATDbIDs(egressIPName, pod1Namespace, pod1Name, ipFamilyValueV4, defaultNetworkControllerName).GetExternalIDs() pod1V6ExtIDs = getEgressIPNATDbIDs(egressIPName, pod1Namespace, pod1Name, ipFamilyValueV6, defaultNetworkControllerName).GetExternalIDs() @@ -54,64 +54,64 @@ var _ = ginkgo.Describe("NAT Syncer", func() { ginkgo.DescribeTable("egress NATs", func(sync natSync) { performTest(defaultNetworkControllerName, sync.initialNATs, sync.finalNATs, sync.pods) }, ginkgo.Entry("converts legacy IPv4 NATs", natSync{ - initialNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V4CIDRStr, egressIP, legacyExtIDs)}, - finalNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V4CIDRStr, egressIP, pod1V4ExtIDs)}, + initialNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V4Str, egressIP, legacyExtIDs)}, + finalNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V4Str, egressIP, pod1V4ExtIDs)}, pods: podsNetInfo{ { - []net.IP{pod1V4IPNet.IP}, + []net.IP{pod1V4IP}, 
pod1Namespace, pod1Name, }, { - []net.IP{pod2V4IPNet.IP}, + []net.IP{pod2V4IP}, pod2Namespace, pod2Name, }, }, }), ginkgo.Entry("converts legacy IPv6 NATs", natSync{ - initialNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V6CIDRStr, egressIP, legacyExtIDs)}, - finalNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V6CIDRStr, egressIP, pod1V6ExtIDs)}, + initialNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V6Str, egressIP, legacyExtIDs)}, + finalNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V6Str, egressIP, pod1V6ExtIDs)}, pods: podsNetInfo{ { - []net.IP{pod1V6IPNet.IP}, + []net.IP{pod1V6IP}, pod1Namespace, pod1Name, }, { - []net.IP{pod2V6IPNet.IP}, + []net.IP{pod2V6IP}, pod2Namespace, pod2Name, }, }, }), ginkgo.Entry("converts legacy dual stack NATs", natSync{ - initialNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V4CIDRStr, egressIP, legacyExtIDs), getSNAT(nat2UUID, pod1V6CIDRStr, egressIP, legacyExtIDs)}, - finalNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V4CIDRStr, egressIP, pod1V4ExtIDs), getSNAT(nat2UUID, pod1V6CIDRStr, egressIP, pod1V6ExtIDs)}, + initialNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V4Str, egressIP, legacyExtIDs), getSNAT(nat2UUID, pod1V6Str, egressIP, legacyExtIDs)}, + finalNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V4Str, egressIP, pod1V4ExtIDs), getSNAT(nat2UUID, pod1V6Str, egressIP, pod1V6ExtIDs)}, pods: podsNetInfo{ { - []net.IP{pod1V4IPNet.IP, pod1V6IPNet.IP}, + []net.IP{pod1V4IP, pod1V6IP}, pod1Namespace, pod1Name, }, { - []net.IP{pod2V4IPNet.IP, pod2V6IPNet.IP}, + []net.IP{pod2V4IP, pod2V6IP}, pod2Namespace, pod2Name, }, }, }), ginkgo.Entry("doesn't alter NAT with correct external IDs", natSync{ - initialNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V6CIDRStr, egressIP, pod1V6ExtIDs)}, - finalNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V6CIDRStr, egressIP, pod1V6ExtIDs)}, + initialNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V6Str, egressIP, pod1V6ExtIDs)}, + finalNATs: []*nbdb.NAT{getSNAT(nat1UUID, pod1V6Str, egressIP, pod1V6ExtIDs)}, pods: podsNetInfo{ { - []net.IP{pod1V4IPNet.IP, 
pod1V6IPNet.IP}, + []net.IP{pod1V4IP, pod1V6IP}, pod1Namespace, pod1Name, }, { - []net.IP{pod2V4IPNet.IP, pod2V6IPNet.IP}, + []net.IP{pod2V4IP, pod2V6IP}, pod2Namespace, pod2Name, }, From 68db55ebec7162b54e100d4ca0ad2b84fd22fe86 Mon Sep 17 00:00:00 2001 From: Martin Kennelly Date: Mon, 7 Jul 2025 11:26:49 +0100 Subject: [PATCH 069/181] EIP OVN startup syncer: fix processing of ovn constructs The startup syncer was removing OVN constructs due to logic bugs introduced when EIP code was refactored for UDN. The are added again when eip controller syncs but this causes interruption. 1. Due to poor naming, enforcement of types and programmer error we were mixing up variables between a pod IP and an EIP IP. See: nodeName, ok := cache.egressIPIPToNodeCache[parsedLogicalIP.String()] parsedLogicalIP is a pod IP and not an EIP IP. 2. When iterating over the existing config for an EIP, we should delete config for LRPs where an EIP doesn't exist. 3. Remove LRPs when a network isnt found Signed-off-by: Martin Kennelly --- go-controller/pkg/ovn/egressip.go | 78 +++++++++++++++++++------------ 1 file changed, 49 insertions(+), 29 deletions(-) diff --git a/go-controller/pkg/ovn/egressip.go b/go-controller/pkg/ovn/egressip.go index d53ba5e633..2bcedbdbec 100644 --- a/go-controller/pkg/ovn/egressip.go +++ b/go-controller/pkg/ovn/egressip.go @@ -1138,6 +1138,8 @@ func (e *EgressIPController) isLocalZoneNode(node *corev1.Node) bool { type egressIPCache struct { // egressIP name -> network name -> cache egressIPNameToPods map[string]map[string]selectedPods + // egressIP name -> to assigned Node names + egressIPNameToAssignedNodes map[string][]string // egressLocalNodes will contain all nodes that are local // to this zone which are serving this egressIP object.. 
// This will help sync SNATs @@ -1154,7 +1156,7 @@ type egressIPCache struct { } type nodeNetworkRedirects struct { - // node name -> network name -> redirect IPs + // network name -> node name -> redirect IPs cache map[string]map[string]redirectIPs } @@ -1600,21 +1602,36 @@ func (e *EgressIPController) syncPodAssignmentCache(egressIPCache egressIPCache) // It also removes stale nexthops from router policies used by EgressIPs. // Upon failure, it may be invoked multiple times in order to avoid a pod restart. func (e *EgressIPController) syncStaleEgressReroutePolicy(cache egressIPCache) error { - for _, networkCache := range cache.egressIPNameToPods { + for eipName, networkCache := range cache.egressIPNameToPods { for networkName, data := range networkCache { logicalRouterPolicyStaleNexthops := []*nbdb.LogicalRouterPolicy{} + // select LRPs scoped to the correct LRP priority, network and EIP name p := func(item *nbdb.LogicalRouterPolicy) bool { if item.Priority != types.EgressIPReroutePriority || item.ExternalIDs[libovsdbops.NetworkKey.String()] != networkName { return false } - egressIPName, _ := getEIPLRPObjK8MetaData(item.ExternalIDs) - if egressIPName == "" { + networkNodeRedirectCache, ok := cache.egressNodeRedirectsCache.cache[networkName] + if !ok || len(networkNodeRedirectCache) == 0 { + klog.Infof("syncStaleEgressReroutePolicy found invalid logical router policy (UUID: %s) because no assigned Nodes for EgressIP %s", item.UUID, eipName) + return true + } + extractedEgressIPName, _ := getEIPLRPObjK8MetaData(item.ExternalIDs) + if extractedEgressIPName == "" { klog.Errorf("syncStaleEgressReroutePolicy found logical router policy (UUID: %s) with invalid meta data associated with network %s", item.UUID, networkName) - return false + return true + } + if extractedEgressIPName != eipName { + // remove if there's no reference to this EIP name + _, ok := cache.egressIPNameToPods[extractedEgressIPName] + return !ok } splitMatch := strings.Split(item.Match, " ") - 
logicalIP := splitMatch[len(splitMatch)-1] - parsedLogicalIP := net.ParseIP(logicalIP) + podIPStr := splitMatch[len(splitMatch)-1] + podIP := net.ParseIP(podIPStr) + if podIP == nil { + klog.Infof("syncStaleEgressReroutePolicy found invalid LRP with broken match with UID %q", item.UUID) + return true + } egressPodIPs := sets.NewString() // Since LRPs are created only for pods local to this zone // we need to care about only those pods. Nexthop for them will @@ -1624,31 +1641,24 @@ func (e *EgressIPController) syncStaleEgressReroutePolicy(cache egressIPCache) e for _, podIPs := range data.egressLocalPods { egressPodIPs.Insert(podIPs.UnsortedList()...) } - if !egressPodIPs.Has(parsedLogicalIP.String()) { - klog.Infof("syncStaleEgressReroutePolicy will delete %s due to no nexthop or stale logical ip: %v", egressIPName, item) + if !egressPodIPs.Has(podIP.String()) { + klog.Infof("syncStaleEgressReroutePolicy will delete %s due to no nexthop or stale logical ip: %v", extractedEgressIPName, item) return true } // Check for stale nexthops that may exist in the logical router policy and store that in logicalRouterPolicyStaleNexthops. // Note: adding missing nexthop(s) to the logical router policy is done outside the scope of this function. 
staleNextHops := []string{} for _, nexthop := range item.Nexthops { - nodeName, ok := cache.egressIPIPToNodeCache[parsedLogicalIP.String()] - if ok { - klog.Infof("syncStaleEgressReroutePolicy will delete %s due to no node assigned to logical ip: %v", egressIPName, item) - return true - } - networksRedirects, ok := cache.egressNodeRedirectsCache.cache[nodeName] - if ok { - klog.Infof("syncStaleEgressReroutePolicy will delete %s due to no network in cache: %v", egressIPName, item) - return true - } - redirects, ok := networksRedirects[networkName] - if !ok { - klog.Infof("syncStaleEgressReroutePolicy will delete %s due to no redirects for network in cache: %v", egressIPName, item) - return true + // ensure valid next hop by iterating through the node config + var isFound bool // isFound is true, if the next hop IP is found within the set of assigned nodes + for _, nodeRedirect := range networkNodeRedirectCache { + if nodeRedirect.containsIP(nexthop) { + isFound = true + break + } } - //FIXME: be more specific about which is the valid next hop instead of relying on verifying if the IP is within a valid set of IPs. - if !redirects.containsIP(nexthop) { + if !isFound { + //FIXME: be more specific about which is the valid next hop instead of relying on verifying if the IP is within a valid set of IPs. staleNextHops = append(staleNextHops, nexthop) } } @@ -1907,9 +1917,12 @@ func (e *EgressIPController) generateCacheForEgressIP() (egressIPCache, error) { // This will help sync SNATs egressLocalNodesCache := sets.New[string]() cache.egressLocalNodesCache = egressLocalNodesCache - // egressIP name -> node name - egressNodesCache := make(map[string]string, 0) - cache.egressIPIPToNodeCache = egressNodesCache + // egressIP name -> nodes where the IPs are assigned + egressIPNameNodesCache := make(map[string][]string, 0) + cache.egressIPNameToAssignedNodes = egressIPNameNodesCache + // egressIP IP -> node name. Assigned node for EIP. 
+ egressIPIPNodeCache := make(map[string]string, 0) + cache.egressIPIPToNodeCache = egressIPIPNodeCache cache.markCache = make(map[string]string) egressIPs, err := e.watchFactory.GetEgressIPs() if err != nil { @@ -1922,11 +1935,18 @@ func (e *EgressIPController) generateCacheForEgressIP() (egressIPCache, error) { } cache.markCache[egressIP.Name] = mark.String() egressIPsCache[egressIP.Name] = make(map[string]selectedPods, 0) + egressIPNameNodesCache[egressIP.Name] = make([]string, 0, len(egressIP.Status.Items)) for _, status := range egressIP.Status.Items { + eipIP := net.ParseIP(status.EgressIP) + if eipIP == nil { + klog.Errorf("Failed to parse EgressIP %s IP %q from status", egressIP.Name, status.EgressIP) + continue + } + egressIPIPNodeCache[eipIP.String()] = status.Node if localZoneNodes.Has(status.Node) { egressLocalNodesCache.Insert(status.Node) } - egressNodesCache[status.EgressIP] = status.Node + egressIPNameNodesCache[egressIP.Name] = append(egressIPNameNodesCache[egressIP.Name], status.Node) } namespaces, err = e.watchFactory.GetNamespacesBySelector(egressIP.Spec.NamespaceSelector) if err != nil { From 41a91515866aa607e3159857c64d8e7b37d54e02 Mon Sep 17 00:00:00 2001 From: Martin Kennelly Date: Mon, 7 Jul 2025 11:41:09 +0100 Subject: [PATCH 070/181] EIP OVN controller: remove possibility of crash, improve logging and readability No func changes. Check if obj is nil post parsing IP. Improve logging of stale OVN config. 
Signed-off-by: Martin Kennelly --- go-controller/pkg/ovn/egressip.go | 35 ++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/go-controller/pkg/ovn/egressip.go b/go-controller/pkg/ovn/egressip.go index 2bcedbdbec..f90365aa0a 100644 --- a/go-controller/pkg/ovn/egressip.go +++ b/go-controller/pkg/ovn/egressip.go @@ -1446,7 +1446,7 @@ func (e *EgressIPController) syncStaleGWMarkRules(egressIPCache egressIPCache) e continue } for networkName, podCache := range networkPodCache { - for eIP, nodeName := range egressIPCache.egressIPIPToNodeCache { + for eIPIP, nodeName := range egressIPCache.egressIPIPToNodeCache { if !egressIPCache.egressLocalNodesCache.Has(nodeName) { continue } @@ -1460,7 +1460,7 @@ func (e *EgressIPController) syncStaleGWMarkRules(egressIPCache egressIPCache) e return fmt.Errorf("failed to create new network %s: %v", networkName, err) } routerName := ni.GetNetworkScopedGWRouterName(nodeName) - isEIPIPv6 := utilnet.IsIPv6String(eIP) + isEIPIPv6 := utilnet.IsIPv6String(eIPIP) for podKey, podIPs := range podCache.egressLocalPods { ops, err = processPodFn(ops, eIPName, podKey, egressIPCache.markCache[eIPName], routerName, networkName, podIPs, isEIPIPv6) if err != nil { @@ -1679,7 +1679,14 @@ func (e *EgressIPController) syncStaleEgressReroutePolicy(cache egressIPCache) e // Update Logical Router Policies that have stale nexthops. 
Notice that we must do this separately // because logicalRouterPolicyStaleNexthops must be populated first - klog.Infof("syncStaleEgressReroutePolicy will remove stale nexthops for network %s: %+v", networkName, logicalRouterPolicyStaleNexthops) + for _, staleNextHopLogicalRouterPolicy := range logicalRouterPolicyStaleNexthops { + if staleNextHopLogicalRouterPolicy.Nexthop == nil { + continue + } + klog.Infof("syncStaleEgressReroutePolicy will remove stale nexthops for LRP %q for network %s: %s", + staleNextHopLogicalRouterPolicy.UUID, networkName, *staleNextHopLogicalRouterPolicy.Nexthop) + } + err = libovsdbops.DeleteNextHopsFromLogicalRouterPolicies(e.nbClient, cache.networkToRouter[networkName], logicalRouterPolicyStaleNexthops...) if err != nil { return fmt.Errorf("unable to remove stale next hops from logical router policies for network %s: %v", networkName, err) @@ -1709,7 +1716,13 @@ func (e *EgressIPController) syncStaleSNATRules(egressIPCache egressIPCache) err return false } egressIPName := egressIPMeta[0] - parsedLogicalIP := net.ParseIP(item.LogicalIP).String() + // check logical IP maps to a valid pod + parsedPodIP := net.ParseIP(item.LogicalIP) + if parsedPodIP == nil { + klog.Errorf("syncStaleSNATRules found invalid logical IP for NAT with UID %q", item.UUID) + return true + } + parsedPodIPStr := parsedPodIP.String() cacheEntry, exists := egressIPCache.egressIPNameToPods[egressIPName][types.DefaultNetworkName] egressPodIPs := sets.NewString() if exists { @@ -1722,7 +1735,7 @@ func (e *EgressIPController) syncStaleSNATRules(egressIPCache egressIPCache) err egressPodIPs.Insert(podIPs.UnsortedList()...) 
} } - if !exists || !egressPodIPs.Has(parsedLogicalIP) { + if !exists || !egressPodIPs.Has(parsedPodIPStr) { klog.Infof("syncStaleSNATRules will delete %s due to logical ip: %v", egressIPName, item) return true } @@ -1731,9 +1744,15 @@ func (e *EgressIPController) syncStaleSNATRules(egressIPCache egressIPCache) err klog.Errorf("syncStaleSNATRules failed to find default network in networks cache") return false } - if node, ok := egressIPCache.egressIPIPToNodeCache[item.ExternalIP]; !ok || !cacheEntry.egressLocalPods[types.DefaultNetworkName].Has(node) || - item.LogicalPort == nil || *item.LogicalPort != ni.GetNetworkScopedK8sMgmtIntfName(node) { - klog.Infof("syncStaleSNATRules will delete %s due to external ip or stale logical port: %v", egressIPName, item) + // check external IP maps to a valid EgressIP IP and its assigned to a Node + node, ok := egressIPCache.egressIPIPToNodeCache[item.ExternalIP] + if !ok { + klog.Infof("syncStaleSNATRules found NAT %q without EIP assigned to a Node", item.UUID) + return true + } + // check logical port is set and correspondes to the correct egress node + if item.LogicalPort == nil || *item.LogicalPort != ni.GetNetworkScopedK8sMgmtIntfName(node) { + klog.Infof("syncStaleSNATRules found NAT %q with invalid logical port", item.UUID) return true } return false From 053585e9a7743c1fea5417449e987fe9c8f7c52e Mon Sep 17 00:00:00 2001 From: Martin Kennelly Date: Mon, 7 Jul 2025 11:43:49 +0100 Subject: [PATCH 071/181] OVN EIP startup syncer: add UTs for pod / node deleted Removes config for deleted nodes/pods while controller was down and ensures ovn config is removed while preserving valid config. 
Signed-off-by: Martin Kennelly --- go-controller/pkg/ovn/egressip_test.go | 549 +++++++++++++++++++++++++ 1 file changed, 549 insertions(+) diff --git a/go-controller/pkg/ovn/egressip_test.go b/go-controller/pkg/ovn/egressip_test.go index 43ec170acb..b05422bf65 100644 --- a/go-controller/pkg/ovn/egressip_test.go +++ b/go-controller/pkg/ovn/egressip_test.go @@ -52,6 +52,8 @@ const ( podV4IP3 = "10.128.1.3" podV4IP4 = "10.128.1.4" podV6IP = "ae70::66" + podV6IP2 = "be70::66" + podV6IP3 = "be70::67" v6GatewayIP = "ae70::1" v6Node1Subnet = "ae70::66/64" v6Node2Subnet = "be70::66/64" @@ -12901,6 +12903,553 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" }) }) + + ginkgo.Context("Sync", func() { + ginkgo.It("removes config for previously selected pods on a deleted Node", func() { + // node 1 is local zone and egress Node. + // pod was on node 2 but it is deleted. Node 2 previously was also an egress Node. + app.Action = func(*cli.Context) error { + config.OVNKubernetesFeature.EnableInterconnect = true + // dual stack cluster + config.IPv4Mode = true + config.IPv6Mode = true + egressNamespace := newNamespace(eipNamespace) + egressPod := corev1.Pod{ + ObjectMeta: newPodMeta(eipNamespace, podName, egressPodLabel), + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "containerName", + Image: "containerImage", + }, + }, + NodeName: node1Name, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + PodIP: podV4IP, + PodIPs: []corev1.PodIP{{IP: podV4IP}, {IP: podV6IP}}, + }, + } + // node 1 (local zone) + node1IPv4 := "192.168.126.210" + Node1IPv4CIDR := node1IPv4 + "/24" + node1IPv6 := "fc00:f853:ccd:e793::30" + node1IPv6CIDR := node1IPv6 + "/64" + node1TranSwitchIPv4CIDR := "100.88.0.2/16" + node1TranSwitchIPv6CIDR := "fd97::2/64" + _, node1IPV4Net, _ := net.ParseCIDR(v4Node1Subnet) + _, node1IPV6Net, _ := net.ParseCIDR(v6Node1Subnet) + nodeAnnotations := map[string]string{ + "k8s.ovn.org/l3-gateway-config": 
`{"default":{"mode":"local","mac-address":"7e:57:f8:f0:3c:49", "ip-address":"192.168.126.12/24", "next-hop":"192.168.126.1"}}`, + "k8s.ovn.org/node-gateway-router-lrp-ifaddrs": fmt.Sprintf("{\"default\":{\"ipv4\":\"%s\",\"ipv6\":\"%s\"}}", nodeLogicalRouterIfAddrV4, nodeLogicalRouterIfAddrV6), + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}", Node1IPv4CIDR, node1IPv6CIDR), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":[\"%s\", \"%s\"]}", v4Node1Subnet, v6Node1Subnet), + "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s\", \"ipv6\": \"%s\"}", node1TranSwitchIPv4CIDR, node1TranSwitchIPv6CIDR), + "k8s.ovn.org/zone-name": node1Name, + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\",\"%s\"]", Node1IPv4CIDR, node1IPv6CIDR), + } + node1 := getNodeObj(node1Name, nodeAnnotations, map[string]string{}) // add node to avoid error-ing out on transit switch IP fetch + // node 2 - deleted (remote zone) + node2TranSwitchIPv6 := "fd97::3" + eipIPv4 := "192.168.126.200" + eipIPv6 := "0:0:0:0:0:feff:c0a8:8e0d" + deletedPodIPv4 := podV4IP2 + // dual IP family EIP selecting one pod in local zone + eIP := egressipv1.EgressIP{ + ObjectMeta: newEgressIPMeta(egressIPName), + Spec: egressipv1.EgressIPSpec{ + EgressIPs: []string{ + eipIPv4, + eipIPv6, + }, + PodSelector: metav1.LabelSelector{ + MatchLabels: egressPodLabel, + }, + NamespaceSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "name": egressNamespace.Name, + }, + }, + }, + Status: egressipv1.EgressIPStatus{ + Items: []egressipv1.EgressIPStatusItem{ + { + Node: node1Name, + EgressIP: eipIPv4, + }, + // Previous was set to node 2 and Node was deleted while local zone EIP controller was down. + //{ + // Node: node2Name, + // EgressIP: eipIPv6, + //}, + }, + }, + } + ginkgo.By("start OVN DBs with valid and invalid (pod doesn't exist..) 
OVN config") + node1NatLogicalPortName := "k8s-" + node1Name + fakeOvn.startWithDBSetup( + libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + // LRPs to support EIP assigned to a remote node node thats deleted while the controller was down + // Valid LRP for IPv4 egress node. IPv4 egress Node is local. IPv6 egress node is remote and deleted but ovn config remains + getReRoutePolicy(podV4IP, "4", "valid-reroute-ipv4-UUID", + nodeLogicalRouterIPv4, getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, + IPFamilyValueV4, types.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs()), + // invalid LRP for IPv6 because remove node is deleted + getReRoutePolicy(podV6IP, "6", "invalid-reroute-ipv6-UUID", + []string{node2TranSwitchIPv6}, getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, + IPFamilyValueV6, types.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs()), + // NATs to support EIP assigned to the local node + // valid NAT + &nbdb.NAT{ + UUID: "valid-nat-ipv4-UUID", + LogicalIP: podV4IP, + ExternalIP: eipIPv4, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, DefaultNetworkControllerName).GetExternalIDs(), + Type: nbdb.NATTypeSNAT, + LogicalPort: &node1NatLogicalPortName, + Options: map[string]string{ + "stateless": "false", + }, + }, + // invalid NAT for a deleted pod on remote node + &nbdb.NAT{ + UUID: "invalid-nat-ipv4-UUID", + LogicalIP: deletedPodIPv4, + ExternalIP: eipIPv4, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressNamespace.Namespace, "deletedpod", IPFamilyValueV4, DefaultNetworkControllerName).GetExternalIDs(), + Type: nbdb.NATTypeSNAT, + LogicalPort: &node1NatLogicalPortName, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.LogicalRouterPort{ + UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1Name + "-UUID", + Name: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix 
+ node1Name, + Networks: []string{nodeLogicalRouterIfAddrV6, nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: types.OVNClusterRouter, + UUID: types.OVNClusterRouter + "-UUID", + Policies: []string{"valid-reroute-ipv4-UUID", "invalid-reroute-ipv6-UUID"}, + }, + &nbdb.LogicalRouter{ + Name: types.GWRouterPrefix + node1Name, + UUID: types.GWRouterPrefix + node1Name + "-UUID", + Ports: []string{types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1Name + "-UUID"}, + Options: map[string]string{"dynamic_neigh_routers": "false"}, + Nat: []string{"valid-nat-ipv4-UUID", "invalid-nat-ipv4-UUID"}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node1.Name + "-UUID", + Name: "k8s-" + node1.Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1IPV4Net).IP.String(), + "fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1IPV6Net).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node1.Name + "-UUID", + Name: node1.Name, + Ports: []string{"k8s-" + node1.Name + "-UUID"}, + }, + }, + }, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, + }, + &corev1.PodList{ + Items: []corev1.Pod{egressPod}, + }, + &corev1.NodeList{ + Items: []corev1.Node{node1}, + }, + &egressipv1.EgressIPList{ + Items: []egressipv1.EgressIP{eIP}, + }, + ) + i, podIPv4Net, _ := net.ParseCIDR(podV4IP + "/23") + podIPv4Net.IP = i + i, podIPv6Net, _ := net.ParseCIDR(podV6IP + "/23") + podIPv6Net.IP = i + fakeOvn.controller.logicalPortCache.add(&egressPod, "", types.DefaultNetworkName, "", nil, []*net.IPNet{podIPv4Net, podIPv6Net}) + + // hack pod to be in the provided zone + fakeOvn.controller.eIPC.nodeZoneState.Store(node1Name, true) + fakeOvn.controller.eIPC.nodeZoneState.Store(node2Name, false) + fakeOvn.controller.localZoneNodes.Store(node1Name, true) + fakeOvn.controller.localZoneNodes.Store(node2Name, false) + + err := fakeOvn.controller.WatchEgressIPNamespaces() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = 
fakeOvn.controller.WatchEgressIPPods() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIP() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("ensuring cleanup of invalid LRP and NAT") + egressIPServedPodsASv4, egressIPServedPodsASv6 := buildEgressIPServedPodsAddressSets([]string{podV4IP, podV6IP}, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName) + expectedDatabaseState := []libovsdbtest.TestData{ + getReRoutePolicy(podV4IP, "4", "valid-reroute-ipv4-UUID", + nodeLogicalRouterIPv4, getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, + IPFamilyValueV4, types.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs()), + &nbdb.NAT{ + UUID: "valid-egressip-nat-UUID", + LogicalIP: podV4IP, + ExternalIP: eipIPv4, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), + Type: nbdb.NATTypeSNAT, + LogicalPort: &node1NatLogicalPortName, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.LogicalRouter{ + Name: types.OVNClusterRouter, + UUID: types.OVNClusterRouter + "-UUID", + Policies: []string{"valid-reroute-ipv4-UUID"}, + }, + &nbdb.LogicalRouterPort{ + UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1Name + "-UUID", + Name: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1Name, + Networks: []string{nodeLogicalRouterIfAddrV6, nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: types.GWRouterPrefix + node1Name, + UUID: types.GWRouterPrefix + node1Name + "-UUID", + Ports: []string{types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1Name + "-UUID"}, + Nat: []string{"valid-egressip-nat-UUID"}, + Options: map[string]string{"dynamic_neigh_routers": "false"}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node1.Name + "-UUID", + Name: "k8s-" + node1.Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + 
util.GetNodeManagementIfAddr(node1IPV4Net).IP.String(), + "fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1IPV6Net).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node1.Name + "-UUID", + Name: node1.Name, + Ports: []string{"k8s-" + node1.Name + "-UUID"}, + }, + egressIPServedPodsASv4, + egressIPServedPodsASv6, + } + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) + ginkgo.By("ensure config is consistent") + gomega.Consistently(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) + return nil + } + + err := app.Run([]string{app.Name}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + ginkgo.It("remove invalid OVN config for deleted pod", func() { + // removes invalid SNAT/NAT for a pod that was selected by an EIP but was removed while controller was not running and therefore OVN config should be removed + // does not modify valid SNAT/NAT + // further references to "local" or "remote" imply local or remote OVN zone for IC. 
+ // one EIP object with two assigned IPs of different IP families (v4 and v6) which select one pod that's local + app.Action = func(*cli.Context) error { + config.OVNKubernetesFeature.EnableInterconnect = true + // dual stack cluster + config.IPv4Mode = true + config.IPv6Mode = true + egressPod := corev1.Pod{ + ObjectMeta: newPodMeta(eipNamespace, podName, egressPodLabel), + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "containerName", + Image: "containerImage", + }, + }, + NodeName: node1Name, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + PodIP: podV4IP, + PodIPs: []corev1.PodIP{{IP: podV4IP}, {IP: podV6IP}}, + }, + } + // deletedPodIP is a pod IP of a Pod that was deleted while eip controller was not running therefore config will exist in OVN DBs to support EIP + deletedPodIPv4, deletedPod2IPv4, deletedPodIPv6, deletedPod2IPv6 := podV4IP2, "10.128.0.20", podV6IP2, podV6IP3 + egressNamespace := newNamespace(eipNamespace) + // node 1 (local zone) + node1IPv4 := "192.168.126.210" + Node1IPv4CIDR := node1IPv4 + "/24" + node1IPv6 := "fc00:f853:ccd:e793::30" + node1IPv6CIDR := node1IPv6 + "/64" + node1TranSwitchIPv4CIDR := "100.88.0.2/16" + node1TranSwitchIPv6CIDR := "fd97::2/64" + _, node1IPV4Net, _ := net.ParseCIDR(v4Node1Subnet) + _, node1IPV6Net, _ := net.ParseCIDR(v6Node1Subnet) + nodeAnnotations := map[string]string{ + "k8s.ovn.org/l3-gateway-config": `{"default":{"mode":"local","mac-address":"7e:57:f8:f0:3c:49", "ip-address":"192.168.126.12/24", "next-hop":"192.168.126.1"}}`, + "k8s.ovn.org/node-gateway-router-lrp-ifaddrs": fmt.Sprintf("{\"default\":{\"ipv4\":\"%s\",\"ipv6\":\"%s\"}}", nodeLogicalRouterIfAddrV4, nodeLogicalRouterIfAddrV6), + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}", Node1IPv4CIDR, node1IPv6CIDR), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":[\"%s\", \"%s\"]}", v4Node1Subnet, v6Node1Subnet), + "k8s.ovn.org/node-transit-switch-port-ifaddr": 
fmt.Sprintf("{\"ipv4\":\"%s\", \"ipv6\": \"%s\"}", node1TranSwitchIPv4CIDR, node1TranSwitchIPv6CIDR), + "k8s.ovn.org/zone-name": node1Name, + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\",\"%s\"]", Node1IPv4CIDR, node1IPv6CIDR), + } + node1 := getNodeObj(node1Name, nodeAnnotations, map[string]string{}) // add node to avoid error-ing out on transit switch IP fetch + // node 2 (remote zone) + node2IPv4 := "192.168.126.202" + node2IPv4CIDR := node2IPv4 + "/24" + node2IPv6 := "fc00:f853:cce:e793::20" + node2IPv6CIDR := node2IPv6 + "/64" + node2TranSwitchIPv4 := "100.88.0.3" + node2TranSwitchIPv4CIDR := node2TranSwitchIPv4 + "/16" + node2TranSwitchIPv6 := "fd97::3" + node2TranSwitchIPv6CIDR := node2TranSwitchIPv6 + "/64" + _, node2IPV4Net, _ := net.ParseCIDR(v4Node2Subnet) + _, node2IPV6Net, _ := net.ParseCIDR(v6Node2Subnet) + nodeAnnotations = map[string]string{ + "k8s.ovn.org/node-gateway-router-lrp-ifaddrs": fmt.Sprintf("{\"default\":{\"ipv4\":\"%s\",\"ipv6\":\"%s\"}}", node2LogicalRouterIfAddrV4, node2LogicalRouterIfAddrV6), + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\", \"ipv6\": \"%s\"}", node2IPv4CIDR, node2IPv6CIDR), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":[\"%s\", \"%s\"]}", v4Node2Subnet, v6Node2Subnet), + "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s\", \"ipv6\": \"%s\"}", node2TranSwitchIPv4CIDR, node2TranSwitchIPv6CIDR), + "k8s.ovn.org/zone-name": node2Name, + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\",\"%s\"]", node2IPv4CIDR, node2IPv6CIDR), + } + node2 := getNodeObj(node2Name, nodeAnnotations, map[string]string{}) + eipIPv4 := "192.168.126.200" + eipIPv6 := "0:0:0:0:0:feff:c0a8:8e0d" + // dual IP family EIP selecting one pod in local zone + eIP := egressipv1.EgressIP{ + ObjectMeta: newEgressIPMeta(egressIPName), + Spec: egressipv1.EgressIPSpec{ + EgressIPs: []string{ + eipIPv4, + eipIPv6, + }, + PodSelector: metav1.LabelSelector{ + MatchLabels: egressPodLabel, + }, + NamespaceSelector: 
metav1.LabelSelector{ + MatchLabels: map[string]string{ + "name": egressNamespace.Name, + }, + }, + }, + Status: egressipv1.EgressIPStatus{ + Items: []egressipv1.EgressIPStatusItem{ + { + Node: node1Name, + EgressIP: eipIPv4, + }, + { + Node: node2Name, + EgressIP: eipIPv6, + }, + }, + }, + } + ginkgo.By("start OVN DBs with valid and invalid ( 2 pods don't exist..) OVN config") + node1NatLogicalPortName := "k8s-" + node1Name + fakeOvn.startWithDBSetup( + libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + // LRPs to support EIP assigned to a remote node + // valid LRP for IPv4/IPv6. IPv4 Egress Node is local, IPv6 is remote + getReRoutePolicy(podV4IP, "4", "valid-reroute-ipv4-UUID", + nodeLogicalRouterIPv4, getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, + IPFamilyValueV4, types.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs()), + getReRoutePolicy(podV6IP, "6", "valid-reroute-ipv6-UUID", + []string{node2TranSwitchIPv6}, getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, + IPFamilyValueV6, types.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs()), + // invalid lrps to redirect to the remote egress node for deleted pods + getReRoutePolicy(deletedPodIPv6, "6", "invalid-reroute-ipv6-UUID", + []string{node2TranSwitchIPv6}, getEgressIPLRPReRouteDbIDs(eIP.Name, "UNKNOWN", "UNKNOWN", + IPFamilyValueV6, types.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs()), + getReRoutePolicy(deletedPod2IPv6, "6", "invalid-reroute2-ipv6-UUID", + []string{node2TranSwitchIPv6}, getEgressIPLRPReRouteDbIDs(eIP.Name, "UNKNOWN", "UNKNOWN", + IPFamilyValueV6, types.DefaultNetworkName, DefaultNetworkControllerName).GetExternalIDs()), + // NATs to support EIP assigned to the local node + // valid NAT + &nbdb.NAT{ + UUID: "valid-nat-ipv4-UUID", + LogicalIP: podV4IP, + ExternalIP: eipIPv4, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, 
DefaultNetworkControllerName).GetExternalIDs(), + Type: nbdb.NATTypeSNAT, + LogicalPort: &node1NatLogicalPortName, + Options: map[string]string{ + "stateless": "false", + }, + }, + // invalid NATs + &nbdb.NAT{ + UUID: "invalid-nat-ipv4-UUID", + LogicalIP: deletedPodIPv4, + ExternalIP: eipIPv4, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, "UNKNOWN", "UNKNOWN", IPFamilyValueV4, DefaultNetworkControllerName).GetExternalIDs(), + Type: nbdb.NATTypeSNAT, + LogicalPort: &node1NatLogicalPortName, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.NAT{ + UUID: "invalid-nat2-ipv4-UUID", + LogicalIP: deletedPod2IPv4, + ExternalIP: eipIPv4, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, "UNKNOWN", "UNKNOWN", IPFamilyValueV4, DefaultNetworkControllerName).GetExternalIDs(), + Type: nbdb.NATTypeSNAT, + LogicalPort: &node1NatLogicalPortName, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.LogicalRouterPort{ + UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1Name + "-UUID", + Name: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1Name, + Networks: []string{nodeLogicalRouterIfAddrV6, nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: types.OVNClusterRouter, + UUID: types.OVNClusterRouter + "-UUID", + Policies: []string{"valid-reroute-ipv4-UUID", "valid-reroute-ipv6-UUID", "invalid-reroute-ipv6-UUID", "invalid-reroute2-ipv6-UUID"}, + }, + &nbdb.LogicalRouter{ + Name: types.GWRouterPrefix + node1Name, + UUID: types.GWRouterPrefix + node1Name + "-UUID", + Ports: []string{types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1Name + "-UUID"}, + Options: map[string]string{"dynamic_neigh_routers": "false"}, + Nat: []string{"valid-nat-ipv4-UUID", "invalid-nat-ipv4-UUID"}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node1.Name + "-UUID", + Name: "k8s-" + node1.Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1IPV4Net).IP.String(), + 
"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1IPV6Net).IP.String()}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node2.Name + "-UUID", + Name: "k8s-" + node2.Name, + Addresses: []string{"fe:1a:b2:3f:0e:fd " + util.GetNodeManagementIfAddr(node2IPV4Net).IP.String(), + "fe:1a:b2:3f:0e:fd " + util.GetNodeManagementIfAddr(node2IPV6Net).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node1.Name + "-UUID", + Name: node1.Name, + Ports: []string{"k8s-" + node1.Name + "-UUID"}, + }, + &nbdb.LogicalSwitch{ + UUID: node2.Name + "-UUID", + Name: node2.Name, + Ports: []string{"k8s-" + node2.Name + "-UUID"}, + }, + }, + }, + &corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace}, + }, + &corev1.PodList{ + Items: []corev1.Pod{egressPod}, + }, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2}, + }, + &egressipv1.EgressIPList{ + Items: []egressipv1.EgressIP{eIP}, + }, + ) + i, podIPv4Net, _ := net.ParseCIDR(podV4IP + "/23") + podIPv4Net.IP = i + i, podIPv6Net, _ := net.ParseCIDR(podV6IP + "/23") + podIPv6Net.IP = i + fakeOvn.controller.logicalPortCache.add(&egressPod, "", types.DefaultNetworkName, "", nil, []*net.IPNet{podIPv4Net, podIPv6Net}) + + // hack pod to be in the provided zone + fakeOvn.controller.eIPC.nodeZoneState.Store(node1Name, true) + fakeOvn.controller.eIPC.nodeZoneState.Store(node2Name, false) + fakeOvn.controller.localZoneNodes.Store(node1Name, true) + fakeOvn.controller.localZoneNodes.Store(node2Name, false) + + err := fakeOvn.controller.WatchEgressIPNamespaces() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIPPods() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIP() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("ensuring cleanup of invalid LRP and NAT") + egressIPServedPodsASv4, egressIPServedPodsASv6 := buildEgressIPServedPodsAddressSets([]string{podV4IP, podV6IP}, types.DefaultNetworkName, 
fakeOvn.controller.eIPC.controllerName) + expectedDatabaseState := []libovsdbtest.TestData{ + // LRPs to support EIP assigned to a remote node + // valid LRP for IPv4/IPv6. IPv4 Egress Node is local, IPv6 is remote + getReRoutePolicy(podV4IP, "4", "valid-reroute-ipv4-UUID", + nodeLogicalRouterIPv4, getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, + IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), + getReRoutePolicy(podV6IP, "6", "valid-reroute-ipv6-UUID", + []string{node2TranSwitchIPv6}, getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, + IPFamilyValueV6, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs()), + // valid NAT + &nbdb.NAT{ + UUID: "valid-egressip-nat-UUID", + LogicalIP: podV4IP, + ExternalIP: eipIPv4, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), + Type: nbdb.NATTypeSNAT, + LogicalPort: &node1NatLogicalPortName, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.LogicalRouter{ + Name: types.OVNClusterRouter, + UUID: types.OVNClusterRouter + "-UUID", + Policies: []string{"valid-reroute-ipv4-UUID", "valid-reroute-ipv6-UUID"}, + }, + &nbdb.LogicalRouterPort{ + UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1Name + "-UUID", + Name: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1Name, + Networks: []string{nodeLogicalRouterIfAddrV6, nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: types.GWRouterPrefix + node1Name, + UUID: types.GWRouterPrefix + node1Name + "-UUID", + Ports: []string{types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1Name + "-UUID"}, + Nat: []string{"valid-egressip-nat-UUID"}, + Options: map[string]string{"dynamic_neigh_routers": "false"}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node1.Name + "-UUID", + Name: 
"k8s-" + node1.Name, + Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1IPV4Net).IP.String(), + "fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1IPV6Net).IP.String()}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "k8s-" + node2.Name + "-UUID", + Name: "k8s-" + node2.Name, + Addresses: []string{"fe:1a:b2:3f:0e:fd " + util.GetNodeManagementIfAddr(node2IPV4Net).IP.String(), + "fe:1a:b2:3f:0e:fd " + util.GetNodeManagementIfAddr(node2IPV6Net).IP.String()}, + }, + &nbdb.LogicalSwitch{ + UUID: node1.Name + "-UUID", + Name: node1.Name, + Ports: []string{"k8s-" + node1.Name + "-UUID"}, + }, + &nbdb.LogicalSwitch{ + UUID: node2.Name + "-UUID", + Name: node2.Name, + Ports: []string{"k8s-" + node2.Name + "-UUID"}, + }, + egressIPServedPodsASv4, + egressIPServedPodsASv6, + } + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) + ginkgo.By("ensure config is consistent") + gomega.Consistently(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) + return nil + } + + err := app.Run([]string{app.Name}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + }) + }) // TEST UTILITY FUNCTIONS; From 1448d5ab14337b647f3e3034ea4ffc077431979a Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Wed, 7 May 2025 15:37:29 -0400 Subject: [PATCH 072/181] Drop in_port from ip dispatch OF rule The in_port was to match on IP traffic coming from the physical link to be dispatched to conntrack and table 1 to find out whether the packet was a reply to the host or to OVN. We are now conntracking these packets also as they go to localnet ports attached to the bridge. Therefore we need to also match on packets from those ports. We do not want traffic from OVN or from LOCAL to hit this flow, but that should be avoided by higher priority flows. 
Signed-off-by: Tim Rozet (cherry picked from commit 8c1594ee55408ae4748d7322bd093e2acbc0ce98) --- go-controller/pkg/node/gateway_shared_intf.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index 00c96cef1a..8d771d5054 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -2003,11 +2003,12 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin } if ofPortPhys != "" { - // table 0, packets coming from external. Send it through conntrack and + // table 0, packets coming from external or other localnet ports. Send it through conntrack and // resubmit to table 1 to know the state and mark of the connection. + // Note, there are higher priority rules that take care of traffic coming from LOCAL and OVN ports. dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=50, in_port=%s, ip, "+ - "actions=ct(zone=%d, nat, table=1)", defaultOpenFlowCookie, ofPortPhys, config.Default.ConntrackZone)) + fmt.Sprintf("cookie=%s, priority=50, ip, actions=ct(zone=%d, nat, table=1)", + defaultOpenFlowCookie, config.Default.ConntrackZone)) } } From 3de7eade84e858cac080bb9386e4301d901b726d Mon Sep 17 00:00:00 2001 From: Riccardo Ravaioli Date: Wed, 7 May 2025 10:20:51 -0400 Subject: [PATCH 073/181] Reapply "Add flow for host -> localnet on same node" This reverts commit ebb73398310c882902f0f8b297bb8386d039ecfc. 
Signed-off-by: Riccardo Ravaioli --- go-controller/pkg/node/gateway_shared_intf.go | 107 +++++++++++------- 1 file changed, 66 insertions(+), 41 deletions(-) diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index 8d771d5054..7556aa54f7 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -1956,9 +1956,10 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, config.Default.ConntrackZone, netConfig.masqCTMark, ofPortPhys)) - // Allow OVN->Host traffic on the same node + // Allow (a) OVN->host traffic on the same node + // (b) host->host traffic on the same node if config.Gateway.Mode == config.GatewayModeShared || config.Gateway.Mode == config.GatewayModeLocal { - dftFlows = append(dftFlows, ovnToHostNetworkNormalActionFlows(netConfig, bridgeMacAddress, hostSubnets, false)...) + dftFlows = append(dftFlows, hostNetworkNormalActionFlows(netConfig, bridgeMacAddress, hostSubnets, false)...) } } else { // for UDN we additionally SNAT the packet from masquerade IP -> node IP @@ -2053,9 +2054,10 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, config.Default.ConntrackZone, netConfig.masqCTMark, ofPortPhys)) - // Allow OVN->Host traffic on the same node + // Allow (a) OVN->host traffic on the same node + // (b) host->host traffic on the same node if config.Gateway.Mode == config.GatewayModeShared || config.Gateway.Mode == config.GatewayModeLocal { - dftFlows = append(dftFlows, ovnToHostNetworkNormalActionFlows(netConfig, bridgeMacAddress, hostSubnets, true)...) + dftFlows = append(dftFlows, hostNetworkNormalActionFlows(netConfig, bridgeMacAddress, hostSubnets, true)...) 
} } else { // for UDN we additionally SNAT the packet from masquerade IP -> node IP @@ -2250,23 +2252,15 @@ func pmtudDropFlows(bridge *bridgeConfiguration, ipAddrs []string) []string { return flows } -// ovnToHostNetworkNormalActionFlows returns the flows that allow IP{v4,v6} traffic from the OVN network to the host network -// when the destination is on the same node as the sender. This is necessary for pods in the default network to reach -// localnet pods on the same node, when the localnet is mapped to breth0. The expected srcMAC is the MAC address of breth0 -// and the expected hostSubnets is the host subnets found on the node primary interface. -func ovnToHostNetworkNormalActionFlows(netConfig *bridgeUDNConfiguration, srcMAC string, hostSubnets []*net.IPNet, isV6 bool) []string { - var inPort, ctMark, ipFamily, ipFamilyDest string +// hostNetworkNormalActionFlows returns the flows that allow IP{v4,v6} traffic: +// a. from pods in the OVN network to pods in a localnet network, on the same node +// b. from pods on the host to pods in a localnet network, on the same node +// when the localnet is mapped to breth0. +// The expected srcMAC is the MAC address of breth0 and the expected hostSubnets is the host subnets found on the node +// primary interface. 
+func hostNetworkNormalActionFlows(netConfig *bridgeUDNConfiguration, srcMAC string, hostSubnets []*net.IPNet, isV6 bool) []string { var flows []string - - if config.Gateway.Mode == config.GatewayModeShared { - inPort = netConfig.ofPortPatch - ctMark = netConfig.masqCTMark - } else if config.Gateway.Mode == config.GatewayModeLocal { - inPort = "LOCAL" - ctMark = ctMarkHost - } else { - return nil - } + var ipFamily, ipFamilyDest string if isV6 { ipFamily = "ipv6" @@ -2276,38 +2270,69 @@ func ovnToHostNetworkNormalActionFlows(netConfig *bridgeUDNConfiguration, srcMAC ipFamilyDest = "nw_dst" } + formatFlow := func(inPort, destIP, ctMark string) string { + // Matching IP traffic will be handled by the bridge instead of being output directly + // to the NIC by the existing flow at prio=100. + flowTemplate := "cookie=%s, priority=102, in_port=%s, dl_src=%s, %s, %s=%s, " + + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:NORMAL" + return fmt.Sprintf(flowTemplate, + defaultOpenFlowCookie, + inPort, + srcMAC, + ipFamily, + ipFamilyDest, + destIP, + config.Default.ConntrackZone, + ctMark) + } + + // Traffic path (a): OVN->localnet for shared gw mode + if config.Gateway.Mode == config.GatewayModeShared { + for _, hostSubnet := range hostSubnets { + if utilnet.IsIPv6(hostSubnet.IP) != isV6 { + continue + } + flows = append(flows, formatFlow(netConfig.ofPortPatch, hostSubnet.String(), netConfig.masqCTMark)) + } + } + + // Traffic path (a): OVN->localnet for local gw mode + // Traffic path (b): host->localnet for both gw modes for _, hostSubnet := range hostSubnets { - if (hostSubnet.IP.To4() == nil) != isV6 { + if utilnet.IsIPv6(hostSubnet.IP) != isV6 { continue } - // IP traffic from the OVN network to the host network should be handled normally by the bridge instead of - // being output directly to the NIC by the existing flow at prio=100. 
- flows = append(flows, - fmt.Sprintf("cookie=%s, priority=102, in_port=%s, dl_src=%s, %s, %s=%s, "+ - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:NORMAL", + flows = append(flows, formatFlow(ovsLocalPort, hostSubnet.String(), ctMarkHost)) + } + + if isV6 { + // IPv6 neighbor discovery uses ICMPv6 messages sent to a special destination (ff02::1:ff00:0/104) + // that is unrelated to the host subnets matched in the prio=102 flow above. + // Allow neighbor discovery by matching against ICMP type and ingress port. + formatICMPFlow := func(inPort, ctMark string, icmpType int) string { + icmpFlowTemplate := "cookie=%s, priority=102, in_port=%s, dl_src=%s, icmp6, icmpv6_type=%d, " + + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:NORMAL" + return fmt.Sprintf(icmpFlowTemplate, defaultOpenFlowCookie, inPort, srcMAC, - ipFamily, - ipFamilyDest, - hostSubnet.String(), + icmpType, config.Default.ConntrackZone, - ctMark)) - } + ctMark) + } - if isV6 { - // Neighbor discovery in IPv6 happens through ICMPv6 messages to a special destination (ff02::1:ff00:0/104), - // which has nothing to do with the host subnets we're matching against in the flow above at prio=102. - // Let's allow neighbor discovery by matching against icmp type and in_port. 
for _, icmpType := range []int{types.NeighborSolicitationICMPType, types.NeighborAdvertisementICMPType} { - flows = append(flows, - fmt.Sprintf("cookie=%s, priority=102, in_port=%s, dl_src=%s, icmp6, icmpv6_type=%d, "+ - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:NORMAL", - defaultOpenFlowCookie, inPort, srcMAC, icmpType, - config.Default.ConntrackZone, ctMark)) + // Traffic path (a) for ICMP: OVN-> localnet for shared gw mode + if config.Gateway.Mode == config.GatewayModeShared { + flows = append(flows, + formatICMPFlow(netConfig.ofPortPatch, netConfig.masqCTMark, icmpType)) + } + + // Traffic path (a) for ICMP: OVN->localnet for local gw mode + // Traffic path (b) for ICMP: host->localnet for both gw modes + flows = append(flows, formatICMPFlow(ovsLocalPort, ctMarkHost, icmpType)) } } - return flows } From 2626e8d5bd4e0d43e69da98ea518e8c76ca62881 Mon Sep 17 00:00:00 2001 From: Riccardo Ravaioli Date: Wed, 7 May 2025 10:21:02 -0400 Subject: [PATCH 074/181] Reapply "e2e: connect to host-networked pod from localnet" This reverts commit 936e6214a82062eb51f0649db2f74dcb9d205e12. 
Signed-off-by: Riccardo Ravaioli --- test/e2e/multihoming.go | 66 +++++++++++++++++++++++++++++++++-- test/e2e/multihoming_utils.go | 45 +++++++++++++++++------- 2 files changed, 95 insertions(+), 16 deletions(-) diff --git a/test/e2e/multihoming.go b/test/e2e/multihoming.go index a2f611676b..e16fa151a0 100644 --- a/test/e2e/multihoming.go +++ b/test/e2e/multihoming.go @@ -333,17 +333,31 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { kickstartPod(cs, clientPodConfig) // Check that the client pod can reach the server pod on the server localnet interface - serverIPs, err := podIPsForAttachment(cs, f.Namespace.Name, serverPod.GetName(), netConfig.name) + var serverIPs []string + if serverPodConfig.hostNetwork { + serverIPs, err = podIPsFromStatus(cs, serverPodConfig.namespace, serverPodConfig.name) + } else { + serverIPs, err = podIPsForAttachment(cs, serverPod.Namespace, serverPod.Name, netConfig.name) + + } Expect(err).NotTo(HaveOccurred()) + for _, serverIP := range serverIPs { By(fmt.Sprintf("asserting the *client* can contact the server pod exposed endpoint: %q on port %q", serverIP, port)) + curlArgs := []string{} + pingArgs := []string{} + if clientPodConfig.attachments != nil { + // When the client is attached to a localnet, send probes from the localnet interface + curlArgs = []string{"--interface", "net1"} + pingArgs = []string{"-I", "net1"} + } Eventually(func() error { - return reachServerPodFromClient(cs, serverPodConfig, clientPodConfig, serverIP, port) + return reachServerPodFromClient(cs, serverPodConfig, clientPodConfig, serverIP, port, curlArgs...) }, 2*time.Minute, 6*time.Second).Should(Succeed()) By(fmt.Sprintf("asserting the *client* can ping the server pod exposed endpoint: %q", serverIP)) Eventually(func() error { - return pingServerPodFromClient(cs, serverPodConfig, clientPodConfig, serverIP) + return pingServerPodFromClient(cs, serverPodConfig, clientPodConfig, serverIP, pingArgs...) 
}, 2*time.Minute, 6*time.Second).Should(Succeed()) } }, @@ -391,6 +405,52 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { }, Label("BUG", "OCPBUGS-43004"), ), + ginkgo.Entry( + "can reach a host-networked pod on a different node", + networkAttachmentConfigParams{ + name: secondaryNetworkName, + topology: "localnet", + }, + podConfiguration{ // client on localnet + attachments: []nadapi.NetworkSelectionElement{{ + Name: secondaryNetworkName, + }}, + name: clientPodName, + nodeSelector: map[string]string{nodeHostnameKey: workerOneNodeName}, + isPrivileged: true, + needsIPRequestFromHostSubnet: true, + }, + podConfiguration{ // server on default network, pod is host-networked + name: podName, + containerCmd: httpServerContainerCmd(port), + nodeSelector: map[string]string{nodeHostnameKey: workerTwoNodeName}, + hostNetwork: true, + }, + Label("STORY", "SDN-5345"), + ), + ginkgo.Entry( + "can reach a host-networked pod on the same node", + networkAttachmentConfigParams{ + name: secondaryNetworkName, + topology: "localnet", + }, + podConfiguration{ // client on localnet + attachments: []nadapi.NetworkSelectionElement{{ + Name: secondaryNetworkName, + }}, + name: clientPodName, + nodeSelector: map[string]string{nodeHostnameKey: workerTwoNodeName}, + isPrivileged: true, + needsIPRequestFromHostSubnet: true, + }, + podConfiguration{ // server on default network, pod is host-networked + name: podName, + containerCmd: httpServerContainerCmd(port), + nodeSelector: map[string]string{nodeHostnameKey: workerTwoNodeName}, + hostNetwork: true, + }, + Label("STORY", "SDN-5345"), + ), ) }) diff --git a/test/e2e/multihoming_utils.go b/test/e2e/multihoming_utils.go index db55689986..636ea78eba 100644 --- a/test/e2e/multihoming_utils.go +++ b/test/e2e/multihoming_utils.go @@ -161,6 +161,7 @@ type podConfiguration struct { isPrivileged bool labels map[string]string requiresExtraNamespace bool + hostNetwork bool needsIPRequestFromHostSubnet bool } @@ -171,6 +172,7 @@ func 
generatePodSpec(config podConfiguration) *v1.Pod { } podSpec.Spec.NodeSelector = config.nodeSelector podSpec.Labels = config.labels + podSpec.Spec.HostNetwork = config.hostNetwork if config.isPrivileged { podSpec.Spec.Containers[0].SecurityContext.Privileged = ptr.To(true) } else { @@ -253,17 +255,19 @@ func inRange(cidr string, ip string) error { return fmt.Errorf("ip [%s] is NOT in range %s", ip, cidr) } -func connectToServer(clientPodConfig podConfiguration, serverIP string, port uint16) error { - _, err := e2ekubectl.RunKubectl( - clientPodConfig.namespace, +func connectToServer(clientPodConfig podConfiguration, serverIP string, port uint16, args ...string) error { + target := net.JoinHostPort(serverIP, fmt.Sprintf("%d", port)) + baseArgs := []string{ "exec", clientPodConfig.name, "--", "curl", "--connect-timeout", "2", - net.JoinHostPort(serverIP, fmt.Sprintf("%d", port)), - ) + } + baseArgs = append(baseArgs, args...) + + _, err := e2ekubectl.RunKubectl(clientPodConfig.namespace, append(baseArgs, target)...) return err } @@ -308,16 +312,19 @@ func getSecondaryInterfaceMTU(clientPodConfig podConfiguration) (int, error) { return mtu, nil } -func pingServer(clientPodConfig podConfiguration, serverIP string) error { - _, err := e2ekubectl.RunKubectl( - clientPodConfig.namespace, +func pingServer(clientPodConfig podConfiguration, serverIP string, args ...string) error { + baseArgs := []string{ "exec", clientPodConfig.name, "--", "ping", "-c", "1", // send one ICMP echo request "-W", "2", // timeout after 2 seconds if no response - serverIP) + } + baseArgs = append(baseArgs, args...) + + _, err := e2ekubectl.RunKubectl(clientPodConfig.namespace, append(baseArgs, serverIP)...) 
+ return err } @@ -381,6 +388,18 @@ func podIPForAttachment(k8sClient clientset.Interface, podNamespace string, podN return ips[ipIndex], nil } +func podIPsFromStatus(k8sClient clientset.Interface, podNamespace string, podName string) ([]string, error) { + pod, err := k8sClient.CoreV1().Pods(podNamespace).Get(context.Background(), podName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + podIPs := make([]string, 0, len(pod.Status.PodIPs)) + for _, podIP := range pod.Status.PodIPs { + podIPs = append(podIPs, podIP.IP) + } + return podIPs, nil +} + func allowedClient(podName string) string { return "allowed-" + podName } @@ -610,27 +629,27 @@ func allowedTCPPortsForPolicy(allowPorts ...int) []mnpapi.MultiNetworkPolicyPort return portAllowlist } -func reachServerPodFromClient(cs clientset.Interface, serverConfig podConfiguration, clientConfig podConfiguration, serverIP string, serverPort uint16) error { +func reachServerPodFromClient(cs clientset.Interface, serverConfig podConfiguration, clientConfig podConfiguration, serverIP string, serverPort uint16, args ...string) error { updatedPod, err := cs.CoreV1().Pods(serverConfig.namespace).Get(context.Background(), serverConfig.name, metav1.GetOptions{}) if err != nil { return err } if updatedPod.Status.Phase == v1.PodRunning { - return connectToServer(clientConfig, serverIP, serverPort) + return connectToServer(clientConfig, serverIP, serverPort, args...) } return fmt.Errorf("pod not running. 
/me is sad") } -func pingServerPodFromClient(cs clientset.Interface, serverConfig podConfiguration, clientConfig podConfiguration, serverIP string) error { +func pingServerPodFromClient(cs clientset.Interface, serverConfig podConfiguration, clientConfig podConfiguration, serverIP string, args ...string) error { updatedPod, err := cs.CoreV1().Pods(serverConfig.namespace).Get(context.Background(), serverConfig.name, metav1.GetOptions{}) if err != nil { return err } if updatedPod.Status.Phase == v1.PodRunning { - return pingServer(clientConfig, serverIP) + return pingServer(clientConfig, serverIP, args...) } return fmt.Errorf("pod not running. /me is sad") From 84ed994926697b1b0e7b014fa3495bc7da156bf9 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Mon, 30 Jun 2025 17:58:02 +0200 Subject: [PATCH 075/181] [openflow manager] allow patch port ofport change for UDNs. With the high load, UDN can be re-created and node-nad-controller needs more time to update bridge config than zone-nad-controller, which re-creates the external switch and causes ofport change. Under high load node-nad-controller may miss delete+update NAD event, so it will lawfully think that the network hasn't changed, while zone-nad-controller can re-create the external switch, and that would require a network re-create on the node side. Consider assigned network ID to re-create network if the ID has changed. 
Signed-off-by: Nadia Pinaeva --- go-controller/pkg/node/openflow_manager.go | 4 ++-- go-controller/pkg/util/multi_network.go | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/go-controller/pkg/node/openflow_manager.go b/go-controller/pkg/node/openflow_manager.go index 5fa7d77865..96b55a52e1 100644 --- a/go-controller/pkg/node/openflow_manager.go +++ b/go-controller/pkg/node/openflow_manager.go @@ -275,12 +275,12 @@ func checkPorts(netConfigs []*bridgeUDNConfiguration, physIntf, ofPortPhys strin } if netConfig.ofPortPatch != curOfportPatch { - if netConfig.isDefaultNetwork() || curOfportPatch != "" { + if netConfig.isDefaultNetwork() { klog.Errorf("Fatal error: patch port %s ofport changed from %s to %s", netConfig.patchPort, netConfig.ofPortPatch, curOfportPatch) os.Exit(1) } else { - klog.Warningf("Patch port %s removed for existing network", netConfig.patchPort) + klog.Warningf("UDN patch port %s changed for existing network from %v to %v. Expecting bridge config update.", netConfig.patchPort, netConfig.ofPortPatch, curOfportPatch) } } } diff --git a/go-controller/pkg/util/multi_network.go b/go-controller/pkg/util/multi_network.go index 2cf3d906f6..b4a5bd4b98 100644 --- a/go-controller/pkg/util/multi_network.go +++ b/go-controller/pkg/util/multi_network.go @@ -822,6 +822,11 @@ func (nInfo *secondaryNetInfo) canReconcile(other NetInfo) bool { if nInfo == nil && other == nil { return true } + // if network ID has changed, it means the network was re-created, and all controllers + // should execute delete+create instead of update + if nInfo.GetNetworkID() != types.InvalidID && other.GetNetworkID() != types.InvalidID && nInfo.GetNetworkID() != other.GetNetworkID() { + return false + } if nInfo.netName != other.GetNetworkName() { return false } From f07c055194d744593c83ae4ead16fd0720de9b15 Mon Sep 17 00:00:00 2001 From: Dave Tucker Date: Wed, 9 Jul 2025 12:26:34 +0100 Subject: [PATCH 076/181] chore: Update artwork Updates artwork based on 
cncf/artwork#574. Signed-off-by: Dave Tucker --- docs/images/ovn-inside-k8s-stacked.png | Bin 41022 -> 41507 bytes docs/images/ovn-inside-k8s.png | Bin 33763 -> 39743 bytes 2 files changed, 0 insertions(+), 0 deletions(-) diff --git a/docs/images/ovn-inside-k8s-stacked.png b/docs/images/ovn-inside-k8s-stacked.png index ea2c8937cb05525a7bdf03b750aa74b2be818970..2fc5b0770f1d66099bbf88b6163de2e3d37408ac 100644 GIT binary patch literal 41507 zcmcF~^;=a@yX^u5qy(j;RZ2j*B^42A>F$(GBb_QBA)s`lq{ODXB}7_k(_PZtu=kyO z_ndpr{R8d~3eN+3G3Q)wj4|FZL*FXN5a3ecLJ&kC`{tD@1fhd((V@Fo;LEYc=p_W9 zeRwA&_0~mI=H(+81yVeq(dW&@Q~=$J-RGU7-7{NSYqq_3jGW&_GdGLH*+pZ zXX$vSZ*}`0x_{|_zVKNfIq5o^l5MTy2&oEz-4mk3AGyAB@` za4Oab%D^rS#X&q}oWY`1{&4re=`nQ%CLgA4#OlX3_jp?NgYeGo2mX3RpFfJ1*5I;r zvqox`B@gqq9XfmQY-m*Z&}}gmd${>2A?%aV9^N@vmNrS2_3?OUs?hk^FPP#h4gVo_ z_(jDEgBEE?S>yn&tm;Zm4Q`>T7zga-aqSnrElm{abnML+{Dgvr#xmw%_dFq=;f>4$@xUGVT>WYO$pn^l>hRYSe|d_mEs}q zZ(6GBmEF$ENHQYdVtGx;_Xbs2huC4K(O(skotN zZui?YqRR$93Dw<75>UDpNy)9EbvT=U4gQQHHQY54acK)b@$p{26uZ=g`IaMemB?|x z3cCO5dRHP6Y~WfG&#Stuz9B9};jYEA{u1D4PqM7xc$J88Yq;RV^EhU!D#WETylQ;6 zC68!n-8^3@gfFv-1Je)Vn#|hnXf@G--jQ=ZQMF)bH~jv~jyKSwSCoHsYcPw^ilH#I zi-t^<34N+xTbi*X^?#gl4r^{+@$Wy{|E^l_yL_HCaDovB3M~DGsONRWaOD2#aC)w{ zmDQ&`)0V7g!pdrfvYUADn0g51t!R>-S@~DGyBhVpdSG?`gD3vhVrN|P!gn+e>RK(9 zDi+3;7kA6F9zb_ZFlg9HuCN#MTkX~+`d{?t9qA^DHiYW*qQA-tjZoWk;dYaxm7z@> zEKO&@!O{(|3TTK`g(=b*+%1#2{rF?}iR1#)0$=MWi|fWE5h{IcJ*JMG5Bp@!|Ixi1 zaq`cEeal7M;-?M{Ue5jxJ#cEJSgJa2gJ8;w6G_aFKkK35qQo54&lvXn!>N{#EyxRW`o+) z#!InSYsI+9VwI2Sy}au@8lxVIsXF&K;5s&_ZdRz{a|C{cB=3%FXfrk%LH>7wlZ4&W z+nJA&JuSmBa<-%_@1>P~xbm#EZj6$;_eZ{S--u|MGBGvmSI{5J+f6>8DfDqCD_d!9 zwv40+ZM&*S@mQmmfC+x?r=H>y6REEQA9TWATjoIx@Q~MHco?w68S7>dnckkzw`{w( zU5+PisSXmnUH5|d9Q?*Qj?`{!iy{++-}h`!GbP|f9y?v{`#8HOv~)hhMxrUG%H?!D z2qB`7Lx&ze$gbkJ!+~9lamwwoog1)LHAt0o9GbBi+~>1krxo9RA#*O|`5w)%W}_zA zwNj$jS5;XNK@vo<+mqbm65?(jDmK!|W=1LVfw|{FXm`Wi_KR9ws*Q4t>_yEXKPO 
z$oGrA?zbOJ6+_4*4IWE=C-b;-o~LRPKKTi6@fFy18M*EjN5|Rx!E!y?$E#yUgDp$&lHPe^au{@e)2WX6bFWstNqieT9@b`15%d|+m=Dzxu`_8A&QQ5WF4Znx=zC;G z4cIA^a6G(|j*a{gt7~z)p$}j~+pl0^iP{J0G2C_fdrPj5(~+egHt=-+k`2_z{bI?& z6I%WL(#!2Vs;JnX0k>Bm@O+RwqLII%dl=?YXFT)Whw01NP*&E)7$$uytfK&rgUj3T zG0*Ku*=ALV2Wr>NW%bc|4}?L=$_Ko#cbmN@yG2~e2kRAb!p@~-mY??gNBZWCimMpO z7r$a0)}BA5yJ_(1qt(UOdr-(B)C+Rr4>-7xqP!~c4hbGqLxm4~~iY9TZrmdbmootxH)*v6-)`^wty_j^~Qc<$0mkj9-RY&9WA z9}G~cC5}EER6aRsO*y42*{fFOAg9&o7?L{lk8d&lhzSm?IWxBBi*xwPL`kqb(n^G9 zYiH};2M&-@ci$neEQ<8^){>7CCBQ2{$#OQq786*i*Izd zoEK_ZH6)Xg%*HRxR=tdFioOM}9VeeZu0E5v;W#o)`xz`%Ja6rp2GPsm8l<{vKW-5% z`nrTZa)wn65uy{a@G2D2#Gn97hAvwqHX<7csn_=#U@L~Bq$ zzEziiEzVJ&jNe?!n8MTOW@40E9-ZnRx`C(Mu6->x4e^34HaxC<>ie~%%n{EPK01k? zvIsWyIr4Jdua#%18UP_zn!Ir$7*raKsHZE-^07kCojcI7!{vSQ2mqjxj()6tXa} z*xN+o@??Z9PCZig|F?c5jfmgkm(N42`NJD3`-}9X8F2UfW;<3U6~E9Vb_E_tAMe`g z5-l@T55I3lToRZ{2Q~A;93K}&>&)AHJE=sNg^X=T3~!gU7?K$;kw{b|=ouA{D|FM| zOvmYuXpO+{k8!!$a)2o{s3Eep$!sRoyTeX z8SA`;tz`ZT``I>5-e28yjD*{4o$M>q`hh3P)} ziwPP;1>(|gK_y51lg@uXbV#>`NnpbWCpKBw%~dLvT;k+tYCVFl+I*aB``68($HJkr zT36#`nT_SMo?`6rOds+V(aNtCU1X}N)?Ti^C>Kl?&~nth_;rbNq|hM~VYsV|Y43Fc zI;Lf0Q{7sZbJK^nZi?5_u6qk>aT3)aEqIZJ-31W?MwkBq z-~SIbI{5^4T*eGqSg$p_0nHy8z*Z3XLQq=F+E<}=-6+_QrhC%NpVF4<)^OZ9Ik5Wi;L}kOS96H{RU+-(R{;qhL}FFL z`)Kt;Xk;IJe?tL3xeE$9?o)Gqp6>y5`hYMkfz;x&9p#s>8$-1 zc}Y@)f0mcBe2%wFr#@=lbG+wRjkvTk9mBLr;jbkU0a2K!1oOR9#*A$5r}ah*|EGgl zNn0jtDOPDn1X2k8NiEZ~+?YenSrd$qS91Bjz-#yNsskOY-78ts;O+e_7pG{%r5Sjl z1V6}GP~S}+z^l-e9tj*oak<+j3bX`SiQBx{Nk)!TLQbWY@ zST^9vV_g`bBbp<|-`T#hPbU2$70DjEW)kxb-@2La!^2(9_fOq&zqqa_ru?-3>dACJ zZFX~cy5+P1M7vK8cSUogaHRR$-B*_KDH-K6^e0=H zGQS`uRDyzoysy4RYFIgAF~o9|pK3KwI(jN@&2e{8TuJz6fN0c4%`5>xZ838BOaQeF zFro!;y@DB_>to)sg|&$jMHR!Oxj*s%w83JT6)O~wFp;S^M)%Fb&zpCdXX+=j*&YCp zZ56??(kF#?u$p3MP>SmTd1$j}Se-7kd4f9S4PYFeK88LTNy>jxOg5vV=R^Q=<^G86 zpy7Bj(jZ18UBJQhE6$k%_eNNUTaahi2qDW#YMFRXxB}sw%>aM2Gq#)p5P3qM8PLKE zhIQSUs$2X4`B{6u93v?A)Zul@(MnBEmeCQzm`1E#qgHjQGR(n$!^~ 
zM7I;851)L2voEvYuDg{N&=9A()lUhYcn4Zyu0)81S{!HK)%H_2Wb*WNu=t6ezwSBF z{V^QaJ^h8dV;A#fvX(e(MS?dORK23UQXS0H5X@aIv;-L32)c!MCWwV6N1)L{e{~5N z+=5YnjqrC{P`y;R{yWHgl&xP9*M|c!Ft_6_hOF%*DK?4}hPGp`@|rY88KxSU~Av~%6Qr_&UoQ{rQzBVk-U|Nz58su*KsKiuhoAJ2~WOM^#= zw2tz$e@6TK4sJyZ7L*%HF+lqOGDm~Eayb4EHZ#w60A3ctHrlyq<5h|yRaDP)h5%U! z-oaH~!DNds2RZe|a^ernmBa)%FTMwcc2N})dp}xLuKDjhW><(>0k(trKlx3+?b1cm zjVhbCqU*bqUQpOlMjtZYlDz;;E zk0eR>H2(4~r-I!|1chp`;K=)@gNtb`4CD9r6D1ttO(iLETMKWQ#DGZVS43y72KmeY zp(xf&VeFJSSePz2e=fKyx#`svaeydq7t#+FxGr&eblS)C(In}dSLLdldhQD5jMjRd zBG|2|9DKOKszgn}#ye9MxA8#|Zy(<=LDdEhS_nwx{k(B4z)H*2)X^TxvSvQ(pRjnBo2;&Idu=0QaL(EPvzwF>5=2a=8oB zO1CTfdw-t z@TM|YdA(|Xqmk_O@u~BH2c48x;%z(e<`M=vPLQ_iuMGexRi)4a_;UXbDH_xsZ5vU$ z?$cvm^-9|;;trCH;x4aAIMGnWZhdFTPQV3w7XY|?qGN2-r>msomwdDjORCaeJR?f9 zU0Y}qjmG$D`TsB@lTG)2(W7q}7F3{4#8k0Gq(N`0RcFcx9uG7$$Q{0*F1C^p1PzLN zx79~~Z4lQbG>*d0fQ2z5o9_vlWb%Y&@fkJf#NME~3S}mVHY8a|9IC0XD7z@-k<550 zu`qvm%4N~`d9HYUGAbKOLEzu8005)4CKoGfiQ;1a#rM03-%x@mv(()ZHSIdLhw2lB z-MF$Z_O!~Jn^Lk9OkL6=~G6OWZ7kQ~d*@Vr0k{TvNmqi!gT4 z(pBa7Q^abyl+d9RYj}8o5ym*Q0}3ms$N*S1p66$5V(B>bGGU+eE`KnJOPOTf^|wz` z?`^0^?xWMrh95ows=$HM?;k-RoRB{X?jUhQGh+(`y&cQkA3)l+)2b-`j??qrKI(>I z?w%G2yA`mQch>Qt5>&IdyN%?t+VHp9>$6#^maSr;(90GCbd&cU%K~U9a=y6<6vY_G zdo#>5cj1hA(6j6+S{%`P_*^bCfEIE3EUmZjCR^X@`L$!KnP_#{f8meYm`??s;qMX~ zs&;+ttxL}w#j(&x?^DUf53e`Iabzyj1+kQ4nap20!HWYu`(DMt7=P_d!BkvtJMmy` zGU^%>`=qz8nEcS1uSeN8FZzto$tZFee|o;tr7!WH>u_p6s@s1pu;JmPRq3<7a>o8uL4NiMm{q$?xx`<118YFLk%rLKTs@+UoaLHVAKgs^9jU zglUi00N8D9RK3%3sIMea`-l{7jP~&}*AUh7;ZG##ZTwOsAdUj0%rB9BL`- zDl*~UXUiFlu_SovCx;$DGNg;b^7!+m%#xFk%PB0QBtf=r&Q@*BWvC)1g_dUKy=|P- z6%Et1y5jM{&w}t$6L|(QB6aDBQvT}}l+pOZI7xmyf}KT+HWl&L$2>WGrC1zVa$+XG zjhFHmpf_mnld)Ee?g+FDP8*Y@&wgY@an4O3vO(4Rp~WoN^t@qXY)G+0$$lz1!P=tk zH<_ZeMIwM=eR?$JyPJ+rcR*$Bi5@gm5q2>7Bc36pX>!( z#ZZ{dB|ma+)@5GxqUqbx#NS*qdBw?z{r=={p&3WB&sD}hH9IxMz`DX3%sYi2i)J21 zi;{ib8XD@S8+>ZAfRwc(uwD?9%lA)uuLItSLQa)9jR*fwhJ$tF;ey`D^;s{I&U4p? 
z2_FF$`|@wc0zCUuz32W)&v;3hbcL~Wt4|9tSTJuxY%?Z|MULaImz?BVjYkFUcdT00Uc||@h5ud=*Xi(G1l-5Ce0BE4&>TXf1m1L zPICE=8N~1H5_-9%j)a{Z#SNx1{Ilx3b*Z+**wc?;&RA zye?JUWUVM+p#o=#4nKLgFyi4~{6n}WF3Q$IC20G^s@yF{_zPY1Ab8})Sa)p2UsGn? z_L;F4x7bB_t?I1~+b_x7t@*KbdaY_XhPxr?*s`6iE3(knY`fX(H8ip1i@M;or&^+o zah+S{hl8&=F*$w%x;`X@_i&~^CVEZ8^lS|tiirVu_n+>M_->L~gi-n`>2J`tUmBNI zk}5^cR7Yo5A$j-5J^XT;Sp+UPbvQF-6{43vmSG$U+rJXey7EvP;xu4b0Ib_Ed2Jnh z$&8jrbh>ms*e_ut#zprliLGZQ9gk&Sxfg#2UiAbqT^}=)@y>Qg|8#*YzGo$a8?^3g zQ*|^eTFo~N<$Yoad+&ilb_<&Uu(!s2!|+x1($(qcd<@=w*1^xnt|fOOH^y&)fdfT< zuM)>kBpV!@Igq0%@gGV|#(R*vIV_OCOUSko_zm-5E-^5IQV&SBq_FUt8?s5-KO|OB z>?`$Za!WU5CUW@>s!n2)lDjst6Nsu!)vS_?n&*EyQ8Q1hu16A8xr*`efF(A64@$`8=drSoY<7PK7~Xm@OlsbDSuVTDh{e^eptySjWZYN86oxH&&zDAV zlA+X-C{~0mZr*9ztCusvVavhmE~nabv!~ydy_G5sKNE2(I;`AG_7Ar{jX2p{Sqqx4 zdEDjky!G|>7*#pk*9?;A0e3(JL!aZjykwqpSUBij8!LXxiXbS?Zk3LZU>zHI0GJJs z=<2R(O`lgs-}vSg+J_vTUlBB3SP(CU_*gyomXc+ywU%jCsQ=NVKeOubOC}_HiFke@ z-tUX0i*HVH`T4m9l4ReYM=x}@5(0Q58mj?<2cq45eC9L8gIdxq{OUiJH@g4ACna$EFT*u$d^|A?G49=XQw=}rc{m=)D`eaZc)J_%8y zZ~pTF_EB_SQ_Mw6wyWQOh11z-V@NvH%kjq#i;Mc-PX(2VOilqDOHl%HP7iTdM6*wh zYJrSu3zs~|7YObuXq@wH{B@S&afV=7m3Uc_Hp}@Kx3s>Og363UgRNAEf1_^KHNUgV zCvB~AwkXb}uNVw<%GaaEnCzm*ZcQuyY_mvi@oaA-OJ*z8Jm39@HeVEcd37TKr=<9Q zqs)#m&73sV0-mz@_$Cl40f8dY1nBrz_z^qp&PSj(06ng*q^C7ixi-w~jP>vd&2VYfsDr0Jmkv@W9A6{c4#)ckblH9rMhQTE=T!93GO zHyuC)L6ypxj)<#@<$8C@X6A4@i^*7HN-4T(<2OeejU5;PM^XDpn)avUS-9t07Zy6i zcaH!4TT6oJx-4%nI&qqWZLhhqafla$2s*s$N<^EiFc_^D`rK7w--+Dm7sg6S$r6%i zwnb)~pEp=O`R%U62)&VvSHNBy(oI0~jc9cHy#z(Dhr%0bSlyg@{(TJeb90ZRjUF@_ zWNTktaG%}M)OR)I=A8Q*j8QKao$*HB^INz#C;R#-Ue8mO_xlOK6Dz0=2fd`Zw#$_v z%t}_JZ3nI8sKbZ@^R{2plog>uLRV^Olv!MfBHUIMOG%|bUh7yEKy{z|`*Cr-s&9pb z2If_sJGs?hY{3cBeEw>aMvO0G+`)cpVk|(jlY_Qp>bje*HKtI==8a#Q1ZqdF9jA}h zSi7y|seHE`k6%_qv*X-mN5x(*@t5QXK##@!pBIjDdoO|Mj=xyR7k`7vut<1LViear zN)va?XwbGVHENOJLxs@6?;Ys&dN%--khZTp$!{pD2Ua##Ev{;E6ty#fq# z2ZJdoQKxmRCHHQ`MBlzrP8yv0%2`NOQ~F ztF|0_HhD?!WSdis?!|aET^@c1I^b^Xi0wm4bi`dplO*8nroR3CmDW(+(Oy}sof 
ziAs}z1N6r)vlYObZ{TEE&*hbOOnGlE7#I7oH;R2}rX7x#E}=IOJv!KR!NoWYIRUON z9xSe}+kVX-x`)D0xTu~ix3}=5?UZWau+knpCO{Xp?~KSxa2#Y@C0Xt5(#%d!BV=7U zDAZ&0S?R}>Lvt?~`fo45uhlM6r|u!dD)Ra*jconk%Tz+O2T;cg%8ahAzn@_zxm*PO z-L8xIurG%fZ2=bDuMKOKKJ@lp%r1}lG(D?(O>4OY+L;IkYnB%C#uRp0d^MZ@-Lk{a zTwtW8UB}(`1KR`0crg2I<>vX;^>E#ZmZp@Ov+f+dEB({yE*j-C0U65Lme1r4*6ps! zh|-@=nl2rlVro$v^#vXv>{Aqx^`X`PTxhu7_xl<7@Ww|LBErUj@PS5zdCGk_WuAbl z@jlyPlCOwpvhT!b3ByFI7-qQpaCW>mjku?xJlRN167@cF{>u;P&#!u28^tQRcYD!H zV^DEj{P%Y?*HQIK306nytM55d>*sV=A zXUHNSni~);II$rw3Z08RGn13#cL>k{68}o2%;{u4lxgtpp3Pd(T`rhTfQ~!{;MfAG zk`@}nPg{36@>#y?G;(w-xk_MR0nrSj!IJ_p(hg$B%iUQd7um5}>q1hKt|pf;V882| zHCGOAG*H3yb9q_Dj#B}!ZdX04OFi3w9?eSPkKhz@ zaB%!@btUNy`!m><6WcCILwHdb;M7$;fPerKlk6K#<4zh=eoHx^CI_Znj}Uet_Gd?p)i`jkS&tf@Bp zbtc~UN$kf$rDtm&88#1?H!t%23{jawl?M<0U;ttYt@~Z|Sv`e*+J}^zgnNp0Vm$8W zkJ38Yc7<<@PSw}+#uV1>l{**vmnTz%!|~7$#llbx-}}lewCGh5>#~CgTkI?^KSD?b z7wPKWK>YUs095{$lKgfMx#L11A(U|-b*QEqhOqzC;GOthiV8gIl9`P@KUe)(i1^;! zEoM|yv)$SchAC*Tr-$w(U$u1>4J9LC>-T3m`ihZ=`a43ce_;Jym-8wS5M+Bm~#DVi5()=s?>78J(^g- zQo3o88)&Ww13bQ>Y-(!Iv!IHte{lnJoNGr|nEmQnhf?1J_l0uGd>ea=O^nj@w2pe0 zW-mTvW|h)Bz2dtL&&TdIUpkCS%@Ih{04r;Is<&3$s@3VNaF{m-%lgn6r3dgis5u}- zHs^lze@aLg;Rr=8Zde}F6FCgpnr_al#d4xGsOdlVlJTn1fn>krn@k21fqAsYd72l1 z#g4~ceIhB6Gs?zJ-B!9au2&mXbx9k5g6-eqr!4y;yn%zm>?eV%7;VN(s~R0&qWFb_ zpu6kWZ%qU@4UMgDjbGaocL*~;T)!P-Le+~PThOnZJL$*toqX=E)9hC83Ga?~kq%{< zanv-um3R3&<8Z!jXt>)nN|A&FB6d#=`|achFG8Y>fn7}tauYoiL`@=8UjM!!TCv_hrS2GLQj>i7Q>ghg zYZAkYqHoIUHIAJJ($o@-t}qwC-OJZHS$>HIb?*|SxV4(VbV zKN^b;$WiWSSRN$}Fwij~UE3Ys@;Gk70+lZSFY4IB-18q8+;uE#&LExHhwhL?fP|hN zV7Hk0fT(Jl(7S?gm!j8a#;8lpGSk+!tU&SN^^nj*{>udi;Rv7(^ zKz=+q5Q#R4!zpG3PjDoKInI|k<$=oG8=i4_VXv|8PyDGDvssk^&_gVC z@9AQlaM&)5^E(`y8G^nrX>(H9UgiD;gn>`irYV`3<=*)O>Y5W=rXkvN2dRc8|?Zh>gmd`S(xptnGuN6{=un|z$Aud1(Dl7c35 zDhZpVz=)A(ZRFoLVZ@Q)BhEWddR`NhGRSK2#@##piI=PM(H_;*{_h0ERKGWmg(=E4 zsK)leVThg@N)4ycz`w%Y(F7b-Az>=QSD)(GwFXT_q&Zc$4PiJham{VAs!x&dS(7sR{NTs39RPpfiq6a4qGDmj9N71c#twsv%8WS*kFQlFAacak{-5oUz!{ad*?m-_}t8rY1xvC>9 
zUlRs{39&VgO!M=})NM>s z+WQqVnKAoA8D$*ra{nBe8$wXoms9r^`=wD-;R^l5HZeZTK1IFPRaw)jZ-r2zdPu4FcrGzp67wfj1ZPOJW)H8t+VKzB6N;%Rjc zry@+@%A7f?NFVvR&j@W|Xf63}0@8(zQ&E1t9?LQr_((LO>=nZ^#k;jjnH~=I9N@qvNhnkm2WCb>Cw<-t{?+)mq5O zK*M$nHg!$bz}czpG~+#O1NY88)a8i->s|{vLWOz(?0$<~cYC>{)g)bR)W$>w>V9#{ zBSk8_Fds>@Q|@+T0nrBYOr6`)&5}9To*e%jz-`azBU!rGK6pWUc{V{CVMZSF8Nckp5f2MA2qxZcl}PyT9!5Ws$*W=N|2?nVDV=_ z$+MyM`tr13aVIIs^VMBb3**5SE4)HM;iI)0`obD0l@^lR4g`*Xi#j`1yVrY3Eiu35 z7n>&4i9^>(qQFK#2r5+~XaYE18;VFsMWHB8F53EhoRQ8>0c^B$$wNWDODeGrP7l5_ zd)rgp&MlIe3vkShl@x#-g|2`XmAIAOQHDOP2Qx_Tfpx!{JW>=GaLj(V{y_bAeo?qN zP_qfw`CN90$0$awJMnkERK4ckJBza%w_6UEX4gl)UBiq`QlI5iaT36yjBAtcelvNZ zD8RIJhN>B%jQ7tF_Jm4&=l*1*m&i_YSmqD^n3;lF6ZXn|#(vo!jo+y|N3v6B2-&D9 zuIa-v83-QqG#1_1l3HLO@7;Beo!9R~)*(D7fMKYJWPeY%I)n0zKrl z$&Ulv-1bVu0YVF+Tf=-u6R3w?kow|R`?4mvvY|nESV=|bvoO*?G^c}OK zfGw%a!{D@05ZFl~s-^kIFqeIF%FwH`5S_x*QxZ`?u$uN|X*lU_Bm+h0w^LB#srlUI zBT$(m8u{oR;`hnzkXqcsf&6+LyWiwxKogVo1zgIF^Q}e$>{VB)ZiVvG`o+SNZ_#AF zWh=p5JP&K&JI(lIpNzF#u6hRwm+Ay@R?xsHMA)KT$Nmt@bbMvy^DWh5)sv(}y!g*< zFVV~GU}mj@EwhR}k$Pi0u5{LnXzWs;99IwNQi(H_zxRDgKZwa}*5*OC!!B;m6c{p? zk4--8KEFM+W^X%G#54Es4SE2uPwr>=1^hVKv7JI;PXHvl9(6S7OYt(K=`AkO@ij~F zvxbT;Cx(VJM7dBt;aue$4C_~Y`S#`}O zU}i?L7u@CXbmI*l57MrF{Hj?CIJpIV&rTdKA7fT>C~^Gr1jVHpnE3!Ab=5F=Ufq+k z*KCjFe&*Qo@1-7bB_K|XCmO`ztS_JG&^4Hx?r#5>Z?;#W)-jgX__dQsId@qQ>Y#Z? 
zTmQakJAx}sZyi9V6ce$gRIW@Y?iH&XNM8V=K+*Zyxn;^|?(Oz|1pERZnH04dM@+pE z=_+l)?i%-Hk0eL&0feP+|Ni5N;u zZ00vGG2(}1HRGUlQ+O}uRvHTMw@-ZVoN)=Q(6y4ubUe7xI~Dr1{qLhwbM5I{Er3*` zv8$IH=B~2h$Kf9T!Zh(W>}QGB=`!+7dTM^#OwfsQMK!Sdpw{6y&YkUg1pH~N`>g5L zb}%n@0YAjef00?K`DyXASy2}Qm<%Qw@NKa87k;sd5~W|QHNKp^`ntz`Jg2B77a@DV znA21=i1t_4p@gtW>u5ZXTH>NX-*GH-K%UlX=?1GEfJi*G8=r>Zi7k4ST31#04*<}J zhOaGD3Hr%gby7teCihne%e)R9`fcSFNpGGK4bPmugfB+MfWOMlIZ7ljap-xA@}CLigx<~A4kWN6|v{ROycqSJzzeu4&Z;d31eeIr;$brJOB z$?IA9a>m=;1%?=8tshgtVl_DoU&F@-xX*h9IufWaZxDA0n>Fah2N^Q!ANS0bbj_2U zHVbaDdz$*Jq+ua6I%{&OL7ioupTD}I2(t#Th{~O35$q|F?ZkKZC;C+UbCDn9OV2aY z2&of6H@FMbydmh%23mmpFGd-WlKAt_F3AKE+`Ip-*zZdT#=5sC6^U)HOy)bVT}`%; zZKf;@LPvr!N#ZSxm|Dkbd;LnbIlul4vTGA(X7V6?(%t}noLN6^hz1xJ;ZY}#Hqhr| z6VU-l&hPFM6>V!DPZoL_P+0A+gtj*5Ond8bP}}ulsG=6(o?!M+#-PiTEGLPZ-)#B* z-VY>T9ltmK*vA~6TB`NW`MX`Qe15gP<2(2&?+eFwjj{@d3-AK7x!+Q!-n^R#w<8F^ zheOY%iFg{7lR{%ZhDSNijYxDP3o_a`#4HRGbgu?wdu7U6UaJFj>q`l^9wMIPe^t&f zKmLBjiETilZL$yogU~`jIkQJu<H7BjyiLOgJo9tx(zqOrUtdhZ#!M2(c zf3ccFrt)(?|4Ptn+6Ir@igCEF6xzaHHoKlKZ{)L??=c-mhXicyC?=S&pW(%sy6!*6x_E95&mK|%2qW$TK|2K!XvIL3iRio zHN#qABr(5rvx7T6EVY>#mln7vH*h=R-#kAOdODaT#1gdgUJFUIv_ZI2$p3ahBbA@& zBtxvhr-iW%;njWy`hPjNPDJo`x|JgTb|_KtfTzt zm+A9^e-o_CyGyUr^>61zyp@gq;GDmN963xB#!N?N_oA09#aGBLPYr6~BHwG`NMGc) zh~0L1KZ7OjDca%L_N31{zUVG2wL3Gajl1U`Ti7BwP3HyHGJ(&gz2=f8bK0^C1?&nz{w?Mp4|m;|?ZJU#QHBn*Ag3iiPWhis`@Aq^jA}zVqG zwgsV%M=t!S{_dnk9QUsG-v7dIKujaD^PbMf?QhC6g)IN*moDOdtJ$*#J&L>+Fn|x8 zI%L2mBmA^>{j+whtlYLJvt~|oX<8byqA9}rcN#s`J1wp+iiXAPv2Sk#<(0gnIXm-@@T$Ul(-&C~Qn0VgI9sw*uTnEVrZ`)S&1&!;21V$hT)xDcU0f`uUzF>W& zMdJ`KN{Oo?E%ji}8@#LJV=#&&R1Q=AF(G{jeQ{cAa0oznVPR>afqA%@{lvdNbBirk z(%kEl?>RgGgDCbc_bMr&0>XaAz_XmR&~9hY3Kf7vy5w3`U5aT)ep6q&QB#sakGqQzdcZ-UUCHrhDU68nANX9e!3;Gw zSB7dzRERI&jewN_gQBVmMJ}vi;)(}M+JDIkDB}k6sWrnaouosgw_cv7WF2p8=Ngd{ zekS-M|Fd^t7ffQ3M2x0=&wlO5`EtzF=dg?8RqMH}{to5$xi;8jQ`Kb`lsTB3-HGr^ zkkF8$N#BIY$>>l(*6=1tn$q!x1Hcynr-W^C_Lu_2Ee?4W+&u2Ne-?g5sEFc)m%r!#g 
zmfFUMbBkYKH1afk+pC#!>9IDafq<;&1OQ#92y5+d%(+~0jC(?jf7Rc13Mx5ai4uC$#zYm@vP58JROeG)L z&j&EC`y?gw(5oU#Woh$q21}0*$Gsf~<->WP;pNMgqTzRF68hczjSo!ubJwIg-$gIc z5k=m84pPoIn(9YICL7ZV=*^FGy3BYpz( zfn!^O4_}2p{F9mIuiy^V2sBSMABi_E==_HLR|Pi97&jgrSK#O#ZLZ|X$ZTz; z!U$r-`Oo@}!0o+f1&(vx3_R_?VWNG|d^4+ah|G3+5)RheK{(K9 zYbYD!#)$IeDG55!SW7PteGTqnZL{UEmQNSutdSLkU zQ=5dkIjE`w^XmTZX8Knpuu%v)f4mHLILUI*%njsrC-rE746# zwWDlg&++SzCR@b%gxhPfvcwDxolcy4B~{gVPhu2P&+PXmt?@M_-kU;>D<4j&Z{){D zB#gaA9`TL}N?+KT;ITCms%em$$QuMoBE}xdM+%bblCJ;FW%x`><05<;>;r#tvjN3dQ|Vh1>v#LI;$ZBPs+x?Cq=whoL-EkU_h!Q`ObCYoT@G4! z8;l;18(>-29v^?@dN5IbjhfqLk`YjyQUTU9K!D|CRTD!(UzvcLdM=uG9?tdl?kTgG zpiT#xd^OaAbxH-^po$UcgL@4npQB%D^;8RPm%jH6F@Cemjd=L9@DM6DQzQV_fdfpy zT)@$ei#;JehPxn2Oj&Vn(9JNfjHVwLR6X6bx>p>2lq6u*E~K;Oayp*&40{{tp=MQZ zUr&^(Cd&N?u^f8dPWYa_5;0&KS&F9v?ty?1G898G98oy)mYb7 z?Hr^^Xgprd0&yXrH|;*3F(G;{9aK={D>G#-&C*MX{BnUda1AM&`~_vmt3|@W(Fx6$ zf@4gB)y==2sc2T0tF@ z_(k{acX=r`8Jzfk|JvSI*}XUNj>ONC^HqzjN+EEN0k4ulcgtQ<1Z49R$mY}S4Pn;{ z|FPd(dz1U>OsBRz)K^m*xxi<2@lS6nhNeQ4@OuoxYhK+@zWz@b6!1(OH4o;Sh)x7k zl>jDRhECECyTyJD4< z7bWp`6ky+y?Zx#L`T}{G&rMar>0wH@)<*fJD&cz|C%^6@nD6C9zsI_+vjreK4VqA; zTg3j%NIY4Fl5okdl8@J%JtEf#eAaRwZ`tNrg-qz#2XfOceXA+QVH0&at5vk3!sy?U zA}&44HDLmmL@hBG;5{$DN@i6zzH?5Be{@V*sR-{3dR@1f3$3}BW+pljr=p>mJ zx*VwG53k|`g662?{^a8s@#%%@d_}@a&M@jkd&r26yfPcNa5>eOcNMB;0e*e8bo>rk2l&NxV z8C0RE(h&t1Tj+S@S{uDD07k0mEzRI+kI)tlciJTI_S<4UdWd+&J@?Q$dxQ!@7|;bQ zG|bnA|0?fDmWo5#Z|`?FNR9EBL;zMAOM>+1{WsR#pG~FfGUP$WkvA_p+zMs@AIa!S zs?4ak$Chfx$JI&w8PdzEF3Xz#K-1lkuf7$fs=^ZSq4n2owP*YL z3&41{Yr-PbFhWpzQh%$DUi*o-l$VF+YHoF^^Vc88RQErx&Qqn=DlAvVKtx)!ttnq*x^)(FNZK$ONHC1X@VM*pGT4^#9GF9vxNR1unufD z|BI)u4yY>FzP^BjNJ$D3BHi5xNQX4iB`MwAN=SD|D&3vZNOy;HcX!@zc<+0^zu}%a zXU@#tYp=EToV>y94Xp@{;^7)Iy#x`(ma3&XAn(+4t0@t_g>c(AAdYI{8J;}kVkJW7 z<2741%(Yobo)f&AdGV|J`_`gbNMCqwINVhp4u=}mWDTSN6J}zg?thdKQGl1K_C3yt}DRXPOv>?vc`ibOigUGfNZRe7@AM%nY-hq$j8M2ZEy5C;f<#!M_dG;e znvKX01j$qZ8Ek-F zGejBq_U3{@AS1N7X+hqF`&Dv6MtP5NG4TKHeO=s3iMFl90vVv3D?R}cB9}_5@14Mf 
zV87OAz9U>6Was1chM|a0oU(z_CqVEXzLSXm z^dyC(u8qd6oT70UK5bV53yfNLxo7Uh8`(I%uRD`D58{;*p@$d=htLn^mSco21J8Xv z1G8^;uB?6kdQj-q`2hFEJmnOthg+LJ-GDV|C>~3iE3$rLL(z9ad*9#PLLi*4Oib!V zbEINJ-smRV?Y)GQKS^qHqe|1{c_0fpD4aiW4FUdA>A5%X5HG>*BI?1v$JN3F>-s4# zqC0`z=P#@O)N->UG@XLXj~@h=@vsU6p)W%*PQ$XCm3MsRF0jAKhvRsLUcy7b*(VQ& zIhq1`voYU_`gKX)^4anZU9~V<8ye>m#Uif!faq(*21Wy%X5^(!^s7@>bo?Enka`vc zHq_(8=Df*~Yfi)+Wia-T$>mi_Z0qhk-`}fa^L)}N>XI56n)p)^bBPy_&SZA%w{EMR zh)htp1Rog+M4h(lnO(Ci`APDy#s46M8v{t{(M|PP%z;Q2LH% z4!SI^^}rC11F5JQ!L79gdOhoHUec75uiP+Uk+{X@E5XLZ%P*T>9-`p18Zds8{J z-Gt$lZ8y79|}&Ij0=7%Y1 z>d*N~#2*)1hlLYQPflGCPP3O!p2%O(%!ysA(q~$~4*rQx0-f}0)!$#;?lhV_dvZYD zEM9m`*IUJT(;Im00@^mx=HDdaUe9ROsYzQ&UvlIqbt6K0pP=&Uj}#1*_4TrVaCts3 z0AzuSy^Y!n7t=d#t97(NLqjLa%2>itl4noXg$OdwW0bJ)>!CWj z@1CGHQ%y!wy_v?+-S$Z4)Wybh?nIR;3h^C`?-0si7pQkM8s~21X;tyIPB$BE8wTei z9k;2`cJ@rZ$}<503%@*L{;M6@7r(+kNI)vCzGodz=aXQ`OiX;;+@$Sb&yr&C-m`(c zEiqIpsfJ!(nnQFz4O00I7L3?P0B7v@i~u2M>}UUg4PB!m=(CQEm!`L33*BlU7hoGe z>k7&~zDGYvb!!lRt9K=8tALa3F9GAE1<{#Vg@mS;XTGUfW8|kI3HUyUhCV5eJzuCum^-I6-KJ}H**jI zP!S>41`eYale&s~M(&Vz_fWn$gTKf^cI`(tqE<2vZ@{hl-FtACr-0&cu`-CM6RpX* z>qK|SY7sZ|>r3;k(g=9CWqR~VBd;CPa35p_0T@X6RG>#F(SJ>dl%TM)6|KteXh8LK zp2Qxleo>PnKQ$i-`v?1AMj-LofH}#)x$8nL|6LeH*eHrW(!j`uiOi$t9}^cmyMvn5 zDt-Ma%j6D|heQ&g?0ul!8zzl(7q`LhF;9_V(m zHGG#7*WW8BN4Z>b51S*d%gr><54Eo0Mql~y(T#ADNuJV&lwKhz?@f8e@4h~EXG?~E zG!dy4tOgE>;{6+ZwFl5+@y`if8dW;2jMfn(x9cS=wpMjS5R*q>k37ETI zsK9@I`_o}~34hqu-9&Lxd4%5Gpu_*yj~~7pgOxn;d&iMWQ%*le7lDb7O}TY@S0Zt@ z(Ay)oaj&Fq2~zGdINyhdqX?&!~Yt9X+ zv!-P;*XJ$MUVOUlBl#s$p=|NNAy^czfGZGT_K(&%H!TC?R8%nz^H*JhgR<2n`t$~D zRA*4JWBW4YCYznniRb1|*1T`z2-(T7ki7-qequWd9@PRrAnhDhA28w!HisFlfG%Up z@K{O}@*HF%|Gmp)gYKb`Quzdvt7iYhh3usBVU8n5Md3mmCk=FK1vu8AT4Y|3m{63u zl?OYwRA=?tsZ2?wIF%a6jz0E@dzff2esCe=>v*mCTiD`Iho<5w>`mTD?5vn<{bgcn zYu;wxBO=Vp&++7N>*)O*hu>_xIr5?u-!Dc)Yw@P+{pkU; zCUIC#N)%dZw&3XwdKbCq)Q8v?iOS6PN!sbH#mzu{Tj?cDE=-alS0sJ0;4Thx zI~U9y@U_R6L`XRL2QEZbuTl^~$4OZR8x_x3@1RQRQ6t}(p)j7HOQf^l=uQfE&B}RN 
zXS*@_iC7Yk{-NNt-Zx(B84d*X3mL3*?1JU!d`D{Av$loz!}q2d<9F~6O=I5$e`yM^ zzr*oW&&-32?v>~g#ITcT&KTfbXXuC?%e}ljm-@D1+cx>vp4(OT^yfsj=H;ungz zMUknsR=)-8rzT^(8q|VquXM?S`M*P0csLFdq4CB4Vm#4i!ij5`qr|&BjWTf%q^t3> z1xMuug25?i$IdVBoMa6pnYc{Dhe(;|8f0w>gus|wvP}AtHuyDT37Oi662}?s`=NU! zxMG-oPdXE>t7li9rw(L-CD-Mi%|Uvc6eF;fbG(tN@V}7WK#IdkM7N^9A`3|$KwrTS zy_%{ny}z=pvKe7VaFijF)$%eA9(oNcN02r|fpyj!_4JmMX3$QP;hQKK8cfhpO8f8o z!h}3uSc_VBjh6PmXQ;XfW!L2mlZjD^8iJ+gJALge$OVwjup1LGWZ1&rFh2qJ=#Fu`sL^F%BatZS@_MNK_hWue3{4 zJZbNc?db2(G5)zm?~H>``y711&ls=~B+*@)vFClmQAOTTd(nxT`d^a{OZV;|^C|Iu zI`Q7-Z*ler;3m0fCb?)QIT5H!&Z?^33%QkK2&FEFNEba=(p^t9NKRzZ|0aaN5{FU1 z&iODZKhRL2`-r#6`(}J#Xvw32hQ?M&Y3G+$OKc2m9{aBJH&%BA1n{EDHBo{)Yd4H} zdI#)BTP-#ouY`osl3Y~oG#dH_BKP?p>Ze7_K3n2?4@|r5XlEBNE9i+S@*d ztjck^*1_9ymoui~^PObI?McO*50=c2YpBuvKr;L1V9X;lok-`?2l^l^aie~E3(UWs zcgk7CWACCnlU_eg;ExHWg*SQ*-d9J|wG?nsWcUyBP<+Am75(QwZ3L5VqoiKNaObK* zy3%Cl*5oXUNEzNA6WdBotNU0PfgZ!}@<-GF4}dqjDVke;?wQn21r~K~mmArVZv=h) z6(HDRQiy1y_D5%^>G^3Q{=TnzwpsSSn$>~^cjADb3exv&i~;<65o5mno+RyyDS z>9dOQtC{y=@3Rc3HF0fqBu>&2Rdsii783Hy;4&w+2;>;isHIY@g#^MRw~V6lc`52h zBGtPJ6HlNg#P;T)6*1|rZ3Kw)spuL@AwTVE%6tNy1p z*g~G}=7Wi5Oj4@-J)^(x8b6gzXqqE~^H@!DtaTUTAKb>B=_ML0=Q-amUK4I73(nd% zU-2(xyK!oVas<%Eaa-mq^>vJH4D8-3Vnc=~v5(qZ*EU$2AwQI5JWVOmHoi}rr- z{6M%Cgb-FPap@mRPG8Zkkjm{H^dn1XgM5b%wJ(k~GS#T?ym)iQief{-4JexnOI{hB?N~lNeku9V zWuGSiFEo)*GesT(J1Vmutd&&`c`!b9#9uRg|Jeyf)$*bc>roX9?mDNk$M(T5C^KJ+ zE4X(US#bMw%t2b;tz zUivh&Q4}w$eQzl?Se6BO$qR)mggyU<=j2&cNwc>mM91l#^eFU`J}2eZ4P3chVkB7k zo|VPXSZIU5#1srRNl!iOuMybMp;585utP2q9I?KShVViw^21-;@yFzozYJ~k{>I1N zn0XC2xfBa!HC^oATpV^j2OOn0tLqZbaEalrF?!Ex`m^#e8L4C zJS_EjpNNeq$53!I?#x}eA@f006dkxi;39Y%+$h`M<>K5r_tn%Z+6TsKh^H+tV^U~# zCV0Zhf=vFEwv9?+I z!3uAUV921|7j~2;WS}cR^o?TwMJnMCZrjnGZ!`%HW%2CJY8DBBM$?~Q4z1=J4t3{8 z7}xE2cU*d_m3lbV0xK1enC?CJA17Jnw0UU+uR7|!ob4PW(L(x$3mJ5Cmv#DidN%Dr zV;^^n{7iK9YX0w^37^tQ9!J|glzoOVHHr-p>A+cV++HQ*^R5_^Qi)}|?#VE0bnP7-~9al&T zyv3lU!mX;!Z~%f}04DNRQ!|K*71vsSxE`+a340p?+gS8?P|A6=gaBJ^UzHf2(#-$h 
z%mEMr8@M%zW1B_Q%?T2G`4=jgKGt)VAk=l}Uj1Czsf2{>*et|C!^`&aOC!s$rofg> ze<%-BwqEXv%$FDkY__C3#C`k-&F-R`tA_w+DpucjA;+o$IGK#xD&QoFPxvY9zPIh~X5 z?r9yqV|Y`m2EqJ-|9b$|R!DE^b(bWyidSq$ll*>?hj!^vtvg8SMRbo8sKpC}bJZr4 zHa=Q5Qrf4T(Hi{Czi@3ZbFad=Knn8EEn1t%AZYla*?ye+IYVzXQMLlVjp8&br`D^< z?YQx-o;u18La)2nxi%%ca2vAH2D6*W{66+?kWu#}@9khucUtr!<8Ll4(*}$V^$tI7 z0}ps1q*Z0?9F*_|$oRyIzjyVw?l~17w%uM?@Ra-TieyQ4V}?*zBqu@Lh%y*#D(QP3 z69u^xKjZ!Is5c6NrJ^E^>+`|wT1k^sOfxv$qs(2X2rV$Q-_clG1JiB%M*rJHPFk$p zDN-70N8@sr;AQ1Z*~w;zc#HO!q@6BDthh0Ynuu7qV7g#Da{S#iHF3&G@#DrAoU=jqNCpX(FdN)lY=bw`PXimQ#?hca))o_~ka z+a{NHI7`r$_L$Gz8a*3lS9Pd?u?zn1byd$;BH zKUmV%ID7unIo#Geo;kVrrrC`$tl!Z~A9yVj*}uzy;XA*jMlk#|(@}9p#%gKv6x68* zS&|5Xwe1zaUx2#De{qQH$+JClz_Db!WZ71KoQEOK=m7iuwEg*o2&{fu*-PWq+pV9W_N)rrmI^CThW4DShI>$Y}$%R!}A|0iUZ9$WF3 zm1oQM6T(prQ^gX6ert?o2b@WQczvmsw|Wh!E`u17Y$Q*@J< zi~02|6|FXRp%T`#5_DrSqIsAGWPp`?V{VL=+uo8p`&p~$p>{|nWB7DL#cg_) z@af*ExgbY(y=KcIBLhjGszCPsq4P7uB)rDta+!7YU-aUXx30GOKA> zWn%O3-;7-zPt(RzFameK{orK?)LYsOL*)M^wf}U-B82x%%xbkikU|2>tU^pNF2m2O zx|V1AMjQ8G6vJt=K{iJKq-j*>Q@Ob8(A+?HPhLWe)fqLr8xH5e*)9Y27n%@Q8=v_$ z3as@q2=+D)kC`LxIWA>7&aX0fFJ7w7s9Hpwkdpa(sHL0XLRLoDs1kVr1#I`O zLY=23TAfUJ&ftQ1mT;TVBk9kKSERMcRAK;V)@{N$QbRmyTv)6{aiNdh{CQOX#nsOC zaDBgSE!)mKb>pU2w;SZAP2|Fzn!pfS%@ur2h1KS3Z5D^keVG6s-x-{#9YnF}V4^>@ zB-qxt{LkT%uR7+n!T$WpEB=rPx z6EZt}ikLEZ)6s7r573m2hGgYpgQb0XwBz9MD^kl0*!lp`xBb>H9H%5dT>ER^xx{jsmvY8+kM+=nB#a< z#Dq=q2oh*hM0eXFX<3+o{z?#kjD6p{%W5eHvUo&+ST#XEFpRK^(=D z=i$4);h&G!kuwG*#KWcktjTd0yd0volk`KVL(R#s3+i|V1(KTvT2|}8asY;4GJ`z5 zRHxzruFy>GXK+i_JXy@7*NBEgTFaAf8LwV5pdo61pb*^KbACT8XxYe*cs3&ubOFUc;vG z@6~|*6-#iP^bZbz`5j2KOc55!@={)sVU1)9)F-Cw=i;u!Nc$Rd6WI3;HtSlqrR(*N6 zy;DA*B{QK9U*e5DxtAhRMVqDD+DG9CZQ&mfW}?}gGNJ|Etf+J%tPu_B1a?1=}2LYP(NF<<^!eg`;`F&4mf~1*A`XY=tuH7LK=dDhG#;_mUIFCDYXFu%C zW|3a9dW~2jlhgP_s|CS~JRS46<15>WzO~M9e-#KMnT;)9(9QQ%aihS zXK-?5y_GmjBEK$i;@wQJkWN;|!^j@}Wo*5wLSqmHW5Nw=+)>t_a80J6t}%WR*JCv!Pzt z1OVe|Pz~`wsSQ^uTD^k_X<7&hPxLU3u%}J!r&IFZI1mVFry!0PK5}YK-r~+j{8R@| 
z#>f`3npNB_FU_9nzYe{QT5O}v$17_#v1j#$-tiPOT%mxwQ@l5|j|(u-4Sswx&6TDh zp|$_xVe*?Mc0uUVa2M8DxF#_PwlKd%E&nN{#=*)q%<`U<AMUo8t(22;2q5pUF{YzrI`H{GR%r(a0DNtwIA!A;c6+* zR1{OU_=*v!s8Xi_WdJPXo&PLCs=sE~M>dV!Z}%e!zqJ|SJ|o_YytnwPQN=cS5Y9Rl zL5|OTyJ9);H&V^|>TC=>SmScUGQ@sLRM+}Z&IgseenL+x#(Ww}*b_drtWF1TNG^l) z)!FQ&M7J~d94hKbco2OB7q0E_*x(fMVA*9xGi_Ey7criRhW&V2*y)z>+UNc4r5YXm z$p=ye!bB27Y&`j59F@N-tgE(7<9CE?hbQiRJrzC$9g-D4uA&m$?-P-|Unbjn(Rhug z?2TeO@fdh6*cQI=4bxZi(_f_7>}#}qbbo>aP&)E=^P+h25|w|FX)V8MtRu1z8y-Y& z&x(D$m#`ZxHhms)?5#FZ=`@W_z?GpZ8HD7vRCgd3a>y+OThYM&z((}~l+Md%*@l<0 z$mlJ`&8#+vln)f4%4jSb7uPJd0ex$;-?CLeu+*I-|EP6Y+w5)bM^A$C0=xb^o_64z z{CO9+IFsUk5yooO$tmKD6UF4vdmw!Jnc}XFhV^9NmAuxoTojGVs-Hn@UCNx`YxaRz z3@JL!7%Nr@1_HWqUQlLM+3N>t4PmQC#a8Quo2c4cvO?MR_XE)Pj<#y?e|xb%=l;!* z5OSvzSDd9g%Id6xFZaf?2FpH=Xe3PYD~~kNZE{Ii93aP|tGN?>_=Jd@Nx7SPM=N z=ky%Al^|yb)^Mz|=dGmAiHVCankTxFz{I$LIa@yilRe2s` z<=AGHjBU9f$b{{(d}Zp-kxz57@qWp*Mie<033RlriUg4Y@a>DTWQNE^Nq6{PKU=^2@;s?#;UN?h=7skz&-Tk{oins=HZss1 zJqQN~3_S-6!&4Z*|)HYvD!d@LE{FCzwfpuw7G7IH4AYGYeL!M*$p{l33 zxn1d#xL)U%8ncm^OWKwrAE&%ry?t`-4U^}D;S_Hmzf#-Tg>?=0wshk72wE#L!8ZuyR5bM>17B}`7PoX>oH8r8Y5MW z`-aIsKg>_PmORIVgS1mgKVr$5B0%m)pa@AVBrn(Np_#y0W!BI*I^%HV^ws=W^WSvo zHh`o*H5bKE3Fb#nLxR760Bh~j5aU2XYq)>(A;Tlc*Fug#|Ka0w(7m5|LF4LeNF3vb zL+6)AX*!aP{ENydu&Jc}HxyuTCB%VG@%Upf!CvoMaaGr)$WZ0fD(i!KX-Zyg)_@M8 zA`1-;4cWyZp9|^9JK1UEcplcq+p4muEt=f!1W|vrQe3XZ3J>bswZ!`wbg z?C{Ce{A0M4r&ICTp7J^VXM+O~z6?*bq_g9X}n52#R7KFl!%GDv1WgSc@!pkW{ z`1NYZIZG#6F86bva~3r}PhjYKG-QCX9gz9az<|R{h*oc>`MZSB-)CuMwb&;v$8mN! 
zPNcG7VYUT2pPvhjyFXI`dlxXdNH@eg1$k+pzPN%jrb*M9CtX=R|7WM~<8k2eO|x}n zA!y*nsp=NX7+@jY*3$30kLF*daGBIT=(Kn11aD4^W_5NQn&N6Ee0I27%qb3QuUc`; z-z|vbX%mR`0W(_Qa)kUxHkuY?e|>B0RfyeQomm+y_^^u-tDb$o6-@PL>Y^E}c`elt z$c!E)cuJ{>e=hgnkhQP1;Ws%r=Uze1Dhc_WKk{X5wU(>nz$@uutu*;0P%Lbqmw(BN zJSU;4DlRm-#ytOUmWEdE6xdAZ1isT4jPyGKpKDp@!g`A zXS=Mm$T;3Onk&#LYboY!yBtwdXXwu89a4#hR1IAkB4{ZOJWeOlGB6DgqHc1)SbNM7 z1nsfh|GZCWNc}c%ife!Ibd~(D3~lkd#@$D@l%@rB?ez#vrh?C&Gkg-Izd8AegVvN)>#U{8CmzT%-R4LT% zv3dR{TiUFt@!!b9$huy!*?2xZ*?mVVTc!{1$djJks5X}B`OQ7YLtIA zd6Q4S+XJEqEo6w@)F&A-xns5s!WK`_4>aD6{+RF*V{dD>zVzH)!$9M+PO+}QLO&ul z`q+nqo0j*e#9hwwv?eD9JvHpEYkXS?*xWoudWKpbW6_7VGJLEgtaBd&dr79=r+~~%&bFq2QxX?b!UCJOy}Q12a>Lt&%4b#@ zXie4qBOD{MRQPI5eYUTT_!@hE7!V5$okF!xI;GVuZ+>XWIB-%g-iv(ylV^8XP;iLt zI^t7!yQSul@XiV>*S^qD4iCFX{M3&l@9+@}rNGv5fXXa&^}WiN?D8u53Y5^N%DUP| zkEyvbqCCPFBEy0(#pP_rck)=dn?dyZxgznjDQit`1)>xH)Vr?b?m# ztIgz}WWvW9Ho5h|QkW`Svx}o6AH7;}e#w{~)nkZPos~V>;?3{Utl5>RcVMOyis&=P zmv%1CzHwnnJtI>H2E|}E4I)6_IkR@)T`AQFaI8Mn$Fu;ToCg}sTDX7Mn0%;tuvQ=w z!n0k!Am|pNRdeUF8mp=0qi3Qx^EEtnePP}Hx*YamH?`XVsy4VQAxgqXAd`Nt$57_= z^P&F>^&i|KvmBy3pDcToyw=Ibv-e<3W+YNoxCsR!Np|I=_2<$h0av6V8Bo63XJz*M z0{7zcpfJ3uXyA8}V)3X^yS~3ZK8)~K%}5S}oD0aNvFycweNRyzjGnmTg>jTKi(Lgqi+>iy7ZD$y}>!BIY8IPWK2EXMd!Hyv7i23f;r{ z6$-OBUTx#oIWi#K}syOs2gS^bN4mUTVZ);qtfQa-pvY6i~*P)TcPcJV@gO6ZyU0c%x z-;*Dl($-_OvDN_-1KTlE&FA(KSGN5f!eKU5XU}vDgJf`ZvT@j7ekOTsY0ptyp`OM{D)nrk;DvkE@j8h#s#={^UerwvnJy=YM<(x>N+J7D&EXeL=tVqX4+^Yc zlXm95`4L|&2`@w?`~%&u=(2Vn6p3&1@@kcTf&6^iN=MJ~_%Hf4V`gsYBkOe2MZp3A zEQe3`ZDic(V&UXxqyS&7Vd`v&V6e$$0x&X?>6OOGa3F92pdIR~L_!?)f19rDc>y5HmSAP+^9>>cdE;bC3E-^oLm*T7%5@ zHZ({(zncuiB1^)zaaZ{oyT5&o_KI-I^EAwW#oSiQtu|Z%M2Yfp+}eQ8q-lFdlu#o$ z4QeG7ap1;H#9aO<=rFN;u7?1*T?b{5LnAc~EK-Em!joCN@CmzS+ZeXsY+4D{ zfeB@W1iX=*#sZD$i+Jqxi+^X!W&37f4h8yr3aUga2Z%0tj@~}LK+IjR7ylh?8OG-) zC5*JIZiG<)5%t3`fU`w5pQibbg#vnzSq^We1S94Lu;XgWjFYjJoGb3*cxUeTdcDKUpux{4?*WXq1TT+}A30f;ZX0)e{UvJKnE z5g5HPQz>oEl3@iN3!*og8+Tm!`s!f73S`~(TRbR^i-ksuu!b{ww;&9&f!82L@JklB 
zeyrQBg%Cd+0v3C?zAg@xPkEkvRu~k+3tWvq7`ys@ey{^H???jI+1#0U!qZlTQ&*?@ znqz)Ik~rEY(~TwkgGB}gv${?J_3<`_b6Vx`xe5kgdwE6`;+R=hh=DIjYe8g+BbU8d#8@#DQcD{HqGPJv@ljkMGG0UzJ!0JuwUJRw(5GoG8qdqnLKOPDoZ!Mg8&8ii z$Y~G8HclOa<*oCQ-F5!Jg=1E(r>&mCD!{l3>;)$s#)sk5*TgFdq@7$W7<9(b$U-P! z7|-Wg59k5C6p@K}j<+Mz+=S*YXXa=&s474`Lo&lr0csfF6qr@rlHUm?`^)fVVPL4Y zc>CvOqYa1LUwuh)ydyByq^gXiow8vu$gst3xy&oy z>G0ovL5*yQO|Cfo&28C=@=PC6{H1hJyT7$d19BIC@zR>%A{rA;4U85o{OueXf}?Df z&kXyWi;SVr4ExpI3aip2#VSa^1JDnO!?oBW_Or>)E`fDPk)H}ASi-4*r2zn}6GcpM z+;7Ae?y}}PT{`Wyk6`}-Zpkxk?&T1&^HZ3vS3w0>Nm?g*?uZzI5@mC|`wJi0UE`8y z5QT;00>8_N$?@XyPC;>bo`2(83Rq{#ypcfN4R2_A7*RqWfb;}rcNDhu81HMUD7uiq zh#0_^ktL4cI5|<;BDFerFRd&fC8?EO`Jmj#z^n!HbLu|2&{=#Y2Kz8yGzH2L%W0Oh0VpLc9JQE5l-xCTz|{WC&T-<0$| zKCL{PVU!cGV}CF2+Pt#$#V{~M>5KszFe!_=Us4}bxD(a`)+Bk$RVjOS8_viRp%~17 zDj9y8m>8359Q+~r{;{2=S26907iYH?jRO|(6=_SPpL&c*%sl)I0upHp?C{KdsYONT z{0MzqtoB=M_-AG49IDaHh%e7j0RvQVp2u`0p4+!`-R`n8DTwCFi!l zBW1Zj@#y%??%~I$=b$sOoY$r(jC9-84FlfEYcWM>g*4d49kZDo#b+^F)h0fo{CBT3 zUaF4)u`*g;ttPe<=ERPo>rRQe?rmm-ul6OP@v${XUVO6G7nQH4c5Ph1EdWP4|JIUg7kBqAROs$ z)HZ6;PIMLw8BEv$h`MhkwJEIpzEz#86PC}T=}&Dp(RUwYnXhlOb4k+4(x@pEm6x+m z+ES)K@{u#yMfpAf6RD#x_~hjX&tJnVi^MWw`ICG#`frvPm}v}zOt@!7=&?*DPly2p zkV%1N@E1qqB`}zVp63==b%RAl5&>b{!JV5_+iw@YHR>VR70jJUz6WV9Q~|pyK$Cq( zq1md;qx9P~?(-$G^IqaEsK#7~nUZfgj97dggf_`hUXHche6Otc*7S?cC4uWfBFzpo zhOt+v{$av26YD4IKCkPd3FF~1@!68unCF+xnObUWCxQ=z=hossgji~fP4=$4en{#= ziAY2k?t6b~c*;!Pwy)i3lz?1*E!J?h9opH4IFJtsW|0%%eoJbV{lzSCdu3@ZpkC)s zYySTzFDdr-rWoMsqxA4wJ63AX)vJaW_a@Vum|-zFar77`IP&hD#lEf;-9W;BO5gs= zYgLbM#rYWKVysbB8OQ91$dPagA)UUrZC!G{zqWJOl?wc1L(?vmwTUc;NkkQyfSawL z?QBo{-Ge|W>IhS~$sELzN07?}v?KN~vO;NsjM$#u{Y2(^!bwZh_mx`eiCwu8Vj4Jc%~c=&o)NplDmphzMe>OSqHNKMyKUQ zgEqQkS7Bu%y(&b~VXro95_UZRBOa_T0}LL+*)Wryq@V8dH6t$eBbFjeCgW?Tw3abl zn&b#`bli`m>LPp4@xlQOf$cD2X_qCdg55(NB;*Nh)~hcO%( zII*(`alwbnJnF-M(1|uj^*t-dm=&0D85XJEjd*`yVv8*RYI73!!Fz>IJMcQVv34g$}{#Af&;=fjuG 
z3|>k=g#B!NyxhZcTDX<|ynz)P7b8r?q`Q>fLAcoenS)svse%^4K@8UK&D8GciLo6+yBD&!9$lOQoqfVTFK8CID{tydNi2pNLotlUmB(BUW@^mlLWR( zB(%=(T|%d5Hf+>85PH~b)0UipiD4w1DO-1A=;E(Ky;0_E5j1GIbZZyBCV7Nu(9Tw< z)e-F=<5_9i)i!!c5Us3enCz)5O1`lMCm5-DDcG8R@_xOP>J_@+6~*uV64J{1c-CD5I28`>{r4O<`3p%QM>Ixjo}4V1%p7kA1S4Z?YR21_z^4)@9&7)(T}wtopz0 zxc4T9OAAeHvUT$~t8x;wjc>Ch?X-E$EM(xd`-0cbe}c<<&i(LbZlJjJSZ?p;vJcVJ zA^v4wl=srgm#t+NtnzkOpz*motHgg}>;e#ndslrUJG)Iv>i2{sgY(4K3+jX7i>j)Q zvZuQ7k-I|yRjbQNvqbb()Rxxso=ukXuR0&uKf1YpWI%6CYf|+^soT4lX)UtOYlYZ0 z0S03UGq^?){@q7K>`l!T+~z~z1o5-TDEZ+hIvtx%>H7Ew&5=IL`tlgPZL(4N)+%L& zyw4yAqcH1>6kX0Q;00cwN5r@OD4zUof?*PB<5c~SA|e#4rc=V@j+4xfEUUX2*_r|^ zS;|t-+%=U5$N=74XH|q*jn%wb7X5q6P#{w;H8=PdU+e@n-cbf%*?iKaYXeY?)j0{& zd_@5mE=dfLIL8 z^uU-8Zt7GV4=;Sg5GiDa zVkA|t7r9bAn1&^t1$}8fHCYmb~G364Mr# zCV=}i`Bhralq5Fggy${jM~GTVLuv5Sq4B?&wR9stm7s-4eZyE|)NfgGmN?Al`zayT z>SB$v9X`Opu&zgIEc5Ae`fV>E6b)>qkl0rkFU^HgoE*_4Q2Kxlo{q7mTeTUj{#^xh zNNBXNHirY8+tcd@&Qavf_aKOsNHGj~1e+S?tumRi0d18*RuDjGpLQ)K+Z@qIEWcvb z#3}xl0g>|&gjC7mN*ItT!AQRM$GB{J@#~X3Uyy>KZ)1&oYQ92{h*LMK1^m_1HEM07 zX=2h}pVk7dlClBHZHY9!cj=BmR=&cA7f*eX2H1M=G&^n<06ubXU><54O@0J#a^H}n zEp$%oC0kOOy=`qYYYyqHZ7|`CQb51}f$TuwoWOG5Emg9f6DzJ!&a0yoWzmSI_fr4k zJt!)kUm>aR4|(5#i`xCEu`UpW|9R_GnM&Z{5rk@?^`l#Su;fzrlZ$Z$-bc^c1wvCH zUw~NVy9)o+G@k3_Q@ZQcu#cIHurl2^chhg9;%@Ig$UnYSKXL3!pb;erR-4>19O z6LrCUULA+tl2`;)S@(*yhpo1qxDcL5RWxi*HMHlk6cl+v9sw0Lvlemihw@`AI+pKM zeiZI+hdwYSid0o>Y%{om{@*3^R9+_~7DUB_Hnak+3 zNbmVml@-u}z?rh7(wlJNxG$P2AsS%Kt|hRcSnVIapyY@ojUg8U7mC0zV6$Z{B#Sfw zt}P$IydMWB9`t5__TSG<&Zyq;GJBHO7+gYt)Z2OzQ&3`Ovwlzg#4(=-)1%A8tbtSk z1TiT~;ImKy=7Q}u3ZS=w7hQ{gWsA&zKdU`qfyeGT=!6hUjtd<6vx;9z&rN}X5fXdO z;A2V%Yn`E}KN;dufw3k75a=v=0VEqq_(zam-bF2+YX-^|v1y62ek-F{DzP!mj6P5| zB(o$Cpn0?~6r5*)Z>;bSiNOR319=edH6L(pRBf`Vy0${C+$ngG46vN#KX#Q_z6?Fc+qc4YC9Z3t`_mhiy7QsFq>iVUYf3 zKhRML5XoHgf31PCXYKuXDIVFeHpO~rQ+N&>;e2#MBQRrNU51gZ>ltRbP~m}EmVMlj z&o%}_xCNW$c~($JJ2_X#g=Rw2$WyeY1z>U=CMp z+)p~U(LIJ7m1xYJl85MYwoD9HLCz^o3!(Xeu|~Jz*?b60^!1Ox3gp&_YIn2p=_*9g 
z>sy&>x5;QCIdQ0Wa5&jl=fIUXku@yb9ose{wpY8FmOEZN@<^vObF5Mak>GX!ONiCB z?mqtrEat;+@iG>+slxk1G?ptgFQqUeLHEOIq$2GPVBiju?Xlwa(I!SjWsZxuVIl2| zH5nk2LID%|PaCp>cFl91PZ;q}uoh0sJsEVc1rkxI-fmLuA+kAC(iH;;T0S!);iLYO5cXzWHE@c%7yr8mLv5x$q--%jKC7AjeRG zi2@-S4Ngq(Pql;n_FzX+=MMV*+qdV?e}@9o)`?p_eHjIXuK?1W<%T<&pLj5xTlH-; z!5MGp9lh#k5dP6+GC(fHpAGvC2GmU`C*@1dWOsSsJc{!)SU48V@UHPmIw?zocfT5tx|&dbi3$$hDxI*QUcFIShO9djXp0zXE); z1s3drT%rYiBWHGh*?#_q1juV!{)8hQGy42%K4Byh^Ub#Zt1#I{Xl&KuZ;{)e-j;v{ zWQWRp1I02k^qhC%e(|$$I=ud|wVAd{GRXC^Z8Ss2^`fxgGUEuOqBP8}M{87^^9tnr z#2U8vm2xQ2k{KTE{}N2x$C2Zm<4z~*zAKz0H?E~tV>sTIn>f6ffqb&`uA_)qjz;!1 zLifjz@#ArykH_Y1a8P|Mhm~Fkn&`L~@3V4Hi~qeRODA6~$VWW^mZ6EeSJdTg{#I0Z zZ~!%n23MrQIwUWf1#ifMJHZ5tGqml|eI6Bvy@2+}YI#a7(Va-GbXL7-@bZqQ@sf(k z$P+C5(r2?Z+39xO=stb?V`$)vR zlT$xmM-&OnK+YnkUxrY7EOO=)WoZW0Z+f_>xmZA^VE5g=8+g)c#$!=z}|D zcRw)3w|WW4HM1aNw(P_d>qDsa^}@Qcdeu}9)c^tLjR1&^mJm})>$w*JG(kDUi4Q9?Bx^woq}o^$sbatX?jJN zRd;B`q;_`qUZ)Z1neEUAfng3G4SX`H(mP0A?k=W&)gBo%bd2pN&Z_K*u$+#iu+xTG z$Z^CSY{RX%lKNk`#(xD_5>tmdL4nYY42?0q7K}u?+S#m*;h6ThG1VrhMRZwz~?xm%X?%(?T{Rh8ue&@V%-aGHk zn>#mV=DxX~JLT5A2(V%vJa<;2xwm)4CVumX4|g6_6sP!@MCv7`MRLe{>^H0Y2tH9k zX^SpnE^P~(LHW0_tWo4hzld`IaOPMZZEqaCSS0ll#iD8Jjy!{0;L?&^6TL1EgbQ!u z@%D-h69Y?a(%kr!s1CRZp#-4q=7)2^Zh^~3b#vy}^=+a)oONa7ODhqbn{E!c>hgO> z&y2$n5dI+JEIBPl|FdS$s3QKrj`|KjtGmDHh1@ApTzVG=00kd!7%eSTlTG7%)$mF? 
zDKjopu;@$*_ybOP!LIb`e~5yKHU`b77mWgNz?n6X&)@Gyyax-w`UGUJV_&YZrs+1_ zjI;s9I^L?r+UPUtr`qAP)s7TilnZEy>U44)Fuh;j`t18(3UNKd)E4sH)wS#=WVm1x z!}-c0Kg#j8UY}k!wdmhBAUN*GEnoFDCPXFt+licETaajrAc5jwSzW}p&>UMpQyyA z|BK!~XxHZ>RLqPFEUn~s0b2N9cQdFPdBZ$mDvn2H)^xVGosECTJ|1nYp1{!avUkXL ziVZGs3uO-FnHsGv8^hCUU!KKOtu9UmK-?dG_y~)Xl@~JW8n>0OjoSbA?R?VckCRzc z!QW|!apPpM*iVg1pt=iXQd4VIrbzr$y<=&xDlw@KD-MD!AYh!FL|`z@B%APVFA>U9 z)^iK`rjSu0Ee%ov|BNi+j{)xCA>2?p;8ep=d_H1MMhlTx@HtNBz>bnaczeL;&1@a8 zfWbXi-}a%;jL_8Fe+KdiPS=^oXjANKg=w{cMr5pfk!M}5D&U{-INcG`G9)8pnVfcmbG$RSw*`%N6MhJ7|cd zJ70c@Aa^|Cd~p_GVN3RIiv_bbK$wS)2dXxMa27BSvZ{gm5$H zy;Enc8iI^IbM_ol5LV*NtYGTCPZ}{SKQ=Ny{3G% z`=}$Z;zkruy5Dn(xj>#EOUJD4wA={>1RTYCT2kVB#CatFpOFfC9D8~e8Dt}6NhUw2 zC23vj19g0lm`G(F(8$s;$I5fci%Hh9&~2J*G3ZLcg}a=vhguTWy(oopEfT5krPw zg7;3%W11es`teLrHcu>P&%1tyV9b)59Gf6+%19o5yU&5;vCJ8TDHhdACVwRHUvG46D0&FUg(xU zPi&8aBU^N^HIy$xNjv# zNqAx$LhN#ZIsY9t^io%vWgSG)bG#U&fuj>_S_yJF1LuaiCwyUrfA`ONN zk0kbd;YnM%dSIrpNYAz8MiNMmkknmhL93_TcRt1( zS`O$}t^EOQYCr5gLTHh~?rP$@K4z*QUUW4+M45{}OsF&Q1{2ukN4xzo0ni_ro2lL_ z*zd_mU}QyEGT_lw&Uw$OIfqe86nyd`h45~k*^Spej0`KV)WWwGJ6H0TUPuNH${L;8 z^J@YxMi;x=2z*0{Y$)~H2R7vkJz&;lndG%AOJ8yhY5AW0y$t?Ts3ZT z;pWEEsc=%*h_I7(WRM)toiQWBUZ#wz{P^k>O=ZCbDFooVpg`MG?73;(_y`?i&n;ItL-o; z`RUO9{I#!s&```9x0Ylr&`k>UP69sw&sOf@^yQ>Jy;FPE=-0&_knQ%1y$Q-q--*?(#=5ve4Mf!jH==;hvhkGQX1ver35bP^$H>xY{ z#onW0wt`_TOa``qvfr*H+QB@X_2yiOwI5LNzXS#*yrshmqEO?DbcT6lZWgtTLcP9V zF%3XF^*Q$09aiYRSC0pZDl+5!$uP%G0eo1aF9M-)7Ke1#GH|OA1n76np$3&JYPxP_$Y+a1k@?6jWnA^(NZ6K>utkjC=F+W7IYG(XkMgfkuJ>W*xaOGWoX7P~DL4 z%xea?cgN*WE%VUqPXkrlcc}N3Wkg%TQyGnoAn=0-a1(7b_cH-ZDELa9+5z-EA zn#zSP3eW|qQ>qjTPIAI&vv2NnZO)t+dKeQX|Id*6nbRW`tM7FoH>K*%hp|7l>K{m} zh9P+^aKtf@;OFGfW-;XwW@5d#2(FJ~2QgbTEf+nd`WC-()=-vcox>3drpZp*e*L)1f&~RSJ ze#RB{QBAETFT%A>!aGus2c(VHKtCqKJpw2%@fs6c zla#PVCZihfLltlhe3thW;y_SHzQfr@X{?;!b*)c{Imsj53D2tU#&TU{=r-tO;pA-B z*eGvLYJ=3)M1BMIJNw*}(^RrUD1JXA8vDrQ@&r(y68j0(OnVdaiRh;I+Yq6#<|p&# zgCx=&ZSZU*Nn`Ki2%{%OmBfFi0#$A*2-r8l%oLq?TRZA{nHt|u!v@3T8 
z;QMxAQ~_gR51wUo8#2OiH1jOiBvD=Je$3bT#~iVrOOjLv;)-Ite4fr+UciQRm{8(L z*kt1o3Y1RY5Q?mfrz07){Niy+EGYyP@{Q5dUU=^$nMP@kW@RDUu=}V>?tNYa=wu|D z%h<=2|0bet`VRI?;IBZK9X^%_sKDTZf#<9gT&Y;qZt%b^`eRt$%_vbJ2)(#1MX<3e za0tPL1VbP)-5ScSUy>ywv;r^I@icg`b`w$_p^%Gk~rcS{Wc$l z#gI(Yu!O1G_%%PJ?b8YF6xOqtq{SYN<2&;r4)HS+G3kF?pL{rr2xxf{g`$x^0JAXy zS}lzohfS+LsUQ14kk^D=u(2n)ip?h+`#ve+qG5n(;eWy~*p&Do+sX$}>GP_Hp@P=) zJQUp%{tRUFeGT38!NWA)4_C6tYo;QVN5)Xb>DHXY!rBo_%LJvvgD&Ur^aZ{2C90(Z z19PLVa%^!gx4muL46kjZvxdV+%9P<&jCY%=F9>A1qKOM)d`jV^Rt;M>5~?%fl-fPC zp)AYK`?C_#d{rlO3HRkYe+y_veR}HfWlBHa68rQJZ zOiTHvWCsoN(X!8qMUcC0B4zB~vrvd+4E~AI;L>;(@ssmTushZWRv8`>H$5J(3 z6IQX=A%e!{y;#%%Mwi&;OA&3YD1@u}-S8hc_j3&qPmv?YwRQ5us{yU&4s&lkrA}Uq zYCjf2cq`sfqn1PH%~!c=6H zVasA>KS@7+8OI-zW{-fW87}-rcCQ#MJa%ldF_hJSW`&*^-GQxU`o8}m=4&2KF4e7 zM9|3oczJ;IW?OSv)k=~aVnUdt7Gl%oZvR?y0Cu-RowE|W#0ZP&{q9$(VH5Zxo;bh0 zDGK09tSy_*-NLJwzHd@YKMES7-3boA@K(}uAOMV}bR2*#=h?7_J{VK*C%OkJ%TmQy zjeG0zlDRg6`BRwrq~W#4v=SC*j~1Nk>6`jpEo&;NarP{^+pc7Nop4)>@7(ZOYaVYG zLb1M^uzAf+Da3lOxQo)Jj)pB^{~R((qSAMr$Q8^*Zpc1MK{NPCMT#$qi^X?HfPSxd zS6{oP8c^%{%<6FAJ+Q#V`IL3qbPr+rJ1;JmLA3e&O%$k-NyG`sTMCba415cNMjqxn z4Rzj{bmf=?70*|(_1TG)tqMK;vtzufK04;bPY8TeG(=O&y=h%L9(*eDIc~o(|Avh- z7(I$X*;lj0bcUkTt7_KNy(DckJzWp)LG$_gPb5+!JKlme=>%9Nn^BFs5sHS5sQEK} z&_$f&)4DJ9(QW;`C<258-U(*PrbO8D;4?9^09~Q#ek=L}WhsT2uH^XpYt@$GnjdA| z;jVbqTx1cP;kK>T*t zHYfuX)tjN?hwjNsl^n^{5~- zMddUmG_v2$Lr>K6Z11bpidT@1tkHET+$BBvOk@~SMqiM8ae1;vwW?kysM}M>NEZ+G z2N!_iem;a%cbaZh6I`o%aWTvj`#}YN-c7(Yii?2)Q^tBDqN*(8w%wD^{gR+#qx$l8 zra6eZvFrimGS#jtwF)OQhJr;Qo!;XGGQE_MhNTlqO(sMZ5BV&7hm4}gIh+fmLf>in zfH@)Drm%>a3_m!Rgz&KF7#OWih8UJrB^QJECK z(Jc|Ps8iP4JHWfuuZ%$GZL-2l6kIY0YvG40&#h&0CYnFzDO#9DsKyp;a>vSQokCQ;HMdvb7b_4TFJ zx;yLci!#_cmyu0| z?{IN$a6RczAYYi;yYC&AJ`j1Md=^7qv0Zg`5dv;mSbOtYOUqA&?rB!iPixvw7B3rT zm48Nj{k48YKSlkq((P{Bhr>b8det&>giLr-?P6;}ks7GmCKIj=~%Ri+1^?ue$ zEo1VYdw`X4ihf|(YiG~xd^aSvQNBc_CT2+wN%x#q0*~=L|7HI^zq#EJ;1jFU(@R@r zHNlN*hwB?`#zJ2_Si%?~xIEg;iVP6?)ih%)`bGLcw<^Ao=!68mpo;r~>L8&avV4pLGZF7PUs#zoJF)W1g-U`&-}@5|K~ 
zEdy9iZ+Bz+=Wd6*+nJiQCjYpVwSc!JCUXXk2AMjXG{8_2i`?PG3O4!?=yx!M)s-On?+4E`PoZs8G{wi^$ z{+P@~mVt*J^j)$sW1!@>Pg78vbd`20`2TVZ$EbM%0044rsEYA_91^G6kH+H6CFFk& Q;@$(a)eWGPsso8wp(;w!IGC?70RX^}mHDU!0Lb8v$iQ7vT^_bQ;d7eGIOvhNsrjK&kSK$#H+3nh&lAx*+}E$Lt!UM0g~{|)*{*%N@L96w>B|^*%^11KI1^4kBmax4 z9%jQ``(DxN0=m3yfLqL;YUDf`mhLRo9`;L-_1&QAxG2@1ina8C0?oUHmMQ!QQp^b+ zEM=yY45Eye8S}k1ui%)TZ~L4yt_{4z6R|Lt^?kAjOJ6be-`kf_Vk-m{8_pzOcf}YN z(Yb5t7%7#e^h4tBbl99b$K=EDeLEXCp0D(T#OdZHw-3B9N7;@|#gFIXZs79&X)y^?n?e5G(h_-7ckz z;7{PULN^{U*JoK&zljU<;53!?ZC&n~;RkJ-ylx`arXmKhdtLS?{Eg}FZJ!G|v^DD; z=~`rtOFwt$*IsOWL^>b1sji}G())}!0uH%+mz)Vrx%{hm zwE>tKQjW4XghvA6o}*S!yokVkT0!LpNPHno(AGDPYGQU3uIS~<&i?*CB=Eo9bTQ>z zs>>;=$xSAY?!s9}k_upgOGkmF;M!yU!PJ^yYHUodQgnF9s5TcWf_%sddDwWzNiP!k6@NCMM&(U6QZ#tzd2{5rbsp|bvRol6R2QJL!Hot0Y;CZE zj1GA5Mv^JVCCIsTsPpY5DSl}C^nhN{qXd?iBW_sNqf>h0h- z|K3kwuHc~O$7oJbxz_SOmK2`H@M8d9{`Os7oa{qwktA$4SHG{t!S5S|I|lZOP1WaW z3fqMv)I(l5os?0|C0dd;r6Y?1M`PTFLhHo@A3Eic0h)KYusvWw#%==6;ZiCtbF!p8 z^mO@D{ddTrt)fyww`|C!>n;=CT{Vxz9l~wJL2+D^Yu)n0bARx$+uz9Lm$b69>a>mq zcpHbW9nOjxJq+4EqVVv7TT|fb~q26TI$wQyd`volafu0?C0f zVb<|;@4`y>>7145(tW#h+;T=_pmqux?d_)XED0XEyL+nhz}MEMR{fORoNh6-=jgFd z7puzQJbMK0(Sk?xEd0MDTKRZ8DserU>?hxQkWO$VI-3b&@5bA9A8H|AAt5HXS;_Z83wo9EEJrI+fYE5m{kJBmdBVwClr{H~I4?@B(9EzI7?zr(Nb=cTC1kwyPJMwK4TwJzud=_-!qWvD9S7YbH(1VJR-knz%Zz zi4=t#Wt%c(a_#WJ{OSF$i$gY%hv}-jmzQ@Q5{j@+?+MwA?`Qs#%R~w+6x23%?sKM8 zB2_byVhKW9ku~|9YpvO>&-l^u=~18Au}%BFEOTjU`mqFW$V67i*xQ^ZLh7)Wx3HHl zul;OJV-?9nw`_lXv5uaBLOIH9AAB-}5u^JQlDKa@77o2u7=DJA`L-RmU_z^H?rC!=k^V!=-$f*f`0be+;S#oy6BMZ*>2_pZW~=aPU>F0LMuLTyhS0 zX&94e#{PEBuF;e>eV&1%aM-Eg`6640o+C^%RpO)lpn%i%=QPnGsC?wv=&wPiWK(EV zXgulT+>ca@)sOhV^D;8Ie5KCZn^FKXVxf@!y2YhweyYM(5=8BcQ$J-U<7 z*wGS_Z@aO<0~ZzK!(jU?Y%V9m6o*U@g?DX77rLU9etbMO zN91#hLl#-#{Eej$i*M7w6$;zul&yoN#198S&ww|ykeo;w-Zin|ou(=6uinlDiOS9L zO)gEIMjHKn5}5dpH=hYveSU@!k*QnNe1QAAk_gRp^h%sMlLiLc(`Z>EnIU@t3v z=Y9jOjY%#)F4XwBuN)Ht%z3 
zv#qJlk!DoK-?`o|ytuzhoe&PmGxL7cA~Myn+kxq{O%jm9gM@aN7f;S+V3aEreObRGU0rl#bbY#zW(yQXB{TyXy#hn^!3MD419{;^&<^&#Jgk{=`M2v=FhKI~nYr%2lJXI>v*H8aD|29T=(Wi+ zyMCc?ca;10T@A*!UKdK(G?=aakV^`*cZOkNKhGXx%^+9{TCTu+AIo--Cr7|9BnQe^AS83C|xH$x$%&M zI%+@TuT4!Y%Mu~~$2KCfnr+l-zQQJ#j~=wmt$3C?sYNrm6`f?nAEUO%|B3d|49o*V zizMP)pPP}ZfOd#h2_LCweT4|cfTFC|U1=zs=d^$Dd1`56c=Ah8l zIIzpS2D=Oc*ch`3b6`>jCK>ZVG$01}WL9JVA4Cn9SFWm1qLCpPbfdbCu} z8n~o|#C_X{i?g`q(Zv%+68>$iXQQo-Pb!HF#Nfl!bX&bUWAAXeeZXK378c716_8hN zu`%XcaF-?R!RzO{$Ve2{7>cqT&m)N?1;K$ch!Fx@y^y>eI!fiKj^7>s%(YJaQKR%f z_@-J-t}aMENFRD61;4r|2jcR0IS|`}dw)FK5A(YW1pCeQM0ys=BKEq^m)|6R=Zp$J zi(=hapv5}bAX~zD`Roy6q0oBnEr>eaaDZi9pD70u${a)%G~9iJn#p2gdJ1Dmu#eub zg+8UvR0YoD$J?(?A2|-bv5;2%C>P4`BetSaZT;Hg#dJo3`|M4p2i`)l^<30bV)=oo zkf0_Pk35Y$jr2{aFw|~&`eCh-LCaAQ>=u38*`BP*kH%!UqVNfk#0+=l_XtbEez zd?Ccg^JXHIY){=my0ZXlm}g0l<~+t8+Kw-m?*YQFLg**jE6i_1RDO$tVpd+W8ga?$ zW5v2vvkTDAldqC>XLYHwz1Cf+;C{1%vcYF{Zj~Vqa}4)_lTdZ2I!ui#bm;jYdvV$S z7T5^2bW%}aa*Bv^td9Iqkn*_sqrp{Yv6)mp2aHBsYF&98L?Fw zUsM{a3-!W2ShZY8dh_j)SHsdPM6>mwH9RHP;_?-->FZ>q7r82O6iQ$Whf2Y#>UgzE zu7Vv?Fy!i%zStM>=5<8Aozb984{Y3eAU9tc;W^)r?U+%OB=`UOi?d@fC@)A1l0%7{ zF~waxTT~r3NXG@VzsQ*&0dtKuV##)Bik(ftc5NK~@XeY zNPC7#ri#O5npAmdBIo;`e&s*%EHzN#pgr7*g?xT|F)?7!IL{lET=8{IU89pWEuZ4h zWMqzYT7~PWaIWwT(JU?~JU0DGPXOK-Qmw6_7btv=0d^J`!j~zxS0{d<)<#gxFFY4U z6)v()IT$B&jse7Xa$)^w!;D%bHKkzFK?s0lQqiyJ-P6A5hm^k)eYExMxJs`2^OY>K znCF+|Co7^ZT?0S6{{?Vf$);juanIq?-1bc;Q=tlAh58(?_Li7LzR19lFY79`f?KP^ zv+Zr4g{fl_>st6Bn(J%&fSg)tutPUyVM18h8+l(K8_{c7dVK{u zA|qH<4r0{$Q52}PTJy4Ed?kqn4OXm9HQ1PW`x7_|6&e~)HYK|S<9kFuZ**A?i=y!p z5pLL|34j<++UK*n9si5($cQ&u8awD<`YO>i0G*{wIsM<`_nL5-_*_b52t{v7l8N)Y zbsH2p`--)6sW`|1jj4f4S7vT>Lbj3PCvc@iq1Q^^$YlJ??V}F$NN-aLN?1+v z@yVW*kxgp$`d@vE^ph2NOW@#TXnW7{?ahXeSPywxPc4V!ImfF(3Z#2!2v3ww@@k-L z^6DT1k~kVTtT0r^!;!HAh+q4u8l`To4~_V9-{h<-6UF0ni-q^2VE_l&go6e%?So%g zun*G*EX=}4Oo}?s#8)UxlAn5yrsBA{KP!qX5FG?lL;0?GC`1vw=~MXnO(W-c57=ps z(Jk0xY;00~xGxPMNrwBCK-NEo_x2DVG-v#xhu`TTZZyrfM(&u=?GvI{$N90rhloLC&aZr~NK@%>pPSJq6 
zu;-(!sK`NxozTi6TU!3zvFH+iI-Ot;HGa40Gu{^(e0DSMNFavyvl`O>N6e0=O!7^# zAgrq&8)2z~bVS}!>dAJD{nUf>!5)` z*3mazr|Cn{0hbplK5yAvoL4z0pGK5+;f~T9pv)lXj$2XRtk(IBMK3;7f+^<|Isi=# zE#4*Nyz6;ol(c8D`WiBwI{agJs+UvQr@%h!vF5*Z0qIvRZU4T)eDH^_hEPkMbu;HI z0Z0G1?L)02h^k`%>LEx@7!(|l013vHk3b`)24EnU!+)m!Wn0Bx(f|D+mjwM)jhQ^*SBAT|yi{tq94P1Ex`?k)Ot8h)@-WHoJ00BYIWQ@Ab0S}^<#@LE7|&LYst3)EL;>(4LO!)N2p6<+=@o5 z5T%SG!aUfXG}sWlJEvCgwcrt_U`{TjEdJ2$zp(sbRZ?p5v#h)pPe+EFLAnw>@Ya%_QP1D_b zU~4%$JuwiYf$XL@Q<4}D_ex+-hM|AjMoM$_%pu=TGX+@<(*q{FEcA_aPPb3b0nOH| z)^!lbku;~We+6&%0piYC78#iI&-bBGpc6#QxewHvK3tPsJwDfqyIr}9jv!cMW}H-^ zXGF0~^M6}jx51B>hdsnc%b1Pig`f1tS}z_f-n4YU-+z}bO;!Z@>tdZd4*>>b8#fRm zOSTx7rsRX2;4?21zq*(XQx98z%_L^Zcxr0(D(0cJ-!Wg)1RA4-gZpxzR9?7V;^UP# zAXdVsL4x8`C+sa=jYb-5%MWy$KB$cW`uey#X>l-L#39H7BjgfXxODS- zx^AS2T#i(~5^jt^SKJ z1jlMlqjW~&#-e_9h>s&IPt;*8ldJJ*&LXB<|JBH}3YWxX$E{U?xZ{;^cAS2w`2Gxd zr79N>T)imFPd^^Vk7x(U6{DoB&F!J`2C4fLjF6TC)%PU|ufs?{nyJg$2%Of}&rMP9 zFZuB(`tg_Y)3EMQ`IR9&@5d`g6m9>^&|(1Ze!>LNy`JqP?TCDM*#+`Ejoga^Xpm{< z%irvwS?A_x#BW4UKZNnM9G@JeSt^mhMrQs|)>q2c+V0{Es>wAyU&yxBi?GHaYI&pV zif?bcADFE3GV@F2Ga~r#$w|KgRs2P)AH7FuZL1{M-!Jof*Md$qW$#45+0}Hcg=|)b zfQBSiLH)#*6-SCP<$tKKLo+7g987+r@yZ(p9>s$X$WV~W+5{W{E4tG&qd*uR*rjgY zQC%3#4kYe|VHq4ObEGP5>bfxv{Fd;3MKMsm`}ZMGUxcTWP=iluMu)=G%snYWvkb8W z{xje8h7hxf*RlaCnR+M99iu@+IW9?#^_(z4Dt$FrU@)pZA03hM7*EfrhU_iZ?jpuc ztp4Cy=$59v7Paa#i?Gu-#KCyI zIq-n8;YaAO0E|$OYh1LzWOC~TmNvTX-v==@Lv~lMQ$qt^xtAc7rI6hTa!1rFLTCC3 zb#wsxzsq(}_{+lOQhoL!FVliV5N2R}v-q-M?(GnPB(ea~d;`%&b>}-UGpVMFN=;p= z8%gow@!V*QQn%XRhPt}-MtmQ)Fd?y@Nc>Xcl~l2#ML%F|rIcrooB(u%AKO+oslN># zKH@O*^(BZhx>%1|wWR5UEDI%q*E(AbnAI>2A$6_sycacIX#auAqQTdX9YkPJx57po z@4U8h?-l3<88x;+O_Esp$e=3cj+6j=^$E-cJKC3?0^KUf>I(r`hsbVK^iAi3U*6w?=0C7FJm&9L(ui#9x#ie@xe6d(DGN3U z6vn-#X7HBraRs_fnu39hy)JE|OLgR8f1p`-!*Tx!FsNg`D#hV6dyh39cwAAX^dAzV zllAd7mb`$sjKO|h5=LCMk?KJY6fwAe3WK|lpvxq*ShRtkN|Dd z)d1nwFU&bs^Pk#ZH~(mBu}b!Jbg@+W81~{i?(22boHRl166zJKmCuUfbk_X<27nEY zT}T7aOP>-)jJF_jBqxaAo&woWMqE~t^Hx8>_`K;$VMe`j)=m)NT#pF15HiZCr=Z~( 
zXbp3OY;>KOX@5?+v@4&3VSuk%26Dl;qSQ34>{lA2ZRry?^CkwH6(>@dDGDo+)!byn zpdppkBWsv!)7aIoQKUgP@|}`|eVAe69b&G_HEGh)#2AEF%QFA{&#kJ+cXGDpHpxC1 z+?&MPxysR^*1gWdWmy67@oY^od3p>kIA)!rvNVbgFfv-?CXeH>twe1ncAwGG@+jWD z(>-(+1RqZYZ82!)bG%3nkQf-x4as#iv;IX{4rY2GIW3NfJdK~|#nDq=< zYO{{#KN`u24ej}=yjLKfAGI_vGRl1@;;J7_3ii@ywLKk^`V3p+yTKj_gbn-VmCfrX zqLvn~KMV2bASa^hb;`}F6{1^B(>Zogrng_Nte1I2QBij}@hOs@z2v|wW(%Z@n7JD7 zUB4x6L7#2;5NdSXIixbk&}ZQ2hg34{df0`hQRdckQiW)~+jJV3r6-?ohi6TECVaezj{@6D zECK4(Pj&BC1WIqWzO>!cZa{i#MeFsWeYRG` zx*-1(PkfMP1P<7Y7%KDe#C5fH3QskQno+*_@r@hVDPzO8!hFJPo z*l57w9HSMEi}Z_?>@>hLvK!3Dvpp7~Vuv`wH%2LFR=8{%#bTA~e~pGyaE><`y!KLf zWq0pAKkLWN;2UsbhEMMVM9Bd!7%_mgynT7Cc|r@bsKj@<&@jfQbs zZgsVtwHW4sx>dd|&OPGl9i5D>IBQ|D zt`Ju)AGh_V`oSkX< zy73*$*L-swlC(p!pB7%{nEZNlzc;qVWzuq(GE|V6#xDL24s)XQQi!(bPd8}mRS2=0 z>e*wF^Vf4ii6QF3&z-EDZZIT;{q0c?&5c`RXeb6MypC=$l}JAoc|{U+M?uyQN@WhY zevbvz`Zf`oH1WdF4KRX4>zfUVaB))qKQDm%RLJvW6<P3(M51_Vz7>q%x|d?@;1lZ?Pr|1QkS$gVA6dq zCQhB5)Z98^Em2ISiM^XX(D}6ayL!lI%4eJdGqXDQ-`cYhALz#@*5J;%!IL^3Dar**GShy%%ky1Q{d#_o)=@k% zt(N#9&)F#8!%G6C%IY(`FNEU(>;bK6VMF6^sTe)a5-oF=oiUvt0yI|FY^G`M0A~c(avuL z?5Y%05~dmM%P&V*TbN&eb&oF_n-)Sx+mvKQJMXn{QsxWB?ai@`wb9Q`2@+XobdlG0 z@T;DJZHi}=2$y|lR{C3ghVFH|_{|IR=HTB{{qy-fS|52IJZWUWkaOLdyT|0`Yg@5SXuGpJ4OPs(1(3rkbyWibT8;DMN&p8q2q z;ERg2o0)D(C^t7RYl1m?ix-gVNQGJr@!6Qn5v5)Ki|=~2!0x&FGZwDr?kx&B(Q#+BG~z#mo3`ZbJf z!7)adzj-irjZUkuD2n|MT}*weijLk?8U9B&K43a`?>U>o#c8_++oGU(&woKdH<78N zwZ|lQB0`%^^H}b=crQGp$5&F+!@xquqyON+>*)<`X;h2iym5co)8KBLGtmq>G1zNR zhu4q$G+mi;xPPKC*&fab85eH{JI2=d!#8M97n~hJH|H<39C2Jb#;d*!kimyUiDnh* z`$;!LJ^x%uC0_<-=AS*>|9$)UR^B-z4Ww8pcazdm;oS|cehUv$5@Scp;DGeBM-xy# z!5U{JwJUJt+9F0nxv$OU*DiCZwbx6D6K7rUxArUi6#PnqWbi+cCt``Vv}7&)mfyBb zx%_rh?DT58(K@aWPf#$Boe#;$Qxt?FDaABLWZFtSF8fA5?mT<9HdC2v?OiBbwVx@p zAlg74y770KS`-)OLt+uu$rQ0@?(mgyONyQO9F(t6VW?M<4#k4j~$QVEY#|U9pLsV@s z)sC=;?i!2?S92P^bZdL0-w~6hM*$TRYC^*-w7eH z8-cH&_WYO4Fu7{)7lQD2WRjVj+0c^1iaJnwwRugje~vqEC3&N6)kQ-bUnHr4^#W)DEE; zJ|&7%xX`JOM2#ZzJ=TH6Ajdx?0t2lzXZ=I=0wsX~3$#%*P2)9ZrPe=+Ju^J4=p9Ec 
zXP0h*t9XFc3{t3}|Kdq-S;?L=1~Gj>hE&V_O&XS0y0{dw}IP?D39+|Do)YD`2f1dXSe}_QOq1V z-l3AcIxI2Q*W0&x4}NmH+x2lm+EW`TpX-)bIelDvw2w5$P5&uzw$ZF7&4`Rm@1m#;06W_&?LGbt>+Y zxp!=GZJ?{e`?H^q`y%bg{#Dy>4zSmm~~YDQTy-w zGnjQsLV&u&E7{Dv4hFgWQmARM;nfJK5vBj~Lcg};3BG+OE+UZ$$_N``8-V;9AMw)LKyXduNx9GOBH3Vy(J&Noc!lAp;5-_ zW?Dnxb%4d~GY>7@M&4f3E6tOM9z{q@??MW{hk-2cmw5N=*}Q}?(oe$l$K^PX2jCOc zA7B}o9-B$K4U#WMl?X#$wqHSH{E@w>dK8HP>moRe*SVG_EFJZ6D27Cbe6wBoYA~K1SgWecjsEEumBYz+0 z+Js_?Oy|yHw+{18ek8eb;-T_SZ6v@}SJh~H(n2oVY(Nn{GiD@XhU0Zj{dM$g!Rt5o zkI8zJelRG_reh6!tN6TgDy(xD-c*ii}_6SldOGd+7UkQU^t^ zjj&C-F3%^Qyjjp9Hod2v6n-?#GJ9t1I&$iKlf`3Ks64Wn>D9zF-?JGt zp_04>z7G`kVNzu$mz~?OcY1M+Tua55U=JQ~h9w$Fr4`f*PS z$oq#+eK`+Mnr)5VsAdoWV*s?e@rtvFObgo6VU%?^l%#1KCIy1LXYJXf&rx7Q(D$?3 zfYXJPoSxRB6&O7<0xsHag91x_!+cfjg`T0q$M2e zeGeb|^FrlK^^|0--6!{c?XLM$a8(lMig)n0k-Gh+auBOPQv&M1=*4j&fZzTh&@&ei zopG1F{Xo8PDqWaw?bgkGv^2v*1m7TqQh?%FNx*Lg`+d}RMg(r>)bx?SPZECiP(@jJ zFrq&|HW@t7#0%5PrJDH$#+(+NrhdNyJ0*kJu<&17v1S5Sz=QFH+QPX33p_AAfOfao zx_PpcC)MVS2H)NzUq5FnE9W{!&0+vwA`MG5MoA&Ikl>Ab2*2egdSFI!OQfwh?gC+W zf3hjKl)hfkKuHYOV}6|iOji*q=-V03EM{*5STdaH?B%ynBG zia}-0jXwLI+lcQ1BA7=bv2(|F_^9YkZl4i zq;{2o{n5w{0u5jOTbS^-FPX#Ny7y(K&^@+s%&2SrFG$1<)gtP1iibq`d>m`}p@VqX zKb{Bpg*1ipe+G@i2P0hwT7-Z-NGu9M-No@POz`Plz{)b{Jm?)Wbc`^+IpTCR1{B^`>~YFSSwh5cNxq~ws~@|Tea^cx)H?Z3n!8i)RSH4Cr&g?hi+uGWxhS^zd`0?Iz_PlC{lS2>*3&N8{wvInC2Uztec+1O)3_r*rqqD4 zv2mgLt*83-UERai`o+h<+1caXczcp5p|Xnum5}sWC(Du@0P;fAJNSdgGNxAW|)muPAkPVai!T8@4&DS8lm- z?U8Q?logQ8&|mLpsmyC+YLXK4Jv*rxy5HTr)hcDasP%f7s;*?76nhi-w)UnDG>h6i zOmueKZJhRg;WgL2&NBoN%XR2v1CD}6LOKrH(rAL{-KNesJ2iQbZDzshJW(-K30Fa& zsAm*QxUH5AxKmh1g}=+{F7j7>yAwF{q^nxoumJ$;v4z{k2?Te~SF6i&~Zf|$3(!FVtb=GBzOPdRGwPVI$^7zW@L zfB{a6f*$67B@(b$NjKB}iM9Hn*kT0PiW@UO>i&@^jDMX?sV5INN-bO!)~R88DgFRF2HDDx|!68EGzx$)Aua)-coLE z_yY_6Xr^7IJ_{(L^6)5zDz8s-Gk9XOeik@1@q&vPJ5sm)kvnZ!bdV-bk}mZxn%_08 z>fYL+thqhz0RO(Y|7u} zqUy^?62Ez%XW`v(L$$JSlE~n16gMqBfom-?n!fp11@`atDr1`H>$d)uO^P($^764F>E?eMsVTuji3CR?BSW(O#vXCDLCn 
zDxy0N0x%^1*clisp$rMC_Lhv~_qJYMDZJl_;=Wng+&DRKfoh*oT5ikVnAynvOvlf; zbn(TdvB5#-y)yv7|7_G;67>dBy4p!q!X|G7FYEr$^OW<=}?T*`Q5A|IB}s+v_w1v)uhL zA8Ub?lBAT49p$t7LD;@MYIq?kP$8`XG z7%02_j|*B<&gT$4EAou{n4Jv2WB0StFlL_X-(b=C#+FFe?C+@k8r+b1r@K0O-=>m@ z^|grkd6)K|*N+U6H>{Qm<3U+?OLx6)I++Hd4K-%%w#%XQyG8g6hMLXOksi_}zt5S_mM)#6hLy?mT0v)&y*p$hiWtVZyLW9Re?F%{V7kz0lTdVL zkw^F(LOHGL&PghDHIl~^WbxbWMbDUhHyS{idjAvVgTh}^wSVa$r(4g=>cp{Ga#^d$ zZ^L8s_;amNs77_X_RhNg9r1#503;8Ghfn;;i)&$o#YKFr`&aP05Lu)EUFORN7g8ST zO!DAR@9?Q-Pkz|^GyNGgvRuRb@P8 zkUfec_|#qq!zlz5k*_~FL#@TQ)pe=52jxn*X<=T;VVUVX~k1-F& zB*nte5LU}{6ZQn?)#*2{W+bw30sbP*KnCd!^aw?vftwk)4tA`7HLa2Ar~b&MH8(<23GCRea>Ky9}O` zX$cpzYQvVF`g(mBtXX}3WZn-F_iI78wXSadI#!@P!l-;62AXlN#xZjy0(N%p+h&CT zB_40LV(q7+Z|)g;mmAWG3ujsqYW?htz7(RGLV@4i?;#6~*@(lU-ZoI+9dTE#TDp-a zx<4NLy2aw#l?5UyeNQBM(Eq3N79HhKQwj~(x&NDr@^;l{Grt5pp5QYNyT3ZfQ*_q+ z&)2FtNy!2Zwp0A!Uk>%D0-(ebl zXQv=~eX-(KJxWjzb9KsF8r1@#4+4o9`YnjJS^fgS7uww+a=6&;ky&(kFE_wQf{E z4uaX$^L?GZIjS0@V{a<1I)NLNS>ReuS5%THEo8er+jc zZMPsOq%iy`n;kWm%cL!=H^eu9lDjZcq6>ZbaLN>ACS+b8EesWLUCLV&G^9E$82Xh> zp<{L0;M_yI}9WcGraJ85b3m1S}=N zLGKCxOS`X4-1iMqDf|P;|RDO^OWh0PmM4l7_5s=&&gM z!xgV?g@7|DbtEa&<)Dkywpej9Irl&P$n@~KFu^7Px(OC0j}KOdl z`f%qah?O*+v?^)%2mkQU0T0AIN=8y@?@+IF=gF&}vLSKZ?v zJB)v67o90*&L(B6vE!u&ZI%nryYfst5lzBZ7qONG-en8QmDk$wvddk0%IRXAXY5A* zC+K+Um*rVFc4IN!RvbN?VqDf0^W7RVk#V#_)g71$S%ZitaCifx1OQWxAd2C+!G0(3 zb0k2Znh?REM?$X&Vr8rQ;;_<$sonCNm7l(wSztxHUUY(f|G!ATd~%15TLkFj?0H}z0;`_8U4H!VvIAK` z`vPy>@R23mVh``&Z_u#nvhcFa`5Ax`uQnVG^^ib@8oo1FZ~WK7xZDLAPZAJDLQ`yU zhaj}*J5{FPBEG!|$f-y)ZAL&$R3u2Yf3l=W8H@_bAMw#F3S&7Ip?N|HR?!DwCw__sEC`TTU}@eMFP6Yc0_rz94^|N zuGBN2)pTd(^bg@Xj};_wkB_HZ#QcshVJq2i^SivCpmMrg;&l1073MvK4m>n8^bN_z zv|};E2t$qgVNcMn4)ndK`Wk!wrOmpsjZ>16Oml_&|6Txkwp6dA81`wh%)0}J4{8sU z5YVMn1!n;5AgviwY!nRYR9_4?_g{j`az) zIM~!C_0x%EydQ@3#$+bI6JG}o?Udl(a6vIj7Z9?183&2<^k>V_NHwqSa=-`dwmWwb zu^)&J<%$)7N?KJ}ohRQ_l8d(QrKe7Ot=mZzj+L^G$ak4)`hAI$4JH8}K4=ySkfVWa z5(sjw&SE!z#Q$Y;%8f{;44~);U0jh>oP(gSWtn@hL@A47Qv2eMnpX 
zApwG7jr=VytUDqBlM}sdRJFBfZX8z8sSat+>O5DnWSqZp)Y&?poy*EmE}fUQnM0xA zk+&CDFqOH#f=Hv&zh?}X`&^Q# zv?^j3#^GCyK*j%uthWHF>ifcmFCZbPNOuVcm+np#r1=5T4Fb~AoeBcd-5|)N1f)~C zySux)>%N=+H{Z-V^UgTKj9kt+ckO*vJkPV%#)dFg*sGvzH1Nq(v~Ku6r^7!f5v|J3 zn%*hFy?dzWxt-~Kq42C93FU1mw90;etRS`*^>(jzG8pXJcvH8mNpuoDxi?Ng6*NHS zvllrkh@5KBA-R{qnY)27&Lc5HiHgL}3xS{@6?t36wVrZwGdfylfY$#?RJUn%4y=6I zKIA6R?8GTsOIbU=7=6}D&3a}luQ!=5FFr)epF&c}AltXYld6H{$NaYNzhub1YBCVY zDnY=@svOx2fiB&Bn~=2+S$}(1wMZ1S5q)b=yu%mJ6T#NRxq>4(mho{45(mVkq^U!s zXjtfE*Fa`^x*CRuTa$KQP?|o`2U!r{E2Lp-=SCV4T>@gH&#N+O)WER_)&d;TfAe|oAgcxfG&utD)=Fyi{p6ODdjyR4Fh1slM3gnpe_H5y)zAB71UnM3o z;UbBNZVqQ|QIWR>??m?_e4)p=rNTpCdf~-bO?en?Jh`(N3+K^Z_Y;tL_i-0fD@{iO}M0C zI^cQ>$h`6ErNRMxm|J4mwvezZGlL~qDzNQr1bK~??|KR)t$wlGp!mrw7XJlfe38qf z`{XNUn!WB9zHCPQ&7P)zKDrMvW z1~QEzG6o4!IJNuvRzI;wzVv6xAX8~Q5p>{|6k^7LFwP}8^Qq|0b|=pS@(~%*Oev-= zbMgk&7NNqu2;dv2^&!9Fy+`QNVyWdhzWQ zwvO}gSC5ss7Zr~j%{S%zolGt=RJdMK(}T~Oxu_`XRd#SfSO{X>QzuQ@nRLj8J@T^= zA-T+DCil0ZV&NSb79Z3g-^IaDfQJmp^t*9O7nRXK(Jx+$KrOq`-2&nOcX zuK!Y_<0&6u_ElJbH+?I+{`Wdq1~F4=elz)hD5OdlXP-UYPqN@@%~!fe8s*%iiY6T^ zzSM}2uKq0$4-oL&i=A#?K=7LgT7mVkGjZ=7>Jn5Q%j5;%&%Rq-$MAEHM=)qqygfrl zG|2)Z~E2Z6;CL6!M{=fY>P88%S*ls#a>t-JM5dfP$aDp`@79{y9WB5=cfQ zEd9eDvAT^0ub6uzxPekZ#$Ix0xsg_+^gHN4U}`uU`54QFn# z{_&qi)Q0nBUBiU84#7Cdf9B0>%P~Ot&_f!9lo@M*bUx}(v^E6?RB4p^)@w3epV9QI?!%j_ z`@&m@gFx1R2x&(N^12#RJzT;<@TE3|4Br?6D(uZ``)gjpEz#d^ggqPc0*r0vR%-?+ z3_69$0Psr$vZ}uqi8}n>+H~xguqZaafAdQPrr(3}oUUu$A?|-l?+8LVBOKwazA&dS z`(iSBmA;j*Y@VVIeY#Z+|YQlliQ}TwlKc< zpfkV6=IS0x`O_RPNM@`(jv!K|szNc`Hh0y39R&g~evq6Hjj$%Hc%5xeQXMo^g8uKHF^aYFLWByZd`eV^`Rq3PSXxn6{at3~Q5=;d4rQ1{JQ zw6tEQv|J9-=MXDi-)Z)+qTW~{NFl}OD&pBkD!@~ezG~7Z2l74TL^|V#Pxxri$9sF> zG4JEK1>CtfH=R^Iz6>1HEu65{RVVRHrsu~S(ebX){aSHHlS~VUXoHv#N*;{id?Edz zoHoB;X&VJ)ek#Z?_d`>@z&m+k^v?$x&AZla5^wtNx`KK^6ChOWCL?AO;-9>!|b!rmssK9vgPuoB&^UR`?Ua>s zpq8O#Em`fCn9`ZmG-UIs^rH#Y7xVROp8M&kiO&s zAUhc$&!(b^LvcjU9)f_KfJ8bY=BA;{;}ZJ!?jmi{LoOfoJYLDEnm0UU2U zoKOqRAdt=%-MO|2q)((}b|W*_o0rmCwh>-)rVfirGk{~G0@a9%C^D0NM76ttVJ=vK 
zTWF39m%MD>Skb63WVrS*gMSPTagtZc8qq*0Q+;$a%y(*%3wja1HOaRsrYo2suWH=Z z+{z+a{BqapM_mxvi*xf)lDKE)g8YTC3`n=TwLCGCaH^_6)xCk#AF60ko=FwYhDras zujsi!Mar>;h%xP2aAUm14kJ5DH0Q|K^t;Xh_>(iQIA1K8>`&(q<7Il#gy;A$qHgt~ zvM#MNxPXLX&v3xegq*;3elh+B3u|>fwDlK<^?z7hqbIL(K54P!3W`9wD=y$<1swZ^ z1D~H1;$!fkMYlj{C6kxe{}#r^VsJP=NWT%6tI4F_4E08N{UMaTCAFggrYdJJSZs(l zR$v*P=%l11E{25_l}g9%=3ZCdNWoqHs!h(t$;nCGWy&ohH(fWWu3AA=eZk|fmTrDF z;BkT0BkQ$bFKGob1Z#jyvpJG51e-Hj+0uNn>XOU8ca)zh@Yzlaa8UO$06cpiixn@dlS?2 znm4ry8Ipg58uZ88h=DW~q>caT;pflWU2zM4n%f=Ug{Y0oSFL`j|;O zgYH{zrctD1kSU?nkjMwPb@wU5CF!$`?Ty9qI74B1!;{X%Gg`n=#{_X8d5I~ zlRteQDmfXW5NXsKZ~o(U0ky>oGI+HE@+gX&@eslgV}586gwF)t-$Tna|A9 z)ySFVTKbtOQ4VPa2;F`5c310flI~Svg;Cva_AhNXZZMvA`I3{f5T%(_6SC71e$NQI zi=tiN5gUa<2tB*O<$V5druIOgCpCEB@kg^Y`XBNolQC>W%OgLsZhw|ta1m!~4ZbsT znAg*E{)wfcJcGqXR1JB72f1~tjWF2Ng3%8;aC+HO)nDSV9)B`U5ZMv>YJ=sb*^`%n zp|ExvgV7BZY?Oc-%D*XTAibzLRw0!0NtJ1%H2U)=2|opOR=U=bb%6@Fe@AbZL*mLNz9aiS|IPzp`>Bs1!@G5Rr1=bh4^S zi!W`$`cj@4=LlM<{i22Pn}-|Wyle@&)>Pif9s`9atF6(BJ%wn=v650uDi#v3mFyHZ z`&Yq;sIe&52HJ? 
zw_LJ*mVV0ij~qXzcOJ`joqJX+F0aUWpP>BO;JusgfHvYdkHt9mmvL^CeCy%70*qOX zD%R___!X&5?=(61`7dvEF9 zN5%c>zN`z~Cwu;CZ64ldvWit34VWJx7G&BR8=2DuVEFqRU@$PuZS#?v7zX$Co zZ*QJp2#%D~n2QLDOVI;6H|maU&pGYc(i47Pn&djU-Pc;Y`$euFQ&^f z^^@6sExnt9$uh?dV0F9er56*KbEvMv(LXyAKPy9Cbifl-k!SuhT)6Br|FQmcGOAWm zMI61A2X_;Ql(Q7h_u`2M-TWiBao1}SJwLGIQbf1C5Zz5zo6p-`GcvR(wq2f+_gF%)!~x*9|!fY()U+fbuKbEKH<-3 zgyrq#zQ5B!7fB{b!~WYie>`871~Ug^3=6aB3`nK;kqvZfxeMW-$;f0Bi{pk4JsD-M z7m;ahxV_BD`%z=5J!9{o!PQCn9o@ill$C339{Z!2v}wyoUQZ+{==E0{;fmz{8pyr$ z-GA7SR~^N7zFHWaZcO=69AG2cDu{emqw{rdrw;qLC1Nwq_az>dHSIo95TXHcGVB7e ziqrnEu56nbl1rXS9n%zM_27>hcLH9bvg~} z&Fs(2kzIv3sDA?rl(?zLX0>`kdy1mxl%eWq(_9U0bbD&xeUpv(su-!64$F1dPQZO_ z%TP4;rbW9|sFkG~#WJl5K?t?X$RxE%)HE#ca&vHpr$cj!;J6vxt!E zX*6S3b!usPdvoy!e}MPW$iF0h>C{LWbI80=^RK*Sv)xmu69tE=Bd-{f!zU@?rmsrv z1v=<}Eyv#0eolVtq)5%H(n-U}=lKN%vta{P!EoyKCa(p?EM7-)wzWfXf^0>piR&vO zsTewmwY839a$!qb{ZFUI$8>6jS+ezHvf9bBG1Ct??DoubY%|3OU#=U>xBOBC%}p11p3omE%7 zFNZ4p4S(;#i_5bnRj})yYCmOq=>0Fm_bimkA3+CQGNQXoJ2ciP0%0@FPH;nltLdD} z>la@XQ@JihHr?jOUl7-J)-L-?!sQ%wY2rd2ZG!Y9i-v2rm*tz2C({8PbDw#C;`)fK zjorM&mo{a6iEnBzt9H3K!btU7&G@v^zBZGFHESu0?(gvY#OnK2Mo2Qu4a|ZXis%e} zJF}}kH4b{Y8-goyynzk(=jnu3P%v@~`WrRp?)*6Py4eYzy^64*+qnZ;{##_NY8%H=VQ!~#ya>RZ{< zg{Z|04baxliDjkw!5BfxvA4kgzT!_*M*PjSc~V4ldtFRcikj8L${EOW07|uksS8zmhe(^44=cJfIRrc&|6Wa$YB? 
zSE+&v!3gTs&Q>HitNxTBM*CeH{<`Tl$Ingn;<;!lk?z=Xu8GK?M_%X((!WshsE)c* zNZ;h%$$1~|t=n>HR^DZEe`LnN>fZ%U*Q2}bF*^ajN)zw!T61awii9rU-lX0T{ji^3 z)U5iGr+@t?o=&vjk+D|E1=9xks6QWr-))lH!n3|63Xlzm6ChV{-rq1h64!f0kt}^Q z7|gzq7z}}^po_JdJ<5dY(>#;OXOJIDhP*ExeSe!1v!M#>eOElYZ}!MR@&0y(AX^s% z+>N1^o?O-G;DW1|i7GOiOy-{lqdTo19~Ss8?9@Rfewf0HF7|yMu7-EysozDt3L>J%{%yKeb|zh?V`tCpmtW2g z8ozz_5f}0^NDg5dD$td5w^Wb?MJco$K9d%`SHPo&H~L{vFt05f?d4eq9gQ9?@OiEu z*!3SXg-~N;V`-JTs^a|6t@gWWOlX(M?++Ii-pAP>G!5*08y2zFz=wviaz`6Be`-Vv zE)+Ef{}*7gXN<#fKRZ>~e`7sotwbG4YRZriOb@|7v817MVYM%pr8v90UShFXpGWPR zR<=x&6fl#fPJoxEX4}qokCj`j+T2r16QGxTwA);XHfJaH7q2lirByj( z&8gG7e5M>#9#KZOx3q;pVl_i<^=mQrWY))$$Yt*BU|mzJ;^vfg*=r1ouSX=R^N+pw zDhF}?)Pa(JAlJTOFc!j((Gm#PY@;Y!kNS=jBZ_^Vsdg^V!fYvnauB~hE){J~r|{Y@ zzBlnK{p)9dsPUEizKXYrilD6KBv~_x@I>Y{Z*4Q^_X7q=S2z;6%n?VB4Laf3|lut{ddj>i>wR%i5KpFU;6Z zHn@wVEs3e>Iu^3TyPr87cpY8*(OE&5^NV;w;Ny7yx&_( zPIM#b@?Y(?_w9$J{`Bv zU2tRa(E@+iAhJyZxkTW{75S;6l%{8aK`w%BEh3zj^#s+Z;!729Vrl1wLc(z znnHCuBT<5A#eirMsDj?lSMIp#=>{1Ar%U=1v&459U%-dEk^&rg z)z$vt*GwBl8Od;XbMqo$*=o26hjB(qiFzo1h@Q+LD%3?0Oi2UElFK+3hg| zyWzQp!y?*0nNhxn|L*Q(7O!C}=B@E-EnvrGpPb!bxi-JETYYCAS~q9UeVmdM`2NnL zcHh+w-KtN80<>@(q{R3<-$ttB;D2z?E&5=8KWB#LL6V# zz|q2dfcUBoF|YG(+#qVTqe*pR&)tEk8(soMhS}W3H{$y}W6g(eB>X3hWheinb)(HfW-x61WyD4Odnt!8C2c&YVj z3ck~^LSY|s*SSB^2?)!MUQcaw2)RhujhfvH9&HN=%xS#dD@VRyAIMCQ({c6`d4HEu z)&2(w3~AqY$P9^I*R*l+g_)Bts~CuK9&E9m*Q9vkz5J(om%-`FH)*!mubOXn__A( z41$&)p^2PHKh@DxvSU>^aWdBtrgJQnWId&b>%5yb7?a=_()rvWOL><_FeaVp|JMRA znv{KOS1QaFgH}E#e3{4)F5=u|B^mKLSM19oMMdqh($yOmF7jIc$glLs)u}e{_%+V* z{fjf{XCd~w{fRn+1MBdw404Z=Z_puNaNoyert*Ul;)J1M>3xmQv_}vB4GE;kUXXLp zd@H@9RrIG6N9)K(-I^xf-~jR50WLLrU?D5zzz9=h2 ziVJ{3(Ge&|XH1*uulb2)+QCy)oqx7$WS@ZvH$wXX>!x2>!9)^c+p2dyV#c?YWdm*< zi9&(rod1TkU#4cp$`E1p#wtUSlh8d)VTN|;f?8w$^&a;qUi9A#q!S}BgF4Ygjw#lD z@ZD(ihU&btB$c_1$I?nd(;t(Yz}Tc}Id=CCYFug^C_Go z-Tig|15tWp2xJdEoiOBiK7KquOzoUF^+1#ERb4yXG0%R_ET*X%Nhk94auHtal}$c1 zq%1Q~DU<$zY6BMnq9c?IolSM$8^pF$%DbUI^XpqTiU+Lt_UFe;;Y+zlbn?NAxT~Qf 
z>WAas9QfQ7@6vu!xYmcvqHKB7cevTW6j|Dh7ttN`&o1qIqoUCv_pB<*H01}m;XP}% zf5!sd<(nq{thi$r{!6Y3jKY4X+R@%(X7i^;~MKV^Qd+077%gOSW>S2 zVh)spoG@JFRI}OYJ4TF?XLDN1u<)N zzjn31L8+3iudf=Pxl4X`uZj2e=i;&+IZU3u-K2a^=D%D&dmJ3EjLBb1deYCqu3R6=~Z1=rqLTBgB@#V1w#n|^yB!!^dGF}3zb#-+l2uC>sP{9oHDC6R3&Q_4H+T^y})2=vAUD}xK-ZeyPmI3fsvlB z+BRF(KK*LW*f)VBc?>z@?mr>~vv8+(q$j9cgB~>xLbp??8U`M)Cj3W@sdh&FIf{SA zs5|5xp-_pD-#Y05jn?~Uq}kx8mYheVc~e-?9I{KQrqtD&EdgX69$Gb?QurKa45MOq zZi7HKk}dz#Jhc5sy?#knS(@qvp79wDEMAHd4en}(A^wA*eJU++Xju^Rt-BL1oZTDeA9p6Hpef-`cf?D9nqdSSs{y1q@lbK9*~J^bo=* zrOIpJq?kJHRo5;li1RP(+TeWjZJ#0c-2cabU*rfg-UnzYzk7|j#2@mCcLg$Y4<3Bp>l?!@+nMJeRr9o_c)Vr8j)kdo=i6iv>OMM-#@$R(T zP1e_eoXNPOv*@S{jJ>_tO+X>{u${xLV?mxu`}A5qC~4!<1TvA&ohgsn+rS6~p&hmL z%^OA;`fhgNyCqm`-JncTH1fxY&}0}HYJ*?!*6Z=*tdD)y&ZQ40C(ghj;roa}OW)Cm zrmGJ*4XIge;3meSw4u883~=y@)#!#Wc5Obeo)5p;d^}vcu#M>|WgIA~`Rqs@FvBBK zF6!AJGIz~cSd783btQREb$VZeJO5P%p(~M>yX!k+I+M$1sXtTMuA616*4W=An(vy~ z6=tt&!?J-RkRP)_wBZ`7Dq!h0ACuKwTefnn&d}^$@#(A34lTO&6i1p8QZdw|%5HVd;Pac61G#XvFV)2b^^7tsuTK#E^=KUY5>HHI`L!dcWkz=xmX z5Kp&?*PDC={}rF~m`zV*@qEZfE5=qDr$XY&8e;X4mzc08)xN44GdUQ>d-kD}+jU(0L*0|k{8ogR% zZX^E1lm*SSJ7{9~a!fl}O@JAQc@FYS80Uj|v$+gZ#n_u+#r5f3SA9u}nn+yqiYU6J zz0Khd9z(DOK!AQMN;kN#&AG@MZ;oRJUh-ySb{tN?jgM57rV<;F?&n24JuiNGd5rxN z%q=y;ectdC>iZdGvSy6Ki7@}^4n%Vg_W93b$D|0p#b4zR^>}y>HR;_=u?RvO4pp{* ze&sN*Xgi+IKAU#QZP%h|kJ%C}ln(M2x9%BoKIy)ZfVEa~jKvjQ%qQ3$pLJFT$2hbw zVK%6mSQ-(R%glHGN)mFjGg`m2?R&z`TDN-4i9!PV9iNiLZqA+IpN$J1<-i+r2Va@a z1u5@(oqrI=T2p4TeuwZ+kWRtMQwZ42&Q#*vp=xd|-c%Utaxcy)wH7yN?MGM<)puvb zRJC7>9G4Poq<>{m(lA18k)g<2@t`gyc4MS(ff2XkyPv11$N zgDO&ciG`f`QYL@SSAC#jpvO^j*E8MkqiTm;5-_GjeSrx}!B4=dF2U5z%q_>fCl0V|dT&?a4&C`!Lkw{?1+gA_L`o z${^&7F)UB&*%p5xwB*(4pRfsHtroZthpUrd~GlQzAw3?;glAbpO-NFQoKin&iS3sI`mXQN`u)fOKD?yqxAIJ_*i zAL)1%^>q5(K5jYlL8ohP;a}4KtvHmf-V9kxH*};kXGMGRq8`jX?kYirfA81kcRY_< ztYda*P~PcoHwHO$B1nZ;&VCkewgCVYa)j zr`he5Rkb>7J3%_>wSPX7G==$-68t!{8M56vvlO~_wov1-IyOImO@0;S$&&fWMQk2yxUntKQKfc{xcR>q_Whz-sFAsTPJ6GA}6RSM-Cl(s_8krKng%N(1 
z{gnaQ3ar0vr`mOu+LNCEm<9e|;V%c{b8SD>j#_h3021nw!zj_{Zi^WoGcl4gDhEmLj#Fk`|NN~ z3%H%OjlViTxA+QAxxC>GUEoat6vEgb{pVStfRR5;YpwnpA~vp#qfzAWnDFim%CnvX zhT*hF<-2bo(e0IPw6DS3dc<Q#WYt~Ppvmq zaWymQ(<)XreW`^HFlghhrpl=+%ROpaeABs>s->obFQ$#vIKBH6^=Ybi-S&kN^X1bxzS7y|eU`Csu)L4&IxH)Jp^l%Is`-Wp~5sAc>!U zNkJp%Ay`?lbri0Xa&y2(36E@)D!Ah_UD*wO%dyJGlNH0bYFrV*wDk?cL(sc#<)0&8 z$-6SwIoEJ|H#r`Jxk%RaTn44y(p_zJQ{PNc8@&zXzVVz>)Y_a8QnD$|?xn(TUEmrwu^GUNL=fMxmQ~Qz9K_~QQ8dZx!zdibt1vZ6A{WA5AyL!NN z?$5rp%M;>ZKG89E|K(E;JVGAoa!w+~vg~i`-$5^u0^abWC_&`F${>Xz%WYT*A4HN$ zzH%y7q1-l9h&f_5*?7CfFNn~6K)J5rxcQ zMN`=|U<@xZ|KKB0Z{daNaO=h(yPwFN-OG)Mva5wwGYn!<6U3$ez|d$f6Ttr^#bf%O zMf4cA8;td2@*rfVR2w75@Of;OhM)P_4sh**tO`IThO%)mfr$(8C+_1Y;ddj>_$D(# zx{Fk2lT^)-yqN~VWc;pL$;7LLO9-Fil^pQ!T~-dFU;ULWDuDs9e<%u0X2zZ(82etB z&SLHzZH$iGOrpnqAHU;b@&1!q;w^2R^El#CXnrhx@1gCjx3U(Kp)5$z8WQIHc1b@O zSCidX0!~hDBTz}1%)+87<7E;jq#$X5pb-Q!_dw7`+%cV9y7n|_RUi6^%0i&yXhoh& zX5=0aIs61)P5=(%#sNu`LXu0~;~C$S++0{3g2F$`a=sw$u@Y@!7e#1DrwX4r z+Ow4eJP~iLP2-jf5%AXK$aS`7=@3vm_Zu96TM=(Ng|1oZW_JRL9AuIfww6RIqa$vq z?tje-7f}onl`V?Ea*!)QjQ$^}CY<+Wb!5!W>f{(kmNZUs?3d(5vxa3wiZl3loIiKY z<9B_DP18EUziB_j=iCTuzz<K@Ot>B}JAf&Ct~Wom$i1s&q6I5gw^uahtwd%NsoiUqNHHX^XMseA@0Yn2 z`!V(KoZKBCz}Dgk7>n>2B)W&3?K-AC!$~*ycSGKJYL<=+4X}Rd#n&Zq-aT09`T0|t z_lTN933?G4$MO7t8eNPOoWp>2+Zdmpw0o&ubqshc;1b?FEzeYXqT&|f3ez} zFRiwzrwaISN zWc85_6FY0IdP08w{2i_ARBu5OwPaZL>TL|jQMn;^3^|)x^w9#s@9rY+f$o35@i~ zkbAtOLiY`$&+$fh3qsV)$;}JSj4y5Mp>z7Uv{d#wEPUSI`C^>EcNHbF(-wR%$uJxx zkogM&rIjS~f?}D6(|UoW2H^Y)?IT6+o%d1(fm#lL`q8mU)Ma1$=x12eBl^q|L}0>? 
z>JU1Hoc5F%$@K|^7sVf4&_feJ<_r8?bSo1XWuXrhF?cM}ib9%1i@s^Eaw3yO0cF%s zHJP|4^B^jLc8Q!%ulb0s7v%2z=G0ZRJR&FWc;lzQ%H&BINv2KOwOw;HDYUGY#GP8`IVaW z<=OhjiTNv2Rk_=x(%f=tNF82nt)-D6SM6&X%R#3%G?eDf7?h5nCI-{E>5z^L-f2E@ z2j2R7W|sgPYnA8Y;*lA4$e8>K6r#7MkPOx@lzoNMJ_#?v36pyk1>Liy!x+9B>bm)Y zn{ry}2{#oz_K6=wQW1^Nz|0fK>O2T%m-^;&27kUiamNLlB#W}^d>=029*#pL9;L+a z985#kGDuFU;=T}9$WZF-l^2zKSuhLRWfhdUaEdcPi9+tb@ zKI))#uF9MFM6|q?v75wQ;-{}xag00L%#iAaPdzD1<3s}0 zfL912`7Aooe7yPBl*$j}_R_;#%p}+N(;aemtoS@Y(Quk5&_Roh%zwMP z)cfU0DgoF~q#;io`QguODnR%Jc$B_lKfS=qtLTk5#lHa?B=`NAqup1s#80xO%9l}1 zf6_4KQ|&Zf{WwS`l5e=>c~YZbklO{~8C;98Vx~>?{MnFsuXtw35F#DK%5h7B1*W!Q z-lO3ro5Qc~CWKD>@;`i&?gFwu&CO^0N0=1Vji1bKctYlo1+S#dz$H9h9CPI5cCn-T z!CH2eMKKcYt__1D_+^P_8anDC1;J^A`-v^%>~7~wrJWNy=R`S7T1m5NzT|21LOv$X z-85lwxhZkVyqfgUuivM#&;ADW9$EOI4o)ev=9s~5%!X$vTV)#JS(#45RRl}C91MOh zrk=ctb()KN&ZJ;$pood&;R=!$Z^vF)8#+Y&m`;klhxs@z9rfT@zGV2^EsQ~TzaSfAlN>bcproq`5g?gLjIbN zR0WioxkJ}cX33>1m%w0l38~DL_d(C($Ml{laEv)`ycNwIqR)YeT$I^wey(%9+8kUrm8`a~ zIW`Zix5lyT!{)_FoqJN0*5TLgXf4BJe|ZB*!dOnTJjiLNA4;f4hd;<8W$6EZQUay1 zOvPiM|FkMV14p`L!oh^Zu(+1*-Wjjp+Hb2f%q_O87#lUFn>XNc!i>#CKQ_Lul)};; zt1>Na?*r_~yGnZ9QG@KwK?PzZ~7wr^1r50ALkK;Tt5OOK+!lijSEd zy|#J7>hqFc>!)R?x{v8O{;~MCY)Usuw@rO0gl7n_cF~>f@i%cL9E4<(oC5IVn=p#& z&C1ny4+fjj--NRWcHtXTzZjEKMTKY&hmjWgoL#>WvHYaVyZzV}emKFuB%!G3(%c_7 zmwn-N8DObhSI&qrXk?fW#q+F(=?l@zK*3&i;q9%tvNokE4>$)iYja_;yuX#N1@i(Io!ec#KyN`7+|a{C!MN41C&N^*uh8uFXRFa= zvU2Yye-0{k0NV5@$#}hzoc* z@lTBKQd7aT0o%Rf8=Z;tkd%JTR34%-Ve#2^3ENe+}ri%WTmT#pEt z*!DNh?2ijBlsy{xDZ9HZ(U(!Nhz$w9`TaTz@*5oJ_R*IP0H;z@@4unjW8X~>4!5=a zj+zWG@~X;A@5KG-MIc~$nI&1U`zQFq5eiUGL|H~ft$~@3n`Q7p@Cx+;U@LT@89zF( zoqQwU3m?Tl&#&#E4srmpjT)TugJXAGfQ(ge5%r-OJ-Xn6l0wnDY;bi9*hi&{#`IG& z0QA}1K7Ug1S&LO>{1YQ3g^TSNM35x`-9YMu>jx2OvIJ-GT z9;wh*78-F!6XlN+2AbfykRcX#PorQVe0uELw))HQ5zhYPG*jug^ zK9O?a&*#q>9B}Faz+O_0_QfJ!Ani8JXwfSl;04Hs!%sA5=tFkhYgQJNMxL?1vVJM` zZ}Fn=4TaqpXRPlW!zm1lb|{wLYd#1j=U86I1gSZ6Oy7T5J;;9ffda7YlZvcE;p*89S2M(?ad@(8DM1( 
z)0MwZ8P8&0eOKPKV>H43-xb1@Eku-JZyJoO$lu>~bVAwG7;|C1Z_$1KzNFAh#rw`^Tw21hln-7j+cNxEs>IZ& zZKVsKA0fLO)|(as9iPL?bPUoizY=j|O)KvJ(bKC~AC? zd6SAQ=2Up~Ae4>p`zi6Bae0hMB*8wFS`rnnZRw^i`FvlS6P@HmS{l$W83(hy8>L3~ zJr<2s#@kl#t2CXZ;1mKKAsSn_HTk3NoDbSd6G~I`i|}`M{h(tjBy7^`$9x6ttxGM) zgaMcSJH_SJLj!stH|~!H{YDRIReP#92ifKx0dhfPG2*=ild)um0M}+FT3lM8NL7hu zaoo?ew-hy?)Yu11r8{yn>xry`9ryZOFDO|5f;c?oJ2z0_RKHxj-4FxsTAh$rx*AEY zZp=5%?vM$HboV?qPRqeV1adXekDB)GyNN>1ycE{u#Q$0|a2sD&0Sw?5_2a7sTi7q; zlO9OJy-AVD;%y=CT`jzbLG~6~IYsPsu}2k19l5n7Sba2Q-70UcT%WWje=~N9DuKeq zfr?Fh8eIwU-C0QnkWGF|0~OVj?}v{cufW|@ zT;?n&F6H(3pvtT-p~-Mvt2m%t8gh1?OrrFCYB4u9*}SSW@)lg``v+(xA>T{3O3%jY zn!JM8k>#-`;s8d5xO5K-Ps#{h%P^ttwB@9lER_G9bsieq!G;X&CJ5k9a6q6>YmwGZXx4fo=0RXFNmLA0<>i}Go?u8yk}xyQ@XDw2(|UvHWL zYmt7PnH-nWFE}3o2qepXy1RU&OY`CrDl{rAWHB}X7u7bC8hqqD5CsD8!t~<5AL0QM zGYvev^PC77CRdOgCmbJZ0~iJm7+JQXNI}Awg~?7f%9;jH zqz}n+=fj@S%DWqR$XHKb;@yWY$&(b<2yB%f+)UM8V*aCEarGaL2OU%eNE;c^0wd|chhmkF zkaz(Sv$?{r;|K&O*w5dc{-5Hm`mf38`)}mv(cN8wbjN6Elm=mdNlgiN7u||KR(}_j%o)p65CDo_p`Pb?!Ou^M$AwPBZEJ>yZ#Z z$R77d0yP6bRT!lmd(XG?N1tOHM*0JjMD<|TE+WIi{=`|aa3o#BB3wKhH@W!Yz>H=8 z{rP_fNI(9@X!i;2z+28)q4wZcPfIl`h|Y(Mr}MU)USm>~m)@Bcajjr-y(DVO=?=?WJ0U;yAaGr!@@t602N5-9;xKqa?P@vUWl&!9j}GA zQr?YnG%5F^0Z9&9^=rE+NUaiaXywo8|IEA98GtQ_vyUfdUR6J+z{kjgcmVT|@mm|| zCK{&IGsfs;00q0T)1oQq28!xlPGUqm(zn|kTpTNIZI=`E> zAV73R*n07l?yW3cQ>l=^H=tlC(Uj}I{&~TzzN^9iR1`!JsFN!PmS@@pIxjb`Sr`Jr zGDg#FE5ym~U31?QF_V8srMDm(7giB)%2R>oje16>OcMyNvU9{~~Dc_NlGbV3khHVVOK#?&mwfofh5W!H_UL19|E-TM=a z6!Fx7y|JtP6rfKO)F=S5t0bd`frG)X#g$Fp)U(J)^=#sY+$axG=pGT90J28!N&WYd z3PLjo+>)vsXmSM)seb^-%n@}X!icfh!ksK5uwMl*hhnKR@%)Gv14?UN zYf&YQlN%t2R*iu*E-2n^o1yn9V*zn54uOM}aS1iK`D2ytM2%h5G zsuedv9M7f_N|r%D$}(vZO;YVx9AeS9nM}|w4rS~~*F=2+2y>UTht}PSL;stSktWkh zs%W}7dUrvI=N%Joo<1K)GkiJpT!%@YLYmM7Nig=5B~%PLNY41^7sm;J{vQ)WErW=s z073qtPNv#F_CPp(7cnoB=frYu>{75Rg>v*d0RM=GVu{PfKVo`AmVc!BFyaRLf0;e%VfMU+CY@_I1p$2X(%SXh$>5d1 zB5=$sf+EO^}9%J_9%s5xl^ZCiI-84DigLTmr;(SzhkR7 zJapN*5KQ>UZ7P;>-RZ2e1q;6wcOtIt=|W*%<8OmvahKF>4mZc9932!LiXS;uR0X7o 
ztoovagoa? zgs1Rh2J21tNwUja5oO(D;ix-UHkBbUH|&0Gis99lZ(xFfy0XPtWzIown5<#(47O_E z8*u>88>@Eb!RhV&`Zu15J4gVS(V0E4WA*O%BcglvCjoo~2hNF#Hf^s+uQRNl^<9YT zU|_W&G1LGx>I#?%ZQ!gSktc^6@lV26xQ4VlO3<%0Iny2MduvJj?D`aOjXEH&qopiNK`(qqK}R0GuENW_VRSfoY#c< zh!KSzlcQ>dU)}pdQPh4y@Wsw)@yn5yi1$b?e>{sQM(gmDoK>Db+z*K{DA@5IBJZ>M zqw`h#evEt`Th+4fP8-#`dMJT=>0j{(`!Tl#N2HgT5;aO0y3oP1m6$&Cj2e-ElWT5` zJ^Pi6d&x?}Ii(DPOFN8&N&=KodZfo7m}|0+6Ue7Cs6R{sz=>kG-qUyn(K1JDdBY|Z zN8D~EF{(12r5*N#HY~nIZ{E>;UsyNn%%ZbX;@-yspj)nyDax@0u=IGRrk{0xK+sy* z1C;QrmR2BVEfhP&8MVslVi5NBF*j}7K2UgAE6v9NptM#D#WAd%fC!681AAd~7t2at zHjO&m*G64{HmIA7_KPr5G>E17!~rM~82TYH}+yB0x2ex2})y zo@h@COaR8w<8UEcI*F4tVcv<%PiWP03*6HgK4l4R>+~@OI+1CmHi*U~UvG4zBi?=% z8P)>2(4A(xCC;(RmB$CkA~G98qedZde_Dm^qL98C zbZ&-~cWkBBlWpQyj=&uCs+0rX8XUI`GKY~mL7SG@Ac+d02FDjvGhVvLWm9EJ+jll&G2~$V4tc_dT-0(NMskMGBxxN(#Zhe!*A8-cW3}@Y&UKlyWm- zc=!9RMHs^$8>=_z5&#-BC9Vj$QkP-~?4s1RtJ;D0U{+gU(3dU;hZ;f!ui8P{)%p@k zNY(Hdrm8f}k+wk%i?fa>{;I)ypusX6^*LWvvH8LS_K%pJSbyGn0}N$XE;>s=&=@a% zK6a?_o<~s&=}59Y-yu>Z)TcOp9oo=ul5;eRyzgrWd{YI#itXUAC81YDr`dDZ1NyL2 zdBzlfY(BSWzp@27A)A3_&_5UI&4nH7(fL=NpPn%Uix1lr5lUsOjad)?f}y7p#mXBJ zwCxX5Un^Dg=VjiuL-0a(3!nF@GWPiLA%{BTsg7$60Li_($*AG%;M*Y_{K0pzPZGSa z>j~G~qM`Z-M@FZAV|I263LF;iyB~PSF|~N!FF#Ay{@CA;!SNSJnDit=em1aFZrwn_ zdbgyWxcKC&Yka|{NC2Esi4;Y#6z9upLH8|NBmqFrY|xK!v8b}Q&O2@Syj7EEkPi`S zk~+j%TjTycV*9E>oRcL0jZnWb80o?(k(Cz1HfBACV=I0zUQd}HL&lCex$R3 zmdZUwZNi~xO}ed4a1Z{b5UH(>w*JPSP(m-oa_(Qc;H_QKq}^}#ESfayePyuBo4Z#k zn61HIVVR2<?$eV2bFrGA^|!Cs#_ zmKwnDAex(Zpl-jEZdkwGDS~t|Ihk>$TiV?ntXY9BxSO7g zx5<`1X2^lRPhS%{hcCxo|5>YCDu(SiKBQhZu;|C;Qkk?9IxVqmgWZ!?XH-3b_I58H zQtQ#Lx3^S^5iW#3t!hSu^NR&roQmMhKFn^fegRzwT_SKY%ycovVbNVfAF(TLLd~QR zEBMS%8YaqURm@#&LiA&aKs?|1;kCo-=zAh&0m37>WsKW zZ!E`WVNc|3?2AlO@l{i-#mRM%45P2K{;P#@n0g9hcCc))aigR{%juvmCUZ+1eUowQ zZ?}%%Xeq$_n`jGWk>RL zAB+qZx89U)Ie@nESyTOjX>iBbtSTFa>x*W}0EK6`W(>NgUC$0?PuJ8q}kex>nsju;;Va{v($p{mQx5kXukJgrrR z3#2!eF@O$^Sp_dQO+J?>%Z{qwD|M8i{V50b!-?lQCGWQs`=5^A&>!9d_aqW{;BZnryZ-rg;5*JnY%3 
zpWsXfKp^;!Joa{xKkqK&k;?k_l#zG%0HL;-QczenI0$~MI$C|S{AU|QvW2T>yxLf$(y<3Z^+td^kX+G?CK6} zGHYRr5GxFEG=1E0=N_aw&tILQeY!(#UAk5~ZU4gy;y!2h24q(LzPXeuvEnO)tbJLg z!B>LV*YlR?OMr;=Dx1eqocbQlSgM8L!+T0E>PV~%VzL*dwZ8IkVpj(_tCwK&S}C_U zS!FhihZ#Imu-8u_o7=OnMlsO4;GVaE6OXd=b6GZjyCv3Hud-E319-z_xod7!6Bm!Z zPo5t9C5<0?N;SXjP+7bStM#Jutpn=BHq6~^f4FPx@y^Zsq-~a~r@KRVMFAac+e*v( z)Kn605numeEkxXuWz__wtRB~ zN(Mu)tM??#?d0%Adgl4Eey%Is+EWi_Hkt88@#*q9PXm^zMLRLF`a>ktAP>e`qL~zq zdr$JcdD%lO=`E2*KkgZ#pG?>q{UFVH&5E#jknfHiU|BB;r;9K&mgs?6==;GroOe2( zIMo2!cw{S^R^NyH+R75XmG@I0OWKeg{S9~pQ7yksp#_T4AIk)=L;c8ewy6=2&!>U- z!u+SVg?~x~SFS1Jy5`&Zbr{&7oZ}%`=95+4npyQ3y~za*55D4XuqC z@h8vG9vE?oj~0@HSG`1~voR?nK=H{>zv z-ZD+AHqeEfnHq_s*rLBTjt@V~Ll^pgGn{MS)7HEtqQ$g+3b|j4DI^(rr*dp!y}`K6 zt)BQn5hbeVQ5sl|{0jFgNmguAwj1-!UyGY~=& z4!>CTbzibDl1Xz5E-dCGmE=r`k@>($S!v=q;!>!MjrPeyS(E3mF6WyY4Sx#N6^~aS zM`y7k!^;ze^SSfg_ol+94qk?r_$yT_`F{sCJ<|#ac@+28YFv~F@MgWox@;+bY9wGg zTR~gyygs+ao#)@*ujR8!udRv~WB!70c=;+jp8jV+Mfgdxp;^gS zrPeyD_H`D$Tm!mdWy^$?C-^hO$Fv{CeG|$d=|7&GbZ%K8WYr(tS{zAH(RpxfJRe_=f+^N$O@4i|CH_ zp7ce0{Gcdrd{va3|H5jpoMSyBhj9N##Woszq=DUD0MEvNpWGZyxTg`?PHJ;Ed8sRZ z>Ya%5hQmGeUjkW%80*(~%;28-$FK}ufazB}W8UjS{KC}_4KCxn4RRl=Q<=$$hl74M zD_H#Ug7)f;gdst&vUE@Eo4D$(Uy ztVbMX4k3BC!WvDV3ctBSBjb&_xp77(igUv(o%)&uweW>-w{{bhr2>!gCzgzt+#9yr z%qTa}ayIurfp$&+8J@%{Rpc?a%(fTNl7OOVW#;UAZr->~mDS?PW zV$i-VHLIdMakF1bls_iC;^wK`=`n*8=ggZ$y?;(~FL^T@KUvweB#D?VxbLhkJiNO7 zO0F=`CuBn1_%&&?BsZ{+(A{q;{$Bgeu;oi8w&?-|BJ)bxYA>g;ID%GyIJ-kZt!<3zfv zb;QQDZymaDzZk#2#HQn+`n`gAKkwX9Zv6E*`I^SEZ?#O_5qyWopY@l|fY8{E?iw}c muZV?5k*`F|{!i&aunv$#Mk`kJ?G!tJ_?Q`488$#XH diff --git a/docs/images/ovn-inside-k8s.png b/docs/images/ovn-inside-k8s.png index 91d9981986ff8bc77c1b7ea96c5c24f084cd9ce0..3393d0af2792fa719e77e955f085209211837a63 100644 GIT binary patch literal 39743 zcmd42g;Ukv_dR@RkVZg|mXPjlLApyMB&4Lf5iZ?*=~B9p21%u)l}PA%eX96V*?7bZ4x?%G%!H)EqYcp?;2Q>X8D7y^H*_P5Yz`|9(>U8*E{xv)bymL2 zM!!(i9VT)OjO*Vx|FTrZ9Q1-~F<>1d==G5ySCHuJ*x)mHTl{Rlz4Yh{(j_7pcX?Pk|6G%76?zq-c~gFQiWkkxU9K+vB*{lY=gGl?J& 
zN{HOsH<})qdyDR=_)<5W$M(Z}dp48U=?eA`Na0gkHQfl~8mdp(659$MtiRzVcN-q-BeVGedHRW}T zE|vCT-RuedT82#ffY=-qR^?~9N$;U&feAsFMY;njp79UK6 zwnCNxJ4xM_vLGr$aNAiyLd{|%sYEhKlAlh)${%&;ceHgp`YMlxa zVaT;YdGyNp0T3xUjuS6(869mxv(*cFM>M2n)@M#1 zxaY5zK0J1~R%gI{XfyS~=um>e#UhNNQr$`O719r|J$)C$s{&>uJA_IU47QdQH$9lj z21=T6N4Bk2wRD?wjJAX3D09f!$i}aO5ER78&hgIY5@BT0y!rI<0ngy^dyLcv8al~3 zlf+}i8qXq)_j4ZRAsREkkEbE8}T?#t#x_X@T`x-x@RERKh(0f}=>WCFn5LEbCatzqW z56GEFP2j_`y{K$T$6VfO2#%Jf3f{?C4E(EM`mCa?DP!|l5)x%YXPHJE@Em(^qvosVT(T+AWE3JD+#{ic zkM+~{(mVw-U4l9m)$y9x%vM9YF5bsIO);@bERsddye572aur_cko7+$Nya}Qor-ur zIu)r&)L zsoLSl5$VvV6yM{h;d^U<*?yuda6#CNm%ZAxG}Wu-_YoBPb!7=Rl{DMF z)!(cJ)EU>|Z`qHMadI7RsR(-fpH^5dPTk9Y5Gv$X*|7^$(~yv z7oQaw<>bf-HXb|7s#0`$S*&s%(0^6x%W7OK_4+(LMg6xAud0Q(S+7SOwUnM zk^Y^(YPfQ>f^DZ5v+!{FQEy+KtH~xT_W7;Fsw0UTQz;}HF`H0AlVmb1cP0r$k{O@+ zz(!||9PDD4tEOU~hl6gUe07m+w-{T9uc$(bi|+;5?rIdZ)2{*Uy<|;vVRT`-AbLI! zR@Ec~i1g1W3v5(Aw)r8(*_!HQFnx^5IoMqFJP*p-z#T#q2E)<`RiVQKPoeI>5shn!vV@?M@(v%|MT?avcBh^eT; zG(qo;K_Go}1%X5=CqJ9<)<1^}vN|>nr6mCiE%sOfXE`=A*FX~(DuX{(vDm(vNsvK( zw_d==Ys2TRmpe_(zkfu^O16?G_0^a*e6_Qs6G0-3BHHS$TDd$DTtuEMLoB7*+$Cb( zyzV#7ZUqhaAw4@g%cWJU{*`QW^!{P_hfQ(}xdZQt^pd4%l9}2y( zG8|<(wcsNr5VQD9YHy$`sOkg%XtM8OdB0Z(i1D%8SwVxXhZhSE@Yqj=;75e$8vj%s zk7Npx`xf=W)+3jk(D*CeK>}QFjrIOc%3zZn!cgR>0h ze#I6G6A&g|y}Y`^Z3O45+w0-XxL1P zPIN9pnTXqUO7wg9cS5&Ah=Bz&J5G`UGuLWk#cfjvigV$wnY2kg(wana1&S;?KvayCFWn>thk6qcxPg8fdbF6HU`rp)rsB zLD9J$z(!#iJe~KbB7~PnD&Rz-M$$&nvbNd8f0(L2LIEpNs97@zf*yBvNrcr)^W@Td zfs^^8r}_YboXx2a1$&+DP$>&;uQDe4K8myaEw5JS265|U>_$l2g8(+H)~oFE^EKzz z0It}f^%axP_?skz7;pXgpeVsO3Q^#J&%S^MuE~demeafx!VK&v6RSc$9fLuf2{W|k zw6P_ESGX^aYgX4fZ_^XOm z#2AtKK%N}NLsyjD?)*B{)7<=|y!NiF{tSJC!-VqoY&#C3%FrJA==9w>@A9`B@}^)9 zn{*_9f3yvF3-L~eIlTk})}GtFRZD**;`>KlH%|Q_yu0o(zPACv@NS`VrUcH{z+#`p z{+-D(xsOKo%29vgxsPB+_8Sxn$DSi$EX9?$0%@XW7)k0QwAG47)wt305$`|{D?ULl zi-h7`zp!}Qo>lhqTd!Uv^lw&?Jo2|Y!&`6WlFZ+9va0_q55^qQo#}+y8AxM{<{iR? 
zz-Y)(6%*81Y19&~Y^j1UYgJ$_U%|@ZtIK0M;D*~gR)Qo8mlMs_9u3-t<4yMwu3Rjb zSM1F8sb%iw82Y!7>pCW39kv^ zlf9`Prg6WwA(E$=IrPM8H|c^$qF`yJ!&#=nnOyfpC7SbKs{HqcbBAkHioWv^-j8(o zTt}}QVqqt^%Sco~_griQ^A6(rRqy>_oD@yuO$48eP4)b5`0ahv`gUt~i?R4Eeg|~> z3yvI1BriOj`b}H<2^V5>_gVs9&lb>f03Li2We8-LD4h8cN zqXUx)480Z#mrdR~LNOsKt%wtSefD2{0y3Kk8D=vT+_E2=jbJz*6V;;WM+!h8)>a=- zrb)PGMZ%D)`fK*t6Lvff4?^N^a)C`W=lK!~W=Y5k47r$gld_6P24X6<`5rKo(~mFu z>DL%#5=6*!@YrZn?_N%Ztq6laaiJ>MllvI^*bMzjVExWU8FrC%EGgp1LwP?E6i<7$ zSL}CJ?_~{hCa0gicO92{SGauUFQD&~8N1O-U>gB<7)LK$_p4YMe^a#u0S2HcxpeOL zrG6q(O;+6u`u*LB?mE+ozl1#Ws19zdo$cH~v{j>(t62hXl=pIm%_M#5m?rNDcj<6H zxcykMD(iy;bc$`S3z!Nn1S3nt~E+3JxIaG!@GQEtHwi^fD6Jf zm1Hf?J^*7sYdFkU6zh|%EWyu2XktFBRt^*!u3TK&Hh@0l(qZaxO~-ebL`F7x>;M$5 z7GCSL8i@E?RnL9-p*xKD>3M7y4C!3)zCD}T+tjhgfdYk?T@nEU9$AiIGqG6W${$C9 z1;!u;ieQb3kiu=YQd@$$IeZGlJ)zjfJk>$-%}e=$Xk`}LwP-)?|`x(Wa`~q%j}7h{bB&PM2(I>AlLl{Zguv*Qi{< z`Bb{PZcSG?ZQQB!K)vCR2onVL#zqbaZr~9#SF)NHElBh{H5GG4nS5eZtLLM;#U&a& z7H1bn1i=^rG6z3&hmB%}TWW!6NQERV!e?Z#0oM}SQV|orS(;c1c z%)f%q8gC9Yo@Y5d%bOi2GZB99Wmzpm;(r8%1~d_^HjC3}C$e`3_6h1S2oKaj?~B~2 zBSeZRJN@%vS^h21NcN_>!|yfp;4`VhVzP?EH${rgBi96ssInD1;+Ym=$6Ur<(AwC6 z^;}SC*0;$qOyQ>CwB;BK5y=RW&)>BX1(KJghmskpoV z^sVJmBFKlnNn?Zb9g73Jdg^{fv&kFf7MNBEWvAl5ur&axJr2t#`z{qUsrG*BU(Pq8 z^CR#}$_oU)CHTn(lB=39i72%?iL@pk7>&q01yu41wgrLYwHe6Xi9I%%t2Z?8zYz$ z-RLx^CBf>`b+8+0#EQ0ee}VHozOjrtsKUy~olP5Ka5YXjr10_%RT$FB6r}pc=%v~q zoTY^PHu4BH$JHf>i}$2-qu89jUeJ0lAG0LwJ{L6jfl?Dj$hU7`rb8d_a@~s*r7;Fg zjNB#V@hIS1k(g7|MKKidC5nHEVHT%BG7_I!8~`jAmwq+;#O)7VLQG>9>70_Um$q@u zt74im6Th-E3$xBv4-VMN)f2)79h##J8-SJel%VEGQ3VOk&-YF!LY3qgoc*n#@#5Ch z^Up$Po{D&P8l2liX_|P;j6s>omxea!&l_$vHYPzrhNY$*$;A4+-F;}G8_QE2G>+az z+(+&V>(4v6B*JtT_0%vx)0Yvi&Wm98N^P!j=2wGNprb*?hrW!Q>5_bQlCd&+4Os&B znu7ZI+y$=cbdH>x<~4qbBC>4BLYd1Fe?poVwueaAiRCgHRZu!_4K4xVOc1OF>Bs`F zN8|+H!Cc;6CN8k`Oj>i(BWH{w_}a%g6}A;o$4B!#tw(7&kum3v7i!IG92Q81 zc8Tik5aCxrGvy%kA2e}{9MuOdXr^wfDAoQAvS+w=r?L-^=09~wR!5COZ3)*arE?m( z#b-p~%z}OI;D$DL-SS|$t;(HjZhACX}5gj7Q 
z2VXTyXttm!ra?GSHr8%QQ>lUfJ1n`U9ecB9UHBSmjU})(iS)ssGy*F%Btc+zsg!%HI}pwILs- zM!=8h@-upZ6aq!s)Fa1+l_FWD=-qd0On81`e!%7=b`N!8w2`RQfsy+2VSR}nCt?Gj z_t*aqtr1qL6@1PTubL&}QCPz`=2v{E%pN72a2|T26GfCP6N?p6{+1k_l&K(7=A*?A z841*L7OwDT@QU9+DiPC6-QB*NeW(93ieiVDkEQcOskAdCaqAqh>f~*vlzeJivHxu3 z%dMr!!LWa#H?F+-&y4++5td&wtV?!gNT6y3GxuoX#7U?RVBm%Oy2GJ>vAF53U3^7W zi;&(MR?6QB#{0G)oOzP<_-?RZbJ%-^1zrxWofwP8>==W>83@((_mg9IM2LDJQM^5X zul7d#nJhys6t|O1Tn6NT6wTo%=E*=5WPakfDyDGW2`b8#H#3u4Z*1pXnJ{82tS>sz z_gi$f=?oiL5N;Ii@pXX@zK1d@?nE2Gy7t5@9CCvCjV%7FyX~&KqA8mx~xDU(q-gNH{%y*%6{-UAqge(cWx<_t zq`4id=tv)EFacFy!(W1wjYR%j0tSc%4S*A5)Cby|J2-3NT|i%nnGXwvyMzN>k@Z6a zj!6;nm0L68ei_YXIkofkZZlFyZdd%HW|)((Lko_?zU?9k0peaD?1Xd~?MsrHZ8vC%Lf^V}BxPt>8WuRJ z6C!$GiY%zhIUBOtEf4l!p8by@P`cE&dh<%|mWE+{Um`5!+>SD zzv_hlLl5XJd0f253&me#3?jB6Sa+o{hq(CJ!=F3;y~~Z6wwP8UP`$mw z-*>7-WI8c+^MzqG2c4Sv0abYTtf;rOu)hW?(M85u9=>h zVitcfw@$`Kz7&jON6-Rua=AQ;F1-r!kUxA;&jnUeTHaG6oEY+9DWz0AQ`-W`!&^BH z3ifyplq(1EGK}wa=f4Z0LwS2TFwT(QnON7nCvA9lKu**>Q+L{t!IX#6$Tu1o^RgHPPBV0KXC&#T`xd3t0Bn~2hq4~?pQnJZ7x93JAcVBgtmgpET zV=3?jT{|kJ23zAjYP~jM^iEtQ$aHpL)h5p=njY$urRp~EYK_Yh5JinsH-K0Cy;;8u z7)m0M{;I-z0>~V!X?tl>;nB_jYj=ewKhs`twn^ls1G?PDV?3226<)@#JF7G(!RMa+ zp_2n;+ev&1m8zRx#NbyYDH-CP@VPz1?=?>AvWB-;mn1yQx@8e}{5rldkzLkDTdnJg+`>$>9p&Ym6a6nJ~b{`JmE%KCWxR$zZy zY=R4nIcXIdy}Is!G@X??>e;upVs%F|?=2C=<+_(#!GGaKTOdq)Rm@POrbeIghk9M~ zvyu`c$}*fnd&FCM+j}q4iKd0?1JBnPAr28MJ}z$?H!By=S|7`UszY+7RqW+t1)=S=;pj>8HF6=d^f`@D#nY92^OW1cg zxB+N#{g}I+nNZkK*A6?EZ@I-lfR`3*(D=sv{N#!!8VeE70!?a%qB>ctc9!Y`b~NU2 zFY5y|4YE)nyv4eYmI|m_)3bx%w06=|slwG7gy93eC$Xb!^L*BI$A}e000h$OmoHGe z{j(*)3Xoj}Ye?PQ6V*i_cl8|8$=ZEQHzi~{-*UK%0KfC)7Za1pj)!fsAwBZ3 zdX?5pG{dU$dVMks_d_-Sm-gDv|f9XQTUQ|S6O-H*AIr=G;PCtDpD#CE?!9R z7YYWC2SBNzY$VS6eWw8ce(G+LGnntpO?4rG=BwSFm)K6`9-p*|iuu~ZAZ}W$%H%^@ z`mBPC-3!ImS{SP>_vj88L(tVgn5a&C-hq$$fCY57&Onl}WZz+WIVw>KORuLk(#kA{=0xz7fz* zdF?N~*5tTu@wyi($dAHtu&H9M6k{;l-_^eO1lW%kbhl|WUks2CEKS#lk@VU5wFhYl z#F?mqs0+g~@ewl-700|57ig*MyxcXD?DaG0ZAA0B98HP zZ&zCFG(lHyr(@p35(A){(6V~{=P3?z9RoA^xYka$ 
z1xR*w!nCo*pKC6g*KPVkP*r?X2LqCJ=qB)==;Y%yA27%CMS`A?b{)c%5^QUob~#%% zn+S}?|Hzlm668{zx5}+NuTV*xWO=a-)r!{>I3!Sq75d!P@+p@szJw-Q1AymvKdzL6 z^u(@$Z!WaCv-dpg9`Suqe*$Pl7DZFYtPEyMdt&2jeB&(7%k2ffDW0iren=;j$dR^ z)yCICSVkD$bml;mc7^_hnf_X8P@DdJ#b!u-S#t14JCdZyTcUn$z7<6$Er2Ox)A7&p zLLn(-F8%FVRV)|KF8D;NsPWa*uCrb25C6rNMWlOmA3nC;YLQ{L{yD2|0&S*2YyI0n z+SjO5qSj7bgqyI<0_r-0qn?!zIwZ(#W=*pwwsC9X#bfmHq?Kb(Lj3y&Ttnt@$q)Y5v*xD5 z1c~hCH*FO1JN5J~&WBRNM30+z7xpFa&ax9rSEB^MA({2Zn_SP*?_R3^w3ut~Dx;(% zlV#w7cGc;xg0*m$TjkQ2Pd0V<^umO5Ob`FN=feix9KMkA@^g4);nX`)ZuT*V1^orM|RUiZW|LR4*6Ita)XvZomzN=@1CA(r4qsn{N8zK z%AwwttsO=83DD>5rRieb!AcnL>PcUwzW*BD+Kn8)DQVJc>5e}Co_K2EFk;vB6TT>dKv#O3zhy0rr=DdwjeoG!KgnrNjmoaD5hOAik$Tpnko(ped{ zmJ&1(%t}Nhcvbkf+kbv^mH9EOpBQfoF#QG!&DL%N&kJ$uXKe?LFD>AacgBi?3;s=? zzEB^yhrZLT)cH;|mhS_RT~zF3{fzZ%&F}@!xR-je+4^Ra$J)TF1Ac`^GQx-iM)Jnx z-o#U4G0rw~r1We3sP=aL61+~YlO68o-)v0YDgZF1K7jR|+eXcj4ADqJ6e7M5f4a5G=W5P+}6JU<8@7!HdeK$=PDxM7(i2Cs0)5j|;v~>>RHF+z{ zV1^-JhM%)OJA2e{A*mQ>5B<31X>(99`Lyu&GE~!`lWp(XRx$e`zq4Km8{Yixy}hp7 zAEwyR)w$>U1gQ9%Egm-^AMN-BmM&D}BTi(HT~0ZVW7GlbTV+xKl%JwXrouzG4MYfF zFI+su=cV>>uB2|5HSa@UHIS{6_4M;~wNeD794El$fTPgCN6a~5R$o9EI|WDh)RARu zWqtV8{nY7^HkiTCu&xXceOh|d<#TvulgX%0jn)EFq(aPTK`WcRk3nD2Y2e<{Sl2@Z z=hg&g;Ju#br}b|)1iJMMj$FQ`XpcSx>DyvEmoC<@4{%WO0&kMtMEk(+e+LOGFsW(# zMVZNH3Wi=tOzcb}?8?qlLWrQfX;>oE!?7Kh>4b>lAFUQ?m$&7nFc5>FfKP2YHoe{C zAo?oGtkXvOv$UcE|KPVC% zmOgErgP`A}Rmx9x_4PqyMEypl;fB127u&VZrpYn2 zh9J#KmXM0;aUyAr2oHQA(jRm_&CEV?QzHmF-9P#L_|RCOKE7?YgYL3bv}g-hLf<4fjfu8#Wxm>t#I)jZco`RNu%HcIV5By@MWh%H-dvnajZ zz2zP~gO*ll1^|W_EjP$ zc$YhJDU$>W?a*sS*VMkhRbSm-vw@?W159qws#otrjP}u%mQ*YI?C5FY{dc5r-K&}p z!k<6mqPTMNLEhK041it34JW>&vzZ|;7^6_(Wtf%usDJ(U8&(?4hd_wNDV(qMb}uTQ zaBZNPHhx+hjEyNSa9s*%R)drYEvy4Y_1vtKPO#x6Q=e{xcQXXR{D zybMU9df?>c5x|R_3aQ7Z69S%unUIoeh_}!Eq%rHu`Yf3$#+6RDjeT6vWzV^`u~)##}hgb!OHr znmtiv*=^N9BssHhY=5T~eWY3jd={~%w0}wPcfh^hZSJSTHYJM({Nvm@$QjJMjoOl} zeRW@K)W|Sl?EJ}g1^DUUH2u$Tw~jwhu&T7n((ba_y&XQVUUi_SJ32j{VL(8|IHP#! 
zaT+P;t81ai8crCiuhYG3P$Fh+JXGXsz)tThq|>Yuka)Z4@bE={E;)abYZ%n~%E!YP zk*Tmx5knY1K5P;?>_AR^iyYBFU&OxgT3Dv@d|^~_psI1)DOH7X^^F&!@z z))DrG6EJaEJxzr+$rA_9U$1A>h&<`}j|IUlmb=DhGB^0lj?K#ecsPz3Z@+*5_Mnxg zDCV-g)I_Gj^}?k{@}rT-B#-RhtJavtjR{B1SY}qDOlvD!ENU0K)qmF+3QY&wWHwcQ zEYPa>mwJksA{7_mn_NtSfOchf@@HSE+M4jaBu73q1d!L68XuN*2ZiL=g9li2`mlCY?gEMw_pV zugEw36p*s!J(_?t@cL{}IiuD8Uf29+rJ_e)$O}a;LsbKQxzmlDyzP4!;qXv6In%vk zuaElRtsx4F8pAOFmKAS0cahy*ua9v|P7!j!wJ?=M98WuH>7U3VEmd(AvB|hxa{jx( z1Klt0PVctckIyfPv>~d1ra#NqRerD_t{Osp&k2DXrQ#3hoi55M<~**hGmdzF5;A1v zbsufZ1|B&u8`@}ti%;6=KftPUTr}Wmz(a4onSyOrZflHk@2HW6K++E%(UdgmT}UtqT#M^~kh7_X`qz&Gu;X~m@8Qx4 zT!U27s}CEM^GJ{1^W#=;3uPcU#j&RYLxKi5WU_MRZW?Va*5M5Gi;qGj$<-PN4EEY( zq9~?LPh%c_o>q{ujk19 z0s*ml-S4ZwlDDrOxBIa-kR|c+RXE%*FbqAeyk=NTMnL{T1%aGe!1*f3UjgpAU3aY0 zv+t_uA-@&&f_P`UM%=oezjB}_5!?m)er`| zqjc8#PSYc<5Db%d{Icw){=<8uK2lf@NcQpS;5K`2+G{(5rh=f|oWDOJhC9lOun#ka zmqu8=X|e>bXS}-G<#C?AzckaXpP}h^VTZRmWk!edPw880!EDNnu9!>!m1-X6;u3mCiX$l=Qn1~w`oZDYr3m4Sm6o3+{A=@fj#xn zvnZrlq5SzL%k!zF^zjm{og6zEKh41_%Zp5cb1%dvMsnRD$1^-h5H*BG>w(`^zWh`u zvr)Jk!G0n?U$@ZUsb>*+!K-kQqw-t=gR4fY#Pra!dQq0aJ?lENZq8_dv&iipm*M%Q zrC%F&cg`sNH^q~zECi_P+Y{gi0wJDX7gU3rsm?5`0_k| z*%loTh!LPTcEra9p`J9o8Fg{WZ=mZV=lV#fwxqb=6EVaGOs6G z3>JKO1DANDk?hHgcfJ0_C!`Gb{86YmtSUK(f75?|=!)a>MYQCK#T?}Orna=7r;>=i)8V;Z0{t#ASJOA-(m=~-b zmIcfCqtU9#YzV7MMTr}z5{g1sw2J`{*|jB~JKH#}LpF<>*qiK6ZiKgt{qKmj#Z@S*I*c zVsjQG@mCNz?W6~OKp(95^Oc6@Z@t$O0upAkn+p1mWYVt}z^E*J@mg!_ z6>6uk0-QI1nzcDs&HK&r@VxVzm5oh7fMp^+_CVSltmwGSV;ItX20hC}o5_M59caIi z7Je81PJkx*eEfTN`6_&(d%X%h3~yKzbubA&Nk)ZhA}0*e9A5A9dY{*f|T@7pi@ z)qwnK=VOjyB6|K<+3G0B=}bqt7zq84Y!l}2qifq0%m3ZsP|51fYj;)i(^m?&E8VS7 zpoEjwa#0eKG{`ic#qTTXESK{P13QO4x0TQ-DePaC^@^gnAREfvJ6rY<+fl6(aA?cy z|3Z_Yeaw769B4n8-{Us-8F*MQpOkIMI{jKt8D^HmZZ~a;O5iH}yh8UIyJ)G25~4iH0|38c?oMmQjSSc3My_jN-GB`b z?fU)xn*BoxJU9qb`MEkRmI@A|ps?_hdiUruC)4T|8qV;fpc!qsW|Gjq%Gtkb5^Ou9 ze)MdS-hJ7;uZ6UtdlI%F?uXElh_{V{m_PjWjWqGOotN@D($4Avr#Z@MQGsEhQw>n& z1qRBV_g(uY7b@7HPX}FJ>1_@{_C_of;72gvNjaZ?Q*fmmc_CJ$t*)Wt8Z}5>w#*bd 
zEyK=(>aYcR==jt%isz*sw(qiLKm1vw$w+>j4D3CA3PbuI4PIAd(Y0w7;3(SQNV3!_)5oq*!h#>NTe8{!jziZx_n8|=Zfv`wxy158CX95en39rQ zg|ul^)4f%*|Bm27A^+CN5y6jG&+;}6qs`15RU-ufGKX(Hm$i$EJ{{5w{H!}K_bxw^ zotEUt{H@?U{KrA=@@F7wum%{3y-zC!{fRc^3D}O6OYk#Z-v%)cg`NPW4>}`V8r6jt(eUX+t{W{%&A*hPs|R z?OI+V?o9rIQ%&7miM!|8o*FT6(@r3ak$n(skC=@dvp`YP2&PUjBcTQj64$Ik4CF8G zH-(UDXPhrsm{$2sL8I+ujY>G*BIl7LvHH!!3Sa$+_ye?MgC*+q=?1O7)ZHU567L!Z zr5W5e(5y{uEp;R6X2bv3FG=8R#2DL)w zk>G@|F$MJpd49y179e0_&Mj7&ihuQ8ipbrQdeRHAwOT~n{_sCH8te+PnKu4HqME-R zkTVrLzF`#GtKJWd$fa2w1D7e1<~%Q(e5aD7>S0h;O2^`?iWv@6RXqeACnruL`d2zl zyhDJ(Ej}Y5L9?47H@nD1erF#&w~{|_z8?NiIZIeCmDJ>F2L9z~>Hyk*yTQwqTP@?A zE?y54m*h06rc-7fYt-=+hzyZHhv3*|w zAOkSZw0|J~s9?6|_a9+otkwcYDrSRX-zWu0pM_`f)#2)#$A)zI81mHRgS|Wl! z2eO-td&8}?0)sXrEAjr)5v^~bda~LA`3b$P&nu8UpR8hwJ@Kp3JU#s`hII!H6cA=` zUAN|ZrCG9yoS;zxzV5q!>WMfGJ5W;TY#s5K=~iqiyu>^AgdA;){gw_3jG=!GT+e=u za94EzCLPp`zo{QCLu(l&I(1|=o^r*Mr>5gc8gHMrDlWcX6?}zKcj2%^I}F4^QCk0u z+;YKQoy!!Vi@(hZIS5a#h6K~c>z7n-UsK=a@#$e=g8LidY01D~8sqRFIghy$U*C)v zMWM5jst@hi?b5%WzqxPB zM{D&`--kqJegMOA_RY7cJ-gI_U0ffIQd;8vQTTd12^+2&6|b6k(rQ6A@xWCt3n4lP zQ1LDm%ct-mkf^MUX>NY_sEm#2yEa`z^7Zh`VS(K9p3%C;XzNBPrIu^4%MwEk2;`fU zLQv7`Yn~6a;G)!@L@S>47jHi%cpH{AtaJQ`cKo-juZu6(l3?^{(^%r=3?m!mejuc1 z1EC2~UtmE%?{lxu3h;mI7oqpAI*fK({rt0Lw*y#5&*NI?h8yH>28rq4lL_*P8Xt|I zY7LUcZ@<4AI{d^4l!k1QD-uCh$Yh_BG^v%htn$xv6;mW9Q$J!cuw^D-5rHC3Bv#>w zpl{N~NiEKO4KN%ntFaF?%380Nr}VO4c&2X8YJW$^X-eq(*7oDe5Fik}Y@hFDRHjJH zQ^2dpDsWyGpMMhZ*nVpd@~lsCG`tf${7p%E-un99_Stgc=rzz@P-u#-I72~n}*dgMO$U69kiXs3F&=P!hxJ2 zaGS02yAm3S^RX??3z-l?&WMYX={T-{(Rg>Dj!j^1#IR~xHGvK-#58ff6suc~f+HdV8i*4#WZ(6x51?G@loVHl_#clcUGR`1BGO>@6^W|Y?zw0BlwW%%rO zLrfxJkSrlmiy)JCvVMh&-KJB4!M<|iCmHbC= zU{)4MT;(QezB|rxXBt#AixY61TyBVaxiaD7rUrSoa>%S?ZbJp7G}bNZjjZQXb4i8f zB26DGLzM3qFK;tS4;2LkE>rbo2*_?Lf~~n}`%5W1A?a9{rPvC|_D;R&iwBhsGt7`q ze?jKYG_^hns|0oSf9pv6?2^AoJ~yP+0qp1;K3_$}9q9{O1@-YIAbJuGrxQBhuQz{6 z>rUnsNgm9sTSJb^VrVk@=Io5XJ%UKue)>&luvQ9a%X{!Jt7d?!zp-jGx;d%#Dc-od 
zO5-Ljx?1o4SnRfUy1eKg&gMSvU8<;fT`)FUA?u^3$R1_N?V7)48A8qesNWQ6j;L_=q?{F4%27z zS?VOcEs9L%^#T3`u0EUUU#vw7%s-m@_?>wv9aB|r2r%X6|3g6^=DnDmRh2#juKjDY z209@@(S2FeE>2+oC^LWX+Pn$r(qKYSfnny6VL%~M$T%O^ zrvM!K*Zs(}nihK>)u6il7ae6Y-3u!uZLHcrTo)NGI&8+qJ}B56&LDlINH8vycU~WN z^(zYoFor@Lx)cqY z@5bhY_n^mTx5Y=S!?X)Y5iv{VdmGptsdgR$Pm4ky8GJG}KyW?c+AP!MvxN7P93|V8 zCXpC1yJpdI_<;3qeYid>yR7*8ay`Vt%>i_oQz<01^`(XuY(U!o<~R5mn{qZSpk;*N z(?sW@-22FjtSd%tk6(FwfNP%lUjr_+|d_g8BW%C1;08NJWA#;z#*j&{# zH88fRD+@A%mjUn5E%efze|+-w+HqARdZO;8oyOhy-F zL>c~7UzT%`I2>1tYG9>sX7uXUDyt6=f~xu=xeDoe)bHVxe;J--`Uxav=4j}hE-0p8 zpX!+FEeG09eOtkHGXuo+`eIeU#6-{bEXVVzUX7)Tg1XLOO9bbzx5j#|d+vPWfCTww zrz7m>J5yld9oWI`yY_CPb98b575UhAcBE_^rJn1X<3y;n9c%RBWU$kt5Qg3MkM

jb=ZmR=} z8Ri0(uBL}4yw`t&3Z{9mFQRLAl@1Q`JdinERe^Gk=5zQho|hEE$K0<{-q@AB!vGgZ=0q`gS+1rse{~kJJv8VslCA0l_67t9CQx3!(KKGaI{)B^E8j|DV$$ zbx-V38ue>QuTp8+PJ}j;Ct8V3=_p7rPfOj$hcj8iyO(gT&Ol8sRebxGU@)+sM zoJKYa`KR+e0aMVPrv?Yhrmh60z~_@3G6_t!UoJ+^cz5Ek(ft};kTJRT_h zsDJ!cY9O{s3#RD-K&rH#X4xQ>95DRqe%vV6iqG(U**H@e=u-N^$e(HryF}jKn+h?L zv-9OVykQ#g@BR8x1V_88!zf zf^0@roFW0Mw`m>tN2lMveH~=8-}`#`_LKYNNfh)zBeP{*m)Jxjc#h@E%(RMi*F`{(x&yqDwzIyC2PLi!|xWwJ6GEeDuBHMc~1l%0=#)zqSkJ64js;VYv;&oAat3 zs7V_Kgpye(lva|D!HnxmOS1J;k`$(ndu{p{JRbrUx1YjDcbaRr#*v%$)qbPG7b<2y zcXR(S7oD`9ZZb24D||Hoo3i=ZEq_Y14$1U*kENn=B1FfwyXgvArG$mTMIHHBeN;6& zheqyNQyd6sOm9N3az6@xZs_#c3MuHb=!Ut(sQRCk!aF`mNdiLdayC_Rq`IbSoZ*SU z{{);(;AWwNrV1M{9Rl6*Wb+HL-On@V_8W1v>_|YspX@tXkp6u~kz77Q`mfVX?r`iS zs*jOT%FCY8H_!o;ng2)CSB6#9b*}$YubeeQzA*c{*@DhW^30;M(@kNCeaDpBUR3dsi0zamk$u<{I!ngLxu|{ zzTrQM1xFeT9|9@ef%}mK^9sIBd8@^t7iG3HmkF4XAHZ^S=lwYE0-r0yS(Ib zsF}6Gd1vdjmH3#rH3k6EB5)U^30i1(ile@U3lJ2mU;I{9(;y1Co*{}-=PBeQ4-F%- z%@TU%)T~cg%v$Puu@_fr7thB~jC6E{%9v`+MN`Q~v_qvl9Z*jLJ=3Le;1=SN=C$CQ z4zfNl5)b0g>LvNxA<4h6tKYa_a`O3QRug(yPpjhP^yHC9repi$A}LM2!F~#|6eV0< z%*X&tlQ+XQ;(e+J_8Jhm7u}LTJ5HL46z6%EoX3>lfm=q~kFLw3V&JWD(RKOoCVcq< zrtF`pO+;e1>VO&|DKU3$DWU24Os57ryiOO;2L!2CUz35}9TtUKSNkalSUKqY%>(-C zC%kG2c)&ir%wb&0!XHGUSQ&A>b^5H^MEl+1x9e){*5wFRI(k}#{@OQz;vn#^$L zrF{(^Q#i+Uo3VFY_=fW?aEq^ngz@UJ1j|BmhpsC9aXen`%xfbG37w_>waGnYC-N0k z@aMHkrql8Jt3A&hF~fW{05K19hR5o((jB`>u=L}{)*9LxLkH-djWAcn9*%~KKkT+b zO8w;)LLi9tn51*dOFsm`z=faPnM@{@ig14QueVZ(|L!Tt{p&V`X3ficvJsNb^geW3 z7Mtz%gtFki7+{050oiR#ULY9vbWv}qT>e-ksKBsM20e58f=;|QYLl}&5sf{Gslnxo z%#{R)#u>FA#ifZgfI+i1OZ%Ux70pI_0@ z12{sKvSnj=iZNdDTQSmdl#$AQwiTj}2vwSU1w7wP_YMFGxaE9TNJU1HejSYPC7Q^F zjm>3gT~Z5aMJU7XS&p`42VRF7aIMY3PkI%_#gVn`!jF-S?vCciHe(mbU&4i$$%4e& z>W0(cwKRd~22fHRTg_O4QSOQ^#s17->fVh^Erpqy?B-h=F~$PgYt?k2?uYM$Ym=^B z-SwvLmwt>qVf)ukJrsS_NGK#^ZfN*qK=^_%lfxW0T!{EafKvfBc~y}r%em)Yv{Q1} z-|?l^MZet{tygAI+FJN|WlZlPyYpep*~bY}?d&VBM{u~HFAIXx{Xl7wJuo0W+Tx(U zy1v|6x8$7%tgR92-mR|hzvdMN9CaU3uGp+13xOvD#D6B^UMOYm>s3Xr9B+z0`{jQU 
zDTwlQj2&lAa5Y=%&Q1r(Xu-4&x2g3vS~tssamC2rjxwy)uJ}JhLav8Vs|Lq3Co_O^ zl0axJb)1cE7j~GhSbY3AlRwN(`~SJAJoUd8tB((|f1)bB3L-IVf^)as&OG_lmj;oy z$K@6Qi`)RaVAEs?CZd=k;#I!!WpYS^RkG5!oCOmIDpxfJfyI}sR(WYKK*!;@%w~6H zR2OI5D^;a9u_p4{qwMEB2?!H~N#*b;qE7Yv1106-+qTQCE2MYXs-13pDRZ`n5`uc| z152weDGj5@FdJu5SK{ajIp|duSZkqo5EDl%z}qZ1>~+AaztLI+%+Q6?h`?qzX>H4f z4zmIVOlI9%h{?B+a~S(qaQvopod?+N)}|}5R!0p;>py%jwA0!8nH%q0Eu-2iQYT<| znB#0Sn7&Va8M$Zmblh~As)<|nKuRSBo5jDgORoYnFBDAVJWVeN)(>`CY+zdTTG|c6 z{AlgGd{fnu^7DjkTltG+#xnlJPg(J-KpJ1t81a zYyU`wx$Yh=fN3I*o+JO9|y9vAZ=T2*PO^6&pmGZ}j$)Hjnd6jnj6M{s8#>}rAi;w)ePX?r?b z^vs!~o9x8t>FxBKJer0a^egHLxX|hD$Fg3$^M;)e)>+_ADRYtO^KohShW!m(^qNq+ zrMRPH+;Q;cXCD~H4jtzPksoXJe8uT*+Kfh)Xm<#I$h`-gQ@Fgs5Eth~fF(jSX6Y9> zMy@cdLgDrF+W}IzEjyrWejz1>Q%SUPY%(cu!B`p7RTlQK0iC+Q$ao9JcKkjQWJnU0 z23-Z{blD8nJCDO?pgK&{rZ;pILH!H*HlO0sa-vEwPZ5U=Fc{>YhDskk{|mex6;ddn zKIn1!n3+8%5===cAoDIBC4w1&gHpB&5&=^=Ct`&8?nGQCDso)RN`Mxf$*Vq@>wfmY5KyEUoYc?GxK-lfZ8Pmhw?cGl z5#IdN~8bn(eL*dUKTQMVs!a}Z!ljcDWjLJX~FEzjYI9D=%47I;97cuo>m3lDO?fZKYNs%+Q5J^TR`?eZ>R*aTNqi)%T@7Er+Es9x@TA$Q-FJ0p|Cr(y1H3`l7NRs?O2g6SR{QEwc;PeWA zo`&Slu#2cPLwByw#~_MLyXlkHdT>&K_*J596)>GTgpKELzeexKpyWR6Jn9RX9$j{` z_%0@@hw}Zc+vrPAau#|PPejqiJV-pDy$61z%d_}<#uSn;(&FD^nf~yh%hh^8IJDqW zM{#qxzZRd~@vJklv>>}Djmf@|;&Q29z>zv9CQn|Ds#D9aM zaWfzBsG`eEVZa+$pQ3M6V&a>&@DSZ8qbx3n>O%_yQJhSNCChtNYg?Kl=p-mA(D#@D zONTnBv>Qf~WPNHQ{LsN<^xUlFUx@JilUB5;Cu}R)dnp%eR&K=u%1|9brxwcUS!ju< zcqoxCSuO!`So=fkO1}+Pg!d6MEhMVXDHoP5OYF`BDcOm;h_7>snv!f28ucy-P3v!% z7?J2>&b>HtMuetR`m0ltRO-}YnDH{{5S&-Ixty@nzkYj4d&A)QRJlgxkQg&q9eLE> zW*~R?bfEW?xj>Q6;H=Y=p`@fEsae|+9?7L^|4EtGrZcC^+FvuI(-D0t60NX2AI71D z1g-mX5lztaSaSu?{ytgC8wLSJRZ~K47Oe7&gJm)dpB5_G z@y^!YfE`_Ne8t*;_dyv(ELOdRmnEn3cq1ZO}#S8yi#>2n2 zwA-WLtBymc^i%E0qtMt?Z^ONoi*4}msjGLpO*J5>I6J?MM`$-(SM(~A^7!mi6!qau z&zE{95uANKJ?Hip)lQ#AIOK2iivxqaO{?Ntmqe>(57pv!!thj5V(BC5oc`#ndL zij($|;f>9aI4+0#2wU&DOy&`c^$@ExNao)0yx&cQM8Y@MV>sQx3JUGb4g@pTe0qhF zkOvpkK3D#!%P;FURWtv*#ehRJIAQV&=ID;3k&a7F_i8+J;pl{dd4TeNPa8hzR`K* 
zrbB0rG4wjWi#_9kMF+Q?SEp`XWAv%AF@-OJvTbcLRamQy6+UY)N zOsf_a9hPrJI2IO2KJ{+nm3blUWMr522jO+xg_8e;V@3WGX>>k6wU?yQsUGNmm=+Dt z+qM`GG?qGp(?#fmOJu@^gkmlS}RFPiEEresL+!Oy(cMYM4(urtQylt%b#}~ znf-+X|4w#EtYTmY{;V-q^=+5@rouw-cYai2%mZUolr95oJBQ0Rfk`RwrT#3sCwZpq z-#cY04T|@PkW(vNO$?-i3T>ptQ>^>S=68x3sM=0qq8uyJGhPuu37}+p3ww)yBswtG zqdP3xr&Sj5PUSc5&aj0_-`7WEjleo9)^v#EuL&C#GFpoFYG>po7Z4ehJ@;avG3mYRUAba*$A^RkNE5RZ}z6Xkh|+ z6hK(FV2pE*AYaB>D!5q0Iamx@Y+cxCOou6`_ZypvvJj*S3ujy?|L$_c*O>X7>h5%A zEO*C*XW3vwOSYc-#T?(k5JvlRQZy{xH{B-M{HO}hohl6}dt)mJW5;;7Ctt@dAwkirESIF=7! z55N9-1%q;7ip}AoLr`g`Ep>7oh|V)`#M*uWlhh@B_EyU>P@Lk4t4kW6z z#wWIwhxgxbAQ6csCWQ2anQ?DHY_c%9Z}Saf5n@#yfOx=ypJzjT+amO8fh(j3!!#MdcB(SqZEev`z;k!+>H!;qm@kkjB9^0g361tpMTSl zMe={57h*xv5yUE$$Gjj#hNr)1;oTj|m7exStBhVPIj0M$-Q-GsOKhxV&E)vqJ5H`d zH_BkCEq!N%pW20+dvBX(qbEAflm6zw1MWwKa)*#J)!N-xZ#hW5fB+ln&+NUOr@h-Z z$U%+{Q*QY($)d+G`L(VaSZ#bnZrt((cu zFlo(&hu|v1M_*}d{OfCoc_7B;t3i_~sx+oBm2cc=e-F3fRhvFvb93MXs+ji{{%Bom z_J!SWAUWZ6`UVal$AW1W?V?+u0?T*4mmNoS6Ki%dLo9fOHtH!2vZoV!PTTRR2 z3tPWSb&+SEpYbYp0_f`LvOTrPHmASuHEnL^*^GAcocezVqO_On-tdJX$rKF-m7#SV zC5WJ@(;7`ChX@HYZlhtAqijxI_S-^BY3pOOFk03wJ1ovNnb_S@c!(TgB&|jT1v7#T zGna0ZXvHdT3X+I81lh#3v2$(GVTp)FQfv0H^yS!r8k;Yv6(ZRQz10Gl^`T+U)4`$NQjiTkP~A44m55U}+8SuRg`7ugd#oS6doNQa7AqAc|5WOak<#){asdfmvw^aAW$98KB zq_-T^NHWd2;#8g6SX+%OA1skLvj}Q9;nI|X=@%s<{g_!rc=fm)|BWmYe5}!ylG@es zaLm{$lxI$AE>ijXb%e=U6HC;uYb9i#$yT!CYsh&z*2fhr9(3Z5=*0CaSoP?{v`G>k znh1;)Nb9IX^u#Ym`Q3OpSEze5Du)K#zX#$D$C$uxl#xN#toGp0UJHUD8CfDoZz<=t zdvFz)Dy$e!87)6-b^rJuP83R*1lTO#4|{fup%S^el=^qW1f@LRKb_W@e)fzrr zO2O-+1&DfHs6-TIfF0&?7_CYdo-lUMbZMMrdqtaQwARg*zswhbOuxh)5=M>xcNQFZ zF}L8<=` zY^0bfQCMOSCCp5$&cSkXKpw?LJ-@P(uf#fz?HNUJa!LHgy3d{sSw)wmMY-1zf0(}? 
zH~N!WYTW)E)i zZi}6?!n+8}b4}w{=rDq!T-2Ic1rdss-uC3omwh52XLg7vi@rTsH(tb>x7IYFee2DY z@X6AVS=mUdGx1(-PK)4vI*2`2)c`-gNoYf-fvVH4I>5M1N_?sUStP(3z$y)x&U{e$ zI=19!>cfJn+3Kp{^m9k$NI*`0ICI-dk;{pGlHaWx?9a@-!Y~xQq;3-LMa@YZBwKq% zWbS17o2T_>WGK9VmL8$5&Ft{IFUh=u^>vf~6-0j*z8X%A?lxeyB1;%CtkmA;Tpc~% z!iOTh7c$Yev8V2&UaO;-E*?Y!C%#Vo^jHZiRLk)SjHJxD5`t;ontex06d#nMZ(ea z3J}~T))dJ7JJL&<=PG}CCL{jm|0}S~X?PXuQA&Kvk`Jd8bZgVRz3D{jeVR8QMant; z`A-+z$Y6PZho+Qwv$C5l3AbZl!6)`)Zs*B8M-H5-_XC5nI*iK#y!n?8=S!LCkI&nWRkhS5{?&@$ z7RaOk?1Ql;JZf~XsBRm!dQkL+My;JFr_|coJ(LO`)73ZBh;cf3!xk+yMRw%G_lG}K zr0$lMpuX9;Hq^*>?&s+>W(4`bq#oePHYBK%(aF*?Wmts*KXqh^Xk~O$@t>IUA5xtF z`ss#Zy=go3A(cqSwb&I8if8g}L3L$CJN!=WPD8}UUCnl=al-)+*1vwYZG#I1@8NmQ z+MQWuXxz@_-de6d>Nb@k_sEi#;NMfX)mK?aRk#*OD{cRML9(XjVo3ITA|SttBk4VZ z_?tYyOda=6SeLT7W*`me@;KhF{#(NHzG@7Ek&c;^k4o=fAMV=IuBf z3UG}yrQce$QPf^z6?PSYnvlY-;kLL!IR;*Ml94|k7sWaf(PEma!mlPO!?-NBfy!_z zR+1eY`=rZb5A$mJ@}FAIKC&v0mh4nxmfV`jzLoQ;ay6)?h)I`be{Dtp^&u_BjC+M= znitxpmh_rrC#Sd!z1q6`#;fcj#OsA;) z@6VF!m(?K7y&aW!2(uE008sh|Lww)~2kJ!wgMJ&rOiw>0vuCQn@1SaJTM%$(zF&H6 zLkHvWKl109%>ramHj1oLl-~^LVE%y6FE7mH1LN8ARi}f$QxorsU=Q6zJKxsXzEu1lJls;&WGShUCNcpUy_jx1ODvYTFL=A^h^xn9ar z#VCr}L|a;=?<0;UD6CHh9(;CKO03jf#P?sfc=!e}XEWq+%*yF1o??=f9%^-k($ttW zEBy&^dCP@M4Ktwu3t104D{rdzdyY)eA9J7lnb%{TfJavV7V9XTq(ip--hr%?#4b$S zo`=jM8S&ODK!jqf@_qQ}`sQ4SA?fU@g&=!ZpCQx`WldU@!J1Fs>@oZEqs1lY^Z38t z2>2SLv+(elJGJ&lHrEqDEz(3-d4YteuF4oa>b`ERV#j4ud`X9>1}J{}QTNf=#e5ID z*p35>8#G-5pSh}D+W8JC4B`yOJKL=E_~ zw%p)Y!l_!b1orgx9k4tbVv|(7aMo1 zlSN5b8*SiNf`bNAJfx=D#=#hSCi;Y8y`+sGN*lXMDwyE60E-OJL%~);((-#Au5D3bzp1KrsWVCpi z7X^QEde9JwaVKcEmwi1LlemRG{RR|S;gWIpeE$b7{|^{5A~UTFrO}LPq19h3%zhFd zkfGR|o<00ZabMv|w(TSN)U}*RDmLDG+Wu+|=aS2N>(N&3qs-XRWLNMcdHo2e1`5QH zzPk5NEpwTvsVEPZ6lx2-UpkRXak{+dThg;+FYCAZgh02Bbne4OVP1x0t^Q$iE`Uwc z2&*H-cp2d#NnB;KIJPvcr7ZY>ZtG?FDYSfU?OkkVr?nrF6pCd2N?03%(DJODP5JtZ zVk-4#Yq0ka3B&JtfKgcbFh`>7HVZ&PTz4RAg+OTXDv9-|L&I+iVs+t=s6M3^J|dCg z9L@DK3z_3UKN>}&k~41aa4kgXdec6at{Z{u?#Vq32y;7h%kiP4-2 
zmKxHczrk|ec7BKyXW6pQt{G9iX_`)9jgWgpL2;@J_($VNkj}bq z^M0+lqB}dUEG3Y@d#aINt}a4TYN6~;(xW!{F53#GOHb2>VJ&MBY|YQ}2=f(v23?@9 z1jc(oW~_4{nCEQ-T;x1z4`ZY%@7+8*&9?A-<=$D_6%O7|L;3G2I zy`L;>4mm5K+Cvli7e$)9q$nhSSM!$%?p7pJ?Szp`N)PEHI=ZN2wMgVBS z$=WPf&t&)_L$J9sF`O6Rn0dqap3#LiC;Jw*vCt04u1p;XPm9S{jR~kJf#Oypc+)ZtT8YBxSO1JzvCT`rYem`9dXT z8$mb#AP2?A#z!_aNtt4*<6cK{^MW=c!RyxXHmxRrt-kuWvE5>P>2~tI=U-3l2jcxU zs?hQ6#P7T6m){OM_h{VIIIZci|1xHq{b$jhlz=(iVPCAO3>LW0xR&9PCP`G8%Nr_s zSM8WnFpht}8|25lJCqb0sLM`ri3fF}c~w4F(mD8--=1=>6VxDO%j6V%nsu3O2D~wU zFANlA#~NZ93keu;-WRHK{IJ{SHsg#Wgsk2fVg_3WwJ8AR$!)F3m z%)d5~70L-%)Im}uYmV>GoL#yp{Ue|9VUy~qRx<5kD1R?0pr*u(PCJOwj5+a^u1);$ z{tQpgWVQ=x?E&|~KEyeYwB88%E_`^|5I+&%CkkLvVh55O{M^H^Buk9P+QYx(`@!q5 z3H@tEcI>g+pr>K?H!_ci?}N~zkLGt6O*f$#b$q!QSsTt%+kKs;l<6l^Xu~_AQ?0kw zjI2iA-*|eMNvt`cAHRv%O!P5)%1z)s(pmh<^V657Cwa#xI ztwcWz>)^5hdE+$e`4i@j>fgzCTjR|tmERNlHmZ=1 z`G30}hY}Hs`=+MVr7$QboM2Pcks_^6orK1w0^KWvfQN*S2$D;H(JLJXUo3x$& zf*F@2F8ZUy`lK<4`EOzlMLEYbFk zxx-Q*A``Kz4T^n4-#mN9{BYVHZ4S2P+XFb<4s9} zF>c9l;~?3fOLaF;LQF1&(ZT}MI-KrAUJ#(;N#Aon$Zlq7FfPq${5Q! z3Mq2l-V^LJDfw;>=}k`2$buaB@mKt`OC;8YzsS)%$yc9lNBBD!`XMyO^=j7}0dh94 zd~O?`R+51`vh}~qdG~2O(K>+61ZSPFvNF3F~HDI%#V&f%&s3ibao)y?GQ=ZOgXl>t({Ut3w1HP=(;wz-?8-xtY0aFQ>? 
zpW<}uAjOo|E$TLW_~%mPfdXY;#xWCkj%Jq*1tjD)Rb0t31SQ0L+U+agIRB&7WJuFx zzXEB| zX&gA@S~Q{mkQ!AzT6^eH1*U2AJq5~j8oK%q!*&MnApBLo`#$0hZ|`d`42!2|d9aa~ zS1d4q%}$j+_N$E>+0O-DkP`Aw3oTd$Vgi#YykKhbABORwxlSvWqkv|#Z^IS0zP~w2NSQ6qf zZZNpZkPL6RY+%HR5?0DVXGtPk`k5C}+Mg!Rlkv?EUxNJkx1Ze8g8BuuT;PJ<_ocXq zEeki74`U}u<`>qpOTv3WSl6!uPD8x!zNMxulka~rC}#c0$RTZ&K;6dy&dVS0?_jrU z+@i#UnJt<_dlP2cODvXS8u2sZse7nW-}JuyoM$uL>XOoQsKxNz^ikRI%Ad`DW9DNQf}R1l~4(dwtB`hJFpYbU8^^zxc~a7?#Eh z_lcNmB~EEDoFogQ%B^2lkuO58ZF(#jhZYWRpdZvBTK2YjaHLBEV4{fp$D~N+6My6W z^XPj*jtJ*3{R!K8u!zVha53`cxiW8ro0X8>kTfcJ`0-OrnwKOJbJdvC&uVOkdb)i$ zL94L;{`6KRvM!n$=RE&{S-<5SOG;G(D8$l}GmhJ6rqu?{$K zgjNsrTYYL+h214Yo6-t(K0SAuQ)sDcm4AZXymyODY8m@R;8KY~Wx4N=fM)3fGX_sE zfN?xO+Qma>+hF=muH=*O^?w5RPx$8NL@S;rw|{`ZTH#lsV1dFA{|R6XZIPyv-Dy+8 zB|eeWM2Ci1N6gb515G09K>ou|_l4U-g(d9Nt*dD;li@*qGd<=(IP(JUDAETQaSCiE zzs`1)Sd8#qEMFl*pED2Vt^E4;%L5#J3#hxa81epKPpmCi4bE#dc#b7Wca~cG}-vS3qT-Lzuv8BhzsHzmbf(4_-9OhGC?}#G>xMS zoxbWQ+?_16KB3Q7CYZXI*xf1l^h4!vAml{rtVixd^!fRge#`l=YsdQV$_lVdR$cr4 zixo(2z?qWCA+yhg1}Wk_b}Q`CpoWMeoxwXN9eQPGI^PJ~a^L(A@A9wqD)+t!T%IQ& zZ>Oa2yk4Og`La^VS$-68NsIR)g70jzU$d&cCyO6w>fe41On_&1Jmg-pnc88NWwz(Avw?X<07~x3~R^sU?IFXMM-f|4;D4Zvc_%LuO(i)L*q}y{l<*!arnVMk}gm` zu=&*`Mx^`uvcA9(u==DcT;?74Nca&t#l@IsK!!KEtx2BN&%me=3CNAqr}wT(y{s-P zLx1D)?oA6*B4IA$G5=yu_>-Coq$TJ{GMrT~BcFuMpF8mVWL;127=#g-0IdsnX26Z& zpVshkW_?O|GLl-f+>zH_7+E-ijs`Y<=x7>xh4HfM>X@qjbeq!((cJa>%EY4S?PSno zf3Wh7YZ@Y<98k9Y;xU%187vS`2myqdI?1-CPPw+ZzHZz|FO;DCY#sIR`K^G9Gu_Wj z)8m*l(3{zpB9?I;WJ~-4Q)>RG!3RHdI#c~J$WolZ#}&)&$g^1p>&&a!WVV%5X#VMy zVc3e)DL2qVi1*uQ+W-vR z{5eXu7bELb{3Nr9dCoY0c5IKzRkY2leJ1*nKsr|5cz$qh+2PG@t)r~arC|HQAx;02 z_Q2oZ+0R=J2@cGK2&Jr)I1M3m737(P1;@d6WR~lo#xC?kZD6c9+bRen#iI9oj5~hyd3BlhAGc#b z^(PhE!{-i=4w6%t6jU30tYh1>3#UTP-qtpD{nCX%0Jn2BmQ;$8skr8fbezLxICJaU zeXpqooLb^Y>#%*$MT=ejijK&Ep4uxerm%7D#;a|V$)e?YrmFKPi!gUbT72q>*SN9V z?F(D5CtB4_{5&mmj?Ad8C;ELYIsKT^&u#=Tqatiy4(L)l+;2hBsJ0b@M(>TXCM>zz zGj8KAVSrDH5S?5{#N}hZ?BAnKolbh8{$BcB*S``{`MFczK{s?SHdXB$eR)udDzdP!7myo3Jz+!m;8ybmRhiq0 
zDp%yzGAw1bUGB(=MXQiBW8{y2!G^Y9D10YxmSP06EN~NrVy(gw4#xaMYP_(IWz6tQ z4eqt`7k)CW|6_TWqF~h++z+^&1J@ftm((+(pu&iee3bOhjOjYjv&H=~5*1Xzq zAe$lNH}3(Vpl`&ad~3x*@cHsLP!9|AVU1sh#f<$L&2}Z$&I+xO~ zrQ5G$8Q}QZRV2J|;eCdG0j!+enjh3{&=l0=^0vl#E($+%Zw-DfA&}(I!F3Kk591qq z{6~!(wCtwVxE;rfDKYB^?T)hTjO{%tPK&uC-6j3vbEH&6gYUJn$TT+6L0H|GnJ_Zi zX2dIeA#O36!JmBd8aaQXkEPHnCi(yY-KeoS&QZ1D#$5oe=y_zWGzf@XiXPeW(m+Rj z(sjgJRn4+xs9 zP9kw1o~E^5OO1tSv|Ovt``ac5!9-Teza=@ zFbe4VAl`51*UFGJ#~E<=-dGJHbN^YSTuu|D{zBs`e*`r$QP>nstb1Mm0vm=AM^nh0 zu$}vL9o17tc;Y=fN;kji?qK3AMNhJUh`|Ix51pNuMF<&hSN?jR50s$n?9D2iw0o4v zHi1|UEAI@6^Yxo~>|TVAV&kLCvv{w_g>cXdG^yCU%ckm^aUe!r%c>+QU z!ywnS(#;uW>BPj#LyB=;ujb^KuP4%R!j}=G`WRr08QL5_5eF6mb!s3`PSbad&U&@G zPi*Y`w2LOTW^aI_BB0;h6qLMw}KyuM(EcOGY_gf?z z=jGb%hh_nr%HmC>hM{i%$@81LruUuNo%n(oABbcxt@)y{s3Dms_K8{MI+bXJLoJ`x z(gBZ|ZUyvN)9Ub3+2eU;lxOF7k~f_eUREv7eu=`fEN5F_Nj@Xw?pWxiR=v+cqUuHu zGHPoGUpY#kd2~`f_Ai|{&6VfE-NZkoTTASFq@$I2g$K3AkyqM?#}V zwWewdAe(O0^vi47t~gK0fOku*ruD)acS@OL9g@EF`QRMv$MrRFx3(93 z*l+RQOw~Xrd+yn)Qv&lcr>b9bWAMh{RwUn>_=t3@6j7}b9~f?lD8aaxKhILL~BywYM~SDElmARH~v4 zi=FG!#5W?eIz{JDCQCi9O8#LVwm=SY_{AL4!{z$T#G`2;5>>#=Bso|7`Tr0ufpf~C zR|**YSND(~{aG3A#7{j%1N}?X+U}KTfNYH*>es?nx?g3tOx87w4asn6cfdj#nFO(p zU@Jdd^82js@o!l&obR%(zbt>U!UOj7+u@p4n3ko&nTc#sj!q(AfU`@pnR+VIlMa(+ zlL#rPHG!_p@6g+u~i3@X7h(mFJ7Q%NE5lj+b3^{+W9 zvArsQfzNo+7!B@p(jqjTa6uC4$=H64TKe-&H?B}IDcx*@S4^FP!Bjg82cl?N-5JG5 zJ|B^57c@zH6In;;AocPB;=v1WZ<_%|orIEvRP2YUiJ`FAAIBEH=QFK(+f-jaX(#fn zVLsH%k6nJzAP|2I=zq45W;uA687vGqmf88&>Iqu^TQO7)!3vN1D$BE4KltVY;?LoqfP~4;w|!#^N8*9Ne`8E* z&njz7l{*2m?RC;3xIf9?u+?JRYH*&zx!<~X&lQfR?9s?mHOYpL^bhJ>J4#23ZqpKm zOvT+j3)As?^i6JU!chO#o|Q&KO*zZ&Cd|YtSNXewe$DAHtlU9m`L#M!Eql2RD}?Se zUTxw3VC-qSY=c|IMvB7TGhsQ?uJ?)C9N5a<7syW5v314(djjR7CC#zf3mIy!$cuW{X)pNReS-? 
zu6G$`Ou%u=;)n+qIeV+qHXS>g<2RN#MD=%R@TI6|u5XyEkIqs;oU78GuBoM`06N#T zHq#WoD&E$vj@RvpG`E|4FSHdk${`9>-gj!T5iz~c0=f!q`>^X}=g8{W_TR$6y9mIb z*&48IVx}}t%A<0lHzXnGc1(@|2@xp8Rlu`-B?Mb2LpJ1$q~}#es$x=b0gcWGAQ3_& zn8&gWo2TD@GRDZzd~pe0?ood9(t(mvhsLoHrP%(^zErkOpMj=gdVSdaw_A!|V&#_@ zCHOi4h=2pl*AvOWD3cKxlBfmGSdO8ph5tQYP)1ji;bC=pUF-(2E}V~;s?!t~2wh+@ zxlcOa0<`+3KVx}Mo$?$2xjKrxQ z_@3<6=^iZWmT%SC>3|{dE~Ef>>WeB(8I^8ekA^eTmED>&D5P<%iQy!PpTrwKnNlJ> ziT|4wXDGt}0nB3MHK`q4v+HYSRMzVo<2n^Wm;UqY+ojH_E;1!5V-1F3imS z*cR7qz{tK1s5|iqYCt%NIW(et_^E#h^@fj?Kl@ER!hE?KmBdS7eA_iVnvU9G{=vYibR$ zo&YNBDOnw#fIwbx%SuUT)HYsg6h|COnB)VhY?Q8mR1QumywD~E`W#PY47yl`eIw@6 zb{syMcTt-h0^fv)0AKmg5cr;%reS!RlQz(Iul6q~XdVRx|Bdy+&uwjdsvwXYo?0x_ zkSfnt1zYZ-^TYfZGw-dyO)!q1tj^w5p;X9;sIM<{5FXxbf-8?=jJU(I9!ln%|JV=EjHW$4_GT3NN{OZ=gP(m_G;KVN( zuqlJF%-EdM4!vGfmHxLwewyTbS|Is6l*dke|4-$Y6yOzu4U+3b>I@wI3u)a%JB}|~u!k9@Rfw`SnacqcQ13wIsF4!a-3w`|X`|?n<2e@p!cxmqB zdoTY5pG5~RL=dTz{=h(@YNzqzp$zMV0UH^pR3%I<3qV_*wZ_Fm5xtWw$RFZp%t(-b zw>^l_QT9e$ADc!F?r$j82IHDo&88a{>ynrVMatVOCh7vd;eMw%4!ym3e?ObeFUg2A-xs;>uKoNXDgL9~X=D zr-JL`9t4OFN1gmsq0NX>*t9V2?~tl-iC|^kml-*mu>p76Un>oiJS9^U8r50PKX10@ z>|>6Z@sa-0us|C2?9~?Xq{K=8ziM-VS3UUC{oB_j=mA$ldOK`(lvo-OG`i#5IyIy3 z8Ad76w!ZSRzM5Db2&0!ngn2SHhcrl9O|jvE&jOFxrF(F!^9TPJ@Cai@QOed;$kKKY z^?i#3`ab+b=z#g#?O7-(w9ff-N7bK`zo8=%IHYx^sK(ua#BYAx?IUV9)mafi$(Oxj z=}vm_@7~ij?x;pZz48u`yEj;x7cho2_oKpWp=Y3?@5?x8z; z7J;pK?8EivDFEs5m*`4Klldm3%rEzk?O-J zs91qj#R@ztu%)Fxw7hsAfM?yMwg5 zILtVAImIbDB|dhZjq|j+h2L#$Nla4))p8b43?!CzFl{*2M-9ss|6C$mSOFp;Ya4+t z0Dr5bibUi`3F_G-6NOmC<;y9NaZ`JCB3r0_daO9Id8aFUX@ox(sE7OWHX+G|#*P2k zixIj5k3lH;kY6hsAqKAe37{qSHH-N(V7wXNiqxgpE=-I-f{q!f%2ayXwX6f-?;@G+ z6y)S#LxPOII55E1`R0cz*5%2e2CXH{FVLh?$J@I1+&D2*ss9ja*649E(>K~krAxZZ zPQ$icIAkZ8>2;w*>^DeKl|ke4x$@w3h-VQJRrcg8b?=G_vl`4FZU;K&g2R7`y->Pk z{rGDMnUEI2+neXT1Nc*TZt`1W9;eaLUc#aUSl-fHE1`QQqfa3ib(>Yi+E|?wQ((?U zr*dgG{UacLyf#k-Kqpj>uV9m(Jvrjm!J^;(lV{0`G>8rX8do2xHvYFHsw+P+>d;0V zrwlyb_vPZqQj^@w9H{*7JYt-Z`Cf`!7iE7=E@dOV-(6XkU>*c;Z~?%n(;`xBM3A8g 
zII$#0heLN8iz2)KQ`wn@LmmBXd}hpK8e-HS*(W<$N=24x5R>d?lsyq;DZ9!twz3pO zb~0+L*@m)3$WF3l7qah5h_XFr`d`oWzn+(mS6amJwACglnul`kKBF*+HIE8)hzn-V7kjPc ze1MWOMy8p*If@G_;sBW%KjfZM7xF>b6cgA}e$#8lm z+yST0aTyh6O3(E*YBT7#l-w94vRU`89P_i^=?BEUf%T^{t^kk#Caat5!>5%V5P$vc z|5(u;=Wr9gw^->tXl0v3g5pA~Qrh)p{f|SBknb5wjVthJdPLa25uNQ(oA0k$q8^G{ z>Kg42NeXK|j;oWji?vM;p4~2(d13MC(4>1yObhJPz)}RH;NUG5E8EFcPXZkLcF?=T zvH={sjlb?{744g~9y-p2}-T<)cI6Bzk3+ z9mcf=<(=ev+MlLC`y4b=H^B~*Qh_3!AKt5cDZuhWxw;YXRwmbfFH@U6ZrQDr02^Ac zPO|x7P_=2qm|yBoboGAq55E=Wfc}D?TlZvlKCZ2DR-f{&J{+B8{`AXJ)@gG~6b&!E zo0JN`G&k{{A0v*_uj`7t&t4eMQszj0lItcmBMWkxS0BL`ReL@t_%B*FJvejJWu~gZ zus7Y8Os;}?2gEtmX2onFTxV~o`x#puq+WlsSd#d==@iyZd3m{x}S3)JB($#J4Rv!d1VbZve z>z1;7+$k480}5=Qtfys_a5d*Mr{)uNszLt5;zFj5M6lJNcX8#_ZA_QFc1yT@2khL# zE_^xHngRf2ir@SenbO792mry2aEkk{;C5H;2I(BarLjDk z3U#0^)RK3^i`g#}zB9Gx*qr)#tLVqZ4J}$YM858f!X6KQ{o>=HcQ1wAJ)VCbXB+n` z3N)E9Dc|K{?!){s?6|BP_g2v-e5!KS%xq}pp?IPY`n%WqIiEXp4#Zmb6C*`i8(OrE z2hA}xclX9Ia_FJ<@voM#=g~O3nuHpuB3D{Q>6gr-zZ^DbPy!`*QQXd?`}_i2VMRx_ z2KVT0zV`NC8h`xo3;BlGtSl-V0opB{IajVH7eKY`8AFKia6#COr|7qdpj1y^;>zSs z04{&DXBQP&=8NNDOVQB*J8ALlO!-zi@vrzQop8;zl{WR+sJNKDw&ir;*;clt9DXk3 z&3v$apTr+zo<*?Wa2>Wq^rSa!4#^crkd zdydsbb2Gu+TC3uC(RH`4vG{?9x@=3=W=+4nIM=Dv4z4_O(`Pa>ZkR>g>Crnjx(^vB!HWs)I0RR7OZA>z$3cF}#>^X4peMvKy%wDX+P^Ar%y>!hZaha*#D z<)TCDDBW)7zwnSOz@op4PP1luxK_8 zEGzwbP+LYty81U11wQ%c`fwTUIwdYVA-g#^@z~p!Aas+zN!^)Aq{{mBK|1wL?OoQ9 zCr--kBl;KeAVL?3$^?O<*Wm&jxR6U<`~w6*fv+qZz5a~T50)S&k>un!VbW>$4I9Hs zemUDCclq+VT@)*WQDj^C1NH2IG}IS=(%%v*!+LJ;1ZY4GSi<$;AV(rnodYGS_& zq&4leV?GO2>LFz`b)H~CRYFMu*O2RwwSc9aJuaFV+mw#8FpyiuI-Q697GMc4#clx` zV~Ba3d!z>ivG&;6%X*+}Exio?vJ2ZE#3_9>kki?$wAq8#`6Th2w{6P6p22 z;#H#Ek$`dTZ=2w?yOgE5S7j|za?+20bS@aZAo?8q7ry^ja*9T@dk>X{_$hA+Z}bvK zI6q}=l4rK`w_ZAKkJ_*I{uKFxm(ECnKtY34Yyy}1n;5Z zm6j#=$e37n(n z7=JJkJ%b94PzrBa0_r0VwdJm}5;lpYU)vPUaVy9A0$}r4t=z(ZS15@l(`ASiJk7nh z5UI@%>bH5oT5%iPEDSw-#hsdT@8`k2KV@8h5vaxuN-+q16vX9-`G@UL-zhdfbKX3% zl+$T(o7M;cq+hPf`+bHNF?d!{AT_f)1ZxxrYiTG|o4`H{QHr4-G-u=GUt49)L1vS# 
zs3kvgA)a?P-{<|1!g@6}x4-mf46q>|7%Q(hWT*Qo{|uvqVXlE$ppN`0Y^q`Reu+zc z+g)b*rVk9UfAvMvyfWTSvt!Ezz9 z%yTg6p(NQW|B0@g6JOs+oP)C6RX)bV@Lk-us#-KcSIsAds5?(Hr`k%~X^;YOX;%x(G z(y)2HWtXuJT{b4nuEqIQnZu2I!*g#+tFvty7h@_AA1b&4fUie!70;F zk@K~kp~8v3qLsNsXN=d17Hb@i@Wh#21?Zq#1+3_d1raJN`-AF zs0V;R;`-irVDi}9#xbN}(feznMMu9TryDyJc)9Hj?N}feShNg%x2Bke7e((Yzk*(-83K)9kFB zrSDYkma(54zKJ({H6y}8rr~Voqy=N;wNFk>l(clKUB{r*;MO)so~kU8@^U(m34d@Q zyjvgeuPU>qjn_GWt>@1{{VL-^3LlO^>xxX^fCpYJB}Oi-gm_V?E?GEe))J%vSRw5& zZ^-lKHg9N*()2Xyl%bYiFpeGNa%Ru|F;r>jI)M(p^G}CbXcu7T^{`jg*=4m{=`O=k z?j+cn+$s|ry4Xgzs2_}D3nw|N6z^0=yEMnP!5{;wWmrpUM24t7*lsaT)OtRw1lfnq z)?exesgHy|MAn8F>Um>5k?t43jNH3p#iKNg|8xjl@K3{q1Yb%>*WiDR)LlBE)pLz* zm0ahlKTKW+%aPaQqA_7#CvAWk;PQJXT;!IU9dfBn{Wp%|Dk*`t!Z50#5VdIF3Wz_9 zHs8n_U6`#}U5u|PeA6vPu+dE$0Ern#kXED74EJV6{Z79{?!=)dP)b7j6d_Q)CIYQb zvs1cVM_(SS ztn6AnE=YQmM|jg7K%Gj(D<#I}YypP`M7IZ!zj`e7nL`M1u;JE3JEoNnUw4h%Dz%_@ z^XPo%cw>8i*GfrR7pZqDvV}D}@HjLq1ay2De)iP`hwgo5Jg)7YtaN`B7jVz+ zxh0(bE&636-VV{(##X%VV9Lax=ET5c^2K)`X6kq^_hmLA{%S6Q z1L1RIxxV#FCffUmPheMEVIdXGqPKSy%^$lkU?t2qlUQAr%5da%jAiBrTVGX#km50|d! 
z8=?w$MvZ8=WVmw0S6syZj#A7LLnP^mLURr6mvK+eK2&NU&dN!%rba2=D6-w)eNTa} zS$vhkR>Y?ep%ybu!>2wPXy0Q z^HE>V*6OM-*G5Hq?sI}Ik&MiHx1w$lmLt6`K>lAp;kyVumO`Vwgy-PO1_%VNrmdQf HvkdqjPm<7< literal 33763 zcmce85D1pEl-Oqo1Rn1B2NfCoWpvZZ00M!7nv04m zIewP>L@q5Z%Ek7Pi;Ib!nGFJYALSgi%K$o|4uuFMT^z282)9fXF1!u`KGY%Y$~fPucO?U6fFj!!#qZ(T`>Dd$(JJCk(X9i?AF z<*fEaPoX?{fG+Ot3yb~laoG?Y&)>~#s4KmJF`D^FzXo3#vU}RE8Yrp%?Q=~y-RhSr zqo|f3O>@Sf>SrgnP=NJP^@vCGOGVY+KGl~?c+IGk9UhCUhnTlriA-R>#>l^Mwo2~C zE%g0S?8GVLh?ql`O#st6dRJrJ+3l4)iOOr!*`{@< zX`C@3iE1;TvDonlcRqMiS4-BaWrjsUdL;}@f@UYB=?H;f;6DGrL6TG7Kp^B0X)zI1 zx77Vb*F@FHn~syC^Mf=b;#^lNok*J_&dV9g{pm!rP$r8|A;amyoZn^2h7oMahMmlX zg_Q4uf`Z-?*lE^H&(gIEA>)YQ*A1mPnjGG_r}Imv_om%7@m-}~wQZ$dL0^7CeHZZm z^%o>z7ZUJt$1qVHjuod6(ZcWTS0*I_Y@~I1L)=ff8B}nejOe;*dJvkUh|eWU?~Y1& zZ+h2-N%{l6GF70u;kyYCqr1$ImA?FhBn=SCT zq%Q*f$3Ez5qef9gBcetbU{1ggnIXf-!LI|Wi5p(Iu@a-#(S?hnj>$j&^E=Fmx2QUZ zsmTB6u}uw&{pO)bt4bBh&a9t;%A%-*QU4I;bK*1v{QD-9v1V77i{F6T0QNprh%JFy z=?)gi0>^ZLFe6~2EChG$l+mo{t!&!8Z;*LjKtASqfj9Y_SPjH8`p%4WquBhI3oo&e z5}v=m@)g+qXn=v1XN=d`vbZxNR}h>c{PcgKo&UkUzxqTajYA&r>4$kERMbz_?`LqA zbRgm}%gUVH`|HZxuK{+v;L}PStV}F+nEgn;hIoNF1B@#pt4haAlYXeHs?TSW_+gF& zRe>%eh`tWY33(w!gTMmA7DFun=L-73D#t*^gVFGEyPStu_YX=LN_spl)_+njz_Jxl z23mO?<VS+e%uJgPv6<0>0;S$*7~C!!DMOczS(+xczO!x6P?_{>KuUFCB>d)U{= zJ2*QIgx`ZJEyk7zvZ9cNzNGt_IAs<+e;(FPZU8vEWHZR5mq1 z$YvuwfymsfKZx$yW5s5r4&1l|xN#!{u={PKt{Ti}EFWy~?^A6Py~qvc3DoB-dtS%O zU%VYtwKHs2x+B>0*Ot&1UM#%EM)Fl634NI)2VBhqoGeYM?s8GmnQ@PVmnmW}oVjzc ztNHd(gDC0dn*<4D296(HhYZ*cm_JC43hZ?e?A3>A`QEfJ+*q_dF}gRdH}3q_k)8Dw zBnV+7a-Z|K5g4nF3Yeo32^iC_OKSLrD7fn;sp=E=h`zG4h|Q5H@#&M`qotPP>Yz#-<3B_VU zut6k65A;8Hc5BX!yX60rx>-vff0!@AGoqA{f5(A9%?;t}d>&O(g!H4{b;x~9GSDI@ z|4>jSDy!AkAZ5&R^ct%G+PIIMrHbl>dCYg1r)M_x?*U z@0)ky@?cMIewc?*HvTy>%iLqS_;4{-`|zHnsy>nX!e32{m$)*$?A0}coh8rfq1z$J zpbK!znj@^GKdGAU9{d?^hMRSd$MoxdS)TqzCw=vi$~B0QO>QsB8ATK=sx~&=g(iRm zbAlIYWVvh(GwgRtPbU?NylLt9*>Up2{2|7O;8(?3Vt~{~X?QX9FVk~ZQqhi&^9pMV?I5ePM%)j8);)Pm>lj`^Z~)G 
zpS9GzBg1d&S61eIwwMG6WTV$zHNj9vzhy{VaF;U8TiJ@bcM=)jJY9KttxSbo4YSP>$4uS^_{zi)&8_wQTNgyq5b8=Gs=2DAnt zfWAc_ddebYprc^>BGCmmY1sehuq7Fa_r6&fu5Uekp~XA2d5gM$cHhZb+759I6zxRr zW<#sg1VO|E;20CYF&ERFo-4o%T?V53@26*uCuU^sQR6V~-z1I$HXh(E=5y zKY7%+H@l1d-bD>1kglV8lyYYMl|<~y8>H0B$`oOs)eC>-M-Yjaseyy`rEV_0{-S59 zfeLb|oq_3VmN-SU2mAIZH%<-(b)GPP14RMe=LMz)v4Xm075hMgNYOI?$pQT=VWdej zLNihR#jAeifG2E_Ji%?EL{bUQ^stuluVAix`U+jMtBTe=yOdQQ@PLTZ#FRvP8zfF$ zbf~NEol$}C~#TwTX{$0P`5RDK+6B+6hz z98IZcNEG`Ib2a#Oq+vLacGpoYcMk|lij4%I%s&vW;57*IoiXebd0ty76UX#{EFGlB zA!lhBUFz+N1k!M?^8VWrW93X2|BliQW3V#Z0>G-h*S%C#P^(w31xymBP{Ff`5U=1a zd#VJC&4oF#cJA8xQxOoYG9N{hOtCuTR;B;!&R9BPO9zWcHj z{LUotxM+!|^J-#-RXhR+HSjMW+#vTJsi9D^NAP!uCjd_a@lD|8PUU$wcqNHo`B|r_ z+&WkmKJudj9PTx^yFkDl@_5OkG>zn;GZerXXn-?>?0)$@qL$}opUH?mBqg|Fv?A73 zlTJAs?Up_lDMkaX-DqF!!wzrwTfpJA`tnE59g4LjpPePx6P%$k_8_qkMkYWEOz0 znnGOB{5sYIl09Hh4pISK(l{&6{ZGjXU$r7wRE*1@CG=1g+3=a{ak4u_# z_=*RaBr%_$Dsk`{@C0x2onP3=^1Rl`%PYh5vii1L>mO@8w+ z*(1@MaM^qLa8F!vmV$t$jRc5_NKqPGhhm> zYl**Uq;Ltt01^HT03Ci%iUuwk62!#oQtzcxGzga{sNN(K61#(-gTcrxx2M%rb6bcl zb+3v}Mv`3sLJUI(oj6R@RE+!eR`bmWr#IU5XIBOmZIX?IX*KD)uLgnm(Gy6|v5N-? zMl|AkB9HjlD8qB@krP+44+fF-Pt|_yt?y6<(B{RUz0kd`tQDNePM#!8 ze)Ko_wHWK|>0XfD3lH~i~-yOhe(JA|R#4K?)>0=Z~J4P{*{ z8Zj4;WRyt8J4FqWkUp!fTTl0Gf5CrSGp-oDYj!)LoGomMIB-$;EYbU)uCsQ9Q^leo zDFCC9!^uBhA7%0oMZbGn@V8oWx)9yZn6&_6Ed`qi;4r~pQAlyER z4=Hc*HxR;6nBgPoHvwl;pxPQ$u(3(b23=r*1TuZD9r-Gvcobi<% z_OW*6XAr(2vyIqaRZihSzRl}_i-6Z4%6Fzbwgd62L98+rNtKxZYAT2iLMle=du&M_ zVG$b-1~_-~GJe#tpIIIHehgR>O?cOUUshpPu=hPAPAq9Hq1{ z0l@Oee=?K$(N!_?|EV-RVWAf`px*XrL5($+~2p-6?X3Ibct8i0V4_GW;QVveihw&9kZ)S zc163wPBhXTG$r$gSEegg{~C-dk&OSd0gJG@FZ$lb4_GN{bmLYy8Ka!F$ zHox<^*K2ehfA?0F)zv=ndyB%D*!vM9t4>7v!I=djdcupPF|DU-&5AG3}|wE}?zunmn07y5a!618>! 
zeR1FVU)Az&ojO8BYjP*Nh^~U=8jihcuBFsv8TlM4#O%Q~nk3W`RgxfV;tWK9La*Gg zq#?u%LcQIG^Ry_BB$CKR)V@D{(ymn7&Lwe{hk@-@)rsK1SBlF@(ZI*=3adIg;Z&5t zje6y7wXeGgFuTmrbok(&&6w*z+DFFfKTQSEu5^hX@ZTulI^x&HL@owqJBC0{6of+i zc5u^NDbb)g0J}bVd5}sT#wr^BwAvWtvLTQoBKkTv0~svN1|Q^L@%j5nw%G__&9nzl z(=sKJAenIQ2T(&Kbi}~cU+%EwW+WK8GOa+R8xjANE-NQ%t>2%0$4~dEC$ua*Cx0n4 zCQ|jbLdlsN#&c?JmZ(nG|5-^NT+m&E4y&^qblrvF>uDi$f5WxG4Swk%JB)V8W;2F5 zybKbOaSKU&g^fhgnE1PB0*?C~n(!3*wGJ^4U4o$t)7K!Y1Khm^-HJVzkNZu!*s>+q z>0Sj%2}0)70vgye>S^4M(J49eTy1N9DriL>_uG1QOW z26vO!_=O7oD_FM7hbiQv6PLif+55c+A5MgdHF5izb^qF`o={z6W{jPV)!Ge5LScVpa4D!y;hF2`uh#6lPGRq@cZ7c2w%NURlW1arCFi6k zg2E5UqZRUd&z{X7AS125-=H@N4!FqcjmDqdI9}Gy72AQ>_}R2~%>DM#A(Xg1XqbW4nEDiw&7AR?+tYxkUT`_G zjHQyYVbSh5mVbDrJhH9okK)W%dGAn9J)3|3z0eMlH6S;3xMx;au8-_8{imEbfARYv zPEo7zrt#KE)f^im(0^p!`&Akb4H~Q5Ad(jb=3)N`!B%tQPR(bUP*kToQ$u^V%~q2` z_eNy}vAsM2m%i0hgpfB28u<5UcTCzO)aOUYOOGw`&hH6rf7uCXkZSyb2B$AJU;MH6 zc@i#N0 zkS!9O-wh(IcSNXTxM%Sa*MmYt*K5r|yGrcggGi=_8hz!LO#2sHi*-`|XHQy&rWB*d z=inQ#Ty%xG!;A7RGn`hbJHi(qKDLi^e{V0{c7})ams)y;k92Q2pX@}t81>i>BCC1a zob~6WItynxUE>a%Go0D=)$mIodV95Ii-vzV%)Tck;7(QV0iLEZUeL8CEhwu0P!rdq zuAQE#x3Z_^Xq#r*8@Si^N>Qz=rlmtvSkKk>vIHS`#xQUpwtLN?^V-0t2kZO<;FFlK z#yHIlKeNJoeWax#v$0ZM?Bjo=KhDn_ekT=MHP6H(UV-6TKV92^@pD z{~gfmvkr~mCY-O9JYbx7Zd_5G1BZUaaq7MbYa^~yu~@RJmKTo-^c-QSFV&p>%LZo> zI#{_;?Wc=5A+wm0v6&i;D0kHVf`p&?MR{)PCm0L>-Cz5e<3-PQaGb%2@H)Sqc$-TNDT*}?7d{h z%?VM1j=qKeDt6pYO{m!i!YL8>oi+1Rk)Wp(u7G(!K~%kC`8vv`mKr#9gD-jw6YOV5jxep%kte`a82pz$(@dio@_MK z3Y3^qnToEy%wN)%9L{6M;K~GwUe}ABLh3QX=S!icPfF%n3+XS-Yuo7Ua=T#g?r;RI zsk-Txk0hO>m&DutMSC8DMy(}^@Y^DIkW5AUTVKv*+l;RN0tpHpns(IHw`rVgJ`xbC zQ?)*dPxWIoH2f?4_UNJavGa7l9NlffuB*s6be8u)fIs-m%F`zn-f;icE{*3M+P1Q$ z2lMllopNyh^Qbw!!<+uzV!aB&p=?vz+a9&l* zx57_3{*DF0K$_Al=D+Fo_cDXoznxASI{6EOnuqBMN=DK2`QDLFAFZuXhBVU37Q>q! 
z==XK1G%fm28}IIWE6CzR?qE*PBS>8RGfgN7#o19(!Ln;y*~OJfWas5uioc8VXgIn@ zSD0-kFJ2A`*d7XSR`WxkF;;Z&9V)v?ZPU-H+N-ajO?)aXwN9l*>w}-zUcEt;FSTE~ z`J@}+o6*~CZ5qqwEN4Os-)xi61>Dcz?zJlxF2%&ZTiyGefX_azZfxn8zNe)1+HmbE zEk5Q`rlmFWciCJEtfex1EScXipXhCr%h?+Vw&tedt7Pv53F_1J@~zrRxxGjU2uPHN zm;KVf+dZvH9U)$=0hHX2zVdIpj*VR z&eoxd?b3s1cD%=4yv(^WQ#5ct%x|_ki~NX!yH^1?e{|vY9DcD)-#*>_ry#+8?4h=$ zUFpa$C~Vco{qD#=e~L#|LsGMeeSn)G=KI6A8p&Ty=eE%za*YE>^W*K<7k^s~+G2R3 z(*}dDc?nq$EukdarfaA|hRC-ZZRgerkH(~=O_}~e53S)Eg38*?`@y(Z?^nFAKp6UZ z&wQc~K%y%`G-5^AzMxMH)U!2O{@FO7QA3OC_nEn=J^l)RI|-GTl`erHoT-!6_Pfvo zzVz|zI4T*Fjp^Z9Qh4BkR`!998167mCONj~h;hRW0p=6t;Ykd%0 zNyIk-9f8`>{!f4w=y-mnGpxI`zr(=#%_)#|{jU|B=jf{d4m1zYn0pVECT7z&`Ej_| zNEcq)`NBYKqw8M*mejd1V{%Q`E%P#N9$8<@*N<~JL8z4>V6qv|yd$93r7KBsBgXe> zw;6qcrHv#wkB$vCp8FFE(8Dc`ji-OTOgMN-8=&uVHpD#R0C^oSwl}ZNKjzDBZW6b9 z!cqDQGP_sA|G;l$hfBkON=f<;l9ttaimd#&r(zg5%G>kbp#epJS4Cmke7f!EH?~l) zhn@+U#_LY4rAHlaij}LThSLU|%alX`?FCA*CxhuzT6X0ROFNw7RoCUrmu@9pB~SPq_<6T1__z=v+!;BN zHkwC;8~wbiwPQG9Hp^j>XUZ=ZD2nyQQ`fOI)Ws#k)yuryiH&ut3dytih>>AzQmP{9dQIH|?r+Q2rWbAoHPPkd+2 z`JYL_*aPQZ8Y(GzEe9oknVOA@FvvB4>8PPtO zI~e|R2Nd+mRvBI9Wi?4sr&spj=>yz_^v;`%xna6(lk)B;ad5IXf%jX; zH0ZIVd$%WTZPm=Ae{qgGm4U8tqE+&=;M;5f!+{2h<9^xA{zpOXy}Ory=tn@OyEh{K z;#BVxH-PL<7QygN@_SC}vi?lZh2nHIj3%Eq7E2!CP=h3Z~V?CTivNy!oGve+K5b3~T|ep(!MAF-%~sbNbajM2h$Q#OoIr_AWWKB2`~RA?jZ%@t@Jam7VpA$c)lC&muiH{-E3*w*3m#4Jx> z92HCV3*NLqL!S7GVs-2ONQ)Q=Ouv#nK5za5jFJ;w%eANk1nG%tcCN6$ijTQpo}>xV z5%){h=0G-jyfDC!Rd->vfJWf_XG_y_49P)e|1vA`e`f)%JzA{~R_NSylDg447P=2~ zPU?9{Jz9TqUTDRU`ED+J4~~5zNl#hK_K2FI>0aw!#29H?9)cym0ia8#FKQ52dgnH4 zP?!N~)UmOU^1VyZhXoIb#_@yJpNK7QZ8oy+EC5tU1XguF>2i5aE@%@JV^d649gf)~ zPNj6%z}Al&Rx5UHfYGwXg#9rjI9IczmduB?cUx#dUb=Z!1Qu zhT{J5O%>|apOT%LcWN2mUIZ!(QsT&u;!8gk@GQ#%uBg(37;Ag*0Q|%E#`ODX3|sR~ zKIm+FPKxAesS7M&O`O7CKuL*oq~jM9mWrXcSB#?$;t$j6CVJ-#l2DG>R?OW(hvZq-0*bukTYp%=KW#w59uk~u8a`bWiuhHgt2}2k8vZ@gpeE?D) zT}$)YZVG;Ol+AD0ZB$Zgl|@}wa`j-1{I#6EYzf%}x>$)Najcr_e+F8(ei3TDm2`kH zG_K9yFx7L_x>7Y9-28arBH{feA(%j%*T(UUzO*dNe$ervrKqCon=K~n{4|1rUM;Zm 
zzpF{Jv9hLSpw#g=K7I0Gqh=`vqGxs0_i9ngBJ2@MB^J$MIU&lGWHM{qTA2sa=j3$$zk1@>rI5Qf~#3`TWV|7@RreB-hA43Go1u09W z*T3~N5cgo8kChK~Aq5&M!oU1U<92;P!|YucPt9-xGMYB~(7l5m0G_0w#Ah@v}7*|tfZmItPpyb?wq@hXGGxO{%zx9V$mO^%=UCgWpU-C-#0Unx_!E%t@3ddKeBT94uU0 z2?&++w}@C8!d|@4V!;bUEvjju9}vHqX7$#=PI#*N64Jxd%2_x*NMf2$jJ9?AiLZj9)HWjK&c)B@xd*mkr8FQ zH(V$v;U+MN74q5m@)uWlLtUEwQ+5!g$v;#14Os$Eq&95+(XLF>!5b_H3q%(p3B zL&$m~*iiP3Y7%FJsB!W<&cewjgpofs3iIwL=JBssk{6={T%CTHm|-#5KVE+!*$SBY zre=B>kR-GG@Mhzm_7v+`cTb5dMA&<9>7ptRry^@4UV`VgiIe|aE5_Q|J#FP+WS(bj zjy~O6$PyOR{_qai&X_o9FY>7Rx>Ad75WhA9qJAufHm*ThAWEGTW{swee*# ztl2+1r$W0olMwIvpE^lF?YFBlL{lJ^O`I0x-burM`0(HUjVoNVwMay(0oE#}xn&CT z^QC*$#D|a-PF4lB>N+KbOjCzDU3+N;tlNC)k|dncWhY5@Yfj=gDm1bF?s(XV75YI= zwp}1eAeITibgFbF;GdtE>7wSJ9 zRT{G{S5oqLIAB6dp!`q}y%lzAr>` zl1-t>9jkg%dgs+SeHWj#ZbuLA)Imr)2q8(TfAs>(E3pqxi%|%2N+e?RMqP{hg;bCg zvg&F+v}f)6guB#Whu(Df$#!VNpFeEin3tN0{=rcJgFrwjW>SD2$ZRe6@nS8wnrxTb zXGS}XC7OA&YO)&Q)h7!qzV-0)fG95pb9{F#)|4e;$zfN(H zWRY5emxfmb^cBLYOks9&){~l=?D_{C z|Me|K;<03U2&I_ zRNZMSkEZ&H!R~`OzPB^(P56YeU34?Z>n6}eu>Tv^ZgJvb;YlWESE(j{knPuAe=>A< zlDqfg*>LU9Bm0>)*7C!*oc-b1s!+1;?3XrNpl3Uy=S&O}Gel$dZiuH{?q5iQXPA~q zs;-cCelr&%BWz+GLglu*xEZ=A_$Hz&gS33zPlNj7kw0*@|6Rb+bp>nEetRWX0E6df zg+hn$dfLiH*6v}#x#j-kX^CebWZ~Xn3m%Z$&zfs4PL6M(zxcW1x=AMu?lp8BQ){t? z>2PUr`g_#WoY~?_LQntl{Yg92JMq*_b+vodyPup9b!F%&=^sK?veAkZo$9I#Ux*s$ z&j~}=DZ;!@rU7k#q+?pJfwkiucO4g}pS!Fc`=EwSdNAf|7pMuU`qaAQ_20}FHQmr+ z(qh9yWLiw@y`v3qGQwBqbfG0%yx-7<8@!n^_>Rn0mUzL?SX&`)Q2HqhJClID4KqeY z`>Ow+5cNJETY@+3CJb9Nh`EUIgcZn_K*xCYcgcg;k(>7Szm5iHuIRl;6R1`MI7lL? z$kQ+<91v90db2{_J+u&4Tej`82H2$v^lq^mrVu-@yB#l&9$~X=c9rLfC~+G*wL|6P z@9N-!RZ+4+LLwM@_WHvEBVQ)b^lnjT{}=)#yTW{dlJlD+6UnX`c1P@>A$fQs`mbrN zVLC3vgi0hoUWRYNYt2_v;sKq(3t7#?DC$d`P6vn~MPmz`$gzzCRUlSfA7p)g`RWr& zBOHOeby5cEH9s@6AnXfRmc~$Dj)N1eZ${sZJ`IrXIO2CX`Qv#zofgg+kC8&stCD#o6jld{8HeZv@mk?^8$kj)Y6a+wq|@*ui{YM>vS8! 
zk6h9?kp@O=nJ;fG2cqfVB8Zp;U_6xCpSq(MrrhQc(>v$?(z z`|A3ruJ9aqD~>)%gJ4D0`!@Okp#CrE&kSpyX%mk24(hEtCV|1V20uXu4>uceS#5v+ z)Yu=eA?&VuAf7}J2w5nT1Rt`qqm7;`SD61WKY(v3<;7M5VRHJ9+MBo$f8}CCt8ssI z?oawq-O+EIGTyS>X~(Lci2Sz_xb^L$6|=sFf4$LpJfA2Q-cC-ml?OX)rcJ6&IzNWY zHAN(N`sga?heh|BVcPhVrJF9z-xnqN@-zjA=5Jiw{;2`=|9BvmuTuClR(+c0a82c6 zhR2t!wpstFzp{0u|k5x2epeyTZ>8@}SWlme(1FfzbaYZ07e&c4q zXXJ}Ng=_NVfWKZ}480{yniTtnp|md=&w?$y+e(~-kE<`!AMKgqrBR%%llo*jI=kvWxg7N8-}k?vIS1MSDav_YhnW{bm8SyhyAlB7$fKYb3e^r1xmj>d?j^Z5Wt5!70^ zKv4uGi(#3*s5h--tBnF9cJD&P4_gguAG?`RSn_*Go#r3miE}h;Pq!8SC9yak-Cq%9 z6m98tOZL=B;1K&*<5?0lJzi-Y>jq3bR+)rIujb!>Az0V&EutB}5Ml#e$Bkgg-6r2k zM#t-O{(%s7>prRGP@)|6O%{v9cnlP>x97#)7^;DhF4cmT8xzbeBsK##9F|RsBM(bY zAC3*JO8^bB2y=W;xmbK`R4uX&J}c<;%tl^b|2z0}=q@FL zcjgNP9AJqqPv58tas>6(_Upfbvg)TN&(X)fU|e~|j8O^3i;5?K?7jbiSyr9US=mwB zUzY6f=I4R17 zA{9V0i(#&q$7I3;gS-LryVpE6A;X|KoT7_3gXfzvdBNSyz2z)mussyw==nsrI60I2 zb?mBDRuI@qA(!&u`#i_dg-`8Gs<^Hq#i4JW=L9uTh}f#7tbBGsh)#RRx@(L@hsi(q zbv2YRlB8Hfp3~9HPVd(Jw(n;iZoc=y=8sOq_Ag zW0jWeJ${g~Z9Nk|ipihFXucHK38HDLJ93v>`uGA!IuENm`0D z^F@PzOMSAy#8R6cCqeGH!%~u}UGj>NXq;Bcrk77IRE zPsEcw7Nu@kOP?j3YR_#>sSSFvX)NIu&Dk#DOTXOvC0S}MC7)+Fc6$Dp?&N%4cJptG z=lCGkk_xRtMB?GI%0_G}`iaqi{`>!QIkDfJ<0q)aH#9p%U=~XKA#>Y0NUfK5RY7-LByJ5U!P*w{1%}+rotGp0Mx1~M z2D9x}jKEf>=B@9Ro|-*doo=j+6RbUbTdk4rCIp>1O!oc~AF`MDdJufLYhkAUT(RX< ze&VisyY;y?+q((D)7fdWVexdMaCO*tD3Mwkny)Zd4aqP$@LNut?jbrGpU^S6%W5TC zFbO@lts5X*_js~fc5XhmUEkMEPR`HBL)x(=ZQ8nCqSfOI-`7IwI`AJu@OVmlTV0!s z9rB`e>UXe3Yd%Hr*&;EU=oGROg<%ilK@1pnbAX6~t5#Lt-W)B`_3V;PkowT6pI*Kd zjbd3hvn+tugLR6_c(gdbbJM#h}>}?(T~6u>5t3|D94xnnh843E9S#! 
zcy5(mCjs~7`^0EHUE3jHvhNcMb$sd2z4S6#P)Z$oWw~k-AK3jOMMCuQXe}}O6b*fy zj9O@Gmo7njs0`JwCffU1pQ5RWO9?)cQMPqRaNtc222IXdPD!8cckb|6E2{Qh>;rr| zFpayo_L$E2EMriX+@9oaryL=>_x;Y}qYXEwts|Z|Df35IL$8p~1mn?^ftDf4EsvGf zNX#DJ2(zMPdpHocK?Srr>nwd$;$AugZg7#j=rg#3V?vMC3bSwEbqRqdhXQ39Faj2% zocBpNJ*A72p%SRY?#HzmE)$+BlM;{TZn_ETCReHz3*X(9rk6LT_q2Cc&mWqe9D&$B zGr#Tg(_<|N;`x4mW5vE}=#1XuyxKKohh&D!<#(u;1ah-}zK};k`oqFe1pVO{=@sZM zWPJ!rJAJFYp6WQQwUqiau~QEQ?YGC0&|G-!Up(Y9bFR>c&MnB|WF`S-W>G)02>^1u z<|mUbi`UFA+oRGfrfpE4>N5=eoz>B08n2u54*9#wS9Olc`j61J@C%yE-OO(FIWl5DSMbp zBXIOw%|_FphP(0!k5vJJ-M9Ce@fzZdws%Yr^%aE%YAU%AXP7ic){rf}3`M1Ynd z?Y(>eOo4a)rnSF*Zrzn-dn&4&$g%NxBnCS@x|dJC#1n;1q%J}j^{Xf{6?uX7JP4sS7oN;|@xzyAy@Fx0kd36d}k)SzK zz}@ap-rL;<=4CiWdxjH)_I(D&yG=ah`Q3}JQM)|j#@FhC=}k2eyR1HI zpnu`E6~c6c`bKq!8uV1P<7QJjYFw$FYn{O(%D*3IW_$`GMB?<&9Tw^^drA+`N@|ty z8ILk4lb_EGE-IHM1n|mT={*(R>Pn*@2MfPfTEl zas!&xhO*H~Qo`U>k0V&N*Q0enTOMhT)j^JJ5lVag*7RK)eNV^0;UWf@YdTKxv@qNL z#Wu4kBHpU^B}wK4k=Sk{B8_GTdlLS2pr}Y|owJqL9xZsKpaA|wVK}8O=r{(gf8b4B zpP)WSwU?Y$EG&M1YS%k!94gbP&iIa9(=`TKFJ!x36=-RGv?ihey3Q;*lL=LyyxE&T zQGHxpE9y@Ih6icEl@WWliKN88a-=U>xoDX2mGm|dz(ISK2W=kYoJB(?Y|9n;SA{6~b=YD{%PgR>_uZhr;2*zOXdC`X#`|HMmuWKuZDdr*!ZbNyA z*yM`g-{kRzJAM38&3L;y?Ws4l7ew8#zp9n)HvgFa>61~m8}Z}OPu{}^#gRMf-|NTQ zL>_U=zFD)5atITuC>7LmG;l^8EG%kZW@hM3!3cgzW5~Rxca0SV91z$>NLaS(%QaEs z=J;^#4%zNcE#In)S-Lx|-g!MY&qB-7Gg-#hJ zNr;Y&@BkrXLbU^E9EoSf76g~AAL`?l7gR)Lo2lS`HL6ed@LbecY})1riYoHo`Glxn zO|g97yAnf)Ms!Lr(DI2kMK)@+|9+2oAi{yu%zyhs;OrVS1-3%xi<@iVwSufFL3>BV zrqpD1_bAUgAd6}qG_6F;IvE?iq@_g zXH5MCr8{tKx#tC4m&eP-Vq{QLTG`6O9FMf-&F^+pH9=50ht+M6g1)Vu)6p=O&DY>v za$ZVl=qEng7||@6hxwFw9DZ)?x^uKBdd2qnA?g=XP9FH5U=Q*+2mao&{BooupvqLJ zA(?PWYq0;F-I3O_;}`>`uaY*olFFA;5nqN-M=nP{=ukDAnZQ`uBcA0!^VUtE^Ld@C zHFJGGvYFpH?xaY1)Rv5fmVx)Vc$A8OQzf!XM5N%YBwd|re#s5tykJ|>A06Jys6DNn z6tRwxGtSi(k4lLWA-j_sGM-B$F7?>39ZvS(MA-rJZPi@T9e>$A3VYzb3zO%P8w4HV zT0R5mW-3S+N51M?!gFQi+}^0p43?0JY^{vT9QtnD;{jTxg>x&>1O-RjOPEcN%Hrs7 zs_xR(5Xou4?kR^&$=VP+RwI7zp)tqh!TjD)?*f4yr{y#ILA@ 
z;iK-Jr%piQ4O6J#g5`*cd;h!&qbBy9QRIi4zHbfZIQqgs{e)QwqZyFq){xcSl?O_q z;MLg&X?U*OjhTG4XEsR3D&b7*GI-Gp@R78@`p;uk+O5eJw$}j(nnG)BvaiKoD{-eq z%IaiLlGTpKdmw;BR72OP_~uo|QJi~@Hc`WAnaC975AtiOSf;uX z5^cWP?L+iGjC!HEqEF@ozL@Vg2#q@@7}r#q!|T@h=i{ zd9j+yri`sAMjjKb=mjWPR|yu7uu}U?GY5+T<`Drbx6yr8ATN*i zZP~Ng2as}i1!AZ1>7I)Q?!FB8ldXKM5X0mE+o_WHHn0s%rN0)^3UlVAhSO|+Z`YjV za|*wE6cGN!Uom0(#7?SyYfGwV22Jll^>G+NX5j!;v~NpcL-2-mjs8akM+8Jk)KPt4 z{CSrOQtr;JL0+>k9$RMes8Q3(*ox`p<=A$P4$x^)uO&5|k|RvZ;YHMyW0R9jiHp{v zf-{RJoHZ6xilADdl$QzE!T(8PR5tTYi^6`Ex?(smB-t8T>fj@x&o@93XgApQntbM{TGtBNii2^+GF|<>gnWKA zK6hjg;FvC@=i~!f9;V}oOX>GDv{p8c2(DJwa)Q+YDY$SVy10w~3ZWSBuF&Sc&CfeG z*4gNeGZo%>9&A_2dX#0^gA;3N(6RfgaC5(v+=jVZ{MgL$C>coS_q9=D4&_PlRYlkU z1Qe;7;#oxX$jh#pFUZGBtA2M?Cun>lRn6snT^Wwe7ofxZW=h}NS(Q?z_mecdmAZOv zZ{--T)eD^Ab(c;siE&(Bd^=vR8sXX5@8nLntxd7~9~KV_7;=>@K8rn2)hqNkFs@SW ztA9I$guaEY?sk1ecIPmlhXfO~uc1~)LA$U-pqpH2EPRzG4TYgK_d_W^cb-yQ=wE^~ z9aV_z>Vu!iPv3DA;WV6Guj=A@1}5?vf*xo$?0XI-|8kkzwUu_&)X;uWCNbUIAQ(<6$Fdz#1B0w*Hj)@Q?xY$t_;XU1Q6yFwC7T;|fNXy*Mzi)pF|l(6B*v z!T(@Wy=zBox-pk{N+nDWC&WSGCei($+5H**#NW(Dp4`+ot-QJ4j* z5__3yIV9(j7Ld~KHP(S&hK;*cK&{wq8B9pO!Z5I zt~N6J8^Q50N%YX2+?XwTK1xISk^KHcFDnkfCM*Z<1E$@yFh9C^(Oo^|`xCb#-!(aGDjFH26gSh!dWK4tXE519{hEbk8betG67nSf2ocra8IMy`MJPtC48w zpu!_iXZ!SVtp$2P|KYky!_sa7V+;>2BP7K~O=f$+p{43^93O-wV99V9s@`1L|By84 z;kZukbpCA>K4tGcKTe_|iQ=|*`zS|(t3)C*HI~O-C>p2AA)^;LajOB|Lr2Aw+2T|i zH!y{4J3c=S*2;1m0QImrYpl#97TeleejKM6!P2CnALe-vrEH5iv4$`RfD8WdsLU+! 
z?|gbeq;4BA>R_fy+WCzpHECcn&Bdfh5#qfZ8Y}Z6f&5iTN=`I{gdT1(47_+WH1Rhc zjATup?nHx8<*LtWx;i%=)FkN}%(_pF>1&Dj*MFzZo}lBODt0%7Q(DQ%HOj~Y!lS&WlM_3yfRyQ7S#DAvSo z!Z1d@qms*CRY+!cRN7~>m%JfPR1Le8ym}b*Fzjw+-hYjm)jJ)IJ57mHaGK2dKD&xR z^0#Zvs9Ot~qOxKGYCBU|BGkfx2~_gN`p+4eC^0$Qaz`xi$c?c~^!02`7PZ3v^!62A zRXtz47Z8v}K)PGHJNy#TAl(8|N;gshf*>v3DBazC0pZdi-5}lF^@iVD?_YQ>m%7wb zbI$C&Ke1<|0&XT;PQBFi>mqt^7LHv+pOVI7{*JshWQd29=3aakG>rFieCwD}mWx5g zwOF75ueQ4prSEZLx)oP;=Dg72rM{cT*j^_jAY7BsMDKB@T__nKr4_uDo!3K>7f%8V zGN-@gv~eia;XpM)V3}@4GB$dO0pUghBfuCCd|%jKi2}v!f`3-2)`D+pKM~h)>Jk>T z){xQ1Mauip5-e}=6T=V$u*b%5wdAX+a;z*A)H%-cXBS11szm}LLw#Pa9Zo+W z^3?U*Y!|yd%Ln`H^95roB3Ow?QHaTyGbL)qEES`NXmU8lG-Pe--#vCeNP^HOJUXHb zx?(62yOK(M^hCS^MT@XGb+81z44`r-5&pLavg}m^t%?bcQ@)utdVNeLZ zuMDf_(C#`7oSAU}jb5#qahtTi8ApbI8$2&+$YeLkkCb2+67u36NUBd)X6Zmj+;Zvw z7YU!8g8}D#PT8AbWTV=%$4gBDzoT~6-~qWDY>T`nh|ILFx=gy&(rm4B-I{hijZ24UcbS?r@Dhrh^d6f_!HZtZ z?`+AKBKBK|`yki0k{jCw+|oi~)ZuNT4{KkZ-5Kl)bhg{Aj8N%N+28%Iiw3ai(aLe3 zH!m8JI2J|%SHFIJ2%`pwGzqwDz~FGZI|`RBDxSI)yqK`6P@qOIv%{&di^|SshMUA7 zKHsEGexWgGE0|E)FEThoA53J5ph!wYxuX8`8{6|!1Z~54rrwhNXr2%r+&S}w!}Ym0 z=C2GD+7!^JMBUQP_D1JJ?FQ3-ZAkHcwE*jWKVbjsrUl>gb75~=2P}{9(eaqEZ8=m( zmjI9xgyMrE;f~L31SSqfvt8_8nebIOuJ+u9e>Eoj*uNi-Sxz;`=Js&$L&FN7n34jL zY!yQXg95db(AN)iqnS7rTCs_=Lqwph5VV-iSJ?~yx{b_0^;_?Rrt}c{gia>>CPd)2%gwshUjo#6CnWbq|a+uRptx`(oTId}oL6(B^b> zV@bcDIq?*{_{3#cBq&}%bDUvF@RJs5F^S>0AqV|mf5s`dkBy%y0%^0Z6C{a~ zEtqJ{so}c!BQW#y2OHH|S4L>(AvDp+r1^UDRo;B1qSonX#$CD@XY1-z{pC5 zlA=e4vQ4cIQ`0~Fud9a1f7Q*~EiXDI5nJLuP6MvVJS9CCWmXf<>AcJjW2Z{;?Mo^b zsxbKGTy#R)>EQYJ7rh!#A?qAa2yV9+;hoG&hJk03Dwq9UGjBtpPKQ#FY}P{k`rF2b z#|v0nT8kx|Pbja>B%Ks{iEZ50NB&v9ygcvk&yBp$Kn-8JyrQziaP9Nq!aF>N%SUOw zl5KqI*Eu+&wnUmJNwQA%*>9&_{KzO~bUGW{h)?tSO$(uJ1}z)5G*he%DuTk~tX!`v z;BKnIG63c2{h=+7Lh`O~d6-VBsL$nX)J)aNQY1t8KKjgC+LYg!bdRxRJ`yf@Ab==V zr;UYrDZDhv2XR|)*MA2puKth-6nABKS0{>#{XbjvQ?cI}gm5 z6tLvJ8U`h0k{BQKyb|uY<}okMUU>M3U_YFw&cxpZf)MD8oL9feEG&B=;3nz^7~6#L zff>6WBE8bn2b^Mw*2;|0rm5evF?O~#Ks&^Z#RhSk5e-b!jry(A>chUq*H1+ZMP#Wj 
z?hk)Z%R!_PZ(6a1>Oqmt8vo+$OG;3(kL1Vwi{GgjlDi=U-SeoVT)a`h6IP5VY4+o@ zDSV5Ek8fTiW&x)3%hKoIxgkR^pkcM8Cf1W@BLtcaos2b##Mp5`B|IngJ+0O^bN9Uz z(s9#jlls66@Jvr;zRWBQ$X|b@0(DiH#EqcS2+mH8V{cJ0r-Q0C!-Ix3JCV}xYAnXM z5>1B>=s^Jsh2=x##1eiPgO-CtMq^@>x+=+_T*Wc!x$ z9bZBF$!Pmvwh&9EU_D8hSX&_IM^h&Y#U?Tb%?#W_PM|T1#bBueNPy3FD8yyg(V5+C zF@$md?!sFRId~z}xGjr$s;_k<|4hNzfYh$~lNrNTYC3{wo5n?1knX1NVQHV6D$$32 zJ@1^`wGO!ROr|voF(nz*z>&L25Oo;s^e8P(-_f7vYkbrVWN+JBN!29#lO9W;h)k|H z8JtV|+?1x`v?uY-vGI3SujF$>&_K+Ss5djkJmv{!MO2OTYtc2gu5KB8pr(4iG0*e6 z$LtOQ2@=F>&;W~>l2%etTf+d1^2o*3rX^(&fLiIG+ zBAU|!8?aPvYbTJd-zC}!tauf&xGK2wMXO2Pc{ScGINjn6XhdW0#1J@CF8+M7KWgjB zDX2oid}{K_^KzosrF;i%i9`)e(i&`H%3?$^7|%Z}upa)5QQTZsIVf2A%8>h8R#rja zoBvR7kmQE2>0!eU4`Tt*9GOLXT2~&wV3rO!#gW{Tllh0DR|f3OcCD*VOT<4IZ~~iN zPLhRm&IK`+`U`-cwjG452aa>!98=~^Ur5Lk&|9p zi_{t>a?a*5-aI4uiyEsHO%!jvsqdt*cgQJv%{lhKwGTH?wn!n8b>%$ZY`qCrg0oSV zt?nSPqwYcIDth`axv7$5crAwN6b~&9lCN!?pn@ceVYU=^_A0d5f>d_re?^0sr1ySX zdZM`PnrUhqkyF6y{xNy(FZ)?)b&T=(zEo=eeW*)qvAIHu4Z2MrtWFHP-h@%cTGpyh z+YD%QwJ|2VNU9Uz=vw=CELtt-G-7i}x(>waFA^qOv5nJ&WK=|2cE3GhKD*5&13_1-8;Z_C3wp zuV(4>uJFZg5jMfHx94YT7f2SuHS%sQBfd^>wLaKlaILBdA1E0gSc$&HMkrWYz@v&6c|Ga4Fr zbhBp>1_gc)McYj-5&&!ZLDC;jt7y+D9;rgT*O^5^ikhzf_GSW31wN{5@ii6Nhd3U) zPTt>@;9M?C7iv~YP_4?Q%_>L?e!#gMx(Yj6uo-7R+mgSq6@E1MNuz6Cv~9S)8loUC zc;v5_P+IFE1-+4j#%I&P$s)QARsIs+CMZX(&JmKB+17`f$ZP~UopA#E@mEuZLed%liQ&xld|ZYMScqZ_cB78`!wS$#K6^$suzx$%R(a@)5ziS1s)x0&5FRHc`1B6(cf)9qKqPair+ zp@wyL7~yss;ckRce7$L#$3W=EQAZ6xHgg&_xw%Aq10kFd4)Adw{8!uv$Bb$3i(|a4 zXYKJ_9No-q!xeTAOXF-9S^(blc-Vzoa5Rn{0_X6(3PyI&mcxbIceCC$JnE2-m46OB z2d3URC|GNnhl?^!7tuoF1ZBD9)%KchzeP7IXx1_zU5&&sXvel*xRL}fRs?;2tXRza z!NY467+zEK@yeF9xxI{IjnZP`#RSBs>GYeJ^j}j$x@?*Cn@?HWH9-l4`3tqf zF+CD{#M$=^)IK=jf&=Sw`=O8KL}~>^*!DqTSt9kX5hwa-;c5+i{`>r&6&>7(0E-a; zRo6jQ4fLR)e^GLv`esC5iKjN#GQK_j!(+*Jx#imAxsNkCr2m?>%ur=)W(s_qNo&XE zPF-JFu%VCP{CE}88L4kkwKugCm%_!?t9(HFd%%mhz6QoP=xXO2F3U4~|JtOj81-cF zA9i&@;MYfpYoqaWX)9@^B_9lZz)?zKWyr;Gu!0m^t^4r3+TXb#!fZREfkv5i=cQLS 
zRZQ=6=*QQb6vr+W%&8tPEO4FqI8b-ox_&+;@3)l#rsAUNuI@wVY1&<~$yk5Uh6U-L za9U#}ZKDeC#tUeFi{fns|B?v7dMLscXTf+iBa86kOWH5t@1hTmXj^2_Bb9gS_adYQ z%Q2rys=sIk0^8oZ{hW6`e^6)u9G#1$;5}OncO!2vi2F)c$V*H=;jm(-i^qIw(pbjX zk1@huv4~G(Q!h%CU{fE1(_=pS4ux{g}Q;xHQV2_*XKUZ~!sbkUD*Nn@< z3dyHm+^Dw@ zc|*u*Myuc`&}Whe@%od6FgF3BZ*_Os0HSVknVsv{-DlUpUs^I>tgT2O^VZ3vS{Xx` zUS`?xMXA^tY*i(B(%|gW*x6{z81%#kq@s%HVXmzyd5pij30Btn-P)e|T8tKMfnify zlWZ0C+9U}5Jq;DHC^b-uXd90X-Yq@U#mg}DGcvzl>r)b#PiWhSHa z@|p86G>v;BuJe~>#W$!dImR&Q0{6Cn*;kM_;O=Sq)8E48GFHN^E}{d2QysEkgZiL( zqtC`Yo$El>CFDYrnfx@8xa#P*AXfEDlB@fp!&Y%bcyoSGkCt4U)YY|6sF&u+|K*0PHq&j+i+FSY*CayBa$eeMU|r|ER!!LCr+YD|kM zP3~B`5}bzp5FxQt+rIaGBXg3DJu0^5gW}VUhX}mh2D5gW7E`uIw#J8pZ03mMvx{T& zm4pk&-UwTwI+9^ZDr}t|-pOpkq=my`aLa>O_jwRKMF00`^?7xUr-pk1TaU!_L z5g&TjMt2ua98_FR8q_ZFti9U=7WhY`!XhWt9|7g1byjSuIZ3$h))`feYM1WD4=504 z;*N?W49;vsWfp6Yz&AH;O|bN9s%JxGN>^~mdQJn9R4rsIO4fk4Js(fd{m)@}Ua3Tf z0Kg11$Ey{+%W0|P_p?2L$px)6o7LG6R-1Bjr#~69q-`>M zK25W}P>_N;+*7X25_086RiM)rBa&ict2yv#LKLD zEg{v2k06X;Dsotd(5!RNP3EfI*Ih4_3wU#hkhe~5}LVFnC z5~vXgGpxUiNj#wEACPv|57%^6y{}dMdi~~4nz@PI+$7PvqY&vw4m7Qzaj8PUf4xE8 zJUNFAxdneD&cCF_l3sytO#lh0@uG&hxyvzJg97EfEgocF|Fv<#7g8pI-~xlRFTY(H zjX@!M&OY$lC?q(>o8PwMx#UKBFll>wBPHS?IMN(c7Quh_I}f1LvhV%O_w{fpm*+an z*3=^0oV7vO{96w@w&;iD??xbg#y(l@wopnUC8am# zXONc9Po$G*@_0&>sbxdJIR-!cTNsKs^1qC8>od6$JZ!`B!+NXMXs2p}G{*`(6;jd4 z@wdEIY=N#^3l*-*6Az+Wmd%1^*tJWs;{-@2(|T2gc1>sqTW%~}^?~MvIhocfkP&^? 
zdktQ5Wxv;ZHotstBh<0_&EJ+!Bn4kuGzdH~)`k`aHkJt%GUo2Vv55%%it(;}ME<2z zeOpS~G%;FxR4U>>sr~l`iXJu+t@e>KPXE^tNc{(LyLc>@6AjTGW(BTTuj%`SoX!$` z)auENd;JvOF;9&-5C8qSFJ={yk74LFvm2}9$;{)z3JK35Vf{WndpAC>Ygj%tI#)pz zlHRE1&)?M@VQOAz$kb$Nup0^AVCb-zB(V@$>g}yn8JvK}i4mrSaG$YZpBNe1Ge*0~ zC&eI@<(Tm#)yh9*o{h>sE6^nuNuJO})tvuMjRr?~fj#lTZMaGpeSlP4Bsd+(^8j;= zzo>{NatC{xve;iWsEp413*8%c7s=^1cs9A7Mz!vzw$Isla;0N_f8qHF$YO=gcHGl+ z2ccqxs(U{V-Ddj-G#T=S<0g+@Z#5QPEBG&(+;^P);$f)PRTg>)$3WOr$E3@kIA@xk zX&v=Z0yop;6)Mc*WL&Gc>pDP{#wp&5c!w>D!hJ3f#%176+Cs5?UX_<1RLe5@*>ogRN7zTl|8yUVRgOxar<+*`kaz-dVicwSY9+u5Bcl?7kLmJC+N}oj=LTgkcye+y*$Q z*>8Z!^BQ$L(lsrn&GJ&B!gc2Y1Kqf&B8u*S3C*r(9n z;N(J8Ht}!|I|#B?Q9F97EEDVz4!z)UY$$|EyQK;2xQj?m7s-_n{#?pWKQ78@y&Dl{ z`Xwg4IPavc+2wxDc~jQ5;2{4>@PizZ23THVGt#kJv7wcHV?MX1W+E@`aZ99bdLp&V z^QIbK<`BPtQJiCLlvh7=%eF9-%jIAm#L@3ib+@!-IlVd47QWp0YAp3)N_N;5pqWCi z=euhEsBM2(abzf}h?-{zp1e>MdlG}f;tyE#UejaCXFjP0_ntHnO^6531Y1-;4eOpW zDVF|mR(D(Aibadk+VjC0T_9hJV4oz(bSa<=iH8&)phD~Zev+Kd0D%7p&cIk3UHRnH zvJ@bID4TLU({GZfc_@(uetxT zua-Ue<2}t;p#iCxmgerrVS_=)J4p$hgOu3NIGaf_bePA6GZ7+3h~4&_vm3I>-XBXC zY40a!3@eU$AKI@XLF6sBuJpCgc>OloKy|W4Z9W+cP3PMFd82yO<*W}8F$g~X)T#Tk zU$^*NY*`&dC~fokGV9*-To+nttvxr7fM%y4?H?JtMhP9YrW4-mytUn5oFsOY9%hXZ z9kMd5<@fO{y>KAL+jEL5BF$^>7uP8G)Tc`<@Y}%PhKtIUMkob3#GtEWVN$~KH{-^$ zDR{;yvmv6Bn5355b3)Asx+)>G5Rf4W{E_z~CC!HpILW>9`wltycK?~8&6xOk4kQOU zd_#*Gw#@gpYV*av>1w%&pMXzV0pA&VbHFvPd5j;XHsseVSz!Otuce-SIxGBDbFd17 zs}>+vti<|WW)_)9dV_fB`MrJbVQ=W&N3NHn>Q_Wfmoba~-DjwQ*i70d2jyxuRn-*1 zeDXG2pwBXvKrwZ z7^+Mt5K!PE2}J%`2$}H%`LJ9`)%}y*`r#}P+=EincT?8i=yHy4vExbbe!_y(7E&epM-VXuZ zsSjRmx@~No1F@52jxIl{!428EW2a`>;Eb*k8)q&4SFoF%e4xQ3qP^wVwP+d7v8_$= z`@Vro@qI_0j6_myqiEf%YLheOfVPv@iczzrH|}adi#GUUP+6-5I8n9>&M0TF6W4ol zcb1~2y9=b;tMlQL#xKX;aBlIQs!j&d%sZ9Bn0~3>u1P%=r5Sgk5_K!#M#MOPQH8Q z;u26bF_T7|MLpShr`Xlowk&wh(QAz`QsRX;k9>K9wC~Tff+smm>X@i5_WUkwF*$}0 z1vDjJQ< ze#I?Hq_3~4dWZU@x$wIbm2vB$ze@s5%=5eGL2#cV3l0TT;-Q!R1J)#9#})+Yu z(ndR-bl*^__18)$Vw#d7N+a%{8`S=CTo8p^T(rEv>)KD~ zQ^bn1&$q*kxv1Mo?Vx0h?W>0O<`(aj%1$?|KUIr4a+JcM 
z`KvBKYrPGJ+g*Ns){$ukkiX1IZd5N*y0gFW7283+y0H5WQRic0L zIg{E^UP~TT{{r4_`e=UdkTSw#^JV9lfYAt=TO%eNdDQpH@qJg_M_7vuC-A57Fh&a1lo28W)y*@*f6z_=6R zRrOZF^{sf2rbUfZ^xyA7noPtTJYkXrYl3T`i|CTm_|ZB-jJ2ih&Hx>Vt+FCcuMg?|1@qAS@J^O^tqp1cjuCdPNd~u z#VWReZ_)_Yb%IgJ9T0aW_ZoYf%guYoqE$*a(94F^InM6}t{&<|1|3vCGJ`c8A!5AYzqJ8sTuYH=M$HV$slWaZb%9R`*+G01^4T-+0^j zB0*vIC>O?8s^UGNX#KYIX#`Y>rkBHqW1PU-zfF+9)dZ6Ykj8EK@Fi6tW@)c{y?x?9Z?vjV$ zws+>oov&MGOz(!58?cy1w8~UE!H0IuHgsE!+~r8_v3!#a$3U^t0uQ}dN}cM7r6{}- zcJTAW!SGkB^MiS4J2Mb9@x5x80x6*yOl0Y(2|Je&=XkK5tp|`@*lrd6%b&W}Z=`P# z&Z?B^_7=Kz@FW3{m>1P`rA#+nZ`=roDNASa4Nf)HsF3N@N{-%xa)Dxc*;lG7>yg3z zaT`#DrZaTIJO1O)MaP0Xkj4O|AzZ~VbnuNEt8eW)bN@`!QBSa$JK@hlgT1->{JJU| z+dqdKHm}0E5T4YT;Z7S?uUAu*@nrVny7#6pHvJ5%V({V{pCfg*`h`5(q>G=mFgd-t zo;(8_*)URDweZWyK?A(?1Ok#D!skmI4Zig3Mw4N&STn~z4@2&XHES2~!_U5?V8N92 zvDf!JQV*Md*}$+yW>?025~jOd83<0HH*|69!e@NC)1+(vWYPt9^Zm4i$*I*dxXTaO zKs7`rKweNOj6bPZU?bzJQkq{~c5L4Cj2@n+k8THhSJTa`9VYY9EDlGtsCeP`CW}TQ z@nhv!wSjR@0uR9cEsJd&@o>aGeOL^`z0_om-Vc4tI7}Bcsh0kYwJ_EgKO`7d^Yc^= z;+JVXAZsI>mSI|%dc2T>592Q-YRitAE<5P{ubIwn6SkkOl%SpN>JKQ`mUCUJI^@Hm z?&8h_yOnXWJ+v2#b^=#Fyfa!$2oSu4%Wr*Kbx6y6+cG=`CW|^8eh}k)_p>t0!B;C! zERrtDU%pds&rIY;2%aYkvuaDGF#DO%r{$C8wKj0t1h|saHahah7JWYH#@+S$C!*yz zHGRaZiIxqMcTv5X7Lxg_^4v-FB7>#*1dbCQ1VycsbH8~l+pV{L-Wdz-G<$`^>Tfwq zCd(yw?{w4zfX*3|$PJcBY?K!UV`Jj2wQQFaxyf*v7Rn8RYC3eN-lREyX%_FBLChBfTSOs_Q&_yB}BgzeA%SDzl)R!oTfGbN0mmp3ZbfZN%p} ztrFBEycR|Qufe8V-ikMPYN$yK_fxgmuDNE-grhXq71Xl!Q2yuQ$=sSI&Bqsmd8+TH zt#(0!bw>Z8Z}V7XI`aXS&woEdWvo7<^<$N#W^lS3m^25x|8I`rnT7($*$Na(s;-7{ z)ko!-MS3{?!$3kh*{3B!+z#~^=<>GdZMqyz5#oo^MNPl@N$hu+f@JW?9lJ>O?tdd@ zg%jczu3-1pRz2JqSdW7E(~s9>hH8-3Hy1Rq+UKPhsm1Gul0}4}AQs@F4vZ9~Y9VB! 
z8^)RolYw3W0J!*R^N{`^f$%Nxlu3-9+`d?_u}2;LD}`_IolZUrqJLyjB16&r$ac1oT4n-VH5W)zK!3t}~2b%Z4WNQ6IRy+7AVj zN?=9s8K#a&IyW4yOL!;m_z1F|2PdCO`i-<>ark~O$=NOeT5-_Pis^NvE;`vd9lLPr zFC{$$N4qa=8E>4|iE@I!K_7$4l?4!&Mjni5JJX!*fm2!;V z5D?ZaA+df4wwTf$h=O*{qVOg8ziMH;+#19+?|OmW5;`gib`(xa-6Z3b(lhdBZPV?T z(;Jz>%jSq?fVn!r!XH?n6I#0xHflK%a059j}6?+(- z!zPu7TX0qhD8!?fJH;6gfxf3^Rh>urE3D&zn`l8imI{IE&GJ;R7Cns=32s!^8c@?F z0xjM^cwhujS;=WLfm~ehJ?4O+J)|vCtUieBMgcJpAFL*-7^%WmIjp0LYjRTbQoJmEAzlxl}q5u9ngw~zVf&3fq^;>PR8 zO6Y(aY!y%cYG_4#D}ke;e(=UkMg-HEI7^)21(0J4K8J#JezMC1fto#d`uY1m5q_UA zQ2-i7>;Fi${|o;!je`aL0&gqV{gshQ?|u6YJoKgHG}8g*A31Q_S>U!sp&QSa08WfY zM^8@V!xVFO#y5qmg!y%Ew|f|HMhsrd$(8zNJmLzH*=iOBBq0AP^KI zm=z;v)II#8ZS>DeP}dGnv*&E*y$h|Vslm(r=jw`uY*e&paAi$Yqe5XNK>dbW6wsmtp6vykBy78kPiv@d;H z&rbe}Ityrb#bb)MJLl|0p??ug#>HBP zEug6duLE}23j@S6w!rJ3Vf7?In5TXCe;$ zzeF>PXTsSnMP*dMD;s~6F?>1ed@uffDT znjw=C0(Q=iWZ%D8!dKPRqP1nPD`^MbCX|O&^ilvtMB@z8p%^?mnjCx#s0#3LzJL3> zQ4}lo^dSb4*@jXj{CjpYHVNs+Jeh10SguA6-LM#58OcoYMEUv>MwwZ$fE+ky7E7tl z8^id*rcU^$3$@%^oc|TyfR9O|#10)FWkO>PoJ%N=Pg|npG`4{iF-3zdJZO_Mt)Jk{ zu=bUIIzlq5-ZCl+FliUc*#XK}5)41sA7N(A#u!i?jO=n((+vwSX?`;g!?@SIc8T;~ z#z$*~_c5srf!iBFa(Ye{4|QsWEUbu2l&vbZ!u~BSu_e$;on~=eK`omvN=xLg-o18g zsQIIL`O`Y=bzk8O#ez`+^5rNNt(FQ(YVT_sT*>K#c*j)XmsZZqX zrnW6mvI~kLk3sQO)xvf;FBsN!)oTH!Mhns3m+xGi6M~XHy+}GY6t*V8aXwclq7nJ( zSh%r8UTe>$zST!Bv8bVvg)NWek|vp!AB?8bxKvp!^tzSd>;2!ErI z=qBF=^UcUVmH97y$G!Rtvp`u<Bu(tOv%8b3`h@cW~MM_tQ1P&?wUVAW33?|0V>snj<`2Wp=TNiwg%W4k}Ck z=#EYOpPgYSxDpn`0=^Z*2xXc5OXd7Ag zm;@ZywI-*fvz_Aj&Ck#7EN%|XD;Im7)viWoGI01M=+5I|0P~tZEF1uwj1IO?dg-<} zry$pchifAX5bOO0cqT=WgArYPY6}zJ0w9J0S)B9ox5xth^36v_LZ3TeD~liVQ!03l z=C*~4{y&Vs+HpxBjV(hK4+Zu)oUUvR#Rl$G{RqTtSEH{|Y_0eI7LRuQeoRK+`PKLP zkqG=2qLmV@rT)^IB#Y%dSQmT{o$;@}8W8mukyPf&bp5Kk$THHAuj|u?w2QY!q#D_9 zTqM1rMJ2V`o-=Dkc-0>NuAXWO45zqsR_6OTkjvtWy(+XxJex1xU0pB7LpX2}_lni^Qc-NifgsLu&gZ^6O> z65iUcZ*RPa$O&4}7SCBv0Lvx`mMVP+#WlKf^HzFy6sQE0THSGMb0C280(<4-;Rv&J z)(f(Vlktg0xM!tB?k-iEXWOw$H+bE7o|%jISeRWN{x_SYxLZyI7+ic>RK&Ykvam_O 
z{M{6NK<5%fX;_;?T1rXLyO5dvFwHI}5goi>L!g_?R@Kse51gJ1k7+l1M~%lGhCrPB zmMPHTkY*fc^hg?s`|{~@%b4-x=;GNBxS~BR^BJw^32s?euQ2#&aRN5+u>%xiA?_u6 z6F5CQswm+cE=re3+HqhqGJ{VTSq!1Y4fZj&5_!Yvra%IW0Sl(%QUGriyWdQX@G#5f zGm#{^Swj(EaP3f92y-c@rA39)o+0}&Bk)FFD=E|32;sFG%p`XSEKed(1f4ML`4^%J zk%Ro%bg9$T&<1gWTeQ;Q_e+N?TF5TwgmMfj6GagL5kRZaTLVsz-X4#~)y^;1+Dt&p zW{KK&CT8g=F@lMfUAWY~JdwDGultbl&t~q0<}M{c^fqIX+U0Bqtaow9_oJt6=)MfK z&{3n@P##5f4OFR{^CpjCnhF3bR@Ov8VGwRGfiI7c5aSxmQI;h|g6T9fJ2D7uLcY&k z_Wmxt(1;G{$4iyWXR=SoM2bxqL$SHdXxtC&08_#x0~`$y2Jy-lN;v)Xc;Ae;gpgqVvmmXrRH`n}E%DfpjU+3`DI)68Ii3I$iFn%A~N ztd*H+a=4cmmTSlVmT6fEt)ia@h&+V=ZV5s_ZuqvmGj3U9_h00sVy6i$ibLLj-w5DV zg)x_DQZoa55=if|lethSo9tu8?Buv}K>f}9F0~n5i&KTLDWQrvNi7{bpxL(G3jq`z zJ9ietXrP^P#iqiSYLDZYZ%QPG;w=)O!)|Qqcq0zpJ-$ixaj&7jshyps)nZ2p2315R zzFed+iD#3{MA>EStcA;$R>L!wp^^5 zwpkOW%MfI^RycB)u+fXpcUBlywWjj35MuJ}GupQaUVU}#zs^OCVjkowFTck^B!a zy>tdkYQv)&hQXWq{o0S*xKb7uh!U-@4-GZe_Eh&<=+syDz_i zE`tvy36`9gc@6UYuHPEOoOsKAk|6*svaZhecENmVsY2SbN~m(daX?X7eC9q#7hnIs zkCtyex^l6>t^LTfUPkb(6~=@lqLqm^_QvGBZKqd<>qPBqC>s{0rYIfcR0@O#+kuvq z)(T^ruz^FuGp+3*?z3dDO0TW=ZRyj0o&S6c$@P~=vYnlKd8VzSZ?ik#;`l%aE5R2S z8LgFX4#3_7ZQi_wSc(;U>vT7EY=XnLn4h`v_3G0UeneU5{4&NQ`M35%bZA+5-s}nt z1%MeZt-(-y1499q4+%REc-qKDjs1&f{w*B-C7Ke?Tx=E1#s&lJ*27<=Sbt_f?&`?Q zGOL_}Tf=}>`tder+5EB7_YxFAQQ^9UvU-w>)tCyyLpGV>EMOd*j7AiOzv3~3_vUhF7tT9AtC7SY zaZIW07U6DkyByfAT$-g>1McicdR2^LQO;tTv}dXiyI{5wz*DAJgG&94LOV79d4P|h z0!-|kRf`qTVihhsv(q4+_6&A#4NqWf|AawnNohL>*LclUYN$3Z_r&LNpb18<53F%W z%;Ca{nTB-#X;SdCYVqEqemxxiIhlW;CP_rulQs=e`W{0iK0gH4OuHpogM#aC4F-f3 z?7jH8bF`Kol&uk3q`vEix?f_?@y>?sA*yeXvE@#?ulh(_g{%rn2M~+>VuiH`LU^99 z1t0>I1C#M>>OGWJtV5@9AQiiv-@{Ef)4?h41t`T=>$b`kP=VmB*UD;1PzQDn7y`f$ zE`%6P-L_sjZ#H=0H^^h`Tk@*EWi#rGPs;=W2mY8_x6dH}=8&LUn$8T^WaZoi`8Yur zdEl~XTR$z0eEXL#h3uSqGWMR2!E8c}v+W}PjvO={Sfx}h<-QZsqR2{uGJVaj(Snq{ zYppXrCu`GeDg3+6(LiHIPASeydj7r#>;}$c20SZpLrO$VQxaM4D`3ukK!S&-+OszR z9)z|B)jW8q)9;;*Vs~PKwTyeju~Wb8sW`My;l{yQ8lMM8YngDo(9f^wEBH|O%dYZj z`-&hYsr+qIE>ax&8t@jb9S)V}J!4Dwk=d>-244 
za>6uO3)+I!g_N3VB)}otrqV?Gp)g7c$+BhXv+EJJiuby^mJOv;}Dk!5qtN!$Un8* zG>RQjvZ{0<`bZNZ@%C&vr*jOnf3|}>l@HZi^f<_`p1X0Bo~h9Ra&Vbp^S=A|W<{b{nKDfw26BQNRJMrLGBG*bz2$e7vEL>P8l&wz&Fo zw$!s`Zgf=2c_F9pvbq*=4KSgsQC~+D8&apeo;5W6k*%o%4FU3o@uC2kw2+6d@ z0>r!nKG-5U5#U4$cqU+dc?n1BhS&n&dGGfp#6@2KankCgABwjd6d3Z`siFfc)QO{HeohnB|{bD89Hhzg#6``X4 z@NHl<7--LuY~y2*6}I2Kn+}1p<_iApeIFp5EMRZQad6Zfn~HIwTRFE4DPd&Yvc7-b_W3td;z51N`7i8 zQWK10p<~qJ(oE~B)P#y~uxIrf@FnkxWBve<4GM~0i_daBNuk=rH*?v34CiP7q jw!}-S|F6H8v_B#LF~lJeY3x9UfFE+w%2H(##^3%AHG1U+ From 2e888476b29aa30393d0c108f626153908d21836 Mon Sep 17 00:00:00 2001 From: Or Mergi Date: Thu, 26 Jun 2025 15:06:45 +0300 Subject: [PATCH 077/181] Add flag for preconfigured UDN addresses feature The flag serves as a feature gate for the feature allowing connecting workloads with preconfigured network to user-defined networks [1]. OVN-Kuberentes flag name is "enable-preconfigured-udn-addresses". The feature doesn't support non-interconnected mode, hence no change for ovn-master manifests. The feature doesn't support DPU mode, hence no change for ovn-node-dpu and ovn-node-dpu-host manifests. The feature gate take place on ovn-control-plane (ovn-cluster-manager) and ovn-node (ovnkube-controller) containers, both single and multi zone modes. 
In order to create development cluster with the FG enabled use the flag "pre-conf-udn-addr-enable" or the shorter form "uae", for example: $ (./contrib/kind.sh -ep podman -lr -i6 -ds -mne -nse -ikv -uae) [1] https://github.com/ovn-kubernetes/ovn-kubernetes/pull/5238 Signed-off-by: Or Mergi --- contrib/kind-helm.sh | 51 +++--- contrib/kind.sh | 155 ++++++++++-------- dist/images/daemonset.sh | 11 ++ dist/images/ovnkube.sh | 35 +++- dist/templates/ovnkube-control-plane.yaml.j2 | 2 + dist/templates/ovnkube-node.yaml.j2 | 4 + .../ovnkube-single-node-zone.yaml.j2 | 2 + .../templates/ovnkube-zone-controller.yaml.j2 | 2 + go-controller/pkg/config/config.go | 7 + go-controller/pkg/config/config_test.go | 6 + .../templates/ovnkube-control-plane.yaml | 2 + .../ovnkube-node/templates/ovnkube-node.yaml | 2 + .../templates/ovnkube-single-node-zone.yaml | 2 + .../templates/ovnkube-zone-controller.yaml | 2 + .../values-multi-node-zone.yaml | 2 + .../values-single-node-zone.yaml | 2 + 16 files changed, 195 insertions(+), 92 deletions(-) diff --git a/contrib/kind-helm.sh b/contrib/kind-helm.sh index c682c94ac7..21d7ffef88 100755 --- a/contrib/kind-helm.sh +++ b/contrib/kind-helm.sh @@ -27,6 +27,7 @@ set_default_params() { export KIND_REMOVE_TAINT=${KIND_REMOVE_TAINT:-true} export ENABLE_MULTI_NET=${ENABLE_MULTI_NET:-false} export ENABLE_NETWORK_SEGMENTATION=${ENABLE_NETWORK_SEGMENTATION:-false} + export ENABLE_PRE_CONF_UDN_ADDR=${ENABLE_PRE_CONF_UDN_ADDR:-false} export OVN_NETWORK_QOS_ENABLE=${OVN_NETWORK_QOS_ENABLE:-false} export KIND_NUM_WORKER=${KIND_NUM_WORKER:-2} export KIND_CLUSTER_NAME=${KIND_CLUSTER_NAME:-ovn} @@ -99,6 +100,7 @@ usage() { echo " [ -ikv | --install-kubevirt ]" echo " [ -mne | --multi-network-enable ]" echo " [ -nse | --network-segmentation-enable ]" + echo " [ -uae | --preconfigured-udn-addresses-enable ]" echo " [ -nqe | --network-qos-enable ]" echo " [ -wk | --num-workers ]" echo " [ -ic | --enable-interconnect]" @@ -106,28 +108,29 @@ usage() { echo " [ 
-cn | --cluster-name ]" echo " [ -h ]" echo "" - echo "--delete Delete current cluster" - echo "-cf | --config-file Name of the KIND configuration file" - echo "-kt | --keep-taint Do not remove taint components" - echo " DEFAULT: Remove taint components" - echo "-me | --multicast-enabled Enable multicast. DEFAULT: Disabled" - echo "-ho | --hybrid-enabled Enable hybrid overlay. DEFAULT: Disabled" - echo "-obs | --observability Enable observability. DEFAULT: Disabled" - echo "-el | --ovn-empty-lb-events Enable empty-lb-events generation for LB without backends. DEFAULT: Disabled" - echo "-ii | --install-ingress Flag to install Ingress Components." - echo " DEFAULT: Don't install ingress components." - echo "-mlb | --install-metallb Install metallb to test service type LoadBalancer deployments" - echo "-pl | --install-cni-plugins Install CNI plugins" - echo "-ikv | --install-kubevirt Install kubevirt" - echo "-mne | --multi-network-enable Enable multi networks. DEFAULT: Disabled" - echo "-nse | --network-segmentation-enable Enable network segmentation. DEFAULT: Disabled" - echo "-nqe | --network-qos-enable Enable network QoS. DEFAULT: Disabled" - echo "-ha | --ha-enabled Enable high availability. DEFAULT: HA Disabled" - echo "-wk | --num-workers Number of worker nodes. DEFAULT: 2 workers" - echo "-cn | --cluster-name Configure the kind cluster's name" - echo "-dns | --enable-dnsnameresolver Enable DNSNameResolver for resolving the DNS names used in the DNS rules of EgressFirewall." - echo "-ic | --enable-interconnect Enable interconnect with each node as a zone (only valid if OVN_HA is false)" - echo "-npz | --nodes-per-zone Specify number of nodes per zone (Default 0, which means global zone; >0 means interconnect zone, where 1 for single-node zone, >1 for multi-node zone). If this value > 1, then (total k8s nodes (workers + 1) / num of nodes per zone) should be zero." 
+ echo "--delete Delete current cluster" + echo "-cf | --config-file Name of the KIND configuration file" + echo "-kt | --keep-taint Do not remove taint components" + echo " DEFAULT: Remove taint components" + echo "-me | --multicast-enabled Enable multicast. DEFAULT: Disabled" + echo "-ho | --hybrid-enabled Enable hybrid overlay. DEFAULT: Disabled" + echo "-obs | --observability Enable observability. DEFAULT: Disabled" + echo "-el | --ovn-empty-lb-events Enable empty-lb-events generation for LB without backends. DEFAULT: Disabled" + echo "-ii | --install-ingress Flag to install Ingress Components." + echo " DEFAULT: Don't install ingress components." + echo "-mlb | --install-metallb Install metallb to test service type LoadBalancer deployments" + echo "-pl | --install-cni-plugins Install CNI plugins" + echo "-ikv | --install-kubevirt Install kubevirt" + echo "-mne | --multi-network-enable Enable multi networks. DEFAULT: Disabled" + echo "-nse | --network-segmentation-enable Enable network segmentation. DEFAULT: Disabled" + echo "-uae | --preconfigured-udn-addresses-enable Enable connecting workloads with preconfigured network to user-defined networks. DEFAULT: Disabled" + echo "-nqe | --network-qos-enable Enable network QoS. DEFAULT: Disabled" + echo "-ha | --ha-enabled Enable high availability. DEFAULT: HA Disabled" + echo "-wk | --num-workers Number of worker nodes. DEFAULT: 2 workers" + echo "-cn | --cluster-name Configure the kind cluster's name" + echo "-dns | --enable-dnsnameresolver Enable DNSNameResolver for resolving the DNS names used in the DNS rules of EgressFirewall." + echo "-ic | --enable-interconnect Enable interconnect with each node as a zone (only valid if OVN_HA is false)" + echo "-npz | --nodes-per-zone Specify number of nodes per zone (Default 0, which means global zone; >0 means interconnect zone, where 1 for single-node zone, >1 for multi-node zone). 
If this value > 1, then (total k8s nodes (workers + 1) / num of nodes per zone) should be zero." echo "" } @@ -168,6 +171,8 @@ parse_args() { ;; -nse | --network-segmentation-enable) ENABLE_NETWORK_SEGMENTATION=true ;; + -uae | --preconfigured-udn-addresses-enable) ENABLE_PRE_CONF_UDN_ADDR=true + ;; -nqe | --network-qos-enable ) OVN_NETWORK_QOS_ENABLE=true ;; -ha | --ha-enabled ) OVN_HA=true @@ -223,6 +228,7 @@ print_params() { echo "KIND_REMOVE_TAINT = $KIND_REMOVE_TAINT" echo "ENABLE_MULTI_NET = $ENABLE_MULTI_NET" echo "ENABLE_NETWORK_SEGMENTATION = $ENABLE_NETWORK_SEGMENTATION" + echo "ENABLE_PRE_CONF_UDN_ADDR = $ENABLE_PRE_CONF_UDN_ADDR" echo "OVN_NETWORK_QOS_ENABLE = $OVN_NETWORK_QOS_ENABLE" echo "OVN_IMAGE = $OVN_IMAGE" echo "KIND_NUM_MASTER = $KIND_NUM_MASTER" @@ -416,6 +422,7 @@ helm install ovn-kubernetes . -f "${value_file}" \ --set global.enableMulticast=$(if [ "${OVN_MULTICAST_ENABLE}" == "true" ]; then echo "true"; else echo "false"; fi) \ --set global.enableMultiNetwork=$(if [ "${ENABLE_MULTI_NET}" == "true" ]; then echo "true"; else echo "false"; fi) \ --set global.enableNetworkSegmentation=$(if [ "${ENABLE_NETWORK_SEGMENTATION}" == "true" ]; then echo "true"; else echo "false"; fi) \ + --set global.enablePreconfiguredUDNAddresses=$(if [ "${ENABLE_PRE_CONF_UDN_ADDR}" == "true" ]; then echo "true"; else echo "false"; fi) \ --set global.enableHybridOverlay=$(if [ "${OVN_HYBRID_OVERLAY_ENABLE}" == "true" ]; then echo "true"; else echo "false"; fi) \ --set global.enableObservability=$(if [ "${OVN_OBSERV_ENABLE}" == "true" ]; then echo "true"; else echo "false"; fi) \ --set global.emptyLbEvents=$(if [ "${OVN_EMPTY_LB_EVENTS}" == "true" ]; then echo "true"; else echo "false"; fi) \ diff --git a/contrib/kind.sh b/contrib/kind.sh index 5ec980bd95..72c48a9af2 100755 --- a/contrib/kind.sh +++ b/contrib/kind.sh @@ -80,6 +80,7 @@ usage() { echo " [-is | --ipsec]" echo " [-cm | --compact-mode]" echo " [-ic | --enable-interconnect]" + echo " [-uae | 
--preconfigured-udn-addresses-enable]" echo " [-rae | --enable-route-advertisements]" echo " [-adv | --advertise-default-network]" echo " [-nqe | --network-qos-enable]" @@ -88,73 +89,74 @@ usage() { echo " [-obs | --observability]" echo " [-h]]" echo "" - echo "-cf | --config-file Name of the KIND J2 configuration file." - echo " DEFAULT: ./kind.yaml.j2" - echo "-kt | --keep-taint Do not remove taint components." - echo " DEFAULT: Remove taint components." - echo "-ha | --ha-enabled Enable high availability. DEFAULT: HA Disabled." - echo "-scm | --separate-cluster-manager Separate cluster manager from ovnkube-master and run as a separate container within ovnkube-master deployment." - echo "-me | --multicast-enabled Enable multicast. DEFAULT: Disabled." - echo "-ho | --hybrid-enabled Enable hybrid overlay. DEFAULT: Disabled." - echo "-ds | --disable-snat-multiple-gws Disable SNAT for multiple gws. DEFAULT: Disabled." - echo "-dp | --disable-pkt-mtu-check Disable checking packet size greater than MTU. Default: Disabled" - echo "-df | --disable-forwarding Disable forwarding on OVNK managed interfaces. Default: Disabled" - echo "-ecp | --encap-port UDP port used for geneve overlay. DEFAULT: 6081" - echo "-pl | --install-cni-plugins ] Installs additional CNI network plugins. DEFAULT: Disabled" - echo "-nf | --netflow-targets Comma delimited list of ip:port or :port (using node IP) netflow collectors. DEFAULT: Disabled." - echo "-sf | --sflow-targets Comma delimited list of ip:port or :port (using node IP) sflow collectors. DEFAULT: Disabled." - echo "-if | --ipfix-targets Comma delimited list of ip:port or :port (using node IP) ipfix collectors. DEFAULT: Disabled." - echo "-ifs | --ipfix-sampling Fraction of packets that are sampled and sent to each target collector: 1 packet out of every . DEFAULT: 400 (1 out of 400 packets)." - echo "-ifm | --ipfix-cache-max-flows Maximum number of IPFIX flow records that can be cached at a time. If 0, caching is disabled. 
DEFAULT: Disabled." - echo "-ifa | --ipfix-cache-active-timeout Maximum period in seconds for which an IPFIX flow record is cached and aggregated before being sent. If 0, caching is disabled. DEFAULT: 60." - echo "-el | --ovn-empty-lb-events Enable empty-lb-events generation for LB without backends. DEFAULT: Disabled" - echo "-ii | --install-ingress Flag to install Ingress Components." - echo " DEFAULT: Don't install ingress components." - echo "-mlb | --install-metallb Install metallb to test service type LoadBalancer deployments" - echo "-n4 | --no-ipv4 Disable IPv4. DEFAULT: IPv4 Enabled." - echo "-i6 | --ipv6 Enable IPv6. DEFAULT: IPv6 Disabled." - echo "-wk | --num-workers Number of worker nodes. DEFAULT: HA - 2 worker" - echo " nodes and no HA - 0 worker nodes." - echo "-sw | --allow-system-writes Allow script to update system. Intended to allow" - echo " github CI to be updated with IPv6 settings." - echo " DEFAULT: Don't allow." - echo "-gm | --gateway-mode Enable 'shared' or 'local' gateway mode." - echo " DEFAULT: shared." - echo "-ov | --ovn-image Use the specified docker image instead of building locally. DEFAULT: local build." - echo "-ovr | --ovn-repo Specify the repository to build OVN from" - echo "-ovg | --ovn-gitref Specify the branch, tag or commit id to build OVN from, it can be a pattern like 'branch-*' it will order results and use the first one" - echo "-ml | --master-loglevel Log level for ovnkube (master), DEFAULT: 5." - echo "-nl | --node-loglevel Log level for ovnkube (node), DEFAULT: 5" - echo "-dbl | --dbchecker-loglevel Log level for ovn-dbchecker (ovnkube-db), DEFAULT: 5." - echo "-ndl | --ovn-loglevel-northd Log config for ovn northd, DEFAULT: '-vconsole:info -vfile:info'." - echo "-nbl | --ovn-loglevel-nb Log config for northbound DB DEFAULT: '-vconsole:info -vfile:info'." - echo "-sbl | --ovn-loglevel-sb Log config for southboudn DB DEFAULT: '-vconsole:info -vfile:info'." 
- echo "-cl | --ovn-loglevel-controller Log config for ovn-controller DEFAULT: '-vconsole:info'." - echo "-lcl | --libovsdb-client-logfile Separate logs for libovsdb client into provided file. DEFAULT: do not separate." - echo "-ep | --experimental-provider Use an experimental OCI provider such as podman, instead of docker. DEFAULT: Disabled." - echo "-eb | --egress-gw-separate-bridge The external gateway traffic uses a separate bridge." - echo "-lr | --local-kind-registry Configure kind to use a local docker registry rather than manually loading images" - echo "-dd | --dns-domain Configure a custom dnsDomain for k8s services, Defaults to 'cluster.local'" - echo "-cn | --cluster-name Configure the kind cluster's name" - echo "-ric | --run-in-container Configure the script to be run from a docker container, allowing it to still communicate with the kind controlplane" - echo "-ehp | --egress-ip-healthcheck-port TCP port used for gRPC session by egress IP node check. DEFAULT: 9107 (Use "0" for legacy dial to port 9)." - echo "-is | --ipsec Enable IPsec encryption (spawns ovn-ipsec pods)" - echo "-sm | --scale-metrics Enable scale metrics" - echo "-cm | --compact-mode Enable compact mode, ovnkube master and node run in the same process." - echo "-ic | --enable-interconnect Enable interconnect with each node as a zone (only valid if OVN_HA is false)" - echo "-nqe | --network-qos-enable Enable network QoS. DEFAULT: Disabled." - echo "--disable-ovnkube-identity Disable per-node cert and ovnkube-identity webhook" - echo "-npz | --nodes-per-zone If interconnect is enabled, number of nodes per zone (Default 1). If this value > 1, then (total k8s nodes (workers + 1) / num of nodes per zone) should be zero." - echo "-mtu Define the overlay mtu" - echo "--isolated Deploy with an isolated environment (no default gateway)" - echo "--delete Delete current cluster" - echo "--deploy Deploy ovn kubernetes without restarting kind" - echo "--add-nodes Adds nodes to an existing cluster. 
The number of nodes to be added is specified by --num-workers. Also use -ic if the cluster is using interconnect." - echo "-dns | --enable-dnsnameresolver Enable DNSNameResolver for resolving the DNS names used in the DNS rules of EgressFirewall." - echo "-obs | --observability Enable OVN Observability feature." - echo "-rae | --enable-route-advertisements Enable route advertisements" - echo "-adv | --advertise-default-network Applies a RouteAdvertisements configuration to advertise the default network on all nodes" - echo "" +echo "-cf | --config-file Name of the KIND J2 configuration file." +echo " DEFAULT: ./kind.yaml.j2" +echo "-kt | --keep-taint Do not remove taint components." +echo " DEFAULT: Remove taint components." +echo "-ha | --ha-enabled Enable high availability. DEFAULT: HA Disabled." +echo "-scm | --separate-cluster-manager Separate cluster manager from ovnkube-master and run as a separate container within ovnkube-master deployment." +echo "-me | --multicast-enabled Enable multicast. DEFAULT: Disabled." +echo "-ho | --hybrid-enabled Enable hybrid overlay. DEFAULT: Disabled." +echo "-ds | --disable-snat-multiple-gws Disable SNAT for multiple gws. DEFAULT: Disabled." +echo "-dp | --disable-pkt-mtu-check Disable checking packet size greater than MTU. Default: Disabled" +echo "-df | --disable-forwarding Disable forwarding on OVNK managed interfaces. Default: Disabled" +echo "-ecp | --encap-port UDP port used for geneve overlay. DEFAULT: 6081" +echo "-pl | --install-cni-plugins ] Installs additional CNI network plugins. DEFAULT: Disabled" +echo "-nf | --netflow-targets Comma delimited list of ip:port or :port (using node IP) netflow collectors. DEFAULT: Disabled." +echo "-sf | --sflow-targets Comma delimited list of ip:port or :port (using node IP) sflow collectors. DEFAULT: Disabled." +echo "-if | --ipfix-targets Comma delimited list of ip:port or :port (using node IP) ipfix collectors. DEFAULT: Disabled." 
+echo "-ifs | --ipfix-sampling Fraction of packets that are sampled and sent to each target collector: 1 packet out of every . DEFAULT: 400 (1 out of 400 packets)." +echo "-ifm | --ipfix-cache-max-flows Maximum number of IPFIX flow records that can be cached at a time. If 0, caching is disabled. DEFAULT: Disabled." +echo "-ifa | --ipfix-cache-active-timeout Maximum period in seconds for which an IPFIX flow record is cached and aggregated before being sent. If 0, caching is disabled. DEFAULT: 60." +echo "-el | --ovn-empty-lb-events Enable empty-lb-events generation for LB without backends. DEFAULT: Disabled" +echo "-ii | --install-ingress Flag to install Ingress Components." +echo " DEFAULT: Don't install ingress components." +echo "-mlb | --install-metallb Install metallb to test service type LoadBalancer deployments" +echo "-n4 | --no-ipv4 Disable IPv4. DEFAULT: IPv4 Enabled." +echo "-i6 | --ipv6 Enable IPv6. DEFAULT: IPv6 Disabled." +echo "-wk | --num-workers Number of worker nodes. DEFAULT: HA - 2 worker" +echo " nodes and no HA - 0 worker nodes." +echo "-sw | --allow-system-writes Allow script to update system. Intended to allow" +echo " github CI to be updated with IPv6 settings." +echo " DEFAULT: Don't allow." +echo "-gm | --gateway-mode Enable 'shared' or 'local' gateway mode." +echo " DEFAULT: shared." +echo "-ov | --ovn-image Use the specified docker image instead of building locally. DEFAULT: local build." +echo "-ovr | --ovn-repo Specify the repository to build OVN from" +echo "-ovg | --ovn-gitref Specify the branch, tag or commit id to build OVN from, it can be a pattern like 'branch-*' it will order results and use the first one" +echo "-ml | --master-loglevel Log level for ovnkube (master), DEFAULT: 5." +echo "-nl | --node-loglevel Log level for ovnkube (node), DEFAULT: 5" +echo "-dbl | --dbchecker-loglevel Log level for ovn-dbchecker (ovnkube-db), DEFAULT: 5." 
+echo "-ndl | --ovn-loglevel-northd Log config for ovn northd, DEFAULT: '-vconsole:info -vfile:info'." +echo "-nbl | --ovn-loglevel-nb Log config for northbound DB DEFAULT: '-vconsole:info -vfile:info'." +echo "-sbl | --ovn-loglevel-sb Log config for southboudn DB DEFAULT: '-vconsole:info -vfile:info'." +echo "-cl | --ovn-loglevel-controller Log config for ovn-controller DEFAULT: '-vconsole:info'." +echo "-lcl | --libovsdb-client-logfile Separate logs for libovsdb client into provided file. DEFAULT: do not separate." +echo "-ep | --experimental-provider Use an experimental OCI provider such as podman, instead of docker. DEFAULT: Disabled." +echo "-eb | --egress-gw-separate-bridge The external gateway traffic uses a separate bridge." +echo "-lr | --local-kind-registry Configure kind to use a local docker registry rather than manually loading images" +echo "-dd | --dns-domain Configure a custom dnsDomain for k8s services, Defaults to 'cluster.local'" +echo "-cn | --cluster-name Configure the kind cluster's name" +echo "-ric | --run-in-container Configure the script to be run from a docker container, allowing it to still communicate with the kind controlplane" +echo "-ehp | --egress-ip-healthcheck-port TCP port used for gRPC session by egress IP node check. DEFAULT: 9107 (Use "0" for legacy dial to port 9)." +echo "-is | --ipsec Enable IPsec encryption (spawns ovn-ipsec pods)" +echo "-sm | --scale-metrics Enable scale metrics" +echo "-cm | --compact-mode Enable compact mode, ovnkube master and node run in the same process." +echo "-ic | --enable-interconnect Enable interconnect with each node as a zone (only valid if OVN_HA is false)" +echo "-nqe | --network-qos-enable Enable network QoS. DEFAULT: Disabled." +echo "--disable-ovnkube-identity Disable per-node cert and ovnkube-identity webhook" +echo "-npz | --nodes-per-zone If interconnect is enabled, number of nodes per zone (Default 1). 
If this value > 1, then (total k8s nodes (workers + 1) / num of nodes per zone) should be zero." +echo "-mtu Define the overlay mtu" +echo "--isolated Deploy with an isolated environment (no default gateway)" +echo "--delete Delete current cluster" +echo "--deploy Deploy ovn kubernetes without restarting kind" +echo "--add-nodes Adds nodes to an existing cluster. The number of nodes to be added is specified by --num-workers. Also use -ic if the cluster is using interconnect." +echo "-dns | --enable-dnsnameresolver Enable DNSNameResolver for resolving the DNS names used in the DNS rules of EgressFirewall." +echo "-obs | --observability Enable OVN Observability feature." +echo "-uae | --preconfigured-udn-addresses-enable Enable connecting workloads with preconfigured network to user-defined networks" +echo "-rae | --enable-route-advertisements Enable route advertisements" +echo "-adv | --advertise-default-network Applies a RouteAdvertisements configuration to advertise the default network on all nodes" +echo "" } parse_args() { @@ -337,6 +339,8 @@ parse_args() { ;; -nse | --network-segmentation-enable) ENABLE_NETWORK_SEGMENTATION=true ;; + -uae | --preconfigured-udn-addresses-enable) ENABLE_PRE_CONF_UDN_ADDR=true + ;; -rae | --route-advertisements-enable) ENABLE_ROUTE_ADVERTISEMENTS=true ;; -adv | --advertise-default-network) ADVERTISE_DEFAULT_NETWORK=true @@ -434,6 +438,7 @@ print_params() { echo "ENABLE_NETWORK_SEGMENTATION= $ENABLE_NETWORK_SEGMENTATION" echo "ENABLE_ROUTE_ADVERTISEMENTS= $ENABLE_ROUTE_ADVERTISEMENTS" echo "ADVERTISE_DEFAULT_NETWORK = $ADVERTISE_DEFAULT_NETWORK" + echo "ENABLE_PRE_CONF_UDN_ADDR = $ENABLE_PRE_CONF_UDN_ADDR" echo "OVN_ENABLE_INTERCONNECT = $OVN_ENABLE_INTERCONNECT" if [ "$OVN_ENABLE_INTERCONNECT" == true ]; then echo "KIND_NUM_NODES_PER_ZONE = $KIND_NUM_NODES_PER_ZONE" @@ -654,6 +659,11 @@ set_default_params() { fi ENABLE_MULTI_NET=${ENABLE_MULTI_NET:-false} ENABLE_NETWORK_SEGMENTATION=${ENABLE_NETWORK_SEGMENTATION:-false} + if [ 
"$ENABLE_NETWORK_SEGMENTATION" == true ] && [ "$ENABLE_MULTI_NET" != true ]; then + echo "Network segmentation (UDN) requires multi-network to be enabled (-mne)" + exit 1 + fi + ENABLE_ROUTE_ADVERTISEMENTS=${ENABLE_ROUTE_ADVERTISEMENTS:-false} if [ "$ENABLE_ROUTE_ADVERTISEMENTS" == true ] && [ "$ENABLE_MULTI_NET" != true ]; then echo "Route advertisements requires multi-network to be enabled (-mne)" @@ -663,6 +673,16 @@ set_default_params() { echo "Route advertisements requires interconnect to be enabled (-ic)" exit 1 fi + + ENABLE_PRE_CONF_UDN_ADDR=${ENABLE_PRE_CONF_UDN_ADDR:-false} + if [[ $ENABLE_PRE_CONF_UDN_ADDR == true && $ENABLE_NETWORK_SEGMENTATION != true ]]; then + echo "Preconfigured UDN addresses requires network-segmentation to be enabled (-nse)" + exit 1 + fi + if [[ $ENABLE_PRE_CONF_UDN_ADDR == true && $OVN_ENABLE_INTERCONNECT != true ]]; then + echo "Preconfigured UDN addresses requires interconnect to be enabled (-ic)" + exit 1 + fi ADVERTISE_DEFAULT_NETWORK=${ADVERTISE_DEFAULT_NETWORK:-false} OVN_COMPACT_MODE=${OVN_COMPACT_MODE:-false} if [ "$OVN_COMPACT_MODE" == true ]; then @@ -916,6 +936,7 @@ create_ovn_kube_manifests() { --ex-gw-network-interface="${OVN_EX_GW_NETWORK_INTERFACE}" \ --multi-network-enable="${ENABLE_MULTI_NET}" \ --network-segmentation-enable="${ENABLE_NETWORK_SEGMENTATION}" \ + --preconfigured-udn-addresses-enable="${ENABLE_PRE_CONF_UDN_ADDR}" \ --route-advertisements-enable="${ENABLE_ROUTE_ADVERTISEMENTS}" \ --advertise-default-network="${ADVERTISE_DEFAULT_NETWORK}" \ --ovnkube-metrics-scale-enable="${OVN_METRICS_SCALE_ENABLE}" \ diff --git a/dist/images/daemonset.sh b/dist/images/daemonset.sh index 95e4a503e8..7c3daedee9 100755 --- a/dist/images/daemonset.sh +++ b/dist/images/daemonset.sh @@ -71,6 +71,7 @@ OVN_EGRESSSERVICE_ENABLE= OVN_DISABLE_OVN_IFACE_ID_VER="false" OVN_MULTI_NETWORK_ENABLE= OVN_NETWORK_SEGMENTATION_ENABLE= +OVN_PRE_CONF_UDN_ADDR_ENABLE= OVN_ROUTE_ADVERTISEMENTS_ENABLE= OVN_ADVERTISE_DEFAULT_NETWORK= 
OVN_V4_JOIN_SUBNET="" @@ -273,6 +274,9 @@ while [ "$1" != "" ]; do --network-segmentation-enable) OVN_NETWORK_SEGMENTATION_ENABLE=$VALUE ;; + --preconfigured-udn-addresses-enable) + OVN_PRE_CONF_UDN_ADDR_ENABLE=$VALUE + ;; --route-advertisements-enable) OVN_ROUTE_ADVERTISEMENTS_ENABLE=$VALUE ;; @@ -468,6 +472,8 @@ ovn_multi_network_enable=${OVN_MULTI_NETWORK_ENABLE} echo "ovn_multi_network_enable: ${ovn_multi_network_enable}" ovn_network_segmentation_enable=${OVN_NETWORK_SEGMENTATION_ENABLE} echo "ovn_network_segmentation_enable: ${ovn_network_segmentation_enable}" +ovn_pre_conf_udn_addr_enable=${OVN_PRE_CONF_UDN_ADDR_ENABLE} +echo "ovn_pre_conf_udn_addr_enable: ${ovn_pre_conf_udn_addr_enable}" ovn_route_advertisements_enable=${OVN_ROUTE_ADVERTISEMENTS_ENABLE} echo "ovn_route_advertisements_enable: ${ovn_route_advertisements_enable}" ovn_advertise_default_network=${OVN_ADVERTISE_DEFAULT_NETWORK} @@ -612,6 +618,7 @@ ovn_image=${ovnkube_image} \ ovn_egress_ip_healthcheck_port=${ovn_egress_ip_healthcheck_port} \ ovn_multi_network_enable=${ovn_multi_network_enable} \ ovn_network_segmentation_enable=${ovn_network_segmentation_enable} \ + ovn_pre_conf_udn_addr_enable=${ovn_pre_conf_udn_addr_enable} \ ovn_route_advertisements_enable=${ovn_route_advertisements_enable} \ ovn_egress_service_enable=${ovn_egress_service_enable} \ ovn_ssl_en=${ovn_ssl_en} \ @@ -814,6 +821,7 @@ ovn_image=${ovnkube_image} \ ovn_egress_qos_enable=${ovn_egress_qos_enable} \ ovn_multi_network_enable=${ovn_multi_network_enable} \ ovn_network_segmentation_enable=${ovn_network_segmentation_enable} \ + ovn_pre_conf_udn_addr_enable=${ovn_pre_conf_udn_addr_enable} \ ovn_route_advertisements_enable=${ovn_route_advertisements_enable} \ ovn_egress_service_enable=${ovn_egress_service_enable} \ ovn_ssl_en=${ovn_ssl_en} \ @@ -894,6 +902,7 @@ ovn_image=${ovnkube_image} \ ovn_egress_qos_enable=${ovn_egress_qos_enable} \ ovn_multi_network_enable=${ovn_multi_network_enable} \ 
ovn_network_segmentation_enable=${ovn_network_segmentation_enable} \ + ovn_pre_conf_udn_addr_enable=${ovn_pre_conf_udn_addr_enable} \ ovn_route_advertisements_enable=${ovn_route_advertisements_enable} \ ovn_egress_service_enable=${ovn_egress_service_enable} \ ovn_ssl_en=${ovn_ssl_en} \ @@ -961,6 +970,7 @@ ovn_image=${ovnkube_image} \ ovn_egress_qos_enable=${ovn_egress_qos_enable} \ ovn_multi_network_enable=${ovn_multi_network_enable} \ ovn_network_segmentation_enable=${ovn_network_segmentation_enable} \ + ovn_pre_conf_udn_addr_enable=${ovn_pre_conf_udn_addr_enable} \ ovn_route_advertisements_enable=${ovn_route_advertisements_enable} \ ovn_ssl_en=${ovn_ssl_en} \ ovn_remote_probe_interval=${ovn_remote_probe_interval} \ @@ -1057,6 +1067,7 @@ ovn_enable_dnsnameresolver=${ovn_enable_dnsnameresolver} \ jinjanate ../templates/rbac-ovnkube-node.yaml.j2 -o ${output_dir}/rbac-ovnkube-node.yaml ovn_network_segmentation_enable=${ovn_network_segmentation_enable} \ +ovn_pre_conf_udn_addr_enable=${ovn_pre_conf_udn_addr_enable} \ ovn_enable_dnsnameresolver=${ovn_enable_dnsnameresolver} \ ovn_route_advertisements_enable=${ovn_route_advertisements_enable} \ jinjanate ../templates/rbac-ovnkube-cluster-manager.yaml.j2 -o ${output_dir}/rbac-ovnkube-cluster-manager.yaml diff --git a/dist/images/ovnkube.sh b/dist/images/ovnkube.sh index 85b8eeab14..32d3347cd3 100755 --- a/dist/images/ovnkube.sh +++ b/dist/images/ovnkube.sh @@ -269,6 +269,8 @@ ovn_disable_ovn_iface_id_ver=${OVN_DISABLE_OVN_IFACE_ID_VER:-false} ovn_multi_network_enable=${OVN_MULTI_NETWORK_ENABLE:-false} #OVN_NETWORK_SEGMENTATION_ENABLE - enable user defined primary networks for ovn-kubernetes ovn_network_segmentation_enable=${OVN_NETWORK_SEGMENTATION_ENABLE:=false} +#OVN_PRE_CONF_UDN_ADDR_ENABLE - enable connecting workloads with custom network configuration to UDNs +ovn_pre_conf_udn_addr_enable=${OVN_PRE_CONF_UDN_ADDR_ENABLE:=false} #OVN_NROUTE_ADVERTISEMENTS_ENABLE - enable route advertisements for ovn-kubernetes 
ovn_route_advertisements_enable=${OVN_ROUTE_ADVERTISEMENTS_ENABLE:=false} ovn_acl_logging_rate_limit=${OVN_ACL_LOGGING_RATE_LIMIT:-"20"} @@ -1269,7 +1271,7 @@ ovn-master() { ovnkube_metrics_scale_enable_flag="--metrics-enable-scale --metrics-enable-pprof" fi echo "ovnkube_metrics_scale_enable_flag: ${ovnkube_metrics_scale_enable_flag}" - + ovn_stateless_netpol_enable_flag= if [[ ${ovn_stateless_netpol_enable} == "true" ]]; then ovn_stateless_netpol_enable_flag="--enable-stateless-netpol" @@ -1293,7 +1295,7 @@ ovn-master() { ovn_observ_enable_flag="--enable-observability" fi echo "ovn_observ_enable_flag=${ovn_observ_enable_flag}" - + nohostsubnet_label_option= if [[ ${ovn_nohostsubnet_label} != "" ]]; then nohostsubnet_label_option="--no-hostsubnet-nodes=${ovn_nohostsubnet_label}" @@ -1539,6 +1541,12 @@ ovnkube-controller() { fi echo "network_segmentation_enabled_flag=${network_segmentation_enabled_flag}" + pre_conf_udn_addr_enable_flag= + if [[ ${ovn_pre_conf_udn_addr_enable} == "true" ]]; then + pre_conf_udn_addr_enable_flag="--enable-preconfigured-udn-addresses" + fi + echo "pre_conf_udn_addr_enable_flag=${pre_conf_udn_addr_enable_flag}" + route_advertisements_enabled_flag= if [[ ${ovn_route_advertisements_enable} == "true" ]]; then route_advertisements_enabled_flag="--enable-route-advertisements" @@ -1659,6 +1667,7 @@ ovnkube-controller() { ${multicast_enabled_flag} \ ${multi_network_enabled_flag} \ ${network_segmentation_enabled_flag} \ + ${pre_conf_udn_addr_enable_flag} \ ${route_advertisements_enabled_flag} \ ${ovn_acl_logging_rate_limit_flag} \ ${ovn_dbs} \ @@ -1843,6 +1852,12 @@ ovnkube-controller-with-node() { fi echo "network_segmentation_enabled_flag=${network_segmentation_enabled_flag}" + pre_conf_udn_addr_enable_flag= + if [[ ${ovn_pre_conf_udn_addr_enable} == "true" ]]; then + pre_conf_udn_addr_enable_flag="--enable-preconfigured-udn-addresses" + fi + echo "pre_conf_udn_addr_enable_flag=${pre_conf_udn_addr_enable_flag}" + 
route_advertisements_enabled_flag= if [[ ${ovn_route_advertisements_enable} == "true" ]]; then route_advertisements_enabled_flag="--enable-route-advertisements" @@ -1961,7 +1976,7 @@ ovnkube-controller-with-node() { if test -z "${OVN_UNPRIVILEGED_MODE+x}" -o "x${OVN_UNPRIVILEGED_MODE}" = xno; then ovn_unprivileged_flag="" fi - + ovn_metrics_bind_address="${metrics_endpoint_ip}:${metrics_bind_port}" metrics_bind_address="${metrics_endpoint_ip}:${metrics_worker_port}" echo "ovn_metrics_bind_address=${ovn_metrics_bind_address}" @@ -2102,6 +2117,7 @@ ovnkube-controller-with-node() { ${multicast_enabled_flag} \ ${multi_network_enabled_flag} \ ${network_segmentation_enabled_flag} \ + ${pre_conf_udn_addr_enable_flag} \ ${route_advertisements_enabled_flag} \ ${netflow_targets} \ ${ofctrl_wait_before_clear} \ @@ -2269,6 +2285,12 @@ ovn-cluster-manager() { fi echo "network_segmentation_enabled_flag=${network_segmentation_enabled_flag}" + pre_conf_udn_addr_enable_flag= + if [[ ${ovn_pre_conf_udn_addr_enable} == "true" ]]; then + pre_conf_udn_addr_enable_flag="--enable-preconfigured-udn-addresses" + fi + echo "pre_conf_udn_addr_enable_flag=${pre_conf_udn_addr_enable_flag}" + route_advertisements_enabled_flag= if [[ ${ovn_route_advertisements_enable} == "true" ]]; then route_advertisements_enabled_flag="--enable-route-advertisements" @@ -2336,6 +2358,7 @@ ovn-cluster-manager() { ${multicast_enabled_flag} \ ${multi_network_enabled_flag} \ ${network_segmentation_enabled_flag} \ + ${pre_conf_udn_addr_enable_flag} \ ${route_advertisements_enabled_flag} \ ${persistent_ips_enabled_flag} \ ${ovnkube_enable_interconnect_flag} \ @@ -2513,6 +2536,11 @@ ovn-node() { network_segmentation_enabled_flag="--enable-multi-network --enable-network-segmentation" fi + pre_conf_udn_addr_enable_flag= + if [[ ${ovn_pre_conf_udn_addr_enable} == "true" ]]; then + pre_conf_udn_addr_enable_flag="--enable-preconfigured-udn-addresses" + fi + route_advertisements_enabled_flag= if [[ 
${ovn_route_advertisements_enable} == "true" ]]; then route_advertisements_enabled_flag="--enable-route-advertisements" @@ -2748,6 +2776,7 @@ ovn-node() { ${multicast_enabled_flag} \ ${multi_network_enabled_flag} \ ${network_segmentation_enabled_flag} \ + ${pre_conf_udn_addr_enable_flag} \ ${route_advertisements_enabled_flag} \ ${netflow_targets} \ ${ofctrl_wait_before_clear} \ diff --git a/dist/templates/ovnkube-control-plane.yaml.j2 b/dist/templates/ovnkube-control-plane.yaml.j2 index 51e3f9319b..dccc617ebc 100644 --- a/dist/templates/ovnkube-control-plane.yaml.j2 +++ b/dist/templates/ovnkube-control-plane.yaml.j2 @@ -139,6 +139,8 @@ spec: value: "{{ ovn_multi_network_enable }}" - name: OVN_NETWORK_SEGMENTATION_ENABLE value: "{{ ovn_network_segmentation_enable }}" + - name: OVN_PRE_CONF_UDN_ADDR_ENABLE + value: "{{ ovn_pre_conf_udn_addr_enable }}" - name: OVN_ROUTE_ADVERTISEMENTS_ENABLE value: "{{ ovn_route_advertisements_enable }}" - name: OVN_HYBRID_OVERLAY_NET_CIDR diff --git a/dist/templates/ovnkube-node.yaml.j2 b/dist/templates/ovnkube-node.yaml.j2 index 98591a5ac1..2bf8e5ed2a 100644 --- a/dist/templates/ovnkube-node.yaml.j2 +++ b/dist/templates/ovnkube-node.yaml.j2 @@ -255,6 +255,10 @@ spec: - name: OVNKUBE_NODE_MODE value: "dpu" {% endif -%} + {% if ovnkube_app_name!="ovnkube-node-dpu" and ovnkube_app_name!="ovnkube-node-dpu-host" -%} + - name: OVN_PRE_CONF_UDN_ADDR_ENABLE + value: "{{ ovn_pre_conf_udn_addr_enable }}" + {% endif -%} - name: OVNKUBE_NODE_MGMT_PORT_NETDEV value: "{{ ovnkube_node_mgmt_port_netdev }}" - name: OVN_HOST_NETWORK_NAMESPACE diff --git a/dist/templates/ovnkube-single-node-zone.yaml.j2 b/dist/templates/ovnkube-single-node-zone.yaml.j2 index d2d485cca7..df5533a668 100644 --- a/dist/templates/ovnkube-single-node-zone.yaml.j2 +++ b/dist/templates/ovnkube-single-node-zone.yaml.j2 @@ -433,6 +433,8 @@ spec: value: "{{ ovn_multi_network_enable }}" - name: OVN_NETWORK_SEGMENTATION_ENABLE value: "{{ ovn_network_segmentation_enable }}" + - 
name: OVN_PRE_CONF_UDN_ADDR_ENABLE + value: "{{ ovn_pre_conf_udn_addr_enable }}" - name: OVN_ROUTE_ADVERTISEMENTS_ENABLE value: "{{ ovn_route_advertisements_enable }}" - name: OVNKUBE_NODE_MGMT_PORT_NETDEV diff --git a/dist/templates/ovnkube-zone-controller.yaml.j2 b/dist/templates/ovnkube-zone-controller.yaml.j2 index 363ade3014..cc87fe1a53 100644 --- a/dist/templates/ovnkube-zone-controller.yaml.j2 +++ b/dist/templates/ovnkube-zone-controller.yaml.j2 @@ -345,6 +345,8 @@ spec: value: "{{ ovn_multi_network_enable }}" - name: OVN_NETWORK_SEGMENTATION_ENABLE value: "{{ ovn_network_segmentation_enable }}" + - name: OVN_PRE_CONF_UDN_ADDR_ENABLE + value: "{{ ovn_pre_conf_udn_addr_enable }}" - name: OVN_ROUTE_ADVERTISEMENTS_ENABLE value: "{{ ovn_route_advertisements_enable }}" - name: OVN_HYBRID_OVERLAY_NET_CIDR diff --git a/go-controller/pkg/config/config.go b/go-controller/pkg/config/config.go index 7cd97479c4..297f18b55f 100644 --- a/go-controller/pkg/config/config.go +++ b/go-controller/pkg/config/config.go @@ -423,6 +423,7 @@ type OVNKubernetesFeatureConfig struct { EgressIPNodeHealthCheckPort int `gcfg:"egressip-node-healthcheck-port"` EnableMultiNetwork bool `gcfg:"enable-multi-network"` EnableNetworkSegmentation bool `gcfg:"enable-network-segmentation"` + EnablePreconfiguredUDNAddresses bool `gcfg:"enable-preconfigured-udn-addresses"` EnableRouteAdvertisements bool `gcfg:"enable-route-advertisements"` // This feature requires a kernel fix https://github.com/torvalds/linux/commit/7f3287db654395f9c5ddd246325ff7889f550286 // to work on a kind cluster. Flag allows to disable it for current CI, will be turned on when github runners have this fix. 
@@ -1098,6 +1099,12 @@ var OVNK8sFeatureFlags = []cli.Flag{ Destination: &cliConfig.OVNKubernetesFeature.EnableNetworkSegmentation, Value: OVNKubernetesFeature.EnableNetworkSegmentation, }, + &cli.BoolFlag{ + Name: "enable-preconfigured-udn-addresses", + Usage: "Enable workloads connect to user-defined network with preconfigured addresses.", + Destination: &cliConfig.OVNKubernetesFeature.EnablePreconfiguredUDNAddresses, + Value: OVNKubernetesFeature.EnablePreconfiguredUDNAddresses, + }, &cli.BoolFlag{ Name: "enable-route-advertisements", Usage: "Configure to use route advertisements feature with ovn-kubernetes.", diff --git a/go-controller/pkg/config/config_test.go b/go-controller/pkg/config/config_test.go index ddfaf84e65..39e7fa41bc 100644 --- a/go-controller/pkg/config/config_test.go +++ b/go-controller/pkg/config/config_test.go @@ -227,6 +227,7 @@ egressip-node-healthcheck-port=1234 enable-multi-network=false enable-multi-networkpolicy=false enable-network-segmentation=false +enable-preconfigured-udn-addresses=false enable-route-advertisements=false enable-interconnect=false enable-multi-external-gateway=false @@ -338,6 +339,7 @@ var _ = Describe("Config Operations", func() { gomega.Expect(OVNKubernetesFeature.EgressIPNodeHealthCheckPort).To(gomega.Equal(0)) gomega.Expect(OVNKubernetesFeature.EnableMultiNetwork).To(gomega.BeFalse()) gomega.Expect(OVNKubernetesFeature.EnableNetworkSegmentation).To(gomega.BeFalse()) + gomega.Expect(OVNKubernetesFeature.EnablePreconfiguredUDNAddresses).To(gomega.BeFalse()) gomega.Expect(OVNKubernetesFeature.EnableRouteAdvertisements).To(gomega.BeFalse()) gomega.Expect(OVNKubernetesFeature.EnableMultiNetworkPolicy).To(gomega.BeFalse()) gomega.Expect(OVNKubernetesFeature.EnableInterconnect).To(gomega.BeFalse()) @@ -597,6 +599,7 @@ var _ = Describe("Config Operations", func() { "enable-multi-network=true", "enable-multi-networkpolicy=true", "enable-network-segmentation=true", + "enable-preconfigured-udn-addresses=true", 
"enable-route-advertisements=true", "enable-interconnect=true", "enable-multi-external-gateway=true", @@ -687,6 +690,7 @@ var _ = Describe("Config Operations", func() { gomega.Expect(OVNKubernetesFeature.EgressIPNodeHealthCheckPort).To(gomega.Equal(1234)) gomega.Expect(OVNKubernetesFeature.EnableMultiNetwork).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableNetworkSegmentation).To(gomega.BeTrue()) + gomega.Expect(OVNKubernetesFeature.EnablePreconfiguredUDNAddresses).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableRouteAdvertisements).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableInterconnect).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableMultiExternalGateway).To(gomega.BeTrue()) @@ -794,6 +798,7 @@ var _ = Describe("Config Operations", func() { gomega.Expect(OVNKubernetesFeature.EgressIPNodeHealthCheckPort).To(gomega.Equal(4321)) gomega.Expect(OVNKubernetesFeature.EnableMultiNetwork).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableNetworkSegmentation).To(gomega.BeTrue()) + gomega.Expect(OVNKubernetesFeature.EnablePreconfiguredUDNAddresses).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableRouteAdvertisements).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableMultiNetworkPolicy).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableInterconnect).To(gomega.BeTrue()) @@ -869,6 +874,7 @@ var _ = Describe("Config Operations", func() { "-enable-multi-network=true", "-enable-multi-networkpolicy=true", "-enable-network-segmentation=true", + "-enable-preconfigured-udn-addresses=true", "-enable-route-advertisements=true", "-enable-interconnect=true", "-enable-multi-external-gateway=true", diff --git a/helm/ovn-kubernetes/charts/ovnkube-control-plane/templates/ovnkube-control-plane.yaml b/helm/ovn-kubernetes/charts/ovnkube-control-plane/templates/ovnkube-control-plane.yaml index 2b6edcaa8e..465a0aa665 100644 --- 
a/helm/ovn-kubernetes/charts/ovnkube-control-plane/templates/ovnkube-control-plane.yaml +++ b/helm/ovn-kubernetes/charts/ovnkube-control-plane/templates/ovnkube-control-plane.yaml @@ -126,6 +126,8 @@ spec: value: {{ hasKey .Values.global "enableMultiNetwork" | ternary .Values.global.enableMultiNetwork false | quote }} - name: OVN_NETWORK_SEGMENTATION_ENABLE value: {{ default "" .Values.global.enableNetworkSegmentation | quote }} + - name: OVN_PRE_CONF_UDN_ADDR_ENABLE + value: {{ default "" .Values.global.enablePreconfiguredUDNAddresses | quote }} - name: OVN_HYBRID_OVERLAY_NET_CIDR value: {{ default "" .Values.global.hybridOverlayNetCidr | quote }} - name: OVN_DISABLE_SNAT_MULTIPLE_GWS diff --git a/helm/ovn-kubernetes/charts/ovnkube-node/templates/ovnkube-node.yaml b/helm/ovn-kubernetes/charts/ovnkube-node/templates/ovnkube-node.yaml index e4b0a0621a..dbf6268f6a 100644 --- a/helm/ovn-kubernetes/charts/ovnkube-node/templates/ovnkube-node.yaml +++ b/helm/ovn-kubernetes/charts/ovnkube-node/templates/ovnkube-node.yaml @@ -229,6 +229,8 @@ spec: value: {{ hasKey .Values.global "enableMultiNetwork" | ternary .Values.global.enableMultiNetwork false | quote }} - name: OVN_NETWORK_SEGMENTATION_ENABLE value: {{ default "" .Values.global.enableNetworkSegmentation | quote }} + - name: OVN_PRE_CONF_UDN_ADDR_ENABLE + value: {{ default "" .Values.global.enablePreconfiguredUDNAddresses | quote }} - name: OVN_ENABLE_INTERCONNECT value: {{ hasKey .Values.global "enableInterconnect" | ternary .Values.global.enableInterconnect false | quote }} - name: OVN_ENABLE_MULTI_EXTERNAL_GATEWAY diff --git a/helm/ovn-kubernetes/charts/ovnkube-single-node-zone/templates/ovnkube-single-node-zone.yaml b/helm/ovn-kubernetes/charts/ovnkube-single-node-zone/templates/ovnkube-single-node-zone.yaml index d60276308b..2cd3913633 100644 --- a/helm/ovn-kubernetes/charts/ovnkube-single-node-zone/templates/ovnkube-single-node-zone.yaml +++ 
b/helm/ovn-kubernetes/charts/ovnkube-single-node-zone/templates/ovnkube-single-node-zone.yaml @@ -414,6 +414,8 @@ spec: value: {{ hasKey .Values.global "enableMultiNetwork" | ternary .Values.global.enableMultiNetwork false | quote }} - name: OVN_NETWORK_SEGMENTATION_ENABLE value: {{ default "" .Values.global.enableNetworkSegmentation | quote }} + - name: OVN_PRE_CONF_UDN_ADDR_ENABLE + value: {{ default "" .Values.global.enablePreconfiguredUDNAddresses | quote }} - name: OVNKUBE_NODE_MGMT_PORT_NETDEV value: {{ default "" .Values.global.nodeMgmtPortNetdev | quote }} - name: OVN_EMPTY_LB_EVENTS diff --git a/helm/ovn-kubernetes/charts/ovnkube-zone-controller/templates/ovnkube-zone-controller.yaml b/helm/ovn-kubernetes/charts/ovnkube-zone-controller/templates/ovnkube-zone-controller.yaml index f692ed0524..3a437db089 100644 --- a/helm/ovn-kubernetes/charts/ovnkube-zone-controller/templates/ovnkube-zone-controller.yaml +++ b/helm/ovn-kubernetes/charts/ovnkube-zone-controller/templates/ovnkube-zone-controller.yaml @@ -313,6 +313,8 @@ spec: value: {{ hasKey .Values.global "enableMultiNetwork" | ternary .Values.global.enableMultiNetwork false | quote }} - name: OVN_NETWORK_SEGMENTATION_ENABLE value: {{ default "" .Values.global.enableNetworkSegmentation | quote }} + - name: OVN_PRE_CONF_UDN_ADDR_ENABLE + value: {{ default "" .Values.global.enablePreconfiguredUDNAddresses | quote }} - name: OVN_HYBRID_OVERLAY_NET_CIDR value: {{ default "" .Values.global.hybridOverlayNetCidr | quote }} - name: OVN_DISABLE_SNAT_MULTIPLE_GWS diff --git a/helm/ovn-kubernetes/values-multi-node-zone.yaml b/helm/ovn-kubernetes/values-multi-node-zone.yaml index 2eef44ecae..8056461256 100644 --- a/helm/ovn-kubernetes/values-multi-node-zone.yaml +++ b/helm/ovn-kubernetes/values-multi-node-zone.yaml @@ -76,6 +76,8 @@ global: enableMultiNetwork: false # -- Configure to use user defined networks (UDN) feature with ovn-kubernetes enableNetworkSegmentation: false + # -- Configure to enable workloads with 
preconfigured network connect to user defined networks (UDN) with ovn-kubernetes + enablePreconfiguredUDNAddresses: false # -- Configure to enable IPsec enableIpsec: false # -- Use SSL transport to NB/SB db and northd diff --git a/helm/ovn-kubernetes/values-single-node-zone.yaml b/helm/ovn-kubernetes/values-single-node-zone.yaml index 9747d45440..516b77220b 100644 --- a/helm/ovn-kubernetes/values-single-node-zone.yaml +++ b/helm/ovn-kubernetes/values-single-node-zone.yaml @@ -76,6 +76,8 @@ global: enableMultiNetwork: false # -- Configure to use user defined networks (UDN) feature with ovn-kubernetes enableNetworkSegmentation: false + # -- Configure to enable workloads with preconfigured network connect to user defined networks (UDN) with ovn-kubernetes + enablePreconfiguredUDNAddresses: false # -- Configure to enable IPsec enableIpsec: false # -- Use SSL transport to NB/SB db and northd From 6902456aec609963b12d8533f72448cbf45beb6a Mon Sep 17 00:00:00 2001 From: Xiaobin Qu Date: Thu, 10 Jul 2025 16:28:18 -0700 Subject: [PATCH 078/181] fix broken TOC links --- docs/features/network-qos-guide.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/features/network-qos-guide.md b/docs/features/network-qos-guide.md index 586368fb32..2e07a7bc31 100644 --- a/docs/features/network-qos-guide.md +++ b/docs/features/network-qos-guide.md @@ -8,7 +8,7 @@ 4. [Create Sample Pods and Verify the Configuration](#4-create-sample-pods-and-verify-the-configuration) 5. [Explain the NetworkQoS Object](#5-explain-the-networkqos-object) -## **1 Overview** +## **1 Overview** Differentiated Services Code Point (DSCP) marking and egress bandwidth metering let you prioritize or police specific traffic flows. 
The new **NetworkQoS** Custom Resource Definition (CRD) in [ovn-kubernetes](https://github.com/ovn-kubernetes/ovn-kubernetes/blob/master/dist/templates/k8s.ovn.org_networkqoses.yaml.j2) makes both features available to Kubernetes users on **all** pod interfaces—primary or secondary—without touching pod manifests. @@ -19,7 +19,7 @@ cd contrib ./kind-helm.sh -nqe -mne ; # --enable-network-qos --enable-multi-network ``` -## **2 Create a Secondary Network** +## **2 Create a Secondary Network** File: nad.yaml @@ -46,7 +46,7 @@ spec: ``` *Why the label?* `NetworkQoS` uses a label selector to find matching NADs. Without at least one label, the selector cannot match. -## **3 Define a NetworkQoS Policy** +## **3 Define a NetworkQoS Policy** File: nqos.yaml @@ -108,7 +108,7 @@ NAME STATUS qos-external NetworkQoS Destinations applied ``` -## **4 Create Sample Pods and Verify the Configuration** +## **4 Create Sample Pods and Verify the Configuration** ### **4.1 Launch Test Pods** @@ -284,7 +284,7 @@ tcpdump: listening on eth0, link-type EN10MB (Ethernet), capture size 262144 byt 10.245.2.3 > 10.245.4.3: ICMP echo reply, id 14, seq 56, length 64 ``` -## **5 Explain the NetworkQoS Object** +## **5 Explain the NetworkQoS Object** Below is an *abbreviated* map of the CRD schema returned by `kubectl explain networkqos --recursive` (v1alpha1). Use this as a quick reference. For the definitive specification, always consult the `kubectl explain` output or the CRD YAML in the ovn-kubernetes repository. From 2edfdaf96632716022cd13db5986ee53fe777b03 Mon Sep 17 00:00:00 2001 From: Jamo Luhrsen Date: Fri, 11 Jul 2025 15:27:59 -0700 Subject: [PATCH 079/181] always() run the diags when a step fails (e.g., e2e testing) the rest of the workflow will not run unless it's tagged with always(). and when something fails is exactly when we want to get some diags. 
move all references to "Runner Diagnostics" to use always() Signed-off-by: Jamo Luhrsen --- .github/workflows/test.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 2172e8d40d..1782c16b65 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -387,6 +387,7 @@ jobs: uses: actions/checkout@v4 - name: Runner Diagnostics + if: always() uses: ./.github/actions/diagnostics - name: ovn upgrade @@ -395,6 +396,7 @@ jobs: make -C test upgrade-ovn - name: Runner Diagnostics + if: always() uses: ./.github/actions/diagnostics - name: Run E2E shard-conformance @@ -402,6 +404,7 @@ jobs: make -C test shard-conformance - name: Runner Diagnostics + if: always() uses: ./.github/actions/diagnostics - name: Export kind logs @@ -634,6 +637,7 @@ jobs: run: make -C test traffic-flow-tests WHAT="setup" - name: Runner Diagnostics + if: always() uses: ./.github/actions/diagnostics - name: Run Tests @@ -687,6 +691,7 @@ jobs: fi - name: Runner Diagnostics + if: always() uses: ./.github/actions/diagnostics - name: Export kind logs @@ -799,6 +804,7 @@ jobs: ./contrib/kind-dual-stack-conversion.sh - name: Runner Diagnostics + if: always() uses: ./.github/actions/diagnostics - name: Run Dual-Stack Tests @@ -806,6 +812,7 @@ jobs: make -C test shard-test WHAT="Networking Granular Checks\|DualStack" - name: Runner Diagnostics + if: always() uses: ./.github/actions/diagnostics - name: Run Dual-Stack Control-Plane Tests @@ -813,6 +820,7 @@ jobs: make -C test control-plane WHAT="DualStack" - name: Runner Diagnostics + if: always() uses: ./.github/actions/diagnostics - name: Export kind logs From 7b1b7dd7073753a5ef6cc0797c25e150f7fd1a3d Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Mon, 14 Jul 2025 13:33:20 +0200 Subject: [PATCH 080/181] [OVN build] Fetch ovs version from the ovn submodule by default. 
You need to clone OVN first for that Signed-off-by: Nadia Pinaeva --- dist/images/Dockerfile.fedora | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/dist/images/Dockerfile.fedora b/dist/images/Dockerfile.fedora index e1789bd1e5..fb3bce7abb 100644 --- a/dist/images/Dockerfile.fedora +++ b/dist/images/Dockerfile.fedora @@ -24,14 +24,28 @@ ENV PYTHONDONTWRITEBYTECODE yes RUN INSTALL_PKGS="git rpm-build dnf-plugins-core" && \ dnf install --best --refresh -y --setopt=tsflags=nodocs $INSTALL_PKGS +# Clone OVN Source Code. +ARG OVN_REPO=https://github.com/ovn-org/ovn.git +ARG OVN_GITREF=main +WORKDIR /root +RUN mkdir ovn && pushd ovn && \ + git init && \ + git remote add origin $OVN_REPO && \ + git fetch origin ${OVN_GITREF} --depth 1 && \ + git reset --hard FETCH_HEAD && \ + popd + # Clone OVS Source Code. ARG OVS_REPO=https://github.com/openvswitch/ovs.git -ARG OVS_GITREF=branch-* +# OVS_GITREF can be set to a specific commit or branch, otherwise the version pinned by OVN will be used. +ARG OVS_GITREF="" WORKDIR /root -RUN mkdir ovs && pushd ovs && \ +RUN OVS_OVN_GITREF=$(cd ovn && git submodule status ovs|cut -c 2-|cut -d ' ' -f 1) && \ + mkdir ovs && pushd ovs && \ git init && \ git remote add origin $OVS_REPO && \ - git fetch $OVS_REPO $(git ls-remote origin "${OVS_GITREF}" | sort -V -k2 | tail -1 | awk '{print $1}') --depth 1 && \ + OVS_GITREF="${OVS_GITREF:-$OVS_OVN_GITREF}" && \ + git fetch $OVS_REPO ${OVS_GITREF} --depth 1 && \ git reset --hard FETCH_HEAD && \ echo "1" && \ find rhel && \ @@ -48,16 +62,6 @@ RUN rm rpm/rpmbuild/RPMS/x86_64/*debug* RUN rm rpm/rpmbuild/RPMS/x86_64/*devel* RUN git log -n 1 -# Clone OVN Source Code. -ARG OVN_REPO=https://github.com/ovn-org/ovn.git -ARG OVN_GITREF=main -WORKDIR /root -RUN mkdir ovn && pushd ovn && \ - git init && \ - git remote add origin $OVN_REPO && \ - git fetch origin ${OVN_GITREF} --depth 1 && \ - git reset --hard FETCH_HEAD && \ - popd # Build OVN rpms. 
WORKDIR /root/ovn/ From 1272c122997432095dd87f821d60c75fcad65425 Mon Sep 17 00:00:00 2001 From: Yun Zhou Date: Thu, 10 Jul 2025 17:13:00 -0700 Subject: [PATCH 081/181] cluster-manager clbs if udn requests PersistentIPs but it is not enabled The log shows 'failed to run ovnkube: failed to start cluster manager: initial sync failed: failed to sync network cluster_udn_test-net: [clustermanager-nad-controller network controller]: failed to ensure network cluster_udn_test-net: failed to start network cluster) cluster_udn_test-net: failed to initialize pod ip allocator: network "cluster_udn_test-net" allows persistent IPs but missing the claims reconciler' Signed-off-by: Yun Zhou --- .../userdefinednetwork/template/net-attach-def-template.go | 3 +++ .../template/net-attach-def-template_test.go | 2 ++ 2 files changed, 5 insertions(+) diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template.go b/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template.go index a06e7085ed..0b3aa61194 100644 --- a/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template.go +++ b/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template.go @@ -166,6 +166,9 @@ func renderCNINetworkConfig(networkName, nadName string, spec SpecGetter) (map[s netConfSpec.VLANID = int(cfg.VLAN.Access.ID) } } + if netConfSpec.AllowPersistentIPs && !config.OVNKubernetesFeature.EnablePersistentIPs { + return nil, fmt.Errorf("allowPersistentIPs is set but persistentIPs is Disabled") + } if err := util.ValidateNetConf(nadName, netConfSpec); err != nil { return nil, err diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template_test.go b/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template_test.go index 68f2e4022a..ab0593e210 100644 --- 
a/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template_test.go +++ b/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template_test.go @@ -301,6 +301,7 @@ var _ = Describe("NetAttachDefTemplate", func() { // must be defined so the primary user defined network can match the ip families of the underlying cluster config.IPv4Mode = true config.IPv6Mode = true + config.OVNKubernetesFeature.EnablePersistentIPs = true nad, err := RenderNetAttachDefManifest(testUdn, testNs) Expect(err).NotTo(HaveOccurred()) Expect(nad.TypeMeta).To(Equal(expectedNAD.TypeMeta)) @@ -436,6 +437,7 @@ var _ = Describe("NetAttachDefTemplate", func() { // must be defined so the primary user defined network can match the ip families of the underlying cluster config.IPv4Mode = true config.IPv6Mode = true + config.OVNKubernetesFeature.EnablePersistentIPs = true nad, err := RenderNetAttachDefManifest(cudn, testNs) Expect(err).NotTo(HaveOccurred()) Expect(nad.TypeMeta).To(Equal(expectedNAD.TypeMeta)) From 465e00a0758512de22f1a68f84d42862b3f60f5e Mon Sep 17 00:00:00 2001 From: Or Shoval Date: Wed, 9 Jul 2025 09:37:03 +0300 Subject: [PATCH 082/181] images: Use Quay instead docker.io Docker.io has rate limits, hence use Quay instead. 
Signed-off-by: Or Shoval --- dist/images/Dockerfile.fedora | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dist/images/Dockerfile.fedora b/dist/images/Dockerfile.fedora index fb3bce7abb..49b8da6872 100644 --- a/dist/images/Dockerfile.fedora +++ b/dist/images/Dockerfile.fedora @@ -14,7 +14,7 @@ ARG OVN_FROM=koji ############################################# # Stage to get OVN and OVS RPMs from source # ############################################# -FROM fedora:41 AS ovnbuilder +FROM quay.io/fedora/fedora:41 AS ovnbuilder USER root @@ -78,7 +78,7 @@ RUN git log -n 1 ######################################## # Stage to download OVN RPMs from koji # ######################################## -FROM fedora:41 AS kojidownloader +FROM quay.io/fedora/fedora:41 AS kojidownloader ARG ovnver=ovn-24.09.2-71.fc41 USER root @@ -99,14 +99,14 @@ RUN if [ "$TARGETPLATFORM" = "linux/amd64" ] || [ -z "$TARGETPLATFORM"] ; then k ###################################### # Stage to copy OVN RPMs from source # ###################################### -FROM fedora:41 AS source +FROM quay.io/fedora/fedora:41 AS source COPY --from=ovnbuilder /root/ovn/rpm/rpmbuild/RPMS/x86_64/*.rpm / COPY --from=ovnbuilder /root/ovs/rpm/rpmbuild/RPMS/x86_64/*.rpm / #################################### # Stage to copy OVN RPMs from koji # #################################### -FROM fedora:41 AS koji +FROM quay.io/fedora/fedora:41 AS koji COPY --from=kojidownloader /*.rpm / From 10f14edb9952e11cecd97d6e57632f0341f8d3a7 Mon Sep 17 00:00:00 2001 From: Geo Turcsanyi Date: Mon, 14 Jul 2025 18:53:30 +0200 Subject: [PATCH 083/181] update jinjanate install to use pipx and mention pipx and skopeo as dependencies Signed-off-by: Geo Turcsanyi --- contrib/kind.sh | 10 +++++----- .../launching-ovn-kubernetes-on-kind.md | 19 ++++++------------- 2 files changed, 11 insertions(+), 18 deletions(-) diff --git a/contrib/kind.sh b/contrib/kind.sh index 7fa446b97c..af1c0f537c 100755 --- a/contrib/kind.sh +++ 
b/contrib/kind.sh @@ -459,8 +459,8 @@ print_params() { install_jinjanator_renderer() { # ensure jinjanator renderer installed - pip install wheel --user - pip freeze | grep jinjanator || pip install jinjanator[yaml] --user + pipx install jinjanator[yaml] + pipx ensurepath --force >/dev/null export PATH=~/.local/bin:$PATH } @@ -499,11 +499,11 @@ check_dependencies() { fi if ! command_exists jinjanate ; then - if ! command_exists pip ; then - echo "Dependency not met: 'jinjanator' not installed and cannot install with 'pip'" + if ! command_exists pipx ; then + echo "Dependency not met: 'jinjanator' not installed and cannot install with 'pipx'" exit 1 fi - echo "'jinjanate' not found, installing with 'pip'" + echo "'jinjanate' not found, installing with 'pipx'" install_jinjanator_renderer fi diff --git a/docs/installation/launching-ovn-kubernetes-on-kind.md b/docs/installation/launching-ovn-kubernetes-on-kind.md index 5c61f3a9cd..1a6ae11a15 100644 --- a/docs/installation/launching-ovn-kubernetes-on-kind.md +++ b/docs/installation/launching-ovn-kubernetes-on-kind.md @@ -14,20 +14,19 @@ KIND (Kubernetes in Docker) deployment of OVN kubernetes is a fast and easy mean sudo firewall-cmd --permanent --add-port=11337/tcp; sudo firewall-cmd --reload ``` - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -- Python and pip +- Python 3 and [pipx](https://pipx.pypa.io/stable/installation/) - jq - openssl - openvswitch - Go 1.23.0 or above - -**NOTE :** In certain operating systems such as CentOS 8.x, pip2 and pip3 binaries are installed instead of pip. In such situations create a softlink for "pip" that points to "pip2". +- For podman users: skopeo For OVN kubernetes KIND deployment, use the `kind.sh` script. 
First Download and build the OVN-Kubernetes repo: -``` -git clone https://github.com/ovn-kubernetes/ovn-kubernetes.git; +```shell +git clone https://github.com/ovn-kubernetes/ovn-kubernetes.git cd ovn-kubernetes ``` The `kind.sh` script builds OVN-Kubernetes into a container image. To verify @@ -54,13 +53,6 @@ $ ./kind.sh $ popd ``` -**NOTE:** If you run into issues with installing jinjanate on Ubuntu due to [PEP-0668](https://peps.python.org/pep-0668/) you can work around via: -``` -sudo apt-get install pipx -pipx install jinjanator[yaml] -pipx ensurepath -``` - ### Run the KIND deployment with podman To verify local changes, the steps are mostly the same as with docker, except the `fedora` make target: @@ -87,8 +79,9 @@ To deploy KIND however, you need to start it as root and then copy root's kube c ``` $ pushd contrib $ sudo ./kind.sh -ep podman +$ mkdir -p ~/.kube $ sudo cp /root/ovn.conf ~/.kube/kind-config -$ sudo chown $(id -u):$(id -g) -R ~/.kube +$ sudo chown $(id -u):$(id -g) ~/.kube/kind-config $ export KUBECONFIG=~/.kube/kind-config $ popd ``` From ab24f2552fb0ef75872049e5ee412ddafd297598 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Thu, 10 Jul 2025 10:35:53 +0200 Subject: [PATCH 084/181] [gateway] cleanup: do some equivalent changes to simplify the code. Move stuff evaluation closer to where it is used. Replace gatewayRouter with gw.gwRouterName (why would you use this indirection?) Rename logicalRouter to gwRouter Join 2 very similar loops for gwLRPIPs and gwLRPJoinIPs (they even had exactly the same comment) Remove deleting GatewayRouter LRP as the GatewayRouter itself is deleted a coupled lines below. 
Signed-off-by: Nadia Pinaeva --- go-controller/pkg/ovn/gateway.go | 151 +++++++++++++------------------ 1 file changed, 65 insertions(+), 86 deletions(-) diff --git a/go-controller/pkg/ovn/gateway.go b/go-controller/pkg/ovn/gateway.go index 54005e4301..cefd275782 100644 --- a/go-controller/pkg/ovn/gateway.go +++ b/go-controller/pkg/ovn/gateway.go @@ -254,27 +254,7 @@ func (gw *GatewayManager) GatewayInit( enableGatewayMTU bool, ) error { - gwLRPIPs := make([]net.IP, 0) - for _, gwLRPJoinIP := range gwLRPJoinIPs { - gwLRPIPs = append(gwLRPIPs, gwLRPJoinIP.IP) - } - if gw.netInfo.TopologyType() == types.Layer2Topology { - // At layer2 GR LRP acts as the layer3 ovn_cluster_router so we need - // to configure here the .1 address, this will work only for IC with - // one node per zone, since ARPs for .1 will not go beyond local switch. - // This is being done to add the ICMP SNATs for .1 podSubnet that OVN GR generates - for _, subnet := range hostSubnets { - gwLRPIPs = append(gwLRPIPs, util.GetNodeGatewayIfAddr(subnet).IP) - } - } - // Create a gateway router. 
- gatewayRouter := gw.gwRouterName - physicalIPs := make([]string, len(l3GatewayConfig.IPAddresses)) - for i, ip := range l3GatewayConfig.IPAddresses { - physicalIPs[i] = ip.IP.String() - } - dynamicNeighRouters := "true" if config.OVNKubernetesFeature.EnableInterconnect { dynamicNeighRouters = "false" @@ -305,6 +285,10 @@ func (gw *GatewayManager) GatewayInit( } logicalRouterOptions["lb_force_snat_ip"] = strings.Join(joinIPDualStack, " ") } + physicalIPs := make([]string, len(l3GatewayConfig.IPAddresses)) + for i, ip := range l3GatewayConfig.IPAddresses { + physicalIPs[i] = ip.IP.String() + } logicalRouterExternalIDs := map[string]string{ "physical_ip": physicalIPs[0], "physical_ips": strings.Join(physicalIPs, ","), @@ -314,27 +298,27 @@ func (gw *GatewayManager) GatewayInit( maps.Copy(logicalRouterExternalIDs, util.GenerateExternalIDsForSwitchOrRouter(gw.netInfo)) } - logicalRouter := nbdb.LogicalRouter{ - Name: gatewayRouter, + gwRouter := nbdb.LogicalRouter{ + Name: gw.gwRouterName, Options: logicalRouterOptions, ExternalIDs: logicalRouterExternalIDs, Copp: &gw.coppUUID, } if gw.clusterLoadBalancerGroupUUID != "" { - logicalRouter.LoadBalancerGroup = []string{gw.clusterLoadBalancerGroupUUID} + gwRouter.LoadBalancerGroup = []string{gw.clusterLoadBalancerGroupUUID} if l3GatewayConfig.NodePortEnable && gw.routerLoadBalancerGroupUUID != "" { // add routerLoadBalancerGroupUUID to the gateway router only if nodePort is enabled - logicalRouter.LoadBalancerGroup = append(logicalRouter.LoadBalancerGroup, gw.routerLoadBalancerGroupUUID) + gwRouter.LoadBalancerGroup = append(gwRouter.LoadBalancerGroup, gw.routerLoadBalancerGroupUUID) } } // If l3gatewayAnnotation.IPAddresses changed, we need to update the perPodSNATs, // so let's save the old value before we update the router for later use var oldExtIPs []net.IP - oldLogicalRouter, err := libovsdbops.GetLogicalRouter(gw.nbClient, &logicalRouter) + oldLogicalRouter, err := libovsdbops.GetLogicalRouter(gw.nbClient, 
&gwRouter) if err != nil && !errors.Is(err, libovsdbclient.ErrNotFound) { - return fmt.Errorf("failed in retrieving %s, error: %v", gatewayRouter, err) + return fmt.Errorf("failed in retrieving %s, error: %v", gw.gwRouterName, err) } if oldLogicalRouter != nil && oldLogicalRouter.ExternalIDs != nil { @@ -352,14 +336,14 @@ func (gw *GatewayManager) GatewayInit( } } - err = libovsdbops.CreateOrUpdateLogicalRouter(gw.nbClient, &logicalRouter, &logicalRouter.Options, - &logicalRouter.ExternalIDs, &logicalRouter.LoadBalancerGroup, &logicalRouter.Copp) + err = libovsdbops.CreateOrUpdateLogicalRouter(gw.nbClient, &gwRouter, &gwRouter.Options, + &gwRouter.ExternalIDs, &gwRouter.LoadBalancerGroup, &gwRouter.Copp) if err != nil { - return fmt.Errorf("failed to create logical router %+v: %v", logicalRouter, err) + return fmt.Errorf("failed to create logical router %+v: %v", gwRouter, err) } - gwSwitchPort := types.JoinSwitchToGWRouterPrefix + gatewayRouter - gwRouterPort := types.GWRouterToJoinSwitchPrefix + gatewayRouter + gwSwitchPort := types.JoinSwitchToGWRouterPrefix + gw.gwRouterName + gwRouterPort := types.GWRouterToJoinSwitchPrefix + gw.gwRouterName // In Layer2 networks there is no join switch and the gw.joinSwitchName points to the cluster switch. 
// Ensure that the ports are named appropriately, this is important for the logical router policies @@ -383,23 +367,23 @@ func (gw *GatewayManager) GatewayInit( types.NetworkExternalID: gw.netInfo.GetNetworkName(), types.TopologyExternalID: gw.netInfo.TopologyType(), } - if gw.netInfo.TopologyType() == types.Layer2Topology { - node, err := gw.watchFactory.GetNode(nodeName) - if err != nil { - return fmt.Errorf("failed to fetch node %s from watch factory %w", node, err) - } - tunnelID, err := util.ParseUDNLayer2NodeGRLRPTunnelIDs(node, gw.netInfo.GetNetworkName()) - if err != nil { - if util.IsAnnotationNotSetError(err) { - // remote node may not have the annotation yet, suppress it - return types.NewSuppressedError(err) - } - // Don't consider this node as cluster-manager has not allocated node id yet. - return fmt.Errorf("failed to fetch tunnelID annotation from the node %s for network %s, err: %w", - nodeName, gw.netInfo.GetNetworkName(), err) + } + if gw.netInfo.TopologyType() == types.Layer2Topology { + node, err := gw.watchFactory.GetNode(nodeName) + if err != nil { + return fmt.Errorf("failed to fetch node %s from watch factory %w", node, err) + } + tunnelID, err := util.ParseUDNLayer2NodeGRLRPTunnelIDs(node, gw.netInfo.GetNetworkName()) + if err != nil { + if util.IsAnnotationNotSetError(err) { + // remote node may not have the annotation yet, suppress it + return types.NewSuppressedError(err) } - logicalSwitchPort.Options["requested-tnl-key"] = strconv.Itoa(tunnelID) + // Don't consider this node as cluster-manager has not allocated node id yet. 
+ return fmt.Errorf("failed to fetch tunnelID annotation from the node %s for network %s, err: %w", + nodeName, gw.netInfo.GetNetworkName(), err) } + logicalSwitchPort.Options["requested-tnl-key"] = strconv.Itoa(tunnelID) } sw := nbdb.LogicalSwitch{Name: gw.joinSwitchName} err = libovsdbops.CreateOrUpdateLogicalSwitchPortsOnSwitch(gw.nbClient, &sw, &logicalSwitchPort) @@ -407,19 +391,23 @@ func (gw *GatewayManager) GatewayInit( return fmt.Errorf("failed to create port %v on logical switch %q: %v", gwSwitchPort, sw.Name, err) } - gwLRPMAC := util.IPAddrToHWAddr(gwLRPIPs[0]) + gwLRPIPs := make([]net.IP, 0) gwLRPNetworks := []string{} for _, gwLRPJoinIP := range gwLRPJoinIPs { + gwLRPIPs = append(gwLRPIPs, gwLRPJoinIP.IP) gwLRPNetworks = append(gwLRPNetworks, gwLRPJoinIP.String()) } if gw.netInfo.TopologyType() == types.Layer2Topology { // At layer2 GR LRP acts as the layer3 ovn_cluster_router so we need // to configure here the .1 address, this will work only for IC with // one node per zone, since ARPs for .1 will not go beyond local switch. 
+ // This is being done to add the ICMP SNATs for .1 podSubnet that OVN GR generates for _, subnet := range hostSubnets { + gwLRPIPs = append(gwLRPIPs, util.GetNodeGatewayIfAddr(subnet).IP) gwLRPNetworks = append(gwLRPNetworks, util.GetNodeGatewayIfAddr(subnet).String()) } } + gwLRPMAC := util.IPAddrToHWAddr(gwLRPIPs[0]) var options map[string]string if enableGatewayMTU { @@ -453,11 +441,11 @@ func (gw *GatewayManager) GatewayInit( } } - err = libovsdbops.CreateOrUpdateLogicalRouterPort(gw.nbClient, &logicalRouter, + err = libovsdbops.CreateOrUpdateLogicalRouterPort(gw.nbClient, &gwRouter, &logicalRouterPort, nil, &logicalRouterPort.MAC, &logicalRouterPort.Networks, &logicalRouterPort.Options) if err != nil { - return fmt.Errorf("failed to create port %+v on router %+v: %v", logicalRouterPort, logicalRouter, err) + return fmt.Errorf("failed to create port %+v on router %+v: %v", logicalRouterPort, gwRouter, err) } if len(drLRPIfAddrs) > 0 { for _, entry := range clusterIPSubnet { @@ -465,7 +453,7 @@ func (gw *GatewayManager) GatewayInit( if err != nil { return fmt.Errorf("failed to add a static route in GR %s with distributed "+ "router as the nexthop: %v", - gatewayRouter, err) + gw.gwRouterName, err) } // TODO There has to be a better way to do this. It seems like the @@ -476,9 +464,9 @@ func (gw *GatewayManager) GatewayInit( // a better way to do it. Adding support for indirection in ModelClients // opModel (being able to operate on thins pointed to from another model) // would be a great way to simplify this. 
- updatedLogicalRouter, err := libovsdbops.GetLogicalRouter(gw.nbClient, &logicalRouter) + updatedGWRouter, err := libovsdbops.GetLogicalRouter(gw.nbClient, &gwRouter) if err != nil { - return fmt.Errorf("unable to retrieve logical router %+v: %v", logicalRouter, err) + return fmt.Errorf("unable to retrieve logical router %+v: %v", gwRouter, err) } lrsr := nbdb.LogicalRouterStaticRoute{ @@ -493,19 +481,19 @@ func (gw *GatewayManager) GatewayInit( } p := func(item *nbdb.LogicalRouterStaticRoute) bool { return item.IPPrefix == lrsr.IPPrefix && libovsdbops.PolicyEqualPredicate(item.Policy, lrsr.Policy) && - util.SliceHasStringItem(updatedLogicalRouter.StaticRoutes, item.UUID) + util.SliceHasStringItem(updatedGWRouter.StaticRoutes, item.UUID) } - err = libovsdbops.CreateOrReplaceLogicalRouterStaticRouteWithPredicate(gw.nbClient, gatewayRouter, &lrsr, p, + err = libovsdbops.CreateOrReplaceLogicalRouterStaticRouteWithPredicate(gw.nbClient, gw.gwRouterName, &lrsr, p, &lrsr.Nexthop) if err != nil { - return fmt.Errorf("failed to add a static route %+v in GR %s with distributed router as the nexthop, err: %v", lrsr, gatewayRouter, err) + return fmt.Errorf("failed to add a static route %+v in GR %s with distributed router as the nexthop, err: %v", lrsr, gw.gwRouterName, err) } } } if err := gw.addExternalSwitch("", l3GatewayConfig.InterfaceID, - gatewayRouter, + gw.gwRouterName, l3GatewayConfig.MACAddress.String(), physNetName(gw.netInfo), l3GatewayConfig.IPAddresses, @@ -516,7 +504,7 @@ func (gw *GatewayManager) GatewayInit( if l3GatewayConfig.EgressGWInterfaceID != "" { if err := gw.addExternalSwitch(types.EgressGWSwitchPrefix, l3GatewayConfig.EgressGWInterfaceID, - gatewayRouter, + gw.gwRouterName, l3GatewayConfig.EgressGWMACAddress.String(), types.PhysicalNetworkExGwName, l3GatewayConfig.EgressGWIPAddresses, @@ -525,16 +513,16 @@ func (gw *GatewayManager) GatewayInit( } } - externalRouterPort := types.GWRouterToExtSwitchPrefix + gatewayRouter + externalRouterPort := 
types.GWRouterToExtSwitchPrefix + gw.gwRouterName nextHops := l3GatewayConfig.NextHops // Remove stale OVN resources with any old masquerade IP - if err := deleteStaleMasqueradeResources(gw.nbClient, gatewayRouter, nodeName, gw.watchFactory); err != nil { + if err := deleteStaleMasqueradeResources(gw.nbClient, gw.gwRouterName, nodeName, gw.watchFactory); err != nil { return fmt.Errorf("failed to remove stale masquerade resources from northbound database: %w", err) } - if err := gateway.CreateDummyGWMacBindings(gw.nbClient, gatewayRouter, gw.netInfo); err != nil { + if err := gateway.CreateDummyGWMacBindings(gw.nbClient, gw.gwRouterName, gw.netInfo); err != nil { return err } @@ -560,10 +548,10 @@ func (gw *GatewayManager) GatewayInit( return item.OutputPort != nil && *item.OutputPort == *lrsr.OutputPort && item.IPPrefix == lrsr.IPPrefix && libovsdbops.PolicyEqualPredicate(item.Policy, lrsr.Policy) } - err = libovsdbops.CreateOrReplaceLogicalRouterStaticRouteWithPredicate(gw.nbClient, gatewayRouter, &lrsr, p, + err = libovsdbops.CreateOrReplaceLogicalRouterStaticRouteWithPredicate(gw.nbClient, gw.gwRouterName, &lrsr, p, &lrsr.Nexthop) if err != nil { - return fmt.Errorf("error creating service static route %+v in GR %s: %v", lrsr, gatewayRouter, err) + return fmt.Errorf("error creating service static route %+v in GR %s: %v", lrsr, gw.gwRouterName, err) } } // Add default gateway routes in GR @@ -590,10 +578,10 @@ func (gw *GatewayManager) GatewayInit( return item.OutputPort != nil && *item.OutputPort == *lrsr.OutputPort && item.IPPrefix == lrsr.IPPrefix && libovsdbops.PolicyEqualPredicate(lrsr.Policy, item.Policy) } - err := libovsdbops.CreateOrReplaceLogicalRouterStaticRouteWithPredicate(gw.nbClient, gatewayRouter, &lrsr, + err := libovsdbops.CreateOrReplaceLogicalRouterStaticRouteWithPredicate(gw.nbClient, gw.gwRouterName, &lrsr, p, &lrsr.Nexthop) if err != nil { - return fmt.Errorf("error creating static route %+v in GR %s: %v", lrsr, gatewayRouter, err) + return 
fmt.Errorf("error creating static route %+v in GR %s: %v", lrsr, gw.gwRouterName, err) } } @@ -718,7 +706,7 @@ func (gw *GatewayManager) GatewayInit( for _, externalIP := range externalIPs { oldExternalIP, err := util.MatchFirstIPFamily(utilnet.IsIPv6(externalIP), oldExtIPs) if err != nil { - return fmt.Errorf("failed to update GW SNAT rule for pods on router %s error: %v", gatewayRouter, err) + return fmt.Errorf("failed to update GW SNAT rule for pods on router %s error: %v", gw.gwRouterName, err) } if externalIP.String() == oldExternalIP.String() { // no external ip change, skip @@ -740,7 +728,7 @@ func (gw *GatewayManager) GatewayInit( joinIP, err := util.MatchFirstIPFamily(utilnet.IsIPv6(parsedLogicalIP), gwLRPIPs) if err != nil { return fmt.Errorf("failed to find valid IP family match for join subnet IP: %s on "+ - "gateway router: %s, provided IPs: %#v", parsedLogicalIP, gatewayRouter, gwLRPIPs) + "gateway router: %s, provided IPs: %#v", parsedLogicalIP, gw.gwRouterName, gwLRPIPs) } if nat.LogicalIP != joinIP.String() { // needs to be updated @@ -754,9 +742,9 @@ func (gw *GatewayManager) GatewayInit( } if len(natsToUpdate) > 0 { - err = libovsdbops.CreateOrUpdateNATs(gw.nbClient, &logicalRouter, natsToUpdate...) + err = libovsdbops.CreateOrUpdateNATs(gw.nbClient, &gwRouter, natsToUpdate...) 
if err != nil { - return fmt.Errorf("failed to update GW SNAT rule for pod on router %s error: %v", gatewayRouter, err) + return fmt.Errorf("failed to update GW SNAT rule for pod on router %s error: %v", gw.gwRouterName, err) } } @@ -773,7 +761,7 @@ func (gw *GatewayManager) GatewayInit( externalIP, err := util.MatchIPFamily(utilnet.IsIPv6(gwLRPIP), externalIPs) if err != nil { return fmt.Errorf("failed to find valid external IP family match for join subnet IP: %s on "+ - "gateway router: %s", gwLRPIP, gatewayRouter) + "gateway router: %s", gwLRPIP, gw.gwRouterName) } joinIPNet, err := util.GetIPNetFullMask(gwLRPIP.String()) if err != nil { @@ -782,9 +770,9 @@ func (gw *GatewayManager) GatewayInit( nat := libovsdbops.BuildSNAT(&externalIP[0], joinIPNet, "", extIDs) joinNATs = append(joinNATs, nat) } - err = libovsdbops.CreateOrUpdateNATs(gw.nbClient, &logicalRouter, joinNATs...) + err = libovsdbops.CreateOrUpdateNATs(gw.nbClient, &gwRouter, joinNATs...) if err != nil { - return fmt.Errorf("failed to create SNAT rule for join subnet on router %s error: %v", gatewayRouter, err) + return fmt.Errorf("failed to create SNAT rule for join subnet on router %s error: %v", gw.gwRouterName, err) } nats := make([]*nbdb.NAT, 0, len(clusterIPSubnet)) @@ -796,15 +784,15 @@ func (gw *GatewayManager) GatewayInit( externalIP, err := util.MatchIPFamily(utilnet.IsIPv6CIDR(entry), externalIPs) if err != nil { return fmt.Errorf("failed to create default SNAT rules for gateway router %s: %v", - gatewayRouter, err) + gw.gwRouterName, err) } nat = libovsdbops.BuildSNATWithMatch(&externalIP[0], entry, "", extIDs, gw.netInfo.GetNetworkScopedClusterSubnetSNATMatch(nodeName)) nats = append(nats, nat) } - err := libovsdbops.CreateOrUpdateNATs(gw.nbClient, &logicalRouter, nats...) + err := libovsdbops.CreateOrUpdateNATs(gw.nbClient, &gwRouter, nats...) 
if err != nil { - return fmt.Errorf("failed to update SNAT rule for pod on router %s error: %v", gatewayRouter, err) + return fmt.Errorf("failed to update SNAT rule for pod on router %s error: %v", gw.gwRouterName, err) } } else { // ensure we do not have any leftover SNAT entries after an upgrade @@ -812,9 +800,9 @@ func (gw *GatewayManager) GatewayInit( nat = libovsdbops.BuildSNATWithMatch(nil, logicalSubnet, "", extIDs, gw.netInfo.GetNetworkScopedClusterSubnetSNATMatch(nodeName)) nats = append(nats, nat) } - err := libovsdbops.DeleteNATs(gw.nbClient, &logicalRouter, nats...) + err := libovsdbops.DeleteNATs(gw.nbClient, &gwRouter, nats...) if err != nil { - return fmt.Errorf("failed to delete GW SNAT rule for pod on router %s error: %v", gatewayRouter, err) + return fmt.Errorf("failed to delete GW SNAT rule for pod on router %s error: %v", gw.gwRouterName, err) } } @@ -1115,16 +1103,6 @@ func (gw *GatewayManager) Cleanup() error { return fmt.Errorf("failed to delete logical switch port %s from switch %s: %w", portName, sw.Name, err) } - // Remove the logical router port on the gateway router that connects to the join switch - logicalRouter := nbdb.LogicalRouter{Name: gw.gwRouterName} - logicalRouterPort := nbdb.LogicalRouterPort{ - Name: gwRouterToJoinSwitchPortName, - } - err = libovsdbops.DeleteLogicalRouterPorts(gw.nbClient, &logicalRouter, &logicalRouterPort) - if err != nil && !errors.Is(err, libovsdbclient.ErrNotFound) { - return fmt.Errorf("failed to delete port %s on router %s: %w", logicalRouterPort.Name, gw.gwRouterName, err) - } - // Remove the static mac bindings of the gateway router err = gateway.DeleteDummyGWMacBindings(gw.nbClient, gw.gwRouterName, gw.netInfo) if err != nil { @@ -1132,6 +1110,7 @@ func (gw *GatewayManager) Cleanup() error { } // Remove the gateway router associated with nodeName + logicalRouter := nbdb.LogicalRouter{Name: gw.gwRouterName} err = libovsdbops.DeleteLogicalRouter(gw.nbClient, &logicalRouter) if err != nil && 
!errors.Is(err, libovsdbclient.ErrNotFound) { return fmt.Errorf("failed to delete gateway router %s: %w", gw.gwRouterName, err) From 6e38032cce5c876dc3a0269ffab9b1d6386fb54b Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Thu, 10 Jul 2025 10:41:49 +0200 Subject: [PATCH 085/181] [gateway] create createGWRouter function from GatewayInit Signed-off-by: Nadia Pinaeva --- go-controller/pkg/ovn/gateway.go | 55 +++++++++++++++++++------------- 1 file changed, 33 insertions(+), 22 deletions(-) diff --git a/go-controller/pkg/ovn/gateway.go b/go-controller/pkg/ovn/gateway.go index cefd275782..266492809c 100644 --- a/go-controller/pkg/ovn/gateway.go +++ b/go-controller/pkg/ovn/gateway.go @@ -242,18 +242,7 @@ func (gw *GatewayManager) cleanupStalePodSNATs(nodeName string, nodeIPs []*net.I return nil } -// GatewayInit creates a gateway router for the local chassis. -// enableGatewayMTU enables options:gateway_mtu for gateway routers. -func (gw *GatewayManager) GatewayInit( - nodeName string, - clusterIPSubnet []*net.IPNet, - hostSubnets []*net.IPNet, - l3GatewayConfig *util.L3GatewayConfig, - gwLRPJoinIPs, drLRPIfAddrs []*net.IPNet, - externalIPs []net.IP, - enableGatewayMTU bool, -) error { - +func (gw *GatewayManager) createGWRouter(l3GatewayConfig *util.L3GatewayConfig, gwLRPJoinIPs []*net.IPNet) (*nbdb.LogicalRouter, error) { // Create a gateway router. dynamicNeighRouters := "true" if config.OVNKubernetesFeature.EnableInterconnect { @@ -313,10 +302,33 @@ func (gw *GatewayManager) GatewayInit( } } + err := libovsdbops.CreateOrUpdateLogicalRouter(gw.nbClient, &gwRouter, &gwRouter.Options, + &gwRouter.ExternalIDs, &gwRouter.LoadBalancerGroup, &gwRouter.Copp) + if err != nil { + return nil, fmt.Errorf("failed to create logical router %+v: %v", gwRouter, err) + } + return &gwRouter, nil +} + +// GatewayInit creates a gateway router for the local chassis. +// enableGatewayMTU enables options:gateway_mtu for gateway routers. 
+func (gw *GatewayManager) GatewayInit( + nodeName string, + clusterIPSubnet []*net.IPNet, + hostSubnets []*net.IPNet, + l3GatewayConfig *util.L3GatewayConfig, + gwLRPJoinIPs, drLRPIfAddrs []*net.IPNet, + externalIPs []net.IP, + enableGatewayMTU bool, +) error { + // If l3gatewayAnnotation.IPAddresses changed, we need to update the perPodSNATs, // so let's save the old value before we update the router for later use var oldExtIPs []net.IP - oldLogicalRouter, err := libovsdbops.GetLogicalRouter(gw.nbClient, &gwRouter) + oldLogicalRouter, err := libovsdbops.GetLogicalRouter(gw.nbClient, + &nbdb.LogicalRouter{ + Name: gw.gwRouterName, + }) if err != nil && !errors.Is(err, libovsdbclient.ErrNotFound) { return fmt.Errorf("failed in retrieving %s, error: %v", gw.gwRouterName, err) } @@ -336,10 +348,9 @@ func (gw *GatewayManager) GatewayInit( } } - err = libovsdbops.CreateOrUpdateLogicalRouter(gw.nbClient, &gwRouter, &gwRouter.Options, - &gwRouter.ExternalIDs, &gwRouter.LoadBalancerGroup, &gwRouter.Copp) + gwRouter, err := gw.createGWRouter(l3GatewayConfig, gwLRPJoinIPs) if err != nil { - return fmt.Errorf("failed to create logical router %+v: %v", gwRouter, err) + return err } gwSwitchPort := types.JoinSwitchToGWRouterPrefix + gw.gwRouterName @@ -441,7 +452,7 @@ func (gw *GatewayManager) GatewayInit( } } - err = libovsdbops.CreateOrUpdateLogicalRouterPort(gw.nbClient, &gwRouter, + err = libovsdbops.CreateOrUpdateLogicalRouterPort(gw.nbClient, gwRouter, &logicalRouterPort, nil, &logicalRouterPort.MAC, &logicalRouterPort.Networks, &logicalRouterPort.Options) if err != nil { @@ -464,7 +475,7 @@ func (gw *GatewayManager) GatewayInit( // a better way to do it. Adding support for indirection in ModelClients // opModel (being able to operate on thins pointed to from another model) // would be a great way to simplify this. 
- updatedGWRouter, err := libovsdbops.GetLogicalRouter(gw.nbClient, &gwRouter) + updatedGWRouter, err := libovsdbops.GetLogicalRouter(gw.nbClient, gwRouter) if err != nil { return fmt.Errorf("unable to retrieve logical router %+v: %v", gwRouter, err) } @@ -742,7 +753,7 @@ func (gw *GatewayManager) GatewayInit( } if len(natsToUpdate) > 0 { - err = libovsdbops.CreateOrUpdateNATs(gw.nbClient, &gwRouter, natsToUpdate...) + err = libovsdbops.CreateOrUpdateNATs(gw.nbClient, gwRouter, natsToUpdate...) if err != nil { return fmt.Errorf("failed to update GW SNAT rule for pod on router %s error: %v", gw.gwRouterName, err) } @@ -770,7 +781,7 @@ func (gw *GatewayManager) GatewayInit( nat := libovsdbops.BuildSNAT(&externalIP[0], joinIPNet, "", extIDs) joinNATs = append(joinNATs, nat) } - err = libovsdbops.CreateOrUpdateNATs(gw.nbClient, &gwRouter, joinNATs...) + err = libovsdbops.CreateOrUpdateNATs(gw.nbClient, gwRouter, joinNATs...) if err != nil { return fmt.Errorf("failed to create SNAT rule for join subnet on router %s error: %v", gw.gwRouterName, err) } @@ -790,7 +801,7 @@ func (gw *GatewayManager) GatewayInit( nat = libovsdbops.BuildSNATWithMatch(&externalIP[0], entry, "", extIDs, gw.netInfo.GetNetworkScopedClusterSubnetSNATMatch(nodeName)) nats = append(nats, nat) } - err := libovsdbops.CreateOrUpdateNATs(gw.nbClient, &gwRouter, nats...) + err := libovsdbops.CreateOrUpdateNATs(gw.nbClient, gwRouter, nats...) if err != nil { return fmt.Errorf("failed to update SNAT rule for pod on router %s error: %v", gw.gwRouterName, err) } @@ -800,7 +811,7 @@ func (gw *GatewayManager) GatewayInit( nat = libovsdbops.BuildSNATWithMatch(nil, logicalSubnet, "", extIDs, gw.netInfo.GetNetworkScopedClusterSubnetSNATMatch(nodeName)) nats = append(nats, nat) } - err := libovsdbops.DeleteNATs(gw.nbClient, &gwRouter, nats...) + err := libovsdbops.DeleteNATs(gw.nbClient, gwRouter, nats...) 
if err != nil { return fmt.Errorf("failed to delete GW SNAT rule for pod on router %s error: %v", gw.gwRouterName, err) } From f54436cdc67cb46b8542dba81270a1ac68d59f7f Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Thu, 10 Jul 2025 11:02:19 +0200 Subject: [PATCH 086/181] [gateway] move gw router and its port creation to functions. Localize joinSwitch-related name fetching in methods Signed-off-by: Nadia Pinaeva --- go-controller/pkg/ovn/gateway.go | 162 ++++++++++++++++++------------- 1 file changed, 92 insertions(+), 70 deletions(-) diff --git a/go-controller/pkg/ovn/gateway.go b/go-controller/pkg/ovn/gateway.go index 266492809c..ae8af7aa9f 100644 --- a/go-controller/pkg/ovn/gateway.go +++ b/go-controller/pkg/ovn/gateway.go @@ -310,67 +310,39 @@ func (gw *GatewayManager) createGWRouter(l3GatewayConfig *util.L3GatewayConfig, return &gwRouter, nil } -// GatewayInit creates a gateway router for the local chassis. -// enableGatewayMTU enables options:gateway_mtu for gateway routers. -func (gw *GatewayManager) GatewayInit( - nodeName string, - clusterIPSubnet []*net.IPNet, - hostSubnets []*net.IPNet, - l3GatewayConfig *util.L3GatewayConfig, - gwLRPJoinIPs, drLRPIfAddrs []*net.IPNet, - externalIPs []net.IP, - enableGatewayMTU bool, -) error { - - // If l3gatewayAnnotation.IPAddresses changed, we need to update the perPodSNATs, - // so let's save the old value before we update the router for later use - var oldExtIPs []net.IP - oldLogicalRouter, err := libovsdbops.GetLogicalRouter(gw.nbClient, - &nbdb.LogicalRouter{ - Name: gw.gwRouterName, - }) - if err != nil && !errors.Is(err, libovsdbclient.ErrNotFound) { - return fmt.Errorf("failed in retrieving %s, error: %v", gw.gwRouterName, err) - } - - if oldLogicalRouter != nil && oldLogicalRouter.ExternalIDs != nil { - if physicalIPs, ok := oldLogicalRouter.ExternalIDs["physical_ips"]; ok { - oldExternalIPs := strings.Split(physicalIPs, ",") - oldExtIPs = make([]net.IP, len(oldExternalIPs)) - for i, oldExternalIP := range 
oldExternalIPs { - cidr := oldExternalIP + util.GetIPFullMaskString(oldExternalIP) - ip, _, err := net.ParseCIDR(cidr) - if err != nil { - return fmt.Errorf("invalid cidr:%s error: %v", cidr, err) - } - oldExtIPs[i] = ip - } - } - } - - gwRouter, err := gw.createGWRouter(l3GatewayConfig, gwLRPJoinIPs) - if err != nil { - return err +func (gw *GatewayManager) getGWRouterPeerPortName() string { + // In Layer2 networks there is no join switch and the gw.joinSwitchName points to the cluster switch. + // Ensure that the ports are named appropriately, this is important for the logical router policies + // created for local node access. + // TODO(kyrtapz): Clean this up for clarity as part of https://github.com/ovn-org/ovn-kubernetes/issues/4689 + if gw.netInfo.TopologyType() == types.Layer2Topology { + return types.SwitchToRouterPrefix + gw.joinSwitchName } - gwSwitchPort := types.JoinSwitchToGWRouterPrefix + gw.gwRouterName - gwRouterPort := types.GWRouterToJoinSwitchPrefix + gw.gwRouterName + return types.JoinSwitchToGWRouterPrefix + gw.gwRouterName +} +func (gw *GatewayManager) getGWRouterPortName() string { // In Layer2 networks there is no join switch and the gw.joinSwitchName points to the cluster switch. // Ensure that the ports are named appropriately, this is important for the logical router policies // created for local node access. 
// TODO(kyrtapz): Clean this up for clarity as part of https://github.com/ovn-org/ovn-kubernetes/issues/4689 if gw.netInfo.TopologyType() == types.Layer2Topology { - gwSwitchPort = types.SwitchToRouterPrefix + gw.joinSwitchName - gwRouterPort = types.RouterToSwitchPrefix + gw.joinSwitchName + return types.RouterToSwitchPrefix + gw.joinSwitchName } + return types.GWRouterToJoinSwitchPrefix + gw.gwRouterName +} + +func (gw *GatewayManager) createGWRouterPeerPort(nodeName string) error { + gwSwitchPort := gw.getGWRouterPeerPortName() + gwRouterPortName := gw.getGWRouterPortName() logicalSwitchPort := nbdb.LogicalSwitchPort{ Name: gwSwitchPort, Type: "router", Addresses: []string{"router"}, Options: map[string]string{ - "router-port": gwRouterPort, + "router-port": gwRouterPortName, }, } if gw.netInfo.IsSecondary() { @@ -397,11 +369,15 @@ func (gw *GatewayManager) GatewayInit( logicalSwitchPort.Options["requested-tnl-key"] = strconv.Itoa(tunnelID) } sw := nbdb.LogicalSwitch{Name: gw.joinSwitchName} - err = libovsdbops.CreateOrUpdateLogicalSwitchPortsOnSwitch(gw.nbClient, &sw, &logicalSwitchPort) + err := libovsdbops.CreateOrUpdateLogicalSwitchPortsOnSwitch(gw.nbClient, &sw, &logicalSwitchPort) if err != nil { return fmt.Errorf("failed to create port %v on logical switch %q: %v", gwSwitchPort, sw.Name, err) } + return err +} +func (gw *GatewayManager) createGWRouterPort(hostSubnets []*net.IPNet, gwLRPJoinIPs []*net.IPNet, + enableGatewayMTU bool, gwRouter *nbdb.LogicalRouter) ([]net.IP, error) { gwLRPIPs := make([]net.IP, 0) gwLRPNetworks := []string{} for _, gwLRPJoinIP := range gwLRPJoinIPs { @@ -426,20 +402,21 @@ func (gw *GatewayManager) GatewayInit( "gateway_mtu": strconv.Itoa(config.Default.MTU), } } - logicalRouterPort := nbdb.LogicalRouterPort{ - Name: gwRouterPort, + + gwRouterPort := nbdb.LogicalRouterPort{ + Name: gw.getGWRouterPortName(), MAC: gwLRPMAC.String(), Networks: gwLRPNetworks, Options: options, } if gw.netInfo.IsSecondary() { - 
logicalRouterPort.ExternalIDs = map[string]string{ + gwRouterPort.ExternalIDs = map[string]string{ types.NetworkExternalID: gw.netInfo.GetNetworkName(), types.TopologyExternalID: gw.netInfo.TopologyType(), } _, isNetIPv6 := gw.netInfo.IPMode() if gw.netInfo.TopologyType() == types.Layer2Topology && isNetIPv6 && config.IPv6Mode { - logicalRouterPort.Ipv6RaConfigs = map[string]string{ + gwRouterPort.Ipv6RaConfigs = map[string]string{ "address_mode": "dhcpv6_stateful", "send_periodic": "true", "max_interval": "900", // 15 minutes @@ -447,17 +424,72 @@ func (gw *GatewayManager) GatewayInit( "router_preference": "LOW", // The static gateway configured by CNI is MEDIUM, so make this SLOW so it has less effect for pods } if gw.netInfo.MTU() > 0 { - logicalRouterPort.Ipv6RaConfigs["mtu"] = fmt.Sprintf("%d", gw.netInfo.MTU()) + gwRouterPort.Ipv6RaConfigs["mtu"] = fmt.Sprintf("%d", gw.netInfo.MTU()) } } } - err = libovsdbops.CreateOrUpdateLogicalRouterPort(gw.nbClient, gwRouter, - &logicalRouterPort, nil, &logicalRouterPort.MAC, &logicalRouterPort.Networks, - &logicalRouterPort.Options) + err := libovsdbops.CreateOrUpdateLogicalRouterPort(gw.nbClient, gwRouter, + &gwRouterPort, nil, &gwRouterPort.MAC, &gwRouterPort.Networks, + &gwRouterPort.Options) if err != nil { - return fmt.Errorf("failed to create port %+v on router %+v: %v", logicalRouterPort, gwRouter, err) + return nil, fmt.Errorf("failed to create port %+v on router %+v: %v", gwRouterPort, gwRouter, err) } + return gwLRPIPs, nil +} + +// GatewayInit creates a gateway router for the local chassis. +// enableGatewayMTU enables options:gateway_mtu for gateway routers. 
+func (gw *GatewayManager) GatewayInit( + nodeName string, + clusterIPSubnet []*net.IPNet, + hostSubnets []*net.IPNet, + l3GatewayConfig *util.L3GatewayConfig, + gwLRPJoinIPs, drLRPIfAddrs []*net.IPNet, + externalIPs []net.IP, + enableGatewayMTU bool, +) error { + + // If l3gatewayAnnotation.IPAddresses changed, we need to update the perPodSNATs, + // so let's save the old value before we update the router for later use + var oldExtIPs []net.IP + oldLogicalRouter, err := libovsdbops.GetLogicalRouter(gw.nbClient, + &nbdb.LogicalRouter{ + Name: gw.gwRouterName, + }) + if err != nil && !errors.Is(err, libovsdbclient.ErrNotFound) { + return fmt.Errorf("failed in retrieving %s, error: %v", gw.gwRouterName, err) + } + + if oldLogicalRouter != nil && oldLogicalRouter.ExternalIDs != nil { + if physicalIPs, ok := oldLogicalRouter.ExternalIDs["physical_ips"]; ok { + oldExternalIPs := strings.Split(physicalIPs, ",") + oldExtIPs = make([]net.IP, len(oldExternalIPs)) + for i, oldExternalIP := range oldExternalIPs { + cidr := oldExternalIP + util.GetIPFullMaskString(oldExternalIP) + ip, _, err := net.ParseCIDR(cidr) + if err != nil { + return fmt.Errorf("invalid cidr:%s error: %v", cidr, err) + } + oldExtIPs[i] = ip + } + } + } + + gwRouter, err := gw.createGWRouter(l3GatewayConfig, gwLRPJoinIPs) + if err != nil { + return err + } + + if err = gw.createGWRouterPeerPort(nodeName); err != nil { + return err + } + + gwLRPIPs, err := gw.createGWRouterPort(hostSubnets, gwLRPJoinIPs, enableGatewayMTU, gwRouter) + if err != nil { + return err + } + if len(drLRPIfAddrs) > 0 { for _, entry := range clusterIPSubnet { drLRPIfAddr, err := util.MatchFirstIPNetFamily(utilnet.IsIPv6CIDR(entry), drLRPIfAddrs) @@ -525,9 +557,6 @@ func (gw *GatewayManager) GatewayInit( } externalRouterPort := types.GWRouterToExtSwitchPrefix + gw.gwRouterName - - nextHops := l3GatewayConfig.NextHops - // Remove stale OVN resources with any old masquerade IP if err := deleteStaleMasqueradeResources(gw.nbClient, 
gw.gwRouterName, nodeName, gw.watchFactory); err != nil { return fmt.Errorf("failed to remove stale masquerade resources from northbound database: %w", err) @@ -565,6 +594,8 @@ func (gw *GatewayManager) GatewayInit( return fmt.Errorf("error creating service static route %+v in GR %s: %v", lrsr, gw.gwRouterName, err) } } + + nextHops := l3GatewayConfig.NextHops // Add default gateway routes in GR for _, nextHop := range nextHops { var allIPs string @@ -1078,17 +1109,8 @@ func (gw *GatewayManager) Cleanup() error { // Get the gateway router port's IP address (connected to join switch) var nextHops []net.IP - gwRouterToJoinSwitchPortName := types.GWRouterToJoinSwitchPrefix + gw.gwRouterName - portName := types.JoinSwitchToGWRouterPrefix + gw.gwRouterName - - // In Layer2 networks there is no join switch and the gw.joinSwitchName points to the cluster switch. - // Ensure that the ports are named appropriately, this is important for the logical router policies - // created for local node access. - // TODO(kyrtapz): Clean this up for clarity as part of https://github.com/ovn-org/ovn-kubernetes/issues/4689 - if gw.netInfo.TopologyType() == types.Layer2Topology { - gwRouterToJoinSwitchPortName = types.RouterToSwitchPrefix + gw.joinSwitchName - portName = types.SwitchToRouterPrefix + gw.joinSwitchName - } + gwRouterToJoinSwitchPortName := gw.getGWRouterPortName() + portName := gw.getGWRouterPeerPortName() gwIPAddrs, err := libovsdbutil.GetLRPAddrs(gw.nbClient, gwRouterToJoinSwitchPortName) if err != nil && !errors.Is(err, libovsdbclient.ErrNotFound) { From f392ef0e6f3e2cc6a556aad690c4b3a657106f14 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Thu, 10 Jul 2025 11:48:57 +0200 Subject: [PATCH 087/181] [gateway] split GatewayInit into more methods. I have moved staticRoutes update for drLRPIfAddrs to after external switch creation, shouldn't break anything. 
Signed-off-by: Nadia Pinaeva --- go-controller/pkg/ovn/gateway.go | 232 +++++++++++++++++++------------ 1 file changed, 140 insertions(+), 92 deletions(-) diff --git a/go-controller/pkg/ovn/gateway.go b/go-controller/pkg/ovn/gateway.go index ae8af7aa9f..28c685398a 100644 --- a/go-controller/pkg/ovn/gateway.go +++ b/go-controller/pkg/ovn/gateway.go @@ -438,58 +438,8 @@ func (gw *GatewayManager) createGWRouterPort(hostSubnets []*net.IPNet, gwLRPJoin return gwLRPIPs, nil } -// GatewayInit creates a gateway router for the local chassis. -// enableGatewayMTU enables options:gateway_mtu for gateway routers. -func (gw *GatewayManager) GatewayInit( - nodeName string, - clusterIPSubnet []*net.IPNet, - hostSubnets []*net.IPNet, - l3GatewayConfig *util.L3GatewayConfig, - gwLRPJoinIPs, drLRPIfAddrs []*net.IPNet, - externalIPs []net.IP, - enableGatewayMTU bool, -) error { - - // If l3gatewayAnnotation.IPAddresses changed, we need to update the perPodSNATs, - // so let's save the old value before we update the router for later use - var oldExtIPs []net.IP - oldLogicalRouter, err := libovsdbops.GetLogicalRouter(gw.nbClient, - &nbdb.LogicalRouter{ - Name: gw.gwRouterName, - }) - if err != nil && !errors.Is(err, libovsdbclient.ErrNotFound) { - return fmt.Errorf("failed in retrieving %s, error: %v", gw.gwRouterName, err) - } - - if oldLogicalRouter != nil && oldLogicalRouter.ExternalIDs != nil { - if physicalIPs, ok := oldLogicalRouter.ExternalIDs["physical_ips"]; ok { - oldExternalIPs := strings.Split(physicalIPs, ",") - oldExtIPs = make([]net.IP, len(oldExternalIPs)) - for i, oldExternalIP := range oldExternalIPs { - cidr := oldExternalIP + util.GetIPFullMaskString(oldExternalIP) - ip, _, err := net.ParseCIDR(cidr) - if err != nil { - return fmt.Errorf("invalid cidr:%s error: %v", cidr, err) - } - oldExtIPs[i] = ip - } - } - } - - gwRouter, err := gw.createGWRouter(l3GatewayConfig, gwLRPJoinIPs) - if err != nil { - return err - } - - if err = gw.createGWRouterPeerPort(nodeName); 
err != nil { - return err - } - - gwLRPIPs, err := gw.createGWRouterPort(hostSubnets, gwLRPJoinIPs, enableGatewayMTU, gwRouter) - if err != nil { - return err - } - +func (gw *GatewayManager) updateGWRouterStaticRoutes(clusterIPSubnet, drLRPIfAddrs []*net.IPNet, + l3GatewayConfig *util.L3GatewayConfig, externalRouterPort string, gwRouter *nbdb.LogicalRouter) error { if len(drLRPIfAddrs) > 0 { for _, entry := range clusterIPSubnet { drLRPIfAddr, err := util.MatchFirstIPNetFamily(utilnet.IsIPv6CIDR(entry), drLRPIfAddrs) @@ -534,38 +484,6 @@ func (gw *GatewayManager) GatewayInit( } } - if err := gw.addExternalSwitch("", - l3GatewayConfig.InterfaceID, - gw.gwRouterName, - l3GatewayConfig.MACAddress.String(), - physNetName(gw.netInfo), - l3GatewayConfig.IPAddresses, - l3GatewayConfig.VLANID); err != nil { - return err - } - - if l3GatewayConfig.EgressGWInterfaceID != "" { - if err := gw.addExternalSwitch(types.EgressGWSwitchPrefix, - l3GatewayConfig.EgressGWInterfaceID, - gw.gwRouterName, - l3GatewayConfig.EgressGWMACAddress.String(), - types.PhysicalNetworkExGwName, - l3GatewayConfig.EgressGWIPAddresses, - nil); err != nil { - return err - } - } - - externalRouterPort := types.GWRouterToExtSwitchPrefix + gw.gwRouterName - // Remove stale OVN resources with any old masquerade IP - if err := deleteStaleMasqueradeResources(gw.nbClient, gw.gwRouterName, nodeName, gw.watchFactory); err != nil { - return fmt.Errorf("failed to remove stale masquerade resources from northbound database: %w", err) - } - - if err := gateway.CreateDummyGWMacBindings(gw.nbClient, gw.gwRouterName, gw.netInfo); err != nil { - return err - } - for _, nextHop := range node.DummyNextHopIPs() { // Add return service route for OVN back to host prefix := config.Gateway.V4MasqueradeSubnet @@ -588,7 +506,7 @@ func (gw *GatewayManager) GatewayInit( return item.OutputPort != nil && *item.OutputPort == *lrsr.OutputPort && item.IPPrefix == lrsr.IPPrefix && libovsdbops.PolicyEqualPredicate(item.Policy, 
lrsr.Policy) } - err = libovsdbops.CreateOrReplaceLogicalRouterStaticRouteWithPredicate(gw.nbClient, gw.gwRouterName, &lrsr, p, + err := libovsdbops.CreateOrReplaceLogicalRouterStaticRouteWithPredicate(gw.nbClient, gw.gwRouterName, &lrsr, p, &lrsr.Nexthop) if err != nil { return fmt.Errorf("error creating service static route %+v in GR %s: %v", lrsr, gw.gwRouterName, err) @@ -627,6 +545,10 @@ func (gw *GatewayManager) GatewayInit( } } + return nil +} + +func (gw *GatewayManager) updateClusterRouterStaticRoutes(hostSubnets []*net.IPNet, gwLRPIPs []net.IP) error { // We need to add a route to the Gateway router's IP, on the // cluster router, to ensure that the return traffic goes back // to the same gateway router @@ -721,17 +643,36 @@ func (gw *GatewayManager) GatewayInit( } } } + return nil +} +// syncNATsForGRIPChange updates the SNAT rules on the gateway router that are created outside the GatewayManager. +// Multiple handlers, like +// - DefaultNetworkController.addLogicalPort +// - DefaultNetworkController.updateNamespace +// - EgressIPController.addExternalGWPodSNATOps +// - EgressIPController.addPodEgressIPAssignment +// - SecondaryLayer2NetworkController.buildUDNEgressSNAT +// - SecondaryLayer3NetworkController.addUDNNodeSubnetEgressSNAT +// use gateway config parameters to create SNAT rules on the gateway router, but some of them (not all) don't watch +// gateway config changes and rely on the GatewayManager to update their SNAT rules. +// Is it racy? Yes! +// This function also updates SNAT created by `updateGWRouterNAT`, because NATs don't use ExternalIDs, +// and their fields are used to find equivalent NATs. That means on gateway IPs change, instead of updating +// the old NAT, we would create a new one. 
FIXME: add externalIDs to NATs +func (gw *GatewayManager) syncNATsForGRIPChange(externalIPs, oldExtIPs, gwLRPIPs []net.IP, + gwRouter, oldGWRouter *nbdb.LogicalRouter) error { // if config.Gateway.DisabledSNATMultipleGWs is not set (by default it is not), // the NAT rules for pods not having annotations to route through either external // gws or pod CNFs will be added within pods.go addLogicalPort var natsToUpdate []*nbdb.NAT // If l3gatewayAnnotation.IPAddresses changed, we need to update the SNATs on the GR oldNATs := []*nbdb.NAT{} - if oldLogicalRouter != nil { - oldNATs, err = libovsdbops.GetRouterNATs(gw.nbClient, oldLogicalRouter) + var err error + if oldGWRouter != nil { + oldNATs, err = libovsdbops.GetRouterNATs(gw.nbClient, oldGWRouter) if err != nil && errors.Is(err, libovsdbclient.ErrNotFound) { - return fmt.Errorf("unable to get NAT entries for router on node %s: %w", nodeName, err) + return fmt.Errorf("unable to get NAT entries for router %s: %w", oldGWRouter.Name, err) } } @@ -789,7 +730,11 @@ func (gw *GatewayManager) GatewayInit( return fmt.Errorf("failed to update GW SNAT rule for pod on router %s error: %v", gw.gwRouterName, err) } } + return nil +} +func (gw *GatewayManager) updateGWRouterNAT(nodeName string, clusterIPSubnet []*net.IPNet, l3GatewayConfig *util.L3GatewayConfig, + externalIPs, gwLRPIPs []net.IP, gwRouter *nbdb.LogicalRouter) error { // REMOVEME(trozet) workaround - create join subnet SNAT to handle ICMP needs frag return var extIDs map[string]string if gw.netInfo.IsSecondary() { @@ -812,7 +757,7 @@ func (gw *GatewayManager) GatewayInit( nat := libovsdbops.BuildSNAT(&externalIP[0], joinIPNet, "", extIDs) joinNATs = append(joinNATs, nat) } - err = libovsdbops.CreateOrUpdateNATs(gw.nbClient, gwRouter, joinNATs...) + err := libovsdbops.CreateOrUpdateNATs(gw.nbClient, gwRouter, joinNATs...) 
if err != nil { return fmt.Errorf("failed to create SNAT rule for join subnet on router %s error: %v", gw.gwRouterName, err) } @@ -832,7 +777,7 @@ func (gw *GatewayManager) GatewayInit( nat = libovsdbops.BuildSNATWithMatch(&externalIP[0], entry, "", extIDs, gw.netInfo.GetNetworkScopedClusterSubnetSNATMatch(nodeName)) nats = append(nats, nat) } - err := libovsdbops.CreateOrUpdateNATs(gw.nbClient, gwRouter, nats...) + err = libovsdbops.CreateOrUpdateNATs(gw.nbClient, gwRouter, nats...) if err != nil { return fmt.Errorf("failed to update SNAT rule for pod on router %s error: %v", gw.gwRouterName, err) } @@ -842,15 +787,118 @@ func (gw *GatewayManager) GatewayInit( nat = libovsdbops.BuildSNATWithMatch(nil, logicalSubnet, "", extIDs, gw.netInfo.GetNetworkScopedClusterSubnetSNATMatch(nodeName)) nats = append(nats, nat) } - err := libovsdbops.DeleteNATs(gw.nbClient, gwRouter, nats...) + err = libovsdbops.DeleteNATs(gw.nbClient, gwRouter, nats...) if err != nil { return fmt.Errorf("failed to delete GW SNAT rule for pod on router %s error: %v", gw.gwRouterName, err) } } - if err := gw.cleanupStalePodSNATs(nodeName, l3GatewayConfig.IPAddresses, gwLRPIPs); err != nil { + if err = gw.cleanupStalePodSNATs(nodeName, l3GatewayConfig.IPAddresses, gwLRPIPs); err != nil { return fmt.Errorf("failed to sync stale SNATs on node %s: %v", nodeName, err) } + return nil +} + +// GatewayInit creates a gateway router for the local chassis. +// enableGatewayMTU enables options:gateway_mtu for gateway routers. 
+func (gw *GatewayManager) GatewayInit( + nodeName string, + clusterIPSubnet []*net.IPNet, + hostSubnets []*net.IPNet, + l3GatewayConfig *util.L3GatewayConfig, + gwLRPJoinIPs, drLRPIfAddrs []*net.IPNet, + externalIPs []net.IP, + enableGatewayMTU bool, +) error { + + // If l3gatewayAnnotation.IPAddresses changed, we need to update the perPodSNATs, + // so let's save the old value before we update the router for later use + var oldExtIPs []net.IP + oldLogicalRouter, err := libovsdbops.GetLogicalRouter(gw.nbClient, + &nbdb.LogicalRouter{ + Name: gw.gwRouterName, + }) + if err != nil && !errors.Is(err, libovsdbclient.ErrNotFound) { + return fmt.Errorf("failed in retrieving %s, error: %v", gw.gwRouterName, err) + } + + if oldLogicalRouter != nil && oldLogicalRouter.ExternalIDs != nil { + if physicalIPs, ok := oldLogicalRouter.ExternalIDs["physical_ips"]; ok { + oldExternalIPs := strings.Split(physicalIPs, ",") + oldExtIPs = make([]net.IP, len(oldExternalIPs)) + for i, oldExternalIP := range oldExternalIPs { + cidr := oldExternalIP + util.GetIPFullMaskString(oldExternalIP) + ip, _, err := net.ParseCIDR(cidr) + if err != nil { + return fmt.Errorf("invalid cidr:%s error: %v", cidr, err) + } + oldExtIPs[i] = ip + } + } + } + + gwRouter, err := gw.createGWRouter(l3GatewayConfig, gwLRPJoinIPs) + if err != nil { + return err + } + + if err = gw.createGWRouterPeerPort(nodeName); err != nil { + return err + } + + gwLRPIPs, err := gw.createGWRouterPort(hostSubnets, gwLRPJoinIPs, enableGatewayMTU, gwRouter) + if err != nil { + return err + } + + if err := gw.addExternalSwitch("", + l3GatewayConfig.InterfaceID, + gw.gwRouterName, + l3GatewayConfig.MACAddress.String(), + physNetName(gw.netInfo), + l3GatewayConfig.IPAddresses, + l3GatewayConfig.VLANID); err != nil { + return err + } + + if l3GatewayConfig.EgressGWInterfaceID != "" { + if err := gw.addExternalSwitch(types.EgressGWSwitchPrefix, + l3GatewayConfig.EgressGWInterfaceID, + gw.gwRouterName, + 
l3GatewayConfig.EgressGWMACAddress.String(), + types.PhysicalNetworkExGwName, + l3GatewayConfig.EgressGWIPAddresses, + nil); err != nil { + return err + } + } + + // Remove stale OVN resources with any old masquerade IP + if err := deleteStaleMasqueradeResources(gw.nbClient, gw.gwRouterName, nodeName, gw.watchFactory); err != nil { + return fmt.Errorf("failed to remove stale masquerade resources from northbound database: %w", err) + } + + if err := gateway.CreateDummyGWMacBindings(gw.nbClient, gw.gwRouterName, gw.netInfo); err != nil { + return err + } + + externalRouterPort := types.GWRouterToExtSwitchPrefix + gw.gwRouterName + if err = gw.updateGWRouterStaticRoutes(clusterIPSubnet, drLRPIfAddrs, l3GatewayConfig, externalRouterPort, + gwRouter); err != nil { + return err + } + + if err = gw.updateClusterRouterStaticRoutes(hostSubnets, gwLRPIPs); err != nil { + return err + } + + if err = gw.syncNATsForGRIPChange(externalIPs, oldExtIPs, gwLRPIPs, gwRouter, oldLogicalRouter); err != nil { + return err + } + + if err = gw.updateGWRouterNAT(nodeName, clusterIPSubnet, l3GatewayConfig, externalIPs, gwLRPIPs, gwRouter); err != nil { + return err + } // recording gateway mode metrics here after gateway setup is done metrics.RecordEgressRoutingViaHost() From 414e29623852c97be3743d29a5022db59947ff7c Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Wed, 2 Jul 2025 12:14:30 +0200 Subject: [PATCH 088/181] [lint] add make lint-fix command to fix lint issues Add exit 1 to fail make command when container_runtime is not present. 
Signed-off-by: Nadia Pinaeva --- go-controller/Makefile | 12 ++++++++++-- go-controller/hack/lint.sh | 10 +++++++--- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/go-controller/Makefile b/go-controller/Makefile index 9b2a59e595..4c86486ce2 100644 --- a/go-controller/Makefile +++ b/go-controller/Makefile @@ -93,9 +93,17 @@ clean: lint: ifeq ($(CONTAINER_RUNNABLE), 0) - @GOPATH=${GOPATH} ./hack/lint.sh $(CONTAINER_RUNTIME) + @GOPATH=${GOPATH} ./hack/lint.sh $(CONTAINER_RUNTIME) || { echo "lint failed! Try running 'make lint-fix'"; exit 1; } else - echo "linter can only be run within a container since it needs a specific golangci-lint version" + echo "linter can only be run within a container since it needs a specific golangci-lint version"; exit 1 +endif + +lint-fix: +ifeq ($(CONTAINER_RUNNABLE), 0) + @GOPATH=${GOPATH} ./hack/lint.sh $(CONTAINER_RUNTIME) fix || { echo "ERROR: lint fix failed! There is a bug that changes file ownership to root \ + when this happens. To fix it, simply run 'chown -R : *' from the repo root."; exit 1; } +else + echo "linter can only be run within a container since it needs a specific golangci-lint version"; exit 1 endif gofmt: diff --git a/go-controller/hack/lint.sh b/go-controller/hack/lint.sh index 5ac32e96dd..57f4695827 100755 --- a/go-controller/hack/lint.sh +++ b/go-controller/hack/lint.sh @@ -1,14 +1,18 @@ #!/usr/bin/env bash - VERSION=v1.60.3 +extra_flags="" if [ "$#" -ne 1 ]; then + if [ "$#" -eq 2 ] && [ "$2" == "fix" ]; then + extra_flags="--fix" + else echo "Expected command line argument - container runtime (docker/podman) got $# arguments: $@" exit 1 + fi fi $1 run --security-opt label=disable --rm \ -v ${HOME}/.cache/golangci-lint:/cache -e GOLANGCI_LINT_CACHE=/cache \ -v $(pwd):/app -w /app -e GO111MODULE=on docker.io/golangci/golangci-lint:${VERSION} \ golangci-lint run --verbose --print-resources-usage \ - --modules-download-mode=vendor --timeout=15m0s && \ - echo "lint OK!" 
+ --modules-download-mode=vendor --timeout=15m0s ${extra_flags} && \ + echo "lint OK!" \ No newline at end of file From 77124b84a8b0507b1a11511a048ba143995bc8d8 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Tue, 15 Jul 2025 11:08:55 +0200 Subject: [PATCH 089/181] [libovsdb/ops] Start adding option constants for ovn. Only added 4 constants for now that are related to gateway config. Signed-off-by: Nadia Pinaeva --- go-controller/pkg/libovsdb/ops/options.go | 18 ++ .../pkg/ovn/base_network_controller.go | 4 +- .../pkg/ovn/base_network_controller_pods.go | 2 +- go-controller/pkg/ovn/egressgw_test.go | 101 +++---- go-controller/pkg/ovn/egressip_test.go | 254 +++++++++--------- go-controller/pkg/ovn/egressip_udn_l3_test.go | 39 +-- .../pkg/ovn/external_gateway_apb_test.go | 117 ++++---- go-controller/pkg/ovn/gateway.go | 8 +- go-controller/pkg/ovn/gateway_test.go | 9 +- go-controller/pkg/ovn/kubevirt_test.go | 8 +- go-controller/pkg/ovn/master_test.go | 6 +- go-controller/pkg/ovn/multihoming_test.go | 23 +- go-controller/pkg/ovn/multipolicy_test.go | 5 +- .../pkg/ovn/network_segmentation_test.go | 3 +- go-controller/pkg/ovn/pods_test.go | 12 +- .../secondary_layer2_network_controller.go | 4 +- ...econdary_layer2_network_controller_test.go | 3 +- ...econdary_layer3_network_controller_test.go | 8 +- .../pkg/ovn/topology/topologyfactory.go | 2 +- .../pkg/ovn/topology/topologyfactory_test.go | 3 +- .../ovn/zone_interconnect/zone_ic_handler.go | 12 +- 21 files changed, 335 insertions(+), 306 deletions(-) create mode 100644 go-controller/pkg/libovsdb/ops/options.go diff --git a/go-controller/pkg/libovsdb/ops/options.go b/go-controller/pkg/libovsdb/ops/options.go new file mode 100644 index 0000000000..d960062e92 --- /dev/null +++ b/go-controller/pkg/libovsdb/ops/options.go @@ -0,0 +1,18 @@ +package ops + +// This is a list of options used for OVN operations. +// Started with adding only some of them, feel free to continue extending this list. 
+// Eventually we expect to have no string options in the code. +const ( + // RequestedTnlKey can be used by LogicalSwitch, LogicalSwitchPort, LogicalRouter and LogicalRouterPort + // for distributed switches/routers + RequestedTnlKey = "requested-tnl-key" + // RequestedChassis can be used by LogicalSwitchPort and LogicalRouterPort. + // It specifies the chassis (by name or hostname) that is allowed to bind this port. + RequestedChassis = "requested-chassis" + // RouterPort can be used by LogicalSwitchPort to specify a connection to a logical router. + RouterPort = "router-port" + // GatewayMTU can be used by LogicalRouterPort to specify the MTU for the gateway port. + // If set, logical flows will be added to router pipeline to check packet length. + GatewayMTU = "gateway_mtu" +) diff --git a/go-controller/pkg/ovn/base_network_controller.go b/go-controller/pkg/ovn/base_network_controller.go index 02c82b172f..aba4b9ab04 100644 --- a/go-controller/pkg/ovn/base_network_controller.go +++ b/go-controller/pkg/ovn/base_network_controller.go @@ -425,7 +425,7 @@ func (bnc *BaseNetworkController) syncNodeClusterRouterPort(node *corev1.Node, h enableGatewayMTU := util.ParseNodeGatewayMTUSupport(node) if enableGatewayMTU { lrpOptions = map[string]string{ - "gateway_mtu": strconv.Itoa(config.Default.MTU), + libovsdbops.GatewayMTU: strconv.Itoa(config.Default.MTU), } } logicalRouterPort := nbdb.LogicalRouterPort{ @@ -560,7 +560,7 @@ func (bnc *BaseNetworkController) createNodeLogicalSwitch(nodeName string, hostS Type: "router", Addresses: []string{"router"}, Options: map[string]string{ - "router-port": types.RouterToSwitchPrefix + switchName, + libovsdbops.RouterPort: types.RouterToSwitchPrefix + switchName, }, } if bnc.IsDefault() { diff --git a/go-controller/pkg/ovn/base_network_controller_pods.go b/go-controller/pkg/ovn/base_network_controller_pods.go index 1147983e79..4d334cf6a3 100644 --- a/go-controller/pkg/ovn/base_network_controller_pods.go +++ 
b/go-controller/pkg/ovn/base_network_controller_pods.go @@ -535,7 +535,7 @@ func (bnc *BaseNetworkController) addLogicalPortToNetwork(pod *corev1.Pod, nadNa // rescheduled. if !config.Kubernetes.DisableRequestedChassis { - lsp.Options["requested-chassis"] = pod.Spec.NodeName + lsp.Options[libovsdbops.RequestedChassis] = pod.Spec.NodeName } // let's calculate if this network controller's role for this pod diff --git a/go-controller/pkg/ovn/egressgw_test.go b/go-controller/pkg/ovn/egressgw_test.go index fff6c32fa3..9696d4192b 100644 --- a/go-controller/pkg/ovn/egressgw_test.go +++ b/go-controller/pkg/ovn/egressgw_test.go @@ -19,6 +19,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" adminpolicybasedrouteapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/controller/apbroute" @@ -133,8 +134,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -169,8 +170,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -273,8 +274,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - 
"iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -309,8 +310,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -417,8 +418,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -463,8 +464,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -895,8 +896,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -966,8 +967,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1076,8 +1077,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - 
"iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1116,8 +1117,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1237,8 +1238,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1277,8 +1278,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1408,8 +1409,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1448,8 +1449,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1589,8 +1590,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: 
map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1629,8 +1630,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1662,8 +1663,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1708,8 +1709,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1742,8 +1743,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1782,8 +1783,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1896,8 +1897,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", 
Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -2030,8 +2031,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -2171,8 +2172,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -2364,8 +2365,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "requested-chassis": "node1", - "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", + "iface-id-ver": "myPod", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -2546,8 +2547,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "requested-chassis": "node1", - "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", + "iface-id-ver": "myPod", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, diff --git a/go-controller/pkg/ovn/egressip_test.go b/go-controller/pkg/ovn/egressip_test.go index b05422bf65..8cbac3665c 100644 --- a/go-controller/pkg/ovn/egressip_test.go +++ b/go-controller/pkg/ovn/egressip_test.go @@ -314,7 +314,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - 
"router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -428,7 +428,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -524,7 +524,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -632,7 +632,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -756,7 +756,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -766,7 +766,7 @@ var _ = 
ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -919,7 +919,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -929,7 +929,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1076,7 +1076,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1086,7 +1086,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + 
node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1096,7 +1096,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node3Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1302,7 +1302,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1312,7 +1312,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1322,7 +1322,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node3Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1482,7 +1482,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default 
network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1492,7 +1492,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1502,7 +1502,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node3Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1712,7 +1712,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1722,7 +1722,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: 
types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1732,7 +1732,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node3Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1894,7 +1894,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1904,7 +1904,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -2055,7 +2055,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -2065,7 +2065,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: 
types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -2269,7 +2269,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -2279,7 +2279,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -2500,7 +2500,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -2510,7 +2510,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + 
node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -2721,7 +2721,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -2731,7 +2731,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -2867,7 +2867,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -2877,7 +2877,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -3008,7 +3008,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + 
node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -3167,7 +3167,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -3177,7 +3177,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -3332,7 +3332,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -3342,7 +3342,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", 
"exclude-lb-vips-from-garp": "true", }, @@ -5690,7 +5690,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -5700,7 +5700,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -5862,7 +5862,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -5872,7 +5872,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -6027,7 +6027,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: 
map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -6037,7 +6037,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -6502,7 +6502,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -6512,7 +6512,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -6623,7 +6623,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -6633,7 
+6633,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -6710,7 +6710,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + nodeName, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + nodeName, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + nodeName, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -6804,7 +6804,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + nodeName, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + nodeName, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + nodeName, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -6915,7 +6915,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -7038,7 +7038,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": 
types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -7074,8 +7074,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" "namespace": egressPod1.Namespace, }, Options: map[string]string{ - "requested-chassis": egressPod1.Spec.NodeName, - "iface-id-ver": egressPod1.Name, + libovsdbops.RequestedChassis: egressPod1.Spec.NodeName, + "iface-id-ver": egressPod1.Name, }, PortSecurity: []string{podAddr}, } @@ -7136,7 +7136,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -7271,7 +7271,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -7391,7 +7391,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -7427,8 +7427,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" 
"namespace": egressPod1.Namespace, }, Options: map[string]string{ - "requested-chassis": egressPod1.Spec.NodeName, - "iface-id-ver": egressPod1.Name, + libovsdbops.RequestedChassis: egressPod1.Spec.NodeName, + "iface-id-ver": egressPod1.Name, }, PortSecurity: []string{podAddr}, } @@ -7611,7 +7611,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -7621,7 +7621,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -7854,8 +7854,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" "namespace": egressPod1.Namespace, }, Options: map[string]string{ - "requested-chassis": egressPod1.Spec.NodeName, - "iface-id-ver": egressPod1.Name, + libovsdbops.RequestedChassis: egressPod1.Spec.NodeName, + "iface-id-ver": egressPod1.Name, }, PortSecurity: []string{podAddr}, } @@ -8233,7 +8233,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node.Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node.Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node.Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8317,7 +8317,7 @@ 
var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node.Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node.Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node.Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8402,7 +8402,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node.Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node.Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node.Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8489,7 +8489,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8499,7 +8499,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8584,7 +8584,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + 
"GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8594,7 +8594,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8688,7 +8688,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8698,7 +8698,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8783,7 +8783,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8793,7 +8793,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster 
default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8868,7 +8868,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8878,7 +8878,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8998,7 +8998,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9008,7 +9008,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: 
types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9143,7 +9143,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9153,7 +9153,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9271,7 +9271,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9355,7 +9355,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9437,7 +9437,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: 
types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9649,7 +9649,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9659,7 +9659,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9743,7 +9743,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9753,7 +9753,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + 
node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9827,7 +9827,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9837,7 +9837,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9921,7 +9921,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9931,7 +9931,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -10023,7 +10023,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + 
node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -10033,7 +10033,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -10118,7 +10118,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -10128,7 +10128,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -10203,7 +10203,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", 
"exclude-lb-vips-from-garp": "true", }, @@ -10213,7 +10213,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -10314,7 +10314,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -10324,7 +10324,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -10421,7 +10421,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -10617,7 +10617,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: 
map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -10910,7 +10910,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11241,7 +11241,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11251,7 +11251,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11361,7 +11361,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ 
-11371,7 +11371,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11493,7 +11493,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11503,7 +11503,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11683,7 +11683,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11693,7 +11693,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": 
types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11831,7 +11831,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11841,7 +11841,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11956,7 +11956,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11966,7 +11966,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -12064,7 +12064,7 @@ var _ = 
ginkgo.Describe("OVN master EgressIP Operations cluster default network" Options: map[string]string{ "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, }, }, &nbdb.LogicalSwitch{ @@ -12195,7 +12195,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -12329,7 +12329,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Options: map[string]string{ "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, }, }, &nbdb.LogicalSwitchPort{ @@ -12337,7 +12337,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -12347,7 +12347,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node3Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, "nat-addresses": "router", 
"exclude-lb-vips-from-garp": "true", }, @@ -12479,7 +12479,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -12489,7 +12489,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -12499,7 +12499,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node3Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -12661,7 +12661,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -12671,7 +12671,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: 
map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -12681,7 +12681,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node3Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -12847,7 +12847,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -12857,7 +12857,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -12867,7 +12867,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node3Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, diff 
--git a/go-controller/pkg/ovn/egressip_udn_l3_test.go b/go-controller/pkg/ovn/egressip_udn_l3_test.go index dc01cc84c4..0d311a96d5 100644 --- a/go-controller/pkg/ovn/egressip_udn_l3_test.go +++ b/go-controller/pkg/ovn/egressip_udn_l3_test.go @@ -22,6 +22,7 @@ import ( ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" egressipv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/controller/udnenabledsvc" @@ -56,6 +57,8 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol eIP1Mark = 50000 eIP2Mark = 50001 secondaryNetworkID = "2" + //tnlKey = zoneinterconnect.BaseTransitSwitchTunnelKey + secondaryNetworkID + tnlKey = "16711685" ) getEgressIPStatusLen := func(egressIPName string) func() int { @@ -1375,7 +1378,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol UUID: "stor-" + networkName1_ + node1Name + "-UUID", Name: "stor-" + networkName1_ + node1Name, Addresses: []string{"router"}, - Options: map[string]string{"router-port": "rtos-" + networkName1_ + node1Name}, + Options: map[string]string{libovsdbops.RouterPort: "rtos-" + networkName1_ + node1Name}, Type: "router", }, &nbdb.ACL{ @@ -1414,11 +1417,11 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol ovntypes.TopologyExternalID: ovntypes.Layer3Topology, ovntypes.NetworkRoleExternalID: ovntypes.NetworkRolePrimary}, OtherConfig: map[string]string{ - "mcast_snoop": "true", - "mcast_querier": "false", - "mcast_flood_unregistered": "true", - "interconn-ts": networkName1_ + ovntypes.TransitSwitch, - "requested-tnl-key": "16711685", + 
"mcast_snoop": "true", + "mcast_querier": "false", + "mcast_flood_unregistered": "true", + "interconn-ts": networkName1_ + ovntypes.TransitSwitch, + libovsdbops.RequestedTnlKey: tnlKey, }, }, getNoReRouteReplyTrafficPolicyForController(netInfo.GetNetworkName(), DefaultNetworkControllerName), @@ -1562,7 +1565,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol UUID: "stor-" + networkName1_ + node1Name + "-UUID", Name: "stor-" + networkName1_ + node1Name, Addresses: []string{"router"}, - Options: map[string]string{"router-port": "rtos-" + networkName1_ + node1Name}, + Options: map[string]string{libovsdbops.RouterPort: "rtos-" + networkName1_ + node1Name}, Type: "router", }, &nbdb.ACL{ @@ -1601,11 +1604,11 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol ovntypes.TopologyExternalID: ovntypes.Layer3Topology, ovntypes.NetworkRoleExternalID: ovntypes.NetworkRolePrimary}, OtherConfig: map[string]string{ - "mcast_snoop": "true", - "mcast_querier": "false", - "mcast_flood_unregistered": "true", - "interconn-ts": networkName1_ + ovntypes.TransitSwitch, - "requested-tnl-key": "16711685", + "mcast_snoop": "true", + "mcast_querier": "false", + "mcast_flood_unregistered": "true", + "interconn-ts": networkName1_ + ovntypes.TransitSwitch, + libovsdbops.RequestedTnlKey: tnlKey, }, }, getNoReRouteReplyTrafficPolicyForController(netInfo.GetNetworkName(), DefaultNetworkControllerName), @@ -2725,7 +2728,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol UUID: "stor-" + networkName1_ + node1Name + "-UUID", Name: "stor-" + networkName1_ + node1Name, Addresses: []string{"router"}, - Options: map[string]string{"router-port": "rtos-" + networkName1_ + node1Name}, + Options: map[string]string{libovsdbops.RouterPort: "rtos-" + networkName1_ + node1Name}, Type: "router", }, &nbdb.ACL{ @@ -2765,11 +2768,11 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol 
ovntypes.NetworkRoleExternalID: ovntypes.NetworkRolePrimary, }, OtherConfig: map[string]string{ - "mcast_snoop": "true", - "mcast_querier": "false", - "mcast_flood_unregistered": "true", - "interconn-ts": networkName1_ + ovntypes.TransitSwitch, - "requested-tnl-key": "16711685", + "mcast_snoop": "true", + "mcast_querier": "false", + "mcast_flood_unregistered": "true", + "interconn-ts": networkName1_ + ovntypes.TransitSwitch, + libovsdbops.RequestedTnlKey: tnlKey, }, }, getNoReRouteReplyTrafficPolicyForController(netInfo.GetNetworkName(), DefaultNetworkControllerName), diff --git a/go-controller/pkg/ovn/external_gateway_apb_test.go b/go-controller/pkg/ovn/external_gateway_apb_test.go index 605066d7f6..b237174ae0 100644 --- a/go-controller/pkg/ovn/external_gateway_apb_test.go +++ b/go-controller/pkg/ovn/external_gateway_apb_test.go @@ -22,6 +22,7 @@ import ( adminpolicybasedrouteapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1" adminpolicybasedrouteclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/controller/apbroute" @@ -176,8 +177,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -212,8 +213,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - 
"requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -322,8 +323,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -358,8 +359,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -461,8 +462,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -497,8 +498,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -604,8 +605,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -650,8 +651,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: 
map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -812,8 +813,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -894,8 +895,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1037,8 +1038,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:49:a1:93:cb fd00:10:244:2::3"}, }, @@ -1165,8 +1166,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1236,8 +1237,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1337,8 +1338,8 @@ var _ = ginkgo.Describe("OVN for APB External Route 
Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1373,8 +1374,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1479,8 +1480,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1515,8 +1516,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1641,8 +1642,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1677,8 +1678,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1795,8 +1796,8 @@ 
var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1831,8 +1832,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1858,8 +1859,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1900,8 +1901,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1990,8 +1991,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -2110,8 +2111,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: 
[]string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -2241,8 +2242,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -2283,8 +2284,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -2339,8 +2340,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -2538,8 +2539,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "requested-chassis": "node1", - "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", + "iface-id-ver": "myPod", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, diff --git a/go-controller/pkg/ovn/gateway.go b/go-controller/pkg/ovn/gateway.go index 28c685398a..7c9a5834d9 100644 --- a/go-controller/pkg/ovn/gateway.go +++ b/go-controller/pkg/ovn/gateway.go @@ -342,7 +342,7 @@ func (gw *GatewayManager) createGWRouterPeerPort(nodeName string) error { Type: "router", Addresses: []string{"router"}, Options: map[string]string{ - "router-port": gwRouterPortName, + libovsdbops.RouterPort: gwRouterPortName, }, } if gw.netInfo.IsSecondary() { @@ -366,7 +366,7 @@ func (gw *GatewayManager) createGWRouterPeerPort(nodeName string) error { return 
fmt.Errorf("failed to fetch tunnelID annotation from the node %s for network %s, err: %w", nodeName, gw.netInfo.GetNetworkName(), err) } - logicalSwitchPort.Options["requested-tnl-key"] = strconv.Itoa(tunnelID) + logicalSwitchPort.Options[libovsdbops.RequestedTnlKey] = strconv.Itoa(tunnelID) } sw := nbdb.LogicalSwitch{Name: gw.joinSwitchName} err := libovsdbops.CreateOrUpdateLogicalSwitchPortsOnSwitch(gw.nbClient, &sw, &logicalSwitchPort) @@ -399,7 +399,7 @@ func (gw *GatewayManager) createGWRouterPort(hostSubnets []*net.IPNet, gwLRPJoin var options map[string]string if enableGatewayMTU { options = map[string]string{ - "gateway_mtu": strconv.Itoa(config.Default.MTU), + libovsdbops.GatewayMTU: strconv.Itoa(config.Default.MTU), } } @@ -974,7 +974,7 @@ func (gw *GatewayManager) addExternalSwitch(prefix, interfaceID, gatewayRouter, Name: externalSwitchPortToRouter, Type: "router", Options: map[string]string{ - "router-port": externalRouterPort, + libovsdbops.RouterPort: externalRouterPort, // This option will program OVN to start sending GARPs for all external IPS // that the logical switch port has been configured to use. 
This is diff --git a/go-controller/pkg/ovn/gateway_test.go b/go-controller/pkg/ovn/gateway_test.go index 57f5fb4be2..a8e611119d 100644 --- a/go-controller/pkg/ovn/gateway_test.go +++ b/go-controller/pkg/ovn/gateway_test.go @@ -14,6 +14,7 @@ import ( utilnet "k8s.io/utils/net" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" @@ -87,7 +88,7 @@ func generateGatewayInitExpectedNB(testData []libovsdbtest.TestData, expectedOVN var options map[string]string if gatewayMTU != "" { options = map[string]string{ - "gateway_mtu": gatewayMTU, + libovsdbops.GatewayMTU: gatewayMTU, } } testData = append(testData, &nbdb.LogicalRouterPort{ @@ -344,7 +345,7 @@ func generateGatewayInitExpectedNB(testData []libovsdbtest.TestData, expectedOVN Type: "router", Addresses: []string{"router"}, Options: map[string]string{ - "router-port": gwRouterPort, + libovsdbops.RouterPort: gwRouterPort, }, }, &nbdb.LogicalSwitchPort{ @@ -352,7 +353,7 @@ func generateGatewayInitExpectedNB(testData []libovsdbtest.TestData, expectedOVN Name: externalSwitchPortToRouter, Type: "router", Options: map[string]string{ - "router-port": externalRouterPort, + libovsdbops.RouterPort: externalRouterPort, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1692,7 +1693,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { &nbdb.LogicalRouterPort{ UUID: types.GWRouterToExtSwitchPrefix + types.GWRouterPrefix + nodeName + "-UUID", Name: types.GWRouterToExtSwitchPrefix + types.GWRouterPrefix + nodeName, - Options: map[string]string{"gateway_mtu": "1400"}, + Options: map[string]string{libovsdbops.GatewayMTU: "1400"}, }, expectedGR, expectedOVNClusterRouter, diff --git 
a/go-controller/pkg/ovn/kubevirt_test.go b/go-controller/pkg/ovn/kubevirt_test.go index b7c80c6399..71293a0763 100644 --- a/go-controller/pkg/ovn/kubevirt_test.go +++ b/go-controller/pkg/ovn/kubevirt_test.go @@ -540,8 +540,8 @@ var _ = Describe("OVN Kubevirt Operations", func() { UUID: ovntypes.SwitchToRouterPrefix + t.nodeName + "-UUID", Type: "router", Options: map[string]string{ - "router-port": logicalRouterPort.Name, - "arp_proxy": kubevirt.ComposeARPProxyLSPOption(), + libovsdbops.RouterPort: logicalRouterPort.Name, + "arp_proxy": kubevirt.ComposeARPProxyLSPOption(), }, } logicalSwitch = &nbdb.LogicalSwitch{ @@ -600,8 +600,8 @@ var _ = Describe("OVN Kubevirt Operations", func() { UUID: ovntypes.SwitchToRouterPrefix + t.migrationTarget.nodeName + "-UUID", Type: "router", Options: map[string]string{ - "router-port": migrationTargetLRP.Name, - "arp_proxy": kubevirt.ComposeARPProxyLSPOption(), + libovsdbops.RouterPort: migrationTargetLRP.Name, + "arp_proxy": kubevirt.ComposeARPProxyLSPOption(), }, } migrationTargetLS = &nbdb.LogicalSwitch{ diff --git a/go-controller/pkg/ovn/master_test.go b/go-controller/pkg/ovn/master_test.go index 5c1c8b2c4b..aef5eb82b6 100644 --- a/go-controller/pkg/ovn/master_test.go +++ b/go-controller/pkg/ovn/master_test.go @@ -336,7 +336,7 @@ func addNodeLogicalFlowsHelper(testData []libovsdbtest.TestData, expectedOVNClus Networks: []string{node.NodeGWIP}, GatewayChassis: []string{chassisName + "-UUID"}, Options: map[string]string{ - "gateway_mtu": "1400", + libovsdbops.GatewayMTU: "1400", }, }) if serviceControllerEnabled { @@ -356,8 +356,8 @@ func addNodeLogicalFlowsHelper(testData []libovsdbtest.TestData, expectedOVNClus UUID: types.SwitchToRouterPrefix + node.Name + "-UUID", Type: "router", Options: map[string]string{ - "router-port": types.RouterToSwitchPrefix + node.Name, - "arp_proxy": kubevirt.ComposeARPProxyLSPOption(), + libovsdbops.RouterPort: types.RouterToSwitchPrefix + node.Name, + "arp_proxy": 
kubevirt.ComposeARPProxyLSPOption(), }, Addresses: []string{"router"}, }) diff --git a/go-controller/pkg/ovn/multihoming_test.go b/go-controller/pkg/ovn/multihoming_test.go index a7b69c3fb9..bfcdcd1a75 100644 --- a/go-controller/pkg/ovn/multihoming_test.go +++ b/go-controller/pkg/ovn/multihoming_test.go @@ -16,6 +16,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" @@ -165,7 +166,7 @@ func (em *secondaryNetworkExpectationMachine) expectedLogicalSwitchesAndPortsWit delete(lsp.Options, "iface-id-ver") } if ocInfo.bnc.isLayer2Interconnect() { - lsp.Options["requested-tnl-key"] = "1" // hardcode this for now. + lsp.Options[libovsdbops.RequestedTnlKey] = "1" // hardcode this for now. 
} data = append(data, lsp) @@ -216,12 +217,12 @@ func (em *secondaryNetworkExpectationMachine) expectedLogicalSwitchesAndPortsWit "k8s.ovn.org/topology": ocInfo.bnc.TopologyType(), "k8s.ovn.org/network": ocInfo.bnc.GetNetworkName(), }, - Options: map[string]string{"router-port": ovntypes.RouterToSwitchPrefix + switchName}, + Options: map[string]string{libovsdbops.RouterPort: ovntypes.RouterToSwitchPrefix + switchName}, Type: "router", } data = append(data, lsp) if util.IsNetworkSegmentationSupportEnabled() && ocInfo.bnc.IsPrimaryNetwork() { - lsp.Options["requested-tnl-key"] = "25" + lsp.Options[libovsdbops.RequestedTnlKey] = "25" } nodeslsps[switchName] = append(nodeslsps[switchName], networkSwitchToGWRouterLSPUUID) @@ -291,11 +292,11 @@ func (em *secondaryNetworkExpectationMachine) expectedLogicalSwitchesAndPortsWit UUID: transitSwitchName + "-UUID", Name: transitSwitchName, OtherConfig: map[string]string{ - "mcast_querier": "false", - "mcast_flood_unregistered": "true", - "interconn-ts": transitSwitchName, - "requested-tnl-key": "16711685", - "mcast_snoop": "true", + "mcast_querier": "false", + "mcast_flood_unregistered": "true", + "interconn-ts": transitSwitchName, + libovsdbops.RequestedTnlKey: "16711685", + "mcast_snoop": "true", }, ExternalIDs: extIDs, }) @@ -332,8 +333,8 @@ func newExpectedSwitchPort(lspUUID string, portName string, podAddr string, pod ovntypes.TopologyExternalID: netInfo.TopologyType(), }, Options: map[string]string{ - "requested-chassis": pod.nodeName, - "iface-id-ver": pod.podName, + libovsdbops.RequestedChassis: pod.nodeName, + "iface-id-ver": pod.podName, }, PortSecurity: []string{podAddr}, } @@ -343,7 +344,7 @@ func newExpectedSwitchToRouterPort(lspUUID string, portName string, pod testPod, lrp := newExpectedSwitchPort(lspUUID, portName, "router", pod, netInfo, nad) lrp.ExternalIDs = nil lrp.Options = map[string]string{ - "router-port": "rtos-isolatednet_test-node", + libovsdbops.RouterPort: "rtos-isolatednet_test-node", } 
lrp.PortSecurity = nil lrp.Type = "router" diff --git a/go-controller/pkg/ovn/multipolicy_test.go b/go-controller/pkg/ovn/multipolicy_test.go index bb132d215a..095b35772f 100644 --- a/go-controller/pkg/ovn/multipolicy_test.go +++ b/go-controller/pkg/ovn/multipolicy_test.go @@ -20,6 +20,7 @@ import ( ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" @@ -150,8 +151,8 @@ func getExpectedDataPodsAndSwitchesForSecondaryNetwork(fakeOvn *FakeOVN, pods [] ovntypes.TopologyExternalID: ocInfo.bnc.TopologyType(), }, Options: map[string]string{ - "requested-chassis": pod.nodeName, - "iface-id-ver": pod.podName, + libovsdbops.RequestedChassis: pod.nodeName, + "iface-id-ver": pod.podName, }, PortSecurity: []string{podAddr}, diff --git a/go-controller/pkg/ovn/network_segmentation_test.go b/go-controller/pkg/ovn/network_segmentation_test.go index f52ad64c2f..cfcc0f7e83 100644 --- a/go-controller/pkg/ovn/network_segmentation_test.go +++ b/go-controller/pkg/ovn/network_segmentation_test.go @@ -10,6 +10,7 @@ import ( corev1 "k8s.io/api/core/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" @@ -84,7 +85,7 @@ var _ = ginkgo.Describe("OVN Pod Operations with network segmentation", func() { }, Options: map[string]string{ // check requested-chassis will be updated to correct t1.nodeName value - "requested-chassis": 
t1.nodeName, + libovsdbops.RequestedChassis: t1.nodeName, // check old value for iface-id-ver will be updated to pod.UID "iface-id-ver": "wrong_value", }, diff --git a/go-controller/pkg/ovn/pods_test.go b/go-controller/pkg/ovn/pods_test.go index 6045183157..cf1caae6e7 100644 --- a/go-controller/pkg/ovn/pods_test.go +++ b/go-controller/pkg/ovn/pods_test.go @@ -470,8 +470,8 @@ func getExpectedDataPodsSwitchesPortGroup(netInfo util.NetInfo, pods []testPod, "namespace": pod.namespace, }, Options: map[string]string{ - "requested-chassis": pod.nodeName, - "iface-id-ver": pod.podName, + libovsdbops.RequestedChassis: pod.nodeName, + "iface-id-ver": pod.podName, }, PortSecurity: []string{podAddr}, } @@ -2018,7 +2018,7 @@ var _ = ginkgo.Describe("OVN Pod Operations", func() { }, Options: map[string]string{ // check requested-chassis will be updated to correct t1.nodeName value - "requested-chassis": t2.nodeName, + libovsdbops.RequestedChassis: t2.nodeName, // check old value for iface-id-ver will be updated to pod.UID "iface-id-ver": "wrong_value", }, @@ -2033,7 +2033,7 @@ var _ = ginkgo.Describe("OVN Pod Operations", func() { "namespace": t2.namespace, }, Options: map[string]string{ - "requested-chassis": t2.nodeName, + libovsdbops.RequestedChassis: t2.nodeName, //"iface-id-ver": is empty to check that it won't be set on update }, PortSecurity: []string{fmt.Sprintf("%s %s", t2.podMAC, t2.podIP)}, @@ -2048,7 +2048,7 @@ var _ = ginkgo.Describe("OVN Pod Operations", func() { }, Options: map[string]string{ // check requested-chassis will be updated to correct t1.nodeName value - "requested-chassis": t3.nodeName, + libovsdbops.RequestedChassis: t3.nodeName, // check old value for iface-id-ver will be updated to pod.UID "iface-id-ver": "wrong_value", }, @@ -2218,7 +2218,7 @@ var _ = ginkgo.Describe("OVN Pod Operations", func() { }, Options: map[string]string{ // check requested-chassis will be updated to correct t1.nodeName value - "requested-chassis": t1.nodeName, + 
libovsdbops.RequestedChassis: t1.nodeName, // check old value for iface-id-ver will be updated to pod.UID "iface-id-ver": "wrong_value", }, diff --git a/go-controller/pkg/ovn/secondary_layer2_network_controller.go b/go-controller/pkg/ovn/secondary_layer2_network_controller.go index fd15ab6684..d5fd185c9a 100644 --- a/go-controller/pkg/ovn/secondary_layer2_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer2_network_controller.go @@ -725,8 +725,8 @@ func (oc *SecondaryLayer2NetworkController) addPortForRemoteNodeGR(node *corev1. node.Name, oc.GetNetworkName(), err) } logicalSwitchPort.Options = map[string]string{ - "requested-tnl-key": strconv.Itoa(tunnelID), - "requested-chassis": node.Name, + libovsdbops.RequestedTnlKey: strconv.Itoa(tunnelID), + libovsdbops.RequestedChassis: node.Name, } sw := nbdb.LogicalSwitch{Name: oc.GetNetworkScopedSwitchName(types.OVNLayer2Switch)} err = libovsdbops.CreateOrUpdateLogicalSwitchPortsOnSwitch(oc.nbClient, &sw, &logicalSwitchPort) diff --git a/go-controller/pkg/ovn/secondary_layer2_network_controller_test.go b/go-controller/pkg/ovn/secondary_layer2_network_controller_test.go index 91fc80bc6e..1079a14198 100644 --- a/go-controller/pkg/ovn/secondary_layer2_network_controller_test.go +++ b/go-controller/pkg/ovn/secondary_layer2_network_controller_test.go @@ -19,6 +19,7 @@ import ( "k8s.io/utils/ptr" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" @@ -605,7 +606,7 @@ func expectedLayer2EgressEntities(netInfo util.NetInfo, gwConfig util.L3GatewayC } func expectedGWToNetworkSwitchRouterPort(name string, netInfo util.NetInfo, networks ...*net.IPNet) *nbdb.LogicalRouterPort { - options := map[string]string{"gateway_mtu": 
fmt.Sprintf("%d", 1400)} + options := map[string]string{libovsdbops.GatewayMTU: fmt.Sprintf("%d", 1400)} lrp := expectedLogicalRouterPort(name, netInfo, options, networks...) if config.IPv6Mode { diff --git a/go-controller/pkg/ovn/secondary_layer3_network_controller_test.go b/go-controller/pkg/ovn/secondary_layer3_network_controller_test.go index 077a6fd822..163d06dfd9 100644 --- a/go-controller/pkg/ovn/secondary_layer3_network_controller_test.go +++ b/go-controller/pkg/ovn/secondary_layer3_network_controller_test.go @@ -762,7 +762,7 @@ func expectedGatewayChassis(nodeName string, netInfo util.NetInfo, gwConfig util func expectedGRToJoinSwitchLRP(gatewayRouterName string, gwRouterLRPIP *net.IPNet, netInfo util.NetInfo) *nbdb.LogicalRouterPort { lrpName := fmt.Sprintf("%s%s", types.GWRouterToJoinSwitchPrefix, gatewayRouterName) - options := map[string]string{"gateway_mtu": fmt.Sprintf("%d", 1400)} + options := map[string]string{libovsdbops.GatewayMTU: fmt.Sprintf("%d", 1400)} return expectedLogicalRouterPort(lrpName, netInfo, options, gwRouterLRPIP) } @@ -836,7 +836,7 @@ func expectedLayer3EgressEntities(netInfo util.NetInfo, gwConfig util.L3GatewayC Networks: []string{"192.168.1.1/24"}, MAC: "0a:58:c0:a8:01:01", GatewayChassis: []string{gatewayChassisUUID}, - Options: map[string]string{"gateway_mtu": "1400"}, + Options: map[string]string{libovsdbops.GatewayMTU: "1400"}, }, expectedGRStaticRoute(staticRouteUUID1, nodeSubnet.String(), lrsrNextHop, &nbdb.LogicalRouterStaticRoutePolicySrcIP, nil, netInfo), expectedGRStaticRoute(staticRouteUUID2, gwRouterJoinIPAddress().IP.String(), gwRouterJoinIPAddress().IP.String(), nil, nil, netInfo), @@ -973,7 +973,7 @@ func externalSwitchRouterPortOptions(gatewayRouterName string) map[string]string return map[string]string{ "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", - "router-port": types.GWRouterToExtSwitchPrefix + gatewayRouterName, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + 
gatewayRouterName, } } @@ -992,7 +992,7 @@ func expectedJoinSwitchAndLSPs(netInfo util.NetInfo, nodeName string) []libovsdb Name: types.JoinSwitchToGWRouterPrefix + gwRouterName, Addresses: []string{"router"}, ExternalIDs: standardNonDefaultNetworkExtIDs(netInfo), - Options: map[string]string{"router-port": types.GWRouterToJoinSwitchPrefix + gwRouterName}, + Options: map[string]string{libovsdbops.RouterPort: types.GWRouterToJoinSwitchPrefix + gwRouterName}, Type: "router", }, } diff --git a/go-controller/pkg/ovn/topology/topologyfactory.go b/go-controller/pkg/ovn/topology/topologyfactory.go index d9a1980cbc..b20743a242 100644 --- a/go-controller/pkg/ovn/topology/topologyfactory.go +++ b/go-controller/pkg/ovn/topology/topologyfactory.go @@ -130,7 +130,7 @@ func (gtf *GatewayTopologyFactory) NewJoinSwitch( Name: drSwitchPort, Type: "router", Options: map[string]string{ - "router-port": drRouterPort, + libovsdbops.RouterPort: drRouterPort, }, Addresses: []string{"router"}, } diff --git a/go-controller/pkg/ovn/topology/topologyfactory_test.go b/go-controller/pkg/ovn/topology/topologyfactory_test.go index af8a036d6f..4d189e030a 100644 --- a/go-controller/pkg/ovn/topology/topologyfactory_test.go +++ b/go-controller/pkg/ovn/topology/topologyfactory_test.go @@ -9,6 +9,7 @@ import ( ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" @@ -237,7 +238,7 @@ func expectedLogicalSwitchPort(portName string) *nbdb.LogicalSwitchPort { Addresses: []string{"router"}, Name: portName, Options: map[string]string{ - "router-port": "rtoj-mydearrouter", + libovsdbops.RouterPort: "rtoj-mydearrouter", }, ParentName: nil, 
PortSecurity: nil, diff --git a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go index 9d088e6659..ab366c7931 100644 --- a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go +++ b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go @@ -360,7 +360,7 @@ func (zic *ZoneInterconnectHandler) AddTransitPortConfig(remote bool, podAnnotat if port.Options == nil { port.Options = map[string]string{} } - port.Options["requested-tnl-key"] = strconv.Itoa(podAnnotation.TunnelID) + port.Options[libovsdbops.RequestedTnlKey] = strconv.Itoa(podAnnotation.TunnelID) if remote { port.Type = lportTypeRemote @@ -375,7 +375,7 @@ func (zic *ZoneInterconnectHandler) addTransitSwitchConfig(sw *nbdb.LogicalSwitc } sw.OtherConfig["interconn-ts"] = sw.Name - sw.OtherConfig["requested-tnl-key"] = strconv.Itoa(BaseTransitSwitchTunnelKey + networkID) + sw.OtherConfig[libovsdbops.RequestedTnlKey] = strconv.Itoa(BaseTransitSwitchTunnelKey + networkID) sw.OtherConfig["mcast_snoop"] = "true" sw.OtherConfig["mcast_querier"] = "false" sw.OtherConfig["mcast_flood_unregistered"] = "true" @@ -420,8 +420,8 @@ func (zic *ZoneInterconnectHandler) createLocalZoneNodeResources(node *corev1.No } lspOptions := map[string]string{ - "router-port": logicalRouterPortName, - "requested-tnl-key": strconv.Itoa(nodeID), + libovsdbops.RouterPort: logicalRouterPortName, + libovsdbops.RequestedTnlKey: strconv.Itoa(nodeID), } // Store the node name in the external_ids column for book keeping @@ -459,8 +459,8 @@ func (zic *ZoneInterconnectHandler) createRemoteZoneNodeResources(node *corev1.N } lspOptions := map[string]string{ - "requested-tnl-key": strconv.Itoa(nodeID), - "requested-chassis": node.Name, + libovsdbops.RequestedTnlKey: strconv.Itoa(nodeID), + libovsdbops.RequestedChassis: node.Name, } // Store the node name in the external_ids column for book keeping externalIDs := map[string]string{ From 
4834e3deebff6b8a32fb94ca2cd95448c48dd4fd Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Mon, 14 Jul 2025 19:10:40 +0200 Subject: [PATCH 090/181] Fix log, reuse existing function in GetNetworkScopedSwitchName. Signed-off-by: Nadia Pinaeva --- go-controller/pkg/ovn/gateway.go | 2 +- go-controller/pkg/util/multi_network.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/go-controller/pkg/ovn/gateway.go b/go-controller/pkg/ovn/gateway.go index 7c9a5834d9..6b69507c3d 100644 --- a/go-controller/pkg/ovn/gateway.go +++ b/go-controller/pkg/ovn/gateway.go @@ -354,7 +354,7 @@ func (gw *GatewayManager) createGWRouterPeerPort(nodeName string) error { if gw.netInfo.TopologyType() == types.Layer2Topology { node, err := gw.watchFactory.GetNode(nodeName) if err != nil { - return fmt.Errorf("failed to fetch node %s from watch factory %w", node, err) + return fmt.Errorf("failed to fetch node %s from watch factory %w", node.Name, err) } tunnelID, err := util.ParseUDNLayer2NodeGRLRPTunnelIDs(node, gw.netInfo.GetNetworkName()) if err != nil { diff --git a/go-controller/pkg/util/multi_network.go b/go-controller/pkg/util/multi_network.go index b4a5bd4b98..fd91edd3be 100644 --- a/go-controller/pkg/util/multi_network.go +++ b/go-controller/pkg/util/multi_network.go @@ -709,7 +709,7 @@ func (nInfo *secondaryNetInfo) GetNetworkScopedGWRouterName(nodeName string) str func (nInfo *secondaryNetInfo) GetNetworkScopedSwitchName(nodeName string) string { // In Layer2Topology there is just one global switch if nInfo.TopologyType() == types.Layer2Topology { - return fmt.Sprintf("%s%s", nInfo.getPrefix(), types.OVNLayer2Switch) + return nInfo.GetNetworkScopedName(types.OVNLayer2Switch) } return nInfo.GetNetworkScopedName(nodeName) } From 1d9b4cf0911a5ffbc8563d60ec2fcba696f66d13 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Tue, 15 Jul 2025 09:32:31 +0200 Subject: [PATCH 091/181] [pod SNAT] reuse code around pod SNAT creation. 
Remove unused "match" parameter from addOrUpdatePodSNATOps. Rename logicalRouter/router to gwRouter. Signed-off-by: Nadia Pinaeva --- go-controller/pkg/ovn/egressgw.go | 19 ++++++++----------- go-controller/pkg/ovn/egressip.go | 8 ++++---- go-controller/pkg/ovn/pods.go | 2 +- .../secondary_layer2_network_controller.go | 12 ++++++------ 4 files changed, 19 insertions(+), 22 deletions(-) diff --git a/go-controller/pkg/ovn/egressgw.go b/go-controller/pkg/ovn/egressgw.go index 1f28955295..2b8e939585 100644 --- a/go-controller/pkg/ovn/egressgw.go +++ b/go-controller/pkg/ovn/egressgw.go @@ -649,15 +649,12 @@ func deletePodSNATOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, gwR // addOrUpdatePodSNAT adds or updates per pod SNAT rules towards the nodeIP that are applied to the GR where the pod resides // used when disableSNATMultipleGWs=true func addOrUpdatePodSNAT(nbClient libovsdbclient.Client, gwRouterName string, extIPs, podIfAddrs []*net.IPNet) error { - nats, err := buildPodSNAT(extIPs, podIfAddrs, "") + ops, err := addOrUpdatePodSNATOps(nbClient, gwRouterName, extIPs, podIfAddrs, nil) if err != nil { return err } - logicalRouter := nbdb.LogicalRouter{ - Name: gwRouterName, - } - if err := libovsdbops.CreateOrUpdateNATs(nbClient, &logicalRouter, nats...); err != nil { - return fmt.Errorf("failed to update SNAT for pods of router %s: %v", logicalRouter.Name, err) + if _, err = libovsdbops.TransactAndCheck(nbClient, ops); err != nil { + return fmt.Errorf("failed to update SNAT for pods of router %s: %v", gwRouterName, err) } return nil } @@ -665,14 +662,14 @@ func addOrUpdatePodSNAT(nbClient libovsdbclient.Client, gwRouterName string, ext // addOrUpdatePodSNATOps returns the operation that adds or updates per pod SNAT rules towards the nodeIP that are // applied to the GR where the pod resides // used when disableSNATMultipleGWs=true -func addOrUpdatePodSNATOps(nbClient libovsdbclient.Client, gwRouterName string, extIPs, podIfAddrs []*net.IPNet, match 
string, ops []ovsdb.Operation) ([]ovsdb.Operation, error) { - router := &nbdb.LogicalRouter{Name: gwRouterName} - nats, err := buildPodSNAT(extIPs, podIfAddrs, match) +func addOrUpdatePodSNATOps(nbClient libovsdbclient.Client, gwRouterName string, extIPs, podIfAddrs []*net.IPNet, ops []ovsdb.Operation) ([]ovsdb.Operation, error) { + gwRouter := &nbdb.LogicalRouter{Name: gwRouterName} + nats, err := buildPodSNAT(extIPs, podIfAddrs, "") if err != nil { return nil, err } - if ops, err = libovsdbops.CreateOrUpdateNATsOps(nbClient, ops, router, nats...); err != nil { - return nil, fmt.Errorf("failed to update SNAT for pods of router: %s, error: %v", gwRouterName, err) + if ops, err = libovsdbops.CreateOrUpdateNATsOps(nbClient, ops, gwRouter, nats...); err != nil { + return nil, fmt.Errorf("failed to create ops to update SNAT for pods of router: %s, error: %v", gwRouterName, err) } return ops, nil } diff --git a/go-controller/pkg/ovn/egressip.go b/go-controller/pkg/ovn/egressip.go index fff34928c6..5f50cefb95 100644 --- a/go-controller/pkg/ovn/egressip.go +++ b/go-controller/pkg/ovn/egressip.go @@ -2594,7 +2594,7 @@ func (e *EgressIPController) addExternalGWPodSNATOps(ni util.NetInfo, ops []ovsd if err != nil { return nil, err } - ops, err = addOrUpdatePodSNATOps(e.nbClient, ni.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, podIPs, "", ops) + ops, err = addOrUpdatePodSNATOps(e.nbClient, ni.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, podIPs, ops) if err != nil { return nil, err } @@ -3639,12 +3639,12 @@ func (e *EgressIPController) createNATRuleOps(ni util.NetInfo, ops []ovsdb.Opera nats = append(nats, nat) } } - router := &nbdb.LogicalRouter{ + gwRouter := &nbdb.LogicalRouter{ Name: ni.GetNetworkScopedGWRouterName(status.Node), } - ops, err = libovsdbops.CreateOrUpdateNATsOps(e.nbClient, ops, router, nats...) + ops, err = libovsdbops.CreateOrUpdateNATsOps(e.nbClient, ops, gwRouter, nats...) 
if err != nil { - return nil, fmt.Errorf("unable to create snat rules, for router: %s, error: %v", router.Name, err) + return nil, fmt.Errorf("unable to create snat rules, for router: %s, error: %v", gwRouter.Name, err) } return ops, nil } diff --git a/go-controller/pkg/ovn/pods.go b/go-controller/pkg/ovn/pods.go index 9f39376d9e..5c3478f3cb 100644 --- a/go-controller/pkg/ovn/pods.go +++ b/go-controller/pkg/ovn/pods.go @@ -315,7 +315,7 @@ func (oc *DefaultNetworkController) addLogicalPort(pod *corev1.Pod) (err error) // namespace annotations to go through external egress router if extIPs, err := getExternalIPsGR(oc.watchFactory, pod.Spec.NodeName); err != nil { return err - } else if ops, err = addOrUpdatePodSNATOps(oc.nbClient, oc.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, podAnnotation.IPs, "", ops); err != nil { + } else if ops, err = addOrUpdatePodSNATOps(oc.nbClient, oc.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, podAnnotation.IPs, ops); err != nil { return err } } diff --git a/go-controller/pkg/ovn/secondary_layer2_network_controller.go b/go-controller/pkg/ovn/secondary_layer2_network_controller.go index d5fd185c9a..d43b853008 100644 --- a/go-controller/pkg/ovn/secondary_layer2_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer2_network_controller.go @@ -757,8 +757,8 @@ func (oc *SecondaryLayer2NetworkController) deleteNodeEvent(node *corev1.Node) e // externalIP = "169.254.0.12"; which is the masqueradeIP for this L2 UDN // so all in all we want to condionally SNAT all packets that are coming from pods hosted on this node, // which are leaving via UDN's mpX interface to the UDN's masqueradeIP. 
-func (oc *SecondaryLayer2NetworkController) addUDNClusterSubnetEgressSNAT(localPodSubnets []*net.IPNet, routerName string) error { - outputPort := types.GWRouterToJoinSwitchPrefix + routerName +func (oc *SecondaryLayer2NetworkController) addUDNClusterSubnetEgressSNAT(localPodSubnets []*net.IPNet, gwRouterName string) error { + outputPort := types.GWRouterToJoinSwitchPrefix + gwRouterName nats, err := oc.buildUDNEgressSNAT(localPodSubnets, outputPort) if err != nil { return err @@ -766,12 +766,12 @@ func (oc *SecondaryLayer2NetworkController) addUDNClusterSubnetEgressSNAT(localP if len(nats) == 0 { return nil // nothing to do } - router := &nbdb.LogicalRouter{ - Name: routerName, + gwRouter := &nbdb.LogicalRouter{ + Name: gwRouterName, } - if err := libovsdbops.CreateOrUpdateNATs(oc.nbClient, router, nats...); err != nil { + if err := libovsdbops.CreateOrUpdateNATs(oc.nbClient, gwRouter, nats...); err != nil { return fmt.Errorf("failed to update SNAT for cluster on router: %q for network %q, error: %w", - routerName, oc.GetNetworkName(), err) + gwRouterName, oc.GetNetworkName(), err) } return nil } From cc8e9c83c0efa05db7f72e57760b06ce2af5bb76 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Tue, 15 Jul 2025 10:50:12 +0200 Subject: [PATCH 092/181] [master] reuse SecondaryL3GatewayConfig for master gateway configuration Move gateway-related info gathering to nodeGatewayConfig function similar to secondary controllers. syncNodeGateway was always called with hostSubnets=nil, remove. Update UT: master_test was always passing hostAddrs to syncDefaultGatewayLogicalNetwork, but the real code only sets them for shared GW mode. Update addNodeLogicalFlowsHelper to account for that, move its call from BeforeEach to the test, as GW mode is not set in BeforeEach. 
Signed-off-by: Nadia Pinaeva --- go-controller/pkg/ovn/hybrid_test.go | 32 ++------ go-controller/pkg/ovn/master.go | 66 ++++++++++------ go-controller/pkg/ovn/master_test.go | 75 ++++++++++++------- go-controller/pkg/ovn/ovn.go | 35 ++++----- .../secondary_layer3_network_controller.go | 13 +--- 5 files changed, 111 insertions(+), 110 deletions(-) diff --git a/go-controller/pkg/ovn/hybrid_test.go b/go-controller/pkg/ovn/hybrid_test.go index fab60e2c3b..4b01354429 100644 --- a/go-controller/pkg/ovn/hybrid_test.go +++ b/go-controller/pkg/ovn/hybrid_test.go @@ -335,10 +335,6 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { updatedNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - hostAddrs, err := util.ParseNodeHostCIDRsDropNetMask(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) f, err = factory.NewMasterWatchFactory(fakeClient.GetMasterClientset()) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -429,7 +425,7 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { }, 2).Should(gomega.HaveKeyWithValue(hotypes.HybridOverlayDRIP, nodeHOIP)) subnet := ovntest.MustParseIPNet(node1.NodeSubnet) - err = clusterController.syncDefaultGatewayLogicalNetwork(updatedNode, l3GatewayConfig, []*net.IPNet{subnet}, hostAddrs.UnsortedList()) + err = clusterController.syncNodeGateway(updatedNode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) var clusterSubnets []*net.IPNet @@ -617,10 +613,6 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { updatedNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(updatedNode) - 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) - hostAddrs, err := util.ParseNodeHostCIDRsDropNetMask(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) f, err = factory.NewMasterWatchFactory(fakeClient.GetMasterClientset()) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -705,7 +697,7 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { setupCOPP := true setupClusterController(clusterController, setupCOPP) - err = clusterController.syncDefaultGatewayLogicalNetwork(updatedNode, l3GatewayConfig, []*net.IPNet{subnet}, hostAddrs.UnsortedList()) + err = clusterController.syncNodeGateway(updatedNode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) //assuming all the pods have finished processing @@ -826,10 +818,6 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { updatedNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - hostAddrs, err := util.ParseNodeHostCIDRsDropNetMask(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) f, err = factory.NewMasterWatchFactory(fakeClient.GetMasterClientset()) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -912,7 +900,7 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { }, 2).Should(gomega.HaveKeyWithValue(hotypes.HybridOverlayDRMAC, nodeHOMAC)) subnet := ovntest.MustParseIPNet(node1.NodeSubnet) - err = clusterController.syncDefaultGatewayLogicalNetwork(updatedNode, l3GatewayConfig, []*net.IPNet{subnet}, hostAddrs.UnsortedList()) + err = clusterController.syncNodeGateway(updatedNode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) var clusterSubnets []*net.IPNet @@ -1124,10 +1112,6 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { updatedNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), 
testNode1.Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - hostAddrs, err := util.ParseNodeHostCIDRsDropNetMask(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) f, err = factory.NewMasterWatchFactory(fakeClient.GetMasterClientset()) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1214,8 +1198,7 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { }, 2).Should(gomega.HaveKeyWithValue(hotypes.HybridOverlayDRMAC, nodeHOMAC)) //ensure hybrid overlay elements have been added - subnet := ovntest.MustParseIPNet(node1.NodeSubnet) - err = clusterController.syncDefaultGatewayLogicalNetwork(updatedNode, l3GatewayConfig, []*net.IPNet{subnet}, hostAddrs.UnsortedList()) + err = clusterController.syncNodeGateway(updatedNode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Eventually(func() ([]*nbdb.LogicalRouterStaticRoute, error) { @@ -1337,10 +1320,6 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { updatedNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - hostAddrs, err := util.ParseNodeHostCIDRsDropNetMask(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) f, err = factory.NewMasterWatchFactory(fakeClient.GetMasterClientset()) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1419,8 +1398,7 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { return updatedNode.Annotations, nil }, 2).Should(gomega.HaveKeyWithValue(hotypes.HybridOverlayDRMAC, nodeHOMAC)) - subnet := ovntest.MustParseIPNet(node1.NodeSubnet) - err = clusterController.syncDefaultGatewayLogicalNetwork(updatedNode, l3GatewayConfig, []*net.IPNet{subnet}, 
hostAddrs.UnsortedList()) + err = clusterController.syncNodeGateway(updatedNode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // switch the node to a ovn node diff --git a/go-controller/pkg/ovn/master.go b/go-controller/pkg/ovn/master.go index f85cdb75c3..13d5283012 100644 --- a/go-controller/pkg/ovn/master.go +++ b/go-controller/pkg/ovn/master.go @@ -33,6 +33,15 @@ const ( OvnNodeAnnotationRetryTimeout = 1 * time.Second ) +type L3GatewayConfig struct { + config *util.L3GatewayConfig + hostSubnets []*net.IPNet + clusterSubnets []*net.IPNet + gwLRPJoinIPs []*net.IPNet + hostAddrs []string + externalIPs []net.IP +} + // SetupMaster creates the central router and load-balancers for the network func (oc *DefaultNetworkController) SetupMaster() error { // Create default Control Plane Protection (COPP) entry for routers @@ -82,17 +91,35 @@ func (oc *DefaultNetworkController) syncNodeManagementPortDefault(node *corev1.N return err } -func (oc *DefaultNetworkController) syncDefaultGatewayLogicalNetwork( - node *corev1.Node, - l3GatewayConfig *util.L3GatewayConfig, - hostSubnets []*net.IPNet, - hostAddrs []string, -) error { +func (oc *DefaultNetworkController) nodeGatewayConfig(node *corev1.Node) (*L3GatewayConfig, error) { + l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(node) + if err != nil { + return nil, err + } + + externalIPs := make([]net.IP, len(l3GatewayConfig.IPAddresses)) + for i, ip := range l3GatewayConfig.IPAddresses { + externalIPs[i] = ip.IP + } + + var hostAddrs []string + if config.Gateway.Mode == config.GatewayModeShared { + hostAddrs, err = util.GetNodeHostAddrs(node) + if err != nil && !util.IsAnnotationNotSetError(err) { + return nil, fmt.Errorf("failed to get host CIDRs for node: %s: %v", node.Name, err) + } + } + var clusterSubnets []*net.IPNet for _, clusterSubnet := range config.Default.ClusterSubnets { clusterSubnets = append(clusterSubnets, clusterSubnet.CIDR) } + hostSubnets, err := util.ParseNodeHostSubnetAnnotation(node, 
oc.GetNetworkName()) + if err != nil { + return nil, err + } + gwLRPIPs, err := util.ParseNodeGatewayRouterJoinAddrs(node, oc.GetNetworkName()) if err != nil { if util.IsAnnotationNotSetError(err) { @@ -101,26 +128,19 @@ func (oc *DefaultNetworkController) syncDefaultGatewayLogicalNetwork( var err1 error gwLRPIPs, err1 = util.ParseNodeGatewayRouterLRPAddrs(node) if err1 != nil { - return fmt.Errorf("failed to get join switch port IP address for node %s: %v/%v", node.Name, err, err1) + return nil, fmt.Errorf("failed to get join switch port IP address for node %s: %v/%v", node.Name, err, err1) } } } - externalIPs := make([]net.IP, len(l3GatewayConfig.IPAddresses)) - for i, ip := range l3GatewayConfig.IPAddresses { - externalIPs[i] = ip.IP - } - - return oc.newGatewayManager(node.Name).syncGatewayLogicalNetwork( - node, - l3GatewayConfig, - hostSubnets, - hostAddrs, - clusterSubnets, - gwLRPIPs, - oc.ovnClusterLRPToJoinIfAddrs, - externalIPs, - ) + return &L3GatewayConfig{ + config: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterSubnets, + gwLRPJoinIPs: gwLRPIPs, + hostAddrs: hostAddrs, + externalIPs: externalIPs, + }, nil } func (oc *DefaultNetworkController) addNode(node *corev1.Node) ([]*net.IPNet, error) { @@ -596,7 +616,7 @@ func (oc *DefaultNetworkController) addUpdateLocalNodeEvent(node *corev1.Node, n } if nSyncs.syncGw { - err := oc.syncNodeGateway(node, nil) + err := oc.syncNodeGateway(node) if err != nil { errs = append(errs, err) oc.gatewaysFailed.Store(node.Name, true) diff --git a/go-controller/pkg/ovn/master_test.go b/go-controller/pkg/ovn/master_test.go index aef5eb82b6..b7b902f740 100644 --- a/go-controller/pkg/ovn/master_test.go +++ b/go-controller/pkg/ovn/master_test.go @@ -383,14 +383,19 @@ func addNodeLogicalFlowsHelper(testData []libovsdbtest.TestData, expectedOVNClus Nexthops: []string{node.NodeMgmtPortIP}, Priority: intPriority, }) - testData = append(testData, &nbdb.LogicalRouterPolicy{ - UUID: 
"policy-based-route-2-UUID", - Action: nbdb.LogicalRouterPolicyActionReroute, - Match: matchStr2, - Nexthops: []string{node.NodeMgmtPortIP}, - Priority: intPriority, - }) - expectedOVNClusterRouter.Policies = append(expectedOVNClusterRouter.Policies, []string{"policy-based-route-1-UUID", "policy-based-route-2-UUID"}...) + expectedOVNClusterRouter.Policies = append(expectedOVNClusterRouter.Policies, "policy-based-route-1-UUID") + + if config.Gateway.Mode == config.GatewayModeShared { + testData = append(testData, &nbdb.LogicalRouterPolicy{ + UUID: "policy-based-route-2-UUID", + Action: nbdb.LogicalRouterPolicyActionReroute, + Match: matchStr2, + Nexthops: []string{node.NodeMgmtPortIP}, + Priority: intPriority, + }) + expectedOVNClusterRouter.Policies = append(expectedOVNClusterRouter.Policies, "policy-based-route-2-UUID") + + } testData = append(testData, expectedClusterPortGroup) testData = append(testData, expectedClusterRouterPortGroup) return testData @@ -1093,9 +1098,6 @@ var _ = ginkgo.Describe("Default network controller operations", func() { }() oc.SCTPSupport = true - - expectedNBDatabaseState = addNodeLogicalFlowsWithServiceController(nil, expectedOVNClusterRouter, expectedNodeSwitch, - expectedClusterRouterPortGroup, expectedClusterPortGroup, &node1, oc.svcTemplateSupport) }) ginkgo.AfterEach(func() { @@ -1113,7 +1115,11 @@ var _ = ginkgo.Describe("Default network controller operations", func() { clusterSubnets := startFakeController(oc, wg) subnet := ovntest.MustParseIPNet(node1.NodeSubnet) - err = oc.syncDefaultGatewayLogicalNetwork(&testNode, l3GatewayConfig, []*net.IPNet{subnet}, []string{node1.NodeIP}) + + // Get node with the latest annotations set + testNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = oc.syncNodeGateway(testNode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) retry.InitRetryObjWithAdd(testNode, testNode.Name, 
oc.retryNodes) gomega.Expect(retry.RetryObjsLen(oc.retryNodes)).To(gomega.Equal(1)) @@ -1129,6 +1135,8 @@ var _ = ginkgo.Describe("Default network controller operations", func() { gomega.Expect(retry.CheckRetryObj(testNode.Name, oc.retryNodes)).To(gomega.BeFalse()) skipSnat := false + expectedNBDatabaseState = addNodeLogicalFlowsWithServiceController(nil, expectedOVNClusterRouter, expectedNodeSwitch, + expectedClusterRouterPortGroup, expectedClusterPortGroup, &node1, oc.svcTemplateSupport) expectedNBDatabaseState = generateGatewayInitExpectedNB(expectedNBDatabaseState, expectedOVNClusterRouter, expectedNodeSwitch, node1.Name, clusterSubnets, []*net.IPNet{subnet}, l3GatewayConfig, []*net.IPNet{classBIPAddress(node1.LrpIP)}, []*net.IPNet{classBIPAddress(node1.DrLrpIP)}, @@ -1171,11 +1179,13 @@ var _ = ginkgo.Describe("Default network controller operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = oc.syncNodeManagementPortDefault(node, node.Name, []*net.IPNet{subnet}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = oc.syncDefaultGatewayLogicalNetwork(node, l3GatewayConfig, []*net.IPNet{subnet}, []string{node1.NodeIP}) + err = oc.syncNodeGateway(node) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Stale route should have been removed") skipSnat := false + expectedNBDatabaseState = addNodeLogicalFlowsWithServiceController(nil, expectedOVNClusterRouter, expectedNodeSwitch, + expectedClusterRouterPortGroup, expectedClusterPortGroup, &node1, oc.svcTemplateSupport) expectedNBDatabaseState = generateGatewayInitExpectedNB(expectedNBDatabaseState, expectedOVNClusterRouter, expectedNodeSwitch, node1.Name, clusterSubnets, []*net.IPNet{subnet}, l3GatewayConfig, []*net.IPNet{classBIPAddress(node1.LrpIP)}, []*net.IPNet{classBIPAddress(node1.DrLrpIP)}, @@ -1202,10 +1212,15 @@ var _ = ginkgo.Describe("Default network controller operations", func() { clusterSubnets := startFakeController(oc, wg) subnet := 
ovntest.MustParseIPNet(node1.NodeSubnet) - err = oc.syncDefaultGatewayLogicalNetwork(&testNode, l3GatewayConfig, []*net.IPNet{subnet}, []string{node1.NodeIP}) + // Get node with the latest annotations set + testNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = oc.syncNodeGateway(testNode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) skipSnat := false + expectedNBDatabaseState = addNodeLogicalFlowsWithServiceController(nil, expectedOVNClusterRouter, expectedNodeSwitch, + expectedClusterRouterPortGroup, expectedClusterPortGroup, &node1, oc.svcTemplateSupport) expectedNBDatabaseState = generateGatewayInitExpectedNB(expectedNBDatabaseState, expectedOVNClusterRouter, expectedNodeSwitch, node1.Name, clusterSubnets, []*net.IPNet{subnet}, l3GatewayConfig, []*net.IPNet{classBIPAddress(node1.LrpIP)}, []*net.IPNet{classBIPAddress(node1.DrLrpIP)}, @@ -1260,7 +1275,10 @@ var _ = ginkgo.Describe("Default network controller operations", func() { // ensure the stale SNAT's are cleaned up gomega.Expect(oc.StartServiceController(wg, false)).To(gomega.Succeed()) subnet := ovntest.MustParseIPNet(node1.NodeSubnet) - err = oc.syncDefaultGatewayLogicalNetwork(&testNode, l3GatewayConfig, []*net.IPNet{subnet}, []string{node1.NodeIP}) + // Get node with the latest annotations set + testNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = oc.syncNodeGateway(testNode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) skipSnat := config.Gateway.DisableSNATMultipleGWs || oc.isPodNetworkAdvertisedAtNode(node1.Name) @@ -1268,6 +1286,8 @@ var _ = ginkgo.Describe("Default network controller operations", func() { for _, clusterSubnet := range config.Default.ClusterSubnets { clusterSubnets = append(clusterSubnets, clusterSubnet.CIDR) } + expectedNBDatabaseState = 
addNodeLogicalFlowsWithServiceController(nil, expectedOVNClusterRouter, expectedNodeSwitch, + expectedClusterRouterPortGroup, expectedClusterPortGroup, &node1, oc.svcTemplateSupport) expectedNBDatabaseState = generateGatewayInitExpectedNB(expectedNBDatabaseState, expectedOVNClusterRouter, expectedNodeSwitch, node1.Name, clusterSubnets, []*net.IPNet{subnet}, l3GatewayConfig, []*net.IPNet{classBIPAddress(node1.LrpIP)}, []*net.IPNet{classBIPAddress(node1.DrLrpIP)}, @@ -1349,6 +1369,8 @@ var _ = ginkgo.Describe("Default network controller operations", func() { skipSnat := false subnet := ovntest.MustParseIPNet(node1.NodeSubnet) + expectedNBDatabaseState = addNodeLogicalFlowsWithServiceController(nil, expectedOVNClusterRouter, expectedNodeSwitch, + expectedClusterRouterPortGroup, expectedClusterPortGroup, &node1, oc.svcTemplateSupport) expectedNBDatabaseState = generateGatewayInitExpectedNB(expectedNBDatabaseState, expectedOVNClusterRouter, expectedNodeSwitch, node1.Name, clusterSubnets, []*net.IPNet{subnet}, l3GatewayConfig, []*net.IPNet{classBIPAddress(node1.LrpIP)}, []*net.IPNet{classBIPAddress(node1.DrLrpIP)}, @@ -1394,6 +1416,8 @@ var _ = ginkgo.Describe("Default network controller operations", func() { skipSnat := false subnet := ovntest.MustParseIPNet(node1.NodeSubnet) + expectedNBDatabaseState = addNodeLogicalFlowsWithServiceController(nil, expectedOVNClusterRouter, expectedNodeSwitch, + expectedClusterRouterPortGroup, expectedClusterPortGroup, &node1, oc.svcTemplateSupport) expectedNBDatabaseState = generateGatewayInitExpectedNB(expectedNBDatabaseState, expectedOVNClusterRouter, expectedNodeSwitch, node1.Name, clusterSubnets, []*net.IPNet{subnet}, l3GatewayConfig, []*net.IPNet{classBIPAddress(node1.LrpIP)}, []*net.IPNet{classBIPAddress(node1.DrLrpIP)}, @@ -1459,13 +1483,10 @@ var _ = ginkgo.Describe("Default network controller operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) startFakeController(oc, wg) - subnet := 
ovntest.MustParseIPNet(node1.NodeSubnet) - nodeHostAddrs := []string{} - for _, nodeHostCIDR := range nodeHostCIDRs.UnsortedList() { - ip, _, _ := net.ParseCIDR(nodeHostCIDR) - nodeHostAddrs = append(nodeHostAddrs, ip.String()) - } - err = oc.syncDefaultGatewayLogicalNetwork(&testNode, l3GatewayConfig, []*net.IPNet{subnet}, nodeHostAddrs) + // Get node with the latest annotations set + testNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = oc.syncNodeGateway(testNode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // inject transient problem, nbdb is down @@ -1558,13 +1579,9 @@ var _ = ginkgo.Describe("Default network controller operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) startFakeController(oc, wg) - subnet := ovntest.MustParseIPNet(node1.NodeSubnet) - nodeHostAddrs := []string{} - for _, nodeHostCIDR := range nodeHostCIDRs.UnsortedList() { - ip, _, _ := net.ParseCIDR(nodeHostCIDR) - nodeHostAddrs = append(nodeHostAddrs, ip.String()) - } - err = oc.syncDefaultGatewayLogicalNetwork(&testNode, l3GatewayConfig, []*net.IPNet{subnet}, nodeHostAddrs) + testNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = oc.syncNodeGateway(testNode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Delete the node's gateway Logical Router Port to force node delete to handle a diff --git a/go-controller/pkg/ovn/ovn.go b/go-controller/pkg/ovn/ovn.go index c6a53ee34e..d035fdd299 100644 --- a/go-controller/pkg/ovn/ovn.go +++ b/go-controller/pkg/ovn/ovn.go @@ -373,32 +373,27 @@ func (oc *DefaultNetworkController) WatchEgressIPPods() error { } // syncNodeGateway ensures a node's gateway router is configured -func (oc *DefaultNetworkController) syncNodeGateway(node *corev1.Node, hostSubnets []*net.IPNet) error { - l3GatewayConfig, err := 
util.ParseNodeL3GatewayAnnotation(node) +func (oc *DefaultNetworkController) syncNodeGateway(node *corev1.Node) error { + gwConfig, err := oc.nodeGatewayConfig(node) if err != nil { - return err + return fmt.Errorf("error getting gateway config for node %s: %v", node.Name, err) } - if hostSubnets == nil { - hostSubnets, err = util.ParseNodeHostSubnetAnnotation(node, ovntypes.DefaultNetworkName) - if err != nil { - return err - } - } - - if l3GatewayConfig.Mode == config.GatewayModeDisabled { + if gwConfig.config.Mode == config.GatewayModeDisabled { if err := oc.newGatewayManager(node.Name).Cleanup(); err != nil { return fmt.Errorf("error cleaning up gateway for node %s: %v", node.Name, err) } - } else if hostSubnets != nil { - var hostAddrs []string - if config.Gateway.Mode == config.GatewayModeShared { - hostAddrs, err = util.GetNodeHostAddrs(node) - if err != nil && !util.IsAnnotationNotSetError(err) { - return fmt.Errorf("failed to get host CIDRs for node: %s: %v", node.Name, err) - } - } - if err := oc.syncDefaultGatewayLogicalNetwork(node, l3GatewayConfig, hostSubnets, hostAddrs); err != nil { + } else { + if err := oc.newGatewayManager(node.Name).syncGatewayLogicalNetwork( + node, + gwConfig.config, + gwConfig.hostSubnets, + gwConfig.hostAddrs, + gwConfig.clusterSubnets, + gwConfig.gwLRPJoinIPs, + oc.ovnClusterLRPToJoinIfAddrs, + gwConfig.externalIPs, + ); err != nil { return fmt.Errorf("error creating gateway for node %s: %v", node.Name, err) } } diff --git a/go-controller/pkg/ovn/secondary_layer3_network_controller.go b/go-controller/pkg/ovn/secondary_layer3_network_controller.go index 15fdb98aa7..ac945b2739 100644 --- a/go-controller/pkg/ovn/secondary_layer3_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer3_network_controller.go @@ -1045,16 +1045,7 @@ func (oc *SecondaryLayer3NetworkController) gatherJoinSwitchIPs() error { return nil } -type SecondaryL3GatewayConfig struct { - config *util.L3GatewayConfig - hostSubnets []*net.IPNet - 
clusterSubnets []*net.IPNet - gwLRPJoinIPs []*net.IPNet - hostAddrs []string - externalIPs []net.IP -} - -func (oc *SecondaryLayer3NetworkController) nodeGatewayConfig(node *corev1.Node) (*SecondaryL3GatewayConfig, error) { +func (oc *SecondaryLayer3NetworkController) nodeGatewayConfig(node *corev1.Node) (*L3GatewayConfig, error) { l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(node) if err != nil { return nil, fmt.Errorf("failed to get node %s network %s L3 gateway config: %v", node.Name, oc.GetNetworkName(), err) @@ -1104,7 +1095,7 @@ func (oc *SecondaryLayer3NetworkController) nodeGatewayConfig(node *corev1.Node) // Overwrite the primary interface ID with the correct, per-network one. l3GatewayConfig.InterfaceID = oc.GetNetworkScopedExtPortName(l3GatewayConfig.BridgeID, node.Name) - return &SecondaryL3GatewayConfig{ + return &L3GatewayConfig{ config: l3GatewayConfig, hostSubnets: hostSubnets, clusterSubnets: clusterSubnets, From 02c2c185086795dee7ab3be7edfec3dd801960a2 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Tue, 15 Jul 2025 10:59:58 +0200 Subject: [PATCH 093/181] [gateway] update syncNodeGateway to reduce nesting Signed-off-by: Nadia Pinaeva --- go-controller/pkg/ovn/gateway.go | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/go-controller/pkg/ovn/gateway.go b/go-controller/pkg/ovn/gateway.go index 6b69507c3d..39f6e36116 100644 --- a/go-controller/pkg/ovn/gateway.go +++ b/go-controller/pkg/ovn/gateway.go @@ -1402,21 +1402,21 @@ func (gw *GatewayManager) syncNodeGateway( if err := gw.Cleanup(); err != nil { return fmt.Errorf("error cleaning up gateway for node %s: %v", node.Name, err) } - } else if hostSubnets != nil { - if err := gw.syncGatewayLogicalNetwork( - node, - l3GatewayConfig, - hostSubnets, - hostAddrs, - clusterSubnets, - grLRPJoinIPs, // the joinIP allocated to this node for this controller's network - joinSwitchIPs, // the .1 of this controller's global joinSubnet - externalIPs, 
- ); err != nil { - return fmt.Errorf("error creating gateway for node %s: %v", node.Name, err) - } + return nil } - return nil + if hostSubnets == nil { + return nil + } + return gw.syncGatewayLogicalNetwork( + node, + l3GatewayConfig, + hostSubnets, + hostAddrs, + clusterSubnets, + grLRPJoinIPs, // the joinIP allocated to this node for this controller's network + joinSwitchIPs, // the .1 of this controller's global joinSubnet + externalIPs, + ) } func physNetName(netInfo util.NetInfo) string { From e427103f5f97a8eb03db8e2675411863f8edb6bf Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Tue, 15 Jul 2025 11:04:59 +0200 Subject: [PATCH 094/181] [gateway] rename public and private interfaces. Squash syncGatewayLogicalNetwork and syncNodeGateway into 1 function SyncGateway. Call SyncGateway from ovn.go since it does config.GatewayModeDisabled check inside. Signed-off-by: Nadia Pinaeva --- go-controller/pkg/ovn/gateway.go | 53 ++++++------------- go-controller/pkg/ovn/gateway_test.go | 32 +++++------ go-controller/pkg/ovn/ovn.go | 28 ++++------ .../secondary_layer2_network_controller.go | 2 +- .../secondary_layer3_network_controller.go | 2 +- 5 files changed, 45 insertions(+), 72 deletions(-) diff --git a/go-controller/pkg/ovn/gateway.go b/go-controller/pkg/ovn/gateway.go index 39f6e36116..8d084476a1 100644 --- a/go-controller/pkg/ovn/gateway.go +++ b/go-controller/pkg/ovn/gateway.go @@ -799,9 +799,9 @@ func (gw *GatewayManager) updateGWRouterNAT(nodeName string, clusterIPSubnet []* return nil } -// GatewayInit creates a gateway router for the local chassis. +// gatewayInit creates a gateway router for the local chassis. // enableGatewayMTU enables options:gateway_mtu for gateway routers. 
-func (gw *GatewayManager) GatewayInit( +func (gw *GatewayManager) gatewayInit( nodeName string, clusterIPSubnet []*net.IPNet, hostSubnets []*net.IPNet, @@ -1330,19 +1330,29 @@ func (gw *GatewayManager) isRoutingAdvertised(node string) bool { return util.IsPodNetworkAdvertisedAtNode(gw.netInfo, node) } -func (gw *GatewayManager) syncGatewayLogicalNetwork( +// SyncGateway ensures a node's gateway router is configured according to the L3 config and host subnets +func (gw *GatewayManager) SyncGateway( node *corev1.Node, l3GatewayConfig *util.L3GatewayConfig, hostSubnets []*net.IPNet, hostAddrs []string, - clusterSubnets []*net.IPNet, - grLRPJoinIPs []*net.IPNet, + clusterSubnets, grLRPJoinIPs []*net.IPNet, ovnClusterLRPToJoinIfAddrs []*net.IPNet, externalIPs []net.IP, ) error { + if l3GatewayConfig.Mode == config.GatewayModeDisabled { + if err := gw.Cleanup(); err != nil { + return fmt.Errorf("error cleaning up gateway for node %s: %v", node.Name, err) + } + return nil + } + if hostSubnets == nil { + return nil + } + enableGatewayMTU := util.ParseNodeGatewayMTUSupport(node) - err := gw.GatewayInit( + err := gw.gatewayInit( node.Name, clusterSubnets, hostSubnets, @@ -1388,37 +1398,6 @@ func (gw *GatewayManager) syncGatewayLogicalNetwork( return nil } -// syncNodeGateway ensures a node's gateway router is configured according to the L3 config and host subnets -func (gw *GatewayManager) syncNodeGateway( - node *corev1.Node, - l3GatewayConfig *util.L3GatewayConfig, - hostSubnets []*net.IPNet, - hostAddrs []string, - clusterSubnets, grLRPJoinIPs []*net.IPNet, - joinSwitchIPs []*net.IPNet, - externalIPs []net.IP, -) error { - if l3GatewayConfig.Mode == config.GatewayModeDisabled { - if err := gw.Cleanup(); err != nil { - return fmt.Errorf("error cleaning up gateway for node %s: %v", node.Name, err) - } - return nil - } - if hostSubnets == nil { - return nil - } - return gw.syncGatewayLogicalNetwork( - node, - l3GatewayConfig, - hostSubnets, - hostAddrs, - clusterSubnets, - 
grLRPJoinIPs, // the joinIP allocated to this node for this controller's network - joinSwitchIPs, // the .1 of this controller's global joinSubnet - externalIPs, - ) -} - func physNetName(netInfo util.NetInfo) string { if netInfo.IsDefault() || netInfo.IsPrimaryNetwork() { return types.PhysicalNetworkName diff --git a/go-controller/pkg/ovn/gateway_test.go b/go-controller/pkg/ovn/gateway_test.go index a8e611119d..82118aeb94 100644 --- a/go-controller/pkg/ovn/gateway_test.go +++ b/go-controller/pkg/ovn/gateway_test.go @@ -469,7 +469,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, clusterIPSubnets, hostSubnets, @@ -582,7 +582,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, clusterIPSubnets, hostSubnets, @@ -701,7 +701,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, clusterIPSubnets, hostSubnets, @@ -786,7 +786,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, clusterIPSubnets, hostSubnets, @@ -871,7 +871,7 @@ var _ 
= ginkgo.Describe("Gateway Init Operations", func() { mgmtPortIP := "" // Disable option:gateway_mtu. - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, clusterIPSubnets, hostSubnets, @@ -890,7 +890,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { // Enable option:gateway_mtu. expectedOVNClusterRouter.StaticRoutes = []string{} - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, clusterIPSubnets, hostSubnets, @@ -968,7 +968,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { // We don't set up the Allow from mgmt port ACL here mgmtPortIP := "" - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, clusterIPSubnets, hostSubnets, @@ -989,7 +989,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { oldJoinLRPIPs := joinLRPIPs joinLRPIPs = ovntest.MustParseIPNets("100.64.0.99/16") expectedOVNClusterRouter.StaticRoutes = []string{} - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, clusterIPSubnets, hostSubnets, @@ -1072,7 +1072,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, clusterIPSubnets, hostSubnets, @@ -1151,7 +1151,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { config.IPv4Mode = false config.IPv6Mode = true - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, clusterIPSubnets, hostSubnets, @@ -1231,7 +1231,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { 
fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, clusterIPSubnets, hostSubnets, @@ -1309,7 +1309,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, clusterIPSubnets, hostSubnets, @@ -1419,7 +1419,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, clusterIPSubnets, hostSubnets, @@ -1531,7 +1531,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, clusterIPSubnets, hostSubnets, @@ -1618,7 +1618,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, clusterIPSubnets, hostSubnets, @@ -1734,7 +1734,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) expectedOVNClusterRouter.StaticRoutes = []string{} - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = 
newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, clusterIPSubnets, hostSubnets, diff --git a/go-controller/pkg/ovn/ovn.go b/go-controller/pkg/ovn/ovn.go index d035fdd299..64fb5c54d8 100644 --- a/go-controller/pkg/ovn/ovn.go +++ b/go-controller/pkg/ovn/ovn.go @@ -379,23 +379,17 @@ func (oc *DefaultNetworkController) syncNodeGateway(node *corev1.Node) error { return fmt.Errorf("error getting gateway config for node %s: %v", node.Name, err) } - if gwConfig.config.Mode == config.GatewayModeDisabled { - if err := oc.newGatewayManager(node.Name).Cleanup(); err != nil { - return fmt.Errorf("error cleaning up gateway for node %s: %v", node.Name, err) - } - } else { - if err := oc.newGatewayManager(node.Name).syncGatewayLogicalNetwork( - node, - gwConfig.config, - gwConfig.hostSubnets, - gwConfig.hostAddrs, - gwConfig.clusterSubnets, - gwConfig.gwLRPJoinIPs, - oc.ovnClusterLRPToJoinIfAddrs, - gwConfig.externalIPs, - ); err != nil { - return fmt.Errorf("error creating gateway for node %s: %v", node.Name, err) - } + if err := oc.newGatewayManager(node.Name).SyncGateway( + node, + gwConfig.config, + gwConfig.hostSubnets, + gwConfig.hostAddrs, + gwConfig.clusterSubnets, + gwConfig.gwLRPJoinIPs, + oc.ovnClusterLRPToJoinIfAddrs, + gwConfig.externalIPs, + ); err != nil { + return fmt.Errorf("error creating gateway for node %s: %v", node.Name, err) } if util.IsPodNetworkAdvertisedAtNode(oc, node.Name) { diff --git a/go-controller/pkg/ovn/secondary_layer2_network_controller.go b/go-controller/pkg/ovn/secondary_layer2_network_controller.go index d43b853008..a8c3637b53 100644 --- a/go-controller/pkg/ovn/secondary_layer2_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer2_network_controller.go @@ -580,7 +580,7 @@ func (oc *SecondaryLayer2NetworkController) addUpdateLocalNodeEvent(node *corev1 errs = append(errs, err) oc.gatewaysFailed.Store(node.Name, true) } else { - if err := gwManager.syncNodeGateway( + if err := gwManager.SyncGateway( node, 
gwConfig.config, gwConfig.hostSubnets, diff --git a/go-controller/pkg/ovn/secondary_layer3_network_controller.go b/go-controller/pkg/ovn/secondary_layer3_network_controller.go index ac945b2739..1585dd55f4 100644 --- a/go-controller/pkg/ovn/secondary_layer3_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer3_network_controller.go @@ -777,7 +777,7 @@ func (oc *SecondaryLayer3NetworkController) addUpdateLocalNodeEvent(node *corev1 errs = append(errs, fmt.Errorf("failed to generate node GW configuration: %v", err)) oc.gatewaysFailed.Store(node.Name, true) } else { - if err := gwManager.syncNodeGateway( + if err := gwManager.SyncGateway( node, gwConfig.config, gwConfig.hostSubnets, From 0473203be6277731f8e93973129b92e6e240128d Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Tue, 15 Jul 2025 11:27:14 +0200 Subject: [PATCH 095/181] [gateway] pass gateway args for all controllers using GatewayConfig. It is much easier to track how each parameter is used and set, removes 3 different names for the same thing problem. Add ovnClusterLRPToJoinIfAddrs to the GatewayConfig. Signed-off-by: Nadia Pinaeva --- go-controller/pkg/ovn/gateway.go | 58 ++--- go-controller/pkg/ovn/gateway_test.go | 240 +++++++++++------- go-controller/pkg/ovn/master.go | 32 +-- go-controller/pkg/ovn/ovn.go | 8 +- .../secondary_layer2_network_controller.go | 30 +-- .../secondary_layer3_network_controller.go | 25 +- 6 files changed, 203 insertions(+), 190 deletions(-) diff --git a/go-controller/pkg/ovn/gateway.go b/go-controller/pkg/ovn/gateway.go index 8d084476a1..8177f1c0d3 100644 --- a/go-controller/pkg/ovn/gateway.go +++ b/go-controller/pkg/ovn/gateway.go @@ -803,11 +803,7 @@ func (gw *GatewayManager) updateGWRouterNAT(nodeName string, clusterIPSubnet []* // enableGatewayMTU enables options:gateway_mtu for gateway routers. 
func (gw *GatewayManager) gatewayInit( nodeName string, - clusterIPSubnet []*net.IPNet, - hostSubnets []*net.IPNet, - l3GatewayConfig *util.L3GatewayConfig, - gwLRPJoinIPs, drLRPIfAddrs []*net.IPNet, - externalIPs []net.IP, + gwConfig *GatewayConfig, enableGatewayMTU bool, ) error { @@ -837,7 +833,7 @@ func (gw *GatewayManager) gatewayInit( } } - gwRouter, err := gw.createGWRouter(l3GatewayConfig, gwLRPJoinIPs) + gwRouter, err := gw.createGWRouter(gwConfig.config, gwConfig.gwLRPJoinIPs) if err != nil { return err } @@ -846,28 +842,28 @@ func (gw *GatewayManager) gatewayInit( return err } - gwLRPIPs, err := gw.createGWRouterPort(hostSubnets, gwLRPJoinIPs, enableGatewayMTU, gwRouter) + gwLRPIPs, err := gw.createGWRouterPort(gwConfig.hostSubnets, gwConfig.gwLRPJoinIPs, enableGatewayMTU, gwRouter) if err != nil { return err } if err := gw.addExternalSwitch("", - l3GatewayConfig.InterfaceID, + gwConfig.config.InterfaceID, gw.gwRouterName, - l3GatewayConfig.MACAddress.String(), + gwConfig.config.MACAddress.String(), physNetName(gw.netInfo), - l3GatewayConfig.IPAddresses, - l3GatewayConfig.VLANID); err != nil { + gwConfig.config.IPAddresses, + gwConfig.config.VLANID); err != nil { return err } - if l3GatewayConfig.EgressGWInterfaceID != "" { + if gwConfig.config.EgressGWInterfaceID != "" { if err := gw.addExternalSwitch(types.EgressGWSwitchPrefix, - l3GatewayConfig.EgressGWInterfaceID, + gwConfig.config.EgressGWInterfaceID, gw.gwRouterName, - l3GatewayConfig.EgressGWMACAddress.String(), + gwConfig.config.EgressGWMACAddress.String(), types.PhysicalNetworkExGwName, - l3GatewayConfig.EgressGWIPAddresses, + gwConfig.config.EgressGWIPAddresses, nil); err != nil { return err } @@ -883,20 +879,20 @@ func (gw *GatewayManager) gatewayInit( } externalRouterPort := types.GWRouterToExtSwitchPrefix + gw.gwRouterName - if err = gw.updateGWRouterStaticRoutes(clusterIPSubnet, drLRPIfAddrs, l3GatewayConfig, externalRouterPort, + if err = 
gw.updateGWRouterStaticRoutes(gwConfig.clusterSubnets, gwConfig.ovnClusterLRPToJoinIfAddrs, gwConfig.config, externalRouterPort, gwRouter); err != nil { return err } - if err = gw.updateClusterRouterStaticRoutes(hostSubnets, gwLRPIPs); err != nil { + if err = gw.updateClusterRouterStaticRoutes(gwConfig.hostSubnets, gwLRPIPs); err != nil { return err } - if err = gw.syncNATsForGRIPChange(externalIPs, oldExtIPs, gwLRPIPs, gwRouter, oldLogicalRouter); err != nil { + if err = gw.syncNATsForGRIPChange(gwConfig.externalIPs, oldExtIPs, gwLRPIPs, gwRouter, oldLogicalRouter); err != nil { return err } - if err = gw.updateGWRouterNAT(nodeName, clusterIPSubnet, l3GatewayConfig, externalIPs, gwLRPIPs, gwRouter); err != nil { + if err = gw.updateGWRouterNAT(nodeName, gwConfig.clusterSubnets, gwConfig.config, gwConfig.externalIPs, gwLRPIPs, gwRouter); err != nil { return err } @@ -1333,20 +1329,15 @@ func (gw *GatewayManager) isRoutingAdvertised(node string) bool { // SyncGateway ensures a node's gateway router is configured according to the L3 config and host subnets func (gw *GatewayManager) SyncGateway( node *corev1.Node, - l3GatewayConfig *util.L3GatewayConfig, - hostSubnets []*net.IPNet, - hostAddrs []string, - clusterSubnets, grLRPJoinIPs []*net.IPNet, - ovnClusterLRPToJoinIfAddrs []*net.IPNet, - externalIPs []net.IP, + gwConfig *GatewayConfig, ) error { - if l3GatewayConfig.Mode == config.GatewayModeDisabled { + if gwConfig.config.Mode == config.GatewayModeDisabled { if err := gw.Cleanup(); err != nil { return fmt.Errorf("error cleaning up gateway for node %s: %v", node.Name, err) } return nil } - if hostSubnets == nil { + if gwConfig.hostSubnets == nil { return nil } @@ -1354,12 +1345,7 @@ func (gw *GatewayManager) SyncGateway( err := gw.gatewayInit( node.Name, - clusterSubnets, - hostSubnets, - l3GatewayConfig, - grLRPJoinIPs, // the joinIP allocated to this node's GR for this controller's network - ovnClusterLRPToJoinIfAddrs, - externalIPs, + gwConfig, 
enableGatewayMTU, ) if err != nil { @@ -1370,16 +1356,16 @@ func (gw *GatewayManager) SyncGateway( if gw.clusterRouterName == "" { routerName = gw.gwRouterName } - for _, subnet := range hostSubnets { + for _, subnet := range gwConfig.hostSubnets { mgmtIfAddr := util.GetNodeManagementIfAddr(subnet) if mgmtIfAddr == nil { return fmt.Errorf("management interface address not found for subnet %q on network %q", subnet, gw.netInfo.GetNetworkName()) } - l3GatewayConfigIP, err := util.MatchFirstIPNetFamily(utilnet.IsIPv6(mgmtIfAddr.IP), l3GatewayConfig.IPAddresses) + l3GatewayConfigIP, err := util.MatchFirstIPNetFamily(utilnet.IsIPv6(mgmtIfAddr.IP), gwConfig.config.IPAddresses) if err != nil { return fmt.Errorf("failed to extract the gateway IP addr for network %q: %v", gw.netInfo.GetNetworkName(), err) } - relevantHostIPs, err := util.MatchAllIPStringFamily(utilnet.IsIPv6(mgmtIfAddr.IP), hostAddrs) + relevantHostIPs, err := util.MatchAllIPStringFamily(utilnet.IsIPv6(mgmtIfAddr.IP), gwConfig.hostAddrs) if err != nil && err != util.ErrorNoIP { return fmt.Errorf("failed to extract the host IP addrs for network %q: %v", gw.netInfo.GetNetworkName(), err) } diff --git a/go-controller/pkg/ovn/gateway_test.go b/go-controller/pkg/ovn/gateway_test.go index 82118aeb94..3b15905e13 100644 --- a/go-controller/pkg/ovn/gateway_test.go +++ b/go-controller/pkg/ovn/gateway_test.go @@ -464,6 +464,15 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("169.255.33.1"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + config: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) @@ -471,12 +480,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { err = 
newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -577,6 +581,15 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("169.255.33.1"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + config: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) @@ -584,12 +597,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -696,6 +704,15 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("169.255.33.1"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + config: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) @@ -703,12 +720,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -781,6 +793,15 @@ var _ = ginkgo.Describe("Gateway Init 
Operations", func() { IPAddresses: ovntest.MustParseIPNets("169.255.33.2/24"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + config: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) @@ -788,12 +809,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -859,7 +875,15 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("169.255.33.1"), NodePortEnable: true, } - + gwConfig := &GatewayConfig{ + config: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -873,12 +897,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { // Disable option:gateway_mtu. 
err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, false, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -892,12 +911,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { expectedOVNClusterRouter.StaticRoutes = []string{} err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -957,6 +971,15 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("169.255.33.1"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + config: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) @@ -970,12 +993,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, false, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -988,15 +1006,11 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { ginkgo.By("modifying the node join IP") oldJoinLRPIPs := joinLRPIPs joinLRPIPs = ovntest.MustParseIPNets("100.64.0.99/16") + gwConfig.gwLRPJoinIPs = joinLRPIPs expectedOVNClusterRouter.StaticRoutes = []string{} err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1067,6 +1081,15 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("fd99::1"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + config: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) @@ -1074,12 +1097,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1143,6 +1161,15 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { IPAddresses: ovntest.MustParseIPNets("fd99::2/64"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + config: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) @@ -1153,12 +1180,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1226,6 +1248,15 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("169.255.33.1", "fd99::1"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + config: l3GatewayConfig, + hostSubnets: hostSubnets, + 
clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) @@ -1233,12 +1264,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1303,6 +1329,15 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("169.255.33.1"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + config: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } config.Gateway.DisableSNATMultipleGWs = true var err error @@ -1311,12 +1346,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1413,6 +1443,15 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("169.255.33.1"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + config: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } config.Gateway.DisableSNATMultipleGWs = true var err error @@ -1421,12 +1460,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { err = 
newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1526,6 +1560,15 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("169.255.33.1", "fd99::1"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + config: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) @@ -1533,12 +1576,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1612,6 +1650,15 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("169.255.33.1"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + config: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } config.Gateway.DisableSNATMultipleGWs = true var err error @@ -1620,12 +1667,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1727,6 +1769,15 @@ var _ = ginkgo.Describe("Gateway Init Operations", 
func() { NextHops: ovntest.MustParseIPs("169.255.33.1"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + config: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } config.Gateway.DisableSNATMultipleGWs = true var err error @@ -1736,12 +1787,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { expectedOVNClusterRouter.StaticRoutes = []string{} err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/go-controller/pkg/ovn/master.go b/go-controller/pkg/ovn/master.go index 13d5283012..2b02dbf91a 100644 --- a/go-controller/pkg/ovn/master.go +++ b/go-controller/pkg/ovn/master.go @@ -33,13 +33,14 @@ const ( OvnNodeAnnotationRetryTimeout = 1 * time.Second ) -type L3GatewayConfig struct { - config *util.L3GatewayConfig - hostSubnets []*net.IPNet - clusterSubnets []*net.IPNet - gwLRPJoinIPs []*net.IPNet - hostAddrs []string - externalIPs []net.IP +type GatewayConfig struct { + config *util.L3GatewayConfig + hostSubnets []*net.IPNet + clusterSubnets []*net.IPNet + gwLRPJoinIPs []*net.IPNet + hostAddrs []string + externalIPs []net.IP + ovnClusterLRPToJoinIfAddrs []*net.IPNet } // SetupMaster creates the central router and load-balancers for the network @@ -91,7 +92,7 @@ func (oc *DefaultNetworkController) syncNodeManagementPortDefault(node *corev1.N return err } -func (oc *DefaultNetworkController) nodeGatewayConfig(node *corev1.Node) (*L3GatewayConfig, error) { +func (oc *DefaultNetworkController) nodeGatewayConfig(node *corev1.Node) (*GatewayConfig, error) { l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(node) if err != nil { return nil, err @@ -133,13 +134,14 @@ func 
(oc *DefaultNetworkController) nodeGatewayConfig(node *corev1.Node) (*L3Gat } } - return &L3GatewayConfig{ - config: l3GatewayConfig, - hostSubnets: hostSubnets, - clusterSubnets: clusterSubnets, - gwLRPJoinIPs: gwLRPIPs, - hostAddrs: hostAddrs, - externalIPs: externalIPs, + return &GatewayConfig{ + config: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterSubnets, + gwLRPJoinIPs: gwLRPIPs, + hostAddrs: hostAddrs, + externalIPs: externalIPs, + ovnClusterLRPToJoinIfAddrs: oc.ovnClusterLRPToJoinIfAddrs, }, nil } diff --git a/go-controller/pkg/ovn/ovn.go b/go-controller/pkg/ovn/ovn.go index 64fb5c54d8..293e23f4aa 100644 --- a/go-controller/pkg/ovn/ovn.go +++ b/go-controller/pkg/ovn/ovn.go @@ -381,13 +381,7 @@ func (oc *DefaultNetworkController) syncNodeGateway(node *corev1.Node) error { if err := oc.newGatewayManager(node.Name).SyncGateway( node, - gwConfig.config, - gwConfig.hostSubnets, - gwConfig.hostAddrs, - gwConfig.clusterSubnets, - gwConfig.gwLRPJoinIPs, - oc.ovnClusterLRPToJoinIfAddrs, - gwConfig.externalIPs, + gwConfig, ); err != nil { return fmt.Errorf("error creating gateway for node %s: %v", node.Name, err) } diff --git a/go-controller/pkg/ovn/secondary_layer2_network_controller.go b/go-controller/pkg/ovn/secondary_layer2_network_controller.go index a8c3637b53..74ad6eab88 100644 --- a/go-controller/pkg/ovn/secondary_layer2_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer2_network_controller.go @@ -582,13 +582,7 @@ func (oc *SecondaryLayer2NetworkController) addUpdateLocalNodeEvent(node *corev1 } else { if err := gwManager.SyncGateway( node, - gwConfig.config, - gwConfig.hostSubnets, - nil, - gwConfig.hostSubnets, - gwConfig.gwLRPJoinIPs, // the joinIP allocated to this node for this controller's network - nil, // no need for ovnClusterLRPToJoinIfAddrs - gwConfig.externalIPs, + gwConfig, ); err != nil { errs = append(errs, err) oc.gatewaysFailed.Store(node.Name, true) @@ -795,14 +789,7 @@ func (oc 
*SecondaryLayer2NetworkController) deleteUDNClusterSubnetEgressSNAT(loc return nil } -type SecondaryL2GatewayConfig struct { - config *util.L3GatewayConfig - hostSubnets []*net.IPNet - gwLRPJoinIPs []*net.IPNet - externalIPs []net.IP -} - -func (oc *SecondaryLayer2NetworkController) nodeGatewayConfig(node *corev1.Node) (*SecondaryL2GatewayConfig, error) { +func (oc *SecondaryLayer2NetworkController) nodeGatewayConfig(node *corev1.Node) (*GatewayConfig, error) { l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(node) if err != nil { return nil, fmt.Errorf("failed to get node %s network %s L3 gateway config: %v", node.Name, oc.GetNetworkName(), err) @@ -842,11 +829,14 @@ func (oc *SecondaryLayer2NetworkController) nodeGatewayConfig(node *corev1.Node) // Overwrite the primary interface ID with the correct, per-network one. l3GatewayConfig.InterfaceID = oc.GetNetworkScopedExtPortName(l3GatewayConfig.BridgeID, node.Name) - return &SecondaryL2GatewayConfig{ - config: l3GatewayConfig, - hostSubnets: hostSubnets, - gwLRPJoinIPs: gwLRPJoinIPs, - externalIPs: externalIPs, + return &GatewayConfig{ + config: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: hostSubnets, + gwLRPJoinIPs: gwLRPJoinIPs, + hostAddrs: nil, + externalIPs: externalIPs, + ovnClusterLRPToJoinIfAddrs: nil, }, nil } diff --git a/go-controller/pkg/ovn/secondary_layer3_network_controller.go b/go-controller/pkg/ovn/secondary_layer3_network_controller.go index 1585dd55f4..e550000318 100644 --- a/go-controller/pkg/ovn/secondary_layer3_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer3_network_controller.go @@ -779,13 +779,7 @@ func (oc *SecondaryLayer3NetworkController) addUpdateLocalNodeEvent(node *corev1 } else { if err := gwManager.SyncGateway( node, - gwConfig.config, - gwConfig.hostSubnets, - gwConfig.hostAddrs, - gwConfig.clusterSubnets, - gwConfig.gwLRPJoinIPs, // the joinIP allocated to this node for this controller's network - oc.ovnClusterLRPToJoinIfAddrs, // the 
.1 of this controller's global joinSubnet - gwConfig.externalIPs, + gwConfig, ); err != nil { errs = append(errs, fmt.Errorf( "failed to sync node GW for network %q: %v", @@ -1045,7 +1039,7 @@ func (oc *SecondaryLayer3NetworkController) gatherJoinSwitchIPs() error { return nil } -func (oc *SecondaryLayer3NetworkController) nodeGatewayConfig(node *corev1.Node) (*L3GatewayConfig, error) { +func (oc *SecondaryLayer3NetworkController) nodeGatewayConfig(node *corev1.Node) (*GatewayConfig, error) { l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(node) if err != nil { return nil, fmt.Errorf("failed to get node %s network %s L3 gateway config: %v", node.Name, oc.GetNetworkName(), err) @@ -1095,13 +1089,14 @@ func (oc *SecondaryLayer3NetworkController) nodeGatewayConfig(node *corev1.Node) // Overwrite the primary interface ID with the correct, per-network one. l3GatewayConfig.InterfaceID = oc.GetNetworkScopedExtPortName(l3GatewayConfig.BridgeID, node.Name) - return &L3GatewayConfig{ - config: l3GatewayConfig, - hostSubnets: hostSubnets, - clusterSubnets: clusterSubnets, - gwLRPJoinIPs: gwLRPJoinIPs, - hostAddrs: hostAddrs, - externalIPs: externalIPs, + return &GatewayConfig{ + config: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterSubnets, + gwLRPJoinIPs: gwLRPJoinIPs, + hostAddrs: hostAddrs, + externalIPs: externalIPs, + ovnClusterLRPToJoinIfAddrs: oc.ovnClusterLRPToJoinIfAddrs, }, nil } From 6395072de4c04a07e6a16f4d44d5025041321af5 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Tue, 15 Jul 2025 17:53:22 +0200 Subject: [PATCH 096/181] [gateway] rename GatewayConfig config to annoConfig Signed-off-by: Nadia Pinaeva --- go-controller/pkg/ovn/gateway.go | 26 ++++++++--------- go-controller/pkg/ovn/gateway_test.go | 28 +++++++++---------- go-controller/pkg/ovn/master.go | 4 +-- .../secondary_layer2_network_controller.go | 2 +- .../secondary_layer3_network_controller.go | 2 +- 5 files changed, 31 insertions(+), 31 deletions(-) diff --git 
a/go-controller/pkg/ovn/gateway.go b/go-controller/pkg/ovn/gateway.go index 8177f1c0d3..a43adf5368 100644 --- a/go-controller/pkg/ovn/gateway.go +++ b/go-controller/pkg/ovn/gateway.go @@ -833,7 +833,7 @@ func (gw *GatewayManager) gatewayInit( } } - gwRouter, err := gw.createGWRouter(gwConfig.config, gwConfig.gwLRPJoinIPs) + gwRouter, err := gw.createGWRouter(gwConfig.annoConfig, gwConfig.gwLRPJoinIPs) if err != nil { return err } @@ -848,22 +848,22 @@ func (gw *GatewayManager) gatewayInit( } if err := gw.addExternalSwitch("", - gwConfig.config.InterfaceID, + gwConfig.annoConfig.InterfaceID, gw.gwRouterName, - gwConfig.config.MACAddress.String(), + gwConfig.annoConfig.MACAddress.String(), physNetName(gw.netInfo), - gwConfig.config.IPAddresses, - gwConfig.config.VLANID); err != nil { + gwConfig.annoConfig.IPAddresses, + gwConfig.annoConfig.VLANID); err != nil { return err } - if gwConfig.config.EgressGWInterfaceID != "" { + if gwConfig.annoConfig.EgressGWInterfaceID != "" { if err := gw.addExternalSwitch(types.EgressGWSwitchPrefix, - gwConfig.config.EgressGWInterfaceID, + gwConfig.annoConfig.EgressGWInterfaceID, gw.gwRouterName, - gwConfig.config.EgressGWMACAddress.String(), + gwConfig.annoConfig.EgressGWMACAddress.String(), types.PhysicalNetworkExGwName, - gwConfig.config.EgressGWIPAddresses, + gwConfig.annoConfig.EgressGWIPAddresses, nil); err != nil { return err } @@ -879,7 +879,7 @@ func (gw *GatewayManager) gatewayInit( } externalRouterPort := types.GWRouterToExtSwitchPrefix + gw.gwRouterName - if err = gw.updateGWRouterStaticRoutes(gwConfig.clusterSubnets, gwConfig.ovnClusterLRPToJoinIfAddrs, gwConfig.config, externalRouterPort, + if err = gw.updateGWRouterStaticRoutes(gwConfig.clusterSubnets, gwConfig.ovnClusterLRPToJoinIfAddrs, gwConfig.annoConfig, externalRouterPort, gwRouter); err != nil { return err } @@ -892,7 +892,7 @@ func (gw *GatewayManager) gatewayInit( return err } - if err = gw.updateGWRouterNAT(nodeName, gwConfig.clusterSubnets, gwConfig.config, 
gwConfig.externalIPs, gwLRPIPs, gwRouter); err != nil { + if err = gw.updateGWRouterNAT(nodeName, gwConfig.clusterSubnets, gwConfig.annoConfig, gwConfig.externalIPs, gwLRPIPs, gwRouter); err != nil { return err } @@ -1331,7 +1331,7 @@ func (gw *GatewayManager) SyncGateway( node *corev1.Node, gwConfig *GatewayConfig, ) error { - if gwConfig.config.Mode == config.GatewayModeDisabled { + if gwConfig.annoConfig.Mode == config.GatewayModeDisabled { if err := gw.Cleanup(); err != nil { return fmt.Errorf("error cleaning up gateway for node %s: %v", node.Name, err) } @@ -1361,7 +1361,7 @@ func (gw *GatewayManager) SyncGateway( if mgmtIfAddr == nil { return fmt.Errorf("management interface address not found for subnet %q on network %q", subnet, gw.netInfo.GetNetworkName()) } - l3GatewayConfigIP, err := util.MatchFirstIPNetFamily(utilnet.IsIPv6(mgmtIfAddr.IP), gwConfig.config.IPAddresses) + l3GatewayConfigIP, err := util.MatchFirstIPNetFamily(utilnet.IsIPv6(mgmtIfAddr.IP), gwConfig.annoConfig.IPAddresses) if err != nil { return fmt.Errorf("failed to extract the gateway IP addr for network %q: %v", gw.netInfo.GetNetworkName(), err) } diff --git a/go-controller/pkg/ovn/gateway_test.go b/go-controller/pkg/ovn/gateway_test.go index 3b15905e13..8e87ba0afd 100644 --- a/go-controller/pkg/ovn/gateway_test.go +++ b/go-controller/pkg/ovn/gateway_test.go @@ -465,7 +465,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NodePortEnable: true, } gwConfig := &GatewayConfig{ - config: l3GatewayConfig, + annoConfig: l3GatewayConfig, hostSubnets: hostSubnets, clusterSubnets: clusterIPSubnets, gwLRPJoinIPs: joinLRPIPs, @@ -582,7 +582,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NodePortEnable: true, } gwConfig := &GatewayConfig{ - config: l3GatewayConfig, + annoConfig: l3GatewayConfig, hostSubnets: hostSubnets, clusterSubnets: clusterIPSubnets, gwLRPJoinIPs: joinLRPIPs, @@ -705,7 +705,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { 
NodePortEnable: true, } gwConfig := &GatewayConfig{ - config: l3GatewayConfig, + annoConfig: l3GatewayConfig, hostSubnets: hostSubnets, clusterSubnets: clusterIPSubnets, gwLRPJoinIPs: joinLRPIPs, @@ -794,7 +794,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NodePortEnable: true, } gwConfig := &GatewayConfig{ - config: l3GatewayConfig, + annoConfig: l3GatewayConfig, hostSubnets: hostSubnets, clusterSubnets: clusterIPSubnets, gwLRPJoinIPs: joinLRPIPs, @@ -876,7 +876,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NodePortEnable: true, } gwConfig := &GatewayConfig{ - config: l3GatewayConfig, + annoConfig: l3GatewayConfig, hostSubnets: hostSubnets, clusterSubnets: clusterIPSubnets, gwLRPJoinIPs: joinLRPIPs, @@ -972,7 +972,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NodePortEnable: true, } gwConfig := &GatewayConfig{ - config: l3GatewayConfig, + annoConfig: l3GatewayConfig, hostSubnets: hostSubnets, clusterSubnets: clusterIPSubnets, gwLRPJoinIPs: joinLRPIPs, @@ -1082,7 +1082,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NodePortEnable: true, } gwConfig := &GatewayConfig{ - config: l3GatewayConfig, + annoConfig: l3GatewayConfig, hostSubnets: hostSubnets, clusterSubnets: clusterIPSubnets, gwLRPJoinIPs: joinLRPIPs, @@ -1162,7 +1162,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NodePortEnable: true, } gwConfig := &GatewayConfig{ - config: l3GatewayConfig, + annoConfig: l3GatewayConfig, hostSubnets: hostSubnets, clusterSubnets: clusterIPSubnets, gwLRPJoinIPs: joinLRPIPs, @@ -1249,7 +1249,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NodePortEnable: true, } gwConfig := &GatewayConfig{ - config: l3GatewayConfig, + annoConfig: l3GatewayConfig, hostSubnets: hostSubnets, clusterSubnets: clusterIPSubnets, gwLRPJoinIPs: joinLRPIPs, @@ -1330,7 +1330,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NodePortEnable: true, } gwConfig := &GatewayConfig{ - 
config: l3GatewayConfig, + annoConfig: l3GatewayConfig, hostSubnets: hostSubnets, clusterSubnets: clusterIPSubnets, gwLRPJoinIPs: joinLRPIPs, @@ -1444,7 +1444,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NodePortEnable: true, } gwConfig := &GatewayConfig{ - config: l3GatewayConfig, + annoConfig: l3GatewayConfig, hostSubnets: hostSubnets, clusterSubnets: clusterIPSubnets, gwLRPJoinIPs: joinLRPIPs, @@ -1561,7 +1561,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NodePortEnable: true, } gwConfig := &GatewayConfig{ - config: l3GatewayConfig, + annoConfig: l3GatewayConfig, hostSubnets: hostSubnets, clusterSubnets: clusterIPSubnets, gwLRPJoinIPs: joinLRPIPs, @@ -1651,7 +1651,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NodePortEnable: true, } gwConfig := &GatewayConfig{ - config: l3GatewayConfig, + annoConfig: l3GatewayConfig, hostSubnets: hostSubnets, clusterSubnets: clusterIPSubnets, gwLRPJoinIPs: joinLRPIPs, @@ -1770,7 +1770,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NodePortEnable: true, } gwConfig := &GatewayConfig{ - config: l3GatewayConfig, + annoConfig: l3GatewayConfig, hostSubnets: hostSubnets, clusterSubnets: clusterIPSubnets, gwLRPJoinIPs: joinLRPIPs, diff --git a/go-controller/pkg/ovn/master.go b/go-controller/pkg/ovn/master.go index 2b02dbf91a..b5394c7ffe 100644 --- a/go-controller/pkg/ovn/master.go +++ b/go-controller/pkg/ovn/master.go @@ -34,7 +34,7 @@ const ( ) type GatewayConfig struct { - config *util.L3GatewayConfig + annoConfig *util.L3GatewayConfig hostSubnets []*net.IPNet clusterSubnets []*net.IPNet gwLRPJoinIPs []*net.IPNet @@ -135,7 +135,7 @@ func (oc *DefaultNetworkController) nodeGatewayConfig(node *corev1.Node) (*Gatew } return &GatewayConfig{ - config: l3GatewayConfig, + annoConfig: l3GatewayConfig, hostSubnets: hostSubnets, clusterSubnets: clusterSubnets, gwLRPJoinIPs: gwLRPIPs, diff --git a/go-controller/pkg/ovn/secondary_layer2_network_controller.go 
b/go-controller/pkg/ovn/secondary_layer2_network_controller.go index 74ad6eab88..7ce63fc278 100644 --- a/go-controller/pkg/ovn/secondary_layer2_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer2_network_controller.go @@ -830,7 +830,7 @@ func (oc *SecondaryLayer2NetworkController) nodeGatewayConfig(node *corev1.Node) // Overwrite the primary interface ID with the correct, per-network one. l3GatewayConfig.InterfaceID = oc.GetNetworkScopedExtPortName(l3GatewayConfig.BridgeID, node.Name) return &GatewayConfig{ - config: l3GatewayConfig, + annoConfig: l3GatewayConfig, hostSubnets: hostSubnets, clusterSubnets: hostSubnets, gwLRPJoinIPs: gwLRPJoinIPs, diff --git a/go-controller/pkg/ovn/secondary_layer3_network_controller.go b/go-controller/pkg/ovn/secondary_layer3_network_controller.go index e550000318..b2355b9100 100644 --- a/go-controller/pkg/ovn/secondary_layer3_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer3_network_controller.go @@ -1090,7 +1090,7 @@ func (oc *SecondaryLayer3NetworkController) nodeGatewayConfig(node *corev1.Node) l3GatewayConfig.InterfaceID = oc.GetNetworkScopedExtPortName(l3GatewayConfig.BridgeID, node.Name) return &GatewayConfig{ - config: l3GatewayConfig, + annoConfig: l3GatewayConfig, hostSubnets: hostSubnets, clusterSubnets: clusterSubnets, gwLRPJoinIPs: gwLRPJoinIPs, From 005427354a0533550efb1236175f1baadb6cf797 Mon Sep 17 00:00:00 2001 From: Yun Zhou Date: Wed, 2 Jul 2025 18:16:53 -0700 Subject: [PATCH 097/181] VF gateway trigger errors message in updateServiceFlowCache When gateway accelerated interface is used, we noticed the error messages 'gateway_shared_intf.go:392] Unable to get port list from bridge. ... failed to get list of ports on bridge "enp1s0f0v0":, stderr: "ovs-ofctl: enp1s0f0v0 is not a bridge or a socket\n" ...'. Also, bridgeConfiguration.getGatewayIface() function is confusing, as b.gwIface is always non-empty, so one can just directly use b.gwIface. 
Signed-off-by: Yun Zhou --- .../pkg/node/default_node_network_controller.go | 10 ++-------- go-controller/pkg/node/gateway.go | 12 ++---------- go-controller/pkg/node/gateway_shared_intf.go | 16 ++++++++-------- go-controller/pkg/node/gateway_udn.go | 2 +- go-controller/pkg/node/gateway_udn_test.go | 2 +- .../pkg/node/node_ip_handler_linux_test.go | 2 +- 6 files changed, 15 insertions(+), 29 deletions(-) diff --git a/go-controller/pkg/node/default_node_network_controller.go b/go-controller/pkg/node/default_node_network_controller.go index 7a75c36984..3b120dc579 100644 --- a/go-controller/pkg/node/default_node_network_controller.go +++ b/go-controller/pkg/node/default_node_network_controller.go @@ -1186,10 +1186,8 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { // is not needed. Future upgrade flows will need to take DPUs into account. if config.OvnKubeNode.Mode != types.NodeModeDPUHost { if config.OvnKubeNode.Mode == types.NodeModeFull { - bridgeName := nc.Gateway.GetGatewayIface() - // Configure route for svc towards shared gw bridge - // Have to have the route to bridge for multi-NIC mode, where the default gateway may go to a non-OVS interface - if err := configureSvcRouteViaBridge(nc.routeManager, bridgeName); err != nil { + // Configure route for svc towards shared gateway interface + if err := configureSvcRouteViaInterface(nc.routeManager, nc.Gateway.GetGatewayIface(), DummyNextHopIPs()); err != nil { return err } } @@ -1655,10 +1653,6 @@ func getPMTUDKey(nodeName string) string { return fmt.Sprintf("%s_pmtud", nodeName) } -func configureSvcRouteViaBridge(routeManager *routemanager.Controller, bridge string) error { - return configureSvcRouteViaInterface(routeManager, bridge, DummyNextHopIPs()) -} - // DummyNextHopIPs returns the fake next hops used for service traffic routing. 
// It is used in: // - br-ex, where we don't really care about the next hop GW in use as traffic is always routed to OVN diff --git a/go-controller/pkg/node/gateway.go b/go-controller/pkg/node/gateway.go index 38a7ad2910..db1bcae279 100644 --- a/go-controller/pkg/node/gateway.go +++ b/go-controller/pkg/node/gateway.go @@ -465,7 +465,7 @@ func (g *gateway) GetGatewayBridgeIface() string { } func (g *gateway) GetGatewayIface() string { - return g.openflowManager.defaultBridge.getGatewayIface() + return g.openflowManager.defaultBridge.gwIface } // getMaxFrameLength returns the maximum frame size (ignoring VLAN header) that a gateway can handle @@ -556,19 +556,11 @@ type bridgeConfiguration struct { nextHops []net.IP } -func (b *bridgeConfiguration) getGatewayIface() string { - // If gwIface is set, then accelerated GW interface is present and we use it. If else use external bridge instead. - if b.gwIface != "" { - return b.gwIface - } - return b.bridgeName -} - // updateInterfaceIPAddresses sets and returns the bridge's current ips func (b *bridgeConfiguration) updateInterfaceIPAddresses(node *corev1.Node) ([]*net.IPNet, error) { b.Lock() defer b.Unlock() - ifAddrs, err := getNetworkInterfaceIPAddresses(b.getGatewayIface()) + ifAddrs, err := getNetworkInterfaceIPAddresses(b.gwIface) if err != nil { return nil, err } diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index 7556aa54f7..a8d3b81aa7 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -2476,15 +2476,15 @@ func newGateway( // Delete stale masquerade resources if there are any. This is to make sure that there // are no Linux resources with IP from old masquerade subnet when masquerade subnet // gets changed as part of day2 operation. 
- if err := deleteStaleMasqueradeResources(gwBridge.getGatewayIface(), nodeName, watchFactory); err != nil { + if err := deleteStaleMasqueradeResources(gwBridge.gwIface, nodeName, watchFactory); err != nil { return fmt.Errorf("failed to remove stale masquerade resources: %w", err) } - if err := setNodeMasqueradeIPOnExtBridge(gwBridge.getGatewayIface()); err != nil { - return fmt.Errorf("failed to set the node masquerade IP on the ext bridge %s: %v", gwBridge.getGatewayIface(), err) + if err := setNodeMasqueradeIPOnExtBridge(gwBridge.gwIface); err != nil { + return fmt.Errorf("failed to set the node masquerade IP on the ext bridge %s: %v", gwBridge.gwIface, err) } - if err := addMasqueradeRoute(routeManager, gwBridge.getGatewayIface(), nodeName, gwIPs, watchFactory); err != nil { + if err := addMasqueradeRoute(routeManager, gwBridge.gwIface, nodeName, gwIPs, watchFactory); err != nil { return fmt.Errorf("failed to set the node masquerade route to OVN: %v", err) } @@ -2531,7 +2531,7 @@ func newGateway( gw.openflowManager.requestFlowSync() } - if err := addHostMACBindings(gwBridge.getGatewayIface()); err != nil { + if err := addHostMACBindings(gwBridge.gwIface); err != nil { return fmt.Errorf("failed to add MAC bindings for service routing: %w", err) } @@ -2593,11 +2593,11 @@ func newNodePortWatcher( subnets = append(subnets, config.Kubernetes.ServiceCIDRs...) 
if config.Gateway.DisableForwarding { if err := initExternalBridgeServiceForwardingRules(subnets); err != nil { - return nil, fmt.Errorf("failed to add accept rules in forwarding table for bridge %s: err %v", gwBridge.getGatewayIface(), err) + return nil, fmt.Errorf("failed to add accept rules in forwarding table for bridge %s: err %v", gwBridge.gwIface, err) } } else { if err := delExternalBridgeServiceForwardingRules(subnets); err != nil { - return nil, fmt.Errorf("failed to delete accept rules in forwarding table for bridge %s: err %v", gwBridge.getGatewayIface(), err) + return nil, fmt.Errorf("failed to delete accept rules in forwarding table for bridge %s: err %v", gwBridge.gwIface, err) } } @@ -2615,7 +2615,7 @@ func newNodePortWatcher( gatewayIPv4: gatewayIPv4, gatewayIPv6: gatewayIPv6, ofportPhys: ofportPhys, - gwBridge: gwBridge.getGatewayIface(), + gwBridge: gwBridge.bridgeName, serviceInfo: make(map[ktypes.NamespacedName]*serviceConfig), nodeIPManager: nodeIPManager, ofm: ofm, diff --git a/go-controller/pkg/node/gateway_udn.go b/go-controller/pkg/node/gateway_udn.go index 3e2ff143c9..d991fc74eb 100644 --- a/go-controller/pkg/node/gateway_udn.go +++ b/go-controller/pkg/node/gateway_udn.go @@ -268,7 +268,7 @@ func NewUserDefinedNetworkGateway(netInfo util.NetInfo, node *corev1.Node, nodeL if gw.openflowManager == nil { return nil, fmt.Errorf("openflow manager has not been provided for network: %s", netInfo.GetNetworkName()) } - intfName := gw.openflowManager.defaultBridge.getGatewayIface() + intfName := gw.openflowManager.defaultBridge.gwIface link, err := util.GetNetLinkOps().LinkByName(intfName) if err != nil { return nil, fmt.Errorf("unable to get link for %s, error: %v", intfName, err) diff --git a/go-controller/pkg/node/gateway_udn_test.go b/go-controller/pkg/node/gateway_udn_test.go index 1c02ffbbdb..4d73529c86 100644 --- a/go-controller/pkg/node/gateway_udn_test.go +++ b/go-controller/pkg/node/gateway_udn_test.go @@ -343,7 +343,7 @@ func 
checkUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeUDNConfigurat func getDummyOpenflowManager() *openflowManager { gwBridge := &bridgeConfiguration{ - gwIface: "", + gwIface: "breth0", bridgeName: "breth0", } ofm := &openflowManager{ diff --git a/go-controller/pkg/node/node_ip_handler_linux_test.go b/go-controller/pkg/node/node_ip_handler_linux_test.go index d8ff6710d9..ee10bbfc41 100644 --- a/go-controller/pkg/node/node_ip_handler_linux_test.go +++ b/go-controller/pkg/node/node_ip_handler_linux_test.go @@ -401,7 +401,7 @@ func configureKubeOVNContext(nodeName string, useNetlink bool) *testCtx { mpmock := &nodemocks.ManagementPort{} mpmock.On("GetAddresses").Return([]*net.IPNet{tc.mgmtPortIP4, tc.mgmtPortIP6}) - fakeBridgeConfiguration := &bridgeConfiguration{bridgeName: "breth0"} + fakeBridgeConfiguration := &bridgeConfiguration{bridgeName: "breth0", gwIface: "breth0"} k := &kube.Kube{KClient: tc.fakeClient} tc.ipManager = newAddressManagerInternal(nodeName, k, mpmock, tc.watchFactory, fakeBridgeConfiguration, useNetlink) From c55d657709e84dd96830108d9ed22fce4539f547 Mon Sep 17 00:00:00 2001 From: Patryk Diak Date: Thu, 8 May 2025 10:12:41 +0200 Subject: [PATCH 098/181] OKEP: Pre-assigned network configuration for primary user defined networks workloads Co-authored-by: Miguel Duarte Barroso Signed-off-by: Patryk Diak --- .../okep-5233-preconfigured-udn-addresses.md | 514 ++++++++++++++++++ mkdocs.yml | 1 + 2 files changed, 515 insertions(+) create mode 100644 docs/okeps/okep-5233-preconfigured-udn-addresses.md diff --git a/docs/okeps/okep-5233-preconfigured-udn-addresses.md b/docs/okeps/okep-5233-preconfigured-udn-addresses.md new file mode 100644 index 0000000000..332fec24fb --- /dev/null +++ b/docs/okeps/okep-5233-preconfigured-udn-addresses.md @@ -0,0 +1,514 @@ +# OKEP-5233: Predefined addresses for primary user defined networks workloads + +* Issue: [#5233](https://github.com/ovn-org/ovn-kubernetes/issues/5233) + +## Problem Statement + +Migrating 
legacy workloads with predefined network configurations (IP, MAC, default gateway) +to OVN-Kubernetes is currently not possible. There is a need to import these workloads, preserving +their network configuration, while also enabling non-NATed traffic to better integrate with +existing infrastructures. + +## Goals + +* Enable pods on primary Layer2 User Defined Network (UDN) and Cluster UDN to use a predefined static network + configuration including IP address, MAC address, and default gateway. +* Ensure it is possible to enable non-NATed traffic for pods with predefined static network configuration + by exposing the Layer2 Cluster UDN through BGP (see [Risks, Known Limitations and Mitigations](#risks-known-limitations-and-mitigations) for current BGP support limitations). + +## Non-Goals + +* Modifying the default gateway and management IPs of a primary UDN after it was created. +* Modifying a pod's network configuration after the pod was created. +* Non-NATed traffic support in secondary networks. +* Predefined IP/MAC addresses support for pods in Layer3 UDNs. +* Configurable default gateway and infrastructure addresses in Layer3 UDNs. +* Predefined IP/MAC addresses support for pods in Localnet UDNs. +* Configuring default gateway and infrastructure addresses in Layer2 (Cluster) UDNs that do not belong to the networks subnets. +* No-downtime workload migration. + +## Introduction + +Legacy workloads, particularly virtual machines, are often set up with static +network configurations. When migrating to OVN-Kubernetes UDNs, +it should be possible to integrate these gradually to prevent disruptions. + +Currently, OVN-Kubernetes allocates IP addresses dynamically and it generates the MAC +addresses from it. It sets the pod's default gateway to the first usable IP address of its subnet. +For primary UDNs, it additionally reserves the second usable IP address for the internal management port which +excludes it from being available for workloads. 
+ +## User-Stories/Use-Cases + +* As a user, I want to define a custom default gateway IP for a new primary Layer2 UDN +so that my migrated workloads can maintain their existing network configuration without disruption. + +* As a user, I want the ability to configure a new primary Layer2 UDN with a custom management IP +address to prevent IP conflicts with the workloads I am importing. + +* As a user, I want to assign a predefined IP address and MAC address to a pod to ensure the +network identity of my imported workload is maintained. + +* As a user, I want to prevent OVN-Kubernetes from automatically assigning IP addresses that are +already in use by my existing infrastructure, so that I can migrate my services gradually without network conflicts. + +## Proposed Solution + +### Primary UDN configuration + +To support the migration of pre-configured workloads, the UDN and cluster UDN API has to +be enhanced. The aim is to provide control over the IP addresses that OVN-Kubernetes +consumes in the overlay network, this includes the default gateway and management IPs. +The proposed changes are specified in the [Layer2 User Defined Network API changes](#layer2-user-defined-network-api-changes) section. + +### Pod network identity + +OVN-Kubernetes currently supports configuring pods' secondary network interfaces through +the `k8s.v1.cni.cncf.io/networks` annotation, which contains a JSON array of +[NetworkSelectionElement](https://github.com/k8snetworkplumbingwg/network-attachment-definition-client/blob/e12bd55d48a1f798a1720218819063f5903b72e3/pkg/apis/k8s.cni.cncf.io/v1/types.go#L136-L171) +objects. Additionally, it is possible to modify the cluster's default network attachment by +setting the `v1.multus-cni.io/default-network` annotation to a singular NetworkSelectionElement +object. 
+ +To enable using predefined MAC and IP addresses on pods attached to a primary UDN, +the `v1.multus-cni.io/default-network` will be reused, as it is a well-known annotation for +configuring the pod's default network. The `k8s.v1.cni.cncf.io/networks` annotation is specific to +secondary networks and expects a list of networks, which does not fit well with primary UDNs. +With the proposed approach, the `k8s.ovn.org/primary-udn-ipamclaim` annotation, used to link a +pod with a matching claim, will be deprecated in favor of the `IPAMClaimReference` field in the +NetworkSelectionElement. When `IPAMClaimReference` is specified we will update its status to reflect +the result of the IP allocation, see [IPAMClaim API changes](#ipamclaim-api-changes). +OVN-Kubernetes will keep track of all allocated MAC and IP addresses to detect conflicts. +When a conflict is detected, OVN-Kubernetes will emit a Kubernetes event to the pod indicating +the specific conflict (IP or MAC address already in use) and prevent the pod from starting. + +```mermaid +%%{init: { 'sequence': {'messageAlign': 'left'} }}%% +sequenceDiagram +actor User +participant K8s_API_Server as "K8s API Server" +participant OVN_K_Controller as "OVN-Kubernetes" + +note over User, K8s_API_Server: Pre-Step: User defines UDN
(Optional) with custom Default Gateway / Management IP + +User->>K8s_API_Server: Create Pod with annotation:
'v1.multus-cni.io/default-network':
[{
name: 'default',
namespace: 'ovn-kubernetes',
ips: ['10.0.0.10'],
mac: '00:1A:2B:3C:4D:5E',
ipam-claim-reference: 'my-claim'
}] + + +K8s_API_Server->>OVN_K_Controller: Notify: New Pod Spec + +OVN_K_Controller->>OVN_K_Controller: Parse 'v1.multus-cni.io/default-network' annotation
Perform IP/MAC conflict check within UDN
(Verify requested IP/MAC are not in use) + + +alt "No IP/MAC Conflict" +opt "IPAMClaimReference is specified in annotation" +OVN_K_Controller->>K8s_API_Server: Update Status conditions and addresses of referenced IPAMClaim +end + +OVN_K_Controller->>OVN_K_Controller: Configure Pod's Primary Network Interface +note right of OVN_K_Controller: Pod provisioning succeeds +else "IP/MAC Conflict Detected in UDN" +opt "IPAMClaimReference is specified in annotation" +OVN_K_Controller->>K8s_API_Server: Update Status conditions of referenced IPAMClaim +end +OVN_K_Controller->>K8s_API_Server: Emit IP/MAC Conflict error event to the Pod +note right of OVN_K_Controller: Pod provisioning fails +end +``` + +### API Details + +#### Layer2 User Defined Network API changes + +Proposed API change adds `infrastructureSubnets` `reservedSubnets` and `defaultGatewayIPs` fields to the `Layer2Config` which is a part of both +the [UDN](https://github.com/ovn-kubernetes/ovn-kubernetes/blob/a3d0a2b238bef9b1399b3342228d75504afed18b/go-controller/pkg/crd/userdefinednetwork/v1/udn.go#L47) +and [cluster UDN](https://github.com/ovn-kubernetes/ovn-kubernetes/blob/a3d0a2b238bef9b1399b3342228d75504afed18b/go-controller/pkg/crd/userdefinednetwork/v1/cudn.go#L63) specs: + +```diff +// +kubebuilder:validation:XValidation:rule="has(self.ipam) && has(self.ipam.mode) && self.ipam.mode != 'Enabled' || has(self.subnets)", message="Subnets is required with ipam.mode is Enabled or unset" +// +kubebuilder:validation:XValidation:rule="!has(self.ipam) || !has(self.ipam.mode) || self.ipam.mode != 'Disabled' || !has(self.subnets)", message="Subnets must be unset when ipam.mode is Disabled" +// +kubebuilder:validation:XValidation:rule="!has(self.ipam) || !has(self.ipam.mode) || self.ipam.mode != 'Disabled' || self.role == 'Secondary'", message="Disabled ipam.mode is only supported for Secondary network" +// +kubebuilder:validation:XValidation:rule="!has(self.joinSubnets) || has(self.role) && self.role == 'Primary'", 
message="JoinSubnets is only supported for Primary network" +// +kubebuilder:validation:XValidation:rule="!has(self.subnets) || !has(self.mtu) || !self.subnets.exists_one(i, isCIDR(i) && cidr(i).ip().family() == 6) || self.mtu >= 1280", message="MTU should be greater than or equal to 1280 when IPv6 subnet is used" ++ // +kubebuilder:validation:XValidation:rule="!has(self.defaultGatewayIPs) || has(self.role) && self.role == 'Primary'", message="defaultGatewayIPs is only supported for Primary network" ++ // +kubebuilder:validation:XValidation:rule="!has(self.defaultGatewayIPs) || self.defaultGatewayIPs.all(ip, self.subnets.exists(subnet, cidr(subnet).containsIP(ip)))", message="defaultGatewayIPs must belong to one of the subnets specified in the subnets field" ++ // +kubebuilder:validation:XValidation:rule="!has(self.reservedSubnets) || has(self.reservedSubnets) && has(self.subnets)", message="reservedSubnets must be unset when subnets is unset" ++ // +kubebuilder:validation:XValidation:rule="!has(self.reservedSubnets) || self.reservedSubnets.all(e, self.subnets.exists(s, cidr(s).containsCIDR(cidr(e))))",message="reservedSubnets must be subnetworks of the networks specified in the subnets field",fieldPath=".reservedSubnets" ++ // +kubebuilder:validation:XValidation:rule="!has(self.infrastructureSubnets) || has(self.infrastructureSubnets) && has(self.subnets)", message="infrastructureSubnets must be unset when subnets is unset" ++ // +kubebuilder:validation:XValidation:rule="!has(self.infrastructureSubnets) || self.infrastructureSubnets.all(e, self.subnets.exists(s, cidr(s).containsCIDR(cidr(e))))",message="infrastructureSubnets must be subnetworks of the networks specified in the subnets field",fieldPath=".infrastructureSubnets" ++ // +kubebuilder:validation:XValidation:rule="!has(self.infrastructureSubnets) || !has(self.defaultGatewayIPs) || self.defaultGatewayIPs.all(ip, self.infrastructureSubnets.exists(subnet, cidr(subnet).containsIP(ip)))", 
message="defaultGatewayIPs have to belong to infrastructureSubnets" ++ // +kubebuilder:validation:XValidation:rule="!has(self.infrastructureSubnets) || !has(self.reservedSubnets) || self.infrastructureSubnets.all(infra, !self.reservedSubnets.exists(reserved, cidr(infra).containsCIDR(reserved) || cidr(reserved).containsCIDR(infra)))", message="infrastructureSubnets and reservedSubnets must not overlap" +type Layer2Config struct { + +// Role describes the network role in the pod. +// +// Allowed value is "Secondary". +// Secondary network is only assigned to pods that use `k8s.v1.cni.cncf.io/networks` annotation to select given network. +// +// +kubebuilder:validation:Enum=Primary;Secondary +// +kubebuilder:validation:Required +// +required +Role NetworkRole `json:"role"` + +// MTU is the maximum transmission unit for a network. +// MTU is optional, if not provided, the globally configured value in OVN-Kubernetes (defaults to 1400) is used for the network. +// +// +kubebuilder:validation:Minimum=576 +// +kubebuilder:validation:Maximum=65536 +// +optional +MTU int32 `json:"mtu,omitempty"` + +// Subnets are used for the pod network across the cluster. +// Dual-stack clusters may set 2 subnets (one for each IP family), otherwise only 1 subnet is allowed. +// +// The format should match standard CIDR notation (for example, "10.128.0.0/16"). +// This field must be omitted if `ipam.mode` is `Disabled`. +// +// +optional +Subnets DualStackCIDRs `json:"subnets,omitempty"` + ++ // reservedSubnets specifies a list of CIDRs reserved for static IP assignment, excluded from automatic allocation. ++ // reservedSubnets is optional. When omitted, all IP addresses in `subnets` are available for automatic assignment. ++ // IPs from these ranges can still be requested through static IP assignment in pod annotations. ++ // Each item should be in range of the specified CIDR(s) in `subnets`. ++ // The maximum number of entries allowed is 25. 
++ // The format should match standard CIDR notation (for example, "10.128.0.0/16"). ++ // This field must be omitted if `subnets` is unset or `ipam.mode` is `Disabled`. ++ // +optional ++ // +kubebuilder:validation:MinItems=1 ++ // +kubebuilder:validation:MaxItems=25 ++ ReservedSubnets []CIDR `json:"reservedSubnets,omitempty"` + +// JoinSubnets are used inside the OVN network topology. +// +// Dual-stack clusters may set 2 subnets (one for each IP family), otherwise only 1 subnet is allowed. +// This field is only allowed for "Primary" network. +// It is not recommended to set this field without explicit need and understanding of the OVN network topology. +// When omitted, the platform will choose a reasonable default which is subject to change over time. +// +// +optional +JoinSubnets DualStackCIDRs `json:"joinSubnets,omitempty"` + ++ // infrastructureSubnets specifies a list of internal CIDR ranges that OVN-Kubernetes will reserve for internal network infrastructure. ++ // Any IP addresses within these ranges cannot be assigned to workloads. ++ // When omitted, OVN-Kubernetes will automatically allocate IP addresses from `subnets` for its infrastructure needs. ++ // When `reservedSubnets` is also specified the CIDRs cannot overlap. ++ // When `defaultGatewayIPs` is also specified the default gateway IPs must belong to one of the CIDRs. ++ // Each item should be in range of the specified CIDR(s) in `subnets`. ++ // The maximum number of entries allowed is 10. ++ // The format should match standard CIDR notation (for example, "10.128.0.0/16"). ++ // This field must be omitted if `subnets` is unset or `ipam.mode` is `Disabled`. ++ // +optional ++ // +kubebuilder:validation:MinItems=1 ++ // +kubebuilder:validation:MaxItems=10 ++ InfrastructureSubnets []CIDR `json:"infrastructureSubnets,omitempty"` + ++ // defaultGatewayIPs specifies the default gateway IP used in the internal OVN topology. 
++ // ++ // Dual-stack clusters may set 2 IPs (one for each IP family), otherwise only 1 IP is allowed. ++ // This field is only allowed for "Primary" network. ++ // It is not recommended to set this field without explicit need and understanding of the OVN network topology. ++ // When omitted, an IP from network subnet is used. ++ // ++ // +optional ++ DefaultGatewayIPs DualStackIPs `json:"defaultGatewayIPs,omitempty"` + +// IPAM section contains IPAM-related configuration for the network. +// +optional +IPAM *IPAMConfig `json:"ipam,omitempty"` +} + +// +kubebuilder:validation:XValidation:rule="isIP(self)", message="IP is invalid" +type IP string + +// +kubebuilder:validation:MinItems=1 +// +kubebuilder:validation:MaxItems=2 +// +kubebuilder:validation:XValidation:rule="size(self) != 2 || !isIP(self[0]) || !isIP(self[1]) || ip(self[0]).family() != ip(self[1]).family()", message="When 2 IPs are set, they must be from different IP families" +type DualStackIPs []IP + +``` + +The API changes mentioned above will be carried to the `NetworkAttachmentDefinition` JSON spec. + +#### IPAMClaim API changes + +The following pull request is tracking the IPAMClaim API change that introduces the status conditions: + + +[IPAMClaim CRD doc](https://docs.google.com/document/d/1OQIJIrCtsYpR5O44w0hpoJ2TyKBz1Du-KhRT4RtrAjk) - `IPAM allocation on behalf of other entities` section + +### Usage Example + +A user migrating services wants to import a workload pod preserving it's original IP address. 
+Workload data: + +```yaml +IP: 192.168.100.205 +MAC: 00:1A:2B:3C:4D:5E +Default Gateway: 192.168.100.2 +``` + +```yaml +apiVersion: k8s.ovn.org/v1 +kind: ClusterUserDefinedNetwork +metadata: + name: network-l2 +spec: + topology: "Layer2" + layer2: + role: Primary + subnets: ["192.168.100.0/24"] + infrastructureSubnets: ["192.168.100.0/30"] # used for OVN-Kubernetes infrastructure + reservedSubnets: ["192.168.100.200/29"] # reserved for workloads that will require predefined addresses + defaultGatewayIPs: ["192.168.100.2"] +``` + +With this configuration, OVN-Kubernetes automatically assigns IPs from `.4-.199` and `.208-.254` for new workloads, while pods can request specific IPs from the reserved range: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: migrated-app + annotations: + v1.multus-cni.io/default-network: | + {"name": "default", "namespace": "ovn-kubernetes", "ips": ["192.168.100.205"], "mac": "00:1A:2B:3C:4D:5E", "ipam-claim-reference": "my-claim"} +spec: +``` + +### Implementation Details + +#### Configurability + +The changes outlined in this enhancement should be configurable. This means a configuration knob +is required to instruct OVN-Kubernetes on whether to process the annotation described in the +[Pod network identity](#pod-network-identity) section. The feature knob will be called `preconfigured-udn-addresses-enable`. + +#### NetworkSelectionElement annotation + +Currently, the `v1.multus-cni.io/default-network` annotation is only processed for the cluster default network. +This enhancement will extend this behavior, allowing it to be applied to pods created in the primary Layer2 UDN as well. 
+The annotation should only be processed for new pods, modifying it after the addresses were allocated won't +be reflected in the pods network configuration and this should be blocked through a +[Validating Admission Policy](https://kubernetes.io/docs/reference/access-authn-authz/validating-admission-policy/): + +```yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingAdmissionPolicy +metadata: + name: predefined-network-addresses +spec: + matchConstraints: + resourceRules: + - apiGroups: [""] + apiVersions: ["v1"] + operations: ["UPDATE"] + resources: ["pods"] + failurePolicy: Fail + validations: + - expression: "('v1.multus-cni.io/default-network' in oldObject.metadata.annotations) == ('v1.multus-cni.io/default-network' in object.metadata.annotations)" + message: "The 'v1.multus-cni.io/default-network' annotation cannot be changed after the pod was created" +``` + +The `NetworkSelectionElement` structure has an extensive list of fields, this enhancement +focuses only on the following: + +```cgo +type NetworkSelectionElement struct { + // Name contains the name of the Network object this element selects + Name string `json:"name"` + // Namespace contains the optional namespace that the network referenced + // by Name exists in + Namespace string `json:"namespace,omitempty"` + // IPRequest contains an optional requested IP addresses for this network + // attachment + IPRequest []string `json:"ips,omitempty"` + // MacRequest contains an optional requested MAC address for this + // network attachment + MacRequest string `json:"mac,omitempty"` + // IPAMClaimReference container the IPAMClaim name where the IPs for this + // attachment will be located. + IPAMClaimReference string `json:"ipam-claim-reference,omitempty"` +} +``` + +Any other field set in the struct will be ignored by OVN-Kubernetes. + +When using the `v1.multus-cni.io/default-network` annotation, Multus strictly requires its value to reference an +existing NAD. 
Multus then builds the CNI requests based on it. +This proposal introduces a static default NAD object applied to the cluster. This object will serve as a +stub to generate the CNI calls, preserving the current behavior: + +```yaml +apiVersion: k8s.cni.cncf.io/v1 +kind: NetworkAttachmentDefinition +metadata: + name: default + namespace: ovn-kubernetes +spec: + config: '{"cniVersion": "0.4.0", "name": "ovn-kubernetes", "type": "ovn-k8s-cni-overlay"}' +``` + +With this approach, users must configure the `Name` to `default` and the `Namespace` to `ovn-kubernetes`. +This configuration ensures Multus still references the default network while OVN-Kubernetes will internally use the +primary UDN to handle MAC/IP requests from the NSE. + +> The default NAD object specified above is already used when the default network is exposed through BGP as +part of the route advertisement feature. The proposal is to have it available all the time. + +With `k8s.ovn.org/primary-udn-ipamclaim` being deprecated in favor of the `IPAMClaimReference` field +in the `NetworkSelectionElement` we have to define the expected behavior. To avoid conflicting +settings when `v1.multus-cni.io/default-network` is set the `k8s.ovn.org/primary-udn-ipamclaim` is +going to be ignored, it will be reflected in the opposite scenario for backwards compatibility +with a plan to remove it in a future release. +Deprecation plan for the `k8s.ovn.org/primary-udn-ipamclaim` annotation: + +* release-N - emit a warning event stating that the annotation is deprecated and will be removed in a future release. +* release-N+1 - fail to configure pods with the annotation set. +* release-N+2 - remove any code handling the annotation, effectively ignoring it. + +Note that `GatewayRequest` is not listed, the default gateway is an attribute of the network is not going to be +configurable per pod. 
+
+### Address allocation
+
+OVN-Kubernetes currently [generates](https://github.com/ovn-kubernetes/ovn-kubernetes/blob/3ef29b9a32b04b7917a0afd6b0e9651d17242ed7/go-controller/pkg/util/net.go#L100-L113)
+the overlay MAC addresses from the IPs:
+
+* IPv4: It takes the four octets of the address (e.g. `AA.BB.CC.DD`) and uses them to
+create the MAC address with a constant prefix (e.g. `0A:58:AA:BB:CC:DD`).
+* IPv6: Computes a SHA256 checksum from the IPv6 string and uses the first four bytes for the MAC
+address with the `0A:58` constant prefix (e.g. `0A:58:SHA[0]:SHA[1]:SHA[2]:SHA[3]`).
+
+Although unlikely, we need to implement logic that ensures that the MAC address requested through
+the `NetworkSelectionElement` does not conflict with any other configured address on the UDN
+(including addresses consumed by OVN-Kubernetes).
+
+OVN-Kubernetes already persists the IP and MAC addresses in the `k8s.ovn.org/pod-networks` annotation for each pod:
+
+```cgo
+// PodAnnotation describes the assigned network details for a single pod network. (The
+// actual annotation may include the equivalent of multiple PodAnnotations.)
+type PodAnnotation struct {
+// IPs are the pod's assigned IP addresses/prefixes
+IPs []*net.IPNet
+// MAC is the pod's assigned MAC address
+MAC net.HardwareAddr
+// Gateways are the pod's gateway IP addresses; note that there may be
+// fewer Gateways than IPs.
+Gateways []net.IP
+
+// GatewayIPv6LLA is the IPv6 Link Local Address for the pod's gateway, that is the address
+// that will be set as gateway with router advertisements
+// generated from the gateway router from the node where the pod is running.
+GatewayIPv6LLA net.IP
+
+// Routes are additional routes to add to the pod's network namespace
+Routes []PodRoute
+
+// TunnelID assigned to each pod for layer2 secondary networks
+TunnelID int
+
+// Role defines what role this network plays for the given pod.
+// Expected values are: +// (1) "primary" if this network is the primary network of the pod. +// The "default" network is the primary network of any pod usually +// unless user-defined-network-segmentation feature has been activated. +// If network segmentation feature is enabled then any user defined +// network can be the primary network of the pod. +// (2) "secondary" if this network is the secondary network of the pod. +// Only user defined networks can be secondary networks for a pod. +// (3) "infrastructure-locked" is applicable only to "default" network if +// a user defined network is the "primary" network for this pod. This +// signifies the "default" network is only used for probing and +// is otherwise locked for all intents and purposes. +// At a given time a pod can have only 1 network with role:"primary" +Role string +} +``` + +This annotation will be used to build an initial cache of allocated addresses at startup, which will then be updated +dynamically at runtime and used for conflict detection. +A similar approach is required for IP address conflict detection. +When a conflict is detected the pod should not start and an appropriate event should be emitted. + +When the `NetworkSelectionElement` contains an `IPAMClaimReference` the referenced IPAMClaim should +reflect the IP allocation status including error reporting through the newly introduced +`Conditions` status field. +In the opposite scenario where the `NetworkSelectionElement` does not specify the `IPAMClaimReference` +the IP allocation is not persisted when the pod is removed. + +### Testing Details + +The following scenarios should be covered in testing: + +* VM workloads import into OVN-Kubernetes with no changes to the instances network configuration. +* Imported VM workloads can live-migrate to another node without any additional traffic disruption. +* 'v1.multus-cni.io/default-network' cannot be changed after the pod was created. 
+* It should be possible to configure the pod's MAC or the IP address without configuring the other.
+* When `reservedSubnets` is configured automatic IP allocation should not use addresses specified in it.
+* It should be possible to configure the pod's IP address using the 'v1.multus-cni.io/default-network'
+even if the address is a part of the `reservedSubnets`.
+* Requesting an IP address and default gateway IP that is not a part of the network's subnet should fail.
+* Detect MAC and IP address conflicts between the requested addresses for newly created pods and the addresses that
+are already allocated in the network.
+* After configuring custom default gateway and management addresses on a Layer2 UDN the previous default
+IPs can be consumed by workloads (e.g. for 10.0.0.0/16 network create pods with 10.0.0.1 and 10.0.0.2 addresses).
+* Modifying the default gateway and management addresses on a Layer2 UDN should not be possible after the network
+was created.
+
+The scenarios mentioned above have to cover both IPv4 and IPv6 IP families.
+
+### Documentation Details
+
+## Risks, Known Limitations and Mitigations
+
+* Modifying the 'v1.multus-cni.io/default-network' value after the pod was created could have unpredictable
+consequences.
+To mitigate this, introduce a Validating Admission Policy described in [Implementation Details](#implementation-details).
+
+* By allowing users to specify the IP and MAC addresses for the pods there is a risk of conflicts.
+To mitigate this, OVN-Kubernetes will check that the requested addresses are not currently used in the UDN.
+There is still a risk that the user picks an address that's consumed by something outside of the UDN but that's beyond
+what OVN-Kubernetes controls and can check.
+
+* The dynamic, per-node subnet allocation in Layer3 UDNs, where each node has a unique default gateway and
+management IP, makes user-specified UDN gateway/management IPs and static pod IP/MAC assignments very complex. This
+enhancement will not support Layer3 UDNs.
+
+* BGP support today is limited to cluster UDNs; to ensure non-NATed traffic for pods with predefined addresses
+the user has to use a cluster UDN to configure the network. This is a limitation unrelated to this enhancement
+and it is possible it will be solved in the future.
+
+* By consuming the 'v1.multus-cni.io/default-network' annotation for altering the primary UDN's pod configuration the
+user won't be able to use it for configuring the cluster default network attachment. This is acceptable as there is
+currently no support for modifying the cluster default network through this annotation while using primary UDNs.
+If there is a requirement in the future another mechanism can be considered.
+
+* OVN-Kubernetes computes MAC addresses from pod IPs rather than allocating them, which creates potential
+MAC address conflicts in a scenario where a MAC address previously used by a stopped VM gets consumed by
+OVN-Kubernetes for a dynamically allocated IP. To mitigate these conflicts, users will have to use a different
+MAC address and recreate the workload. For importing workloads that already use this prefix, a future enhancement
+could add a field to the Layer2 spec allowing users to specify a custom MAC prefix for the UDN.
+
+## OVN Kubernetes Version Skew
+
+## Alternatives
+
+* Instead of the [Pod network identity](#pod-network-identity) approach, we could expand the
+IPAMClaim API. It currently lacks IP request capabilities, and using IPAMClaim for MAC addresses
+is confusing. Introducing a new API would mean deprecating the IPAMClaim, while managing
+upgrades and supporting both solutions for a period of time. This requires significant effort, which
+is not feasible at this time.
+ +* As described in the [NetworkSelectionElement annotation](#networkselectionelement-annotation) section, using the +`v1.multus-cni.io/default-network` annotation means Multus strictly requires this annotation's value to reference an +existing NAD. An alternative to the proposed approach would be to reference the NAD that defines the primary network. +It was discarded as it would require OVN-Kubernetes to modify the CNI handling logic because multus +would target the CNI requests towards the custom network. Additionally it would require users to determine the exact +NAD name and namespace for every primary UDN pod needing custom MAC, IP, or IPAMClaim. + +## References + +* [IPAMClaim CRD doc](https://docs.google.com/document/d/1OQIJIrCtsYpR5O44w0hpoJ2TyKBz1Du-KhRT4RtrAjk) - `IPAM allocation on behalf of other entities` section + +* IPAMClaim status conditions pull request: diff --git a/mkdocs.yml b/mkdocs.yml index 9fd08b2c08..e5cd4a3dd6 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -148,5 +148,6 @@ nav: - Localnet API: okeps/okep-5085-localnet-api.md - Network QoS: okeps/okep-4380-network-qos.md - User Defined Networks: okeps/okep-5193-user-defined-networks.md + - Preconfigured UDN Addresses: okeps/okep-5233-preconfigured-udn-addresses.md - Blog: - blog/index.md From 261078099929bc7878d691adf62af9ce9af04f63 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Tue, 15 Jul 2025 18:29:08 -0400 Subject: [PATCH 099/181] *.sh: use /usr/bin/env to locate bash On some unconventional systems like NixOS, bash is not installed in /bin/bash and should instead be located via `env`. (Only `/bin/sh` is guaranteed to be present on a POSIX system under `/bin`.) 
Signed-off-by: Ihar Hrachyshka --- dist/images/daemonset.sh | 2 +- dist/images/ovn-config.sh | 2 +- dist/images/ovn-run.sh | 2 +- dist/images/ovndb-raft-functions.sh | 2 +- dist/images/ovnkube.sh | 2 +- dist/images/push_manifest.sh | 2 +- dist/install-ovn-k8s.sh | 2 +- go-controller/hack/build-go.sh | 2 +- go-controller/hack/init.sh | 2 +- go-controller/hack/regenerate_vendor_mocks.sh | 2 +- go-controller/hack/test-go.sh | 2 +- go-controller/hack/verify-go-mod-vendor.sh | 2 +- go-controller/hack/verify-gofmt.sh | 2 +- 13 files changed, 13 insertions(+), 13 deletions(-) diff --git a/dist/images/daemonset.sh b/dist/images/daemonset.sh index 7c3daedee9..0613a37238 100755 --- a/dist/images/daemonset.sh +++ b/dist/images/daemonset.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash #set -x #Always exit on errors diff --git a/dist/images/ovn-config.sh b/dist/images/ovn-config.sh index 69e2c6471c..42e0e1253a 100755 --- a/dist/images/ovn-config.sh +++ b/dist/images/ovn-config.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # run on master to configure ovn-kubernetes # The /etc/openvswitch/ovn_k8s.conf and /etc/sysconfig/ovn-kubernetes diff --git a/dist/images/ovn-run.sh b/dist/images/ovn-run.sh index 9f5acfcdca..d684547434 100755 --- a/dist/images/ovn-run.sh +++ b/dist/images/ovn-run.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # run the ovs-vswitchd daemon from a container diff --git a/dist/images/ovndb-raft-functions.sh b/dist/images/ovndb-raft-functions.sh index 4d6e124f2d..8737ca3b50 100644 --- a/dist/images/ovndb-raft-functions.sh +++ b/dist/images/ovndb-raft-functions.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash #set -euo pipefail verify-ovsdb-raft() { diff --git a/dist/images/ovnkube.sh b/dist/images/ovnkube.sh index 32d3347cd3..d8b8869108 100755 --- a/dist/images/ovnkube.sh +++ b/dist/images/ovnkube.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash #set -euo pipefail # Enable verbose shell output if OVNKUBE_SH_VERBOSE is set to 'true' diff 
--git a/dist/images/push_manifest.sh b/dist/images/push_manifest.sh index f82531df8d..f42c8c30f9 100755 --- a/dist/images/push_manifest.sh +++ b/dist/images/push_manifest.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Currently supported platforms of multi-arch images are: amd64 arm64 LINUX_ARCH=(amd64 arm64) diff --git a/dist/install-ovn-k8s.sh b/dist/install-ovn-k8s.sh index 7ffdc0ac0e..d4b72e8e39 100755 --- a/dist/install-ovn-k8s.sh +++ b/dist/install-ovn-k8s.sh @@ -1,4 +1,4 @@ -#!/bin/bash -ex +#!/usr/bin/env bash -ex # shellcheck disable=SC2016 SCRIPTS_DIR=$(dirname "${BASH_SOURCE[0]}") diff --git a/go-controller/hack/build-go.sh b/go-controller/hack/build-go.sh index 17b963adea..13a12e0bf9 100755 --- a/go-controller/hack/build-go.sh +++ b/go-controller/hack/build-go.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e GO=${GO:-go} diff --git a/go-controller/hack/init.sh b/go-controller/hack/init.sh index 69dcb8f73e..98a210ea71 100755 --- a/go-controller/hack/init.sh +++ b/go-controller/hack/init.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash OUT_DIR=${OUT_DIR:-_output} diff --git a/go-controller/hack/regenerate_vendor_mocks.sh b/go-controller/hack/regenerate_vendor_mocks.sh index a94d4481cd..c1e94c89e7 100755 --- a/go-controller/hack/regenerate_vendor_mocks.sh +++ b/go-controller/hack/regenerate_vendor_mocks.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash workdir=$(cd ../ && pwd) substitute_string='pkg/testing/mocks' diff --git a/go-controller/hack/test-go.sh b/go-controller/hack/test-go.sh index b41bdc8817..f2df39f672 100755 --- a/go-controller/hack/test-go.sh +++ b/go-controller/hack/test-go.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e source "$(dirname "${BASH_SOURCE}")/init.sh" diff --git a/go-controller/hack/verify-go-mod-vendor.sh b/go-controller/hack/verify-go-mod-vendor.sh index 39ce9104bc..fd865e965c 100755 --- a/go-controller/hack/verify-go-mod-vendor.sh +++ b/go-controller/hack/verify-go-mod-vendor.sh @@ 
-1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -o errexit # Nozero exit code of any of the commands below will fail the test. set -o nounset set -o pipefail diff --git a/go-controller/hack/verify-gofmt.sh b/go-controller/hack/verify-gofmt.sh index de5f65452b..d2c73e47d3 100755 --- a/go-controller/hack/verify-gofmt.sh +++ b/go-controller/hack/verify-gofmt.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -o errexit set -o nounset From 1c559bb923d4d85eb1c8ee5ef44ae0c106107465 Mon Sep 17 00:00:00 2001 From: Yun Zhou Date: Tue, 15 Jul 2025 13:32:53 -0700 Subject: [PATCH 100/181] fix ovspinning test error in our cicd, sometimes we noticed ovspinning unit testing error: === RUN TestAlignCPUAffinity I0715 18:20:46.441374 13095 ovspinning_linux_test.go:65] Test CPU Affinity [1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] I0715 18:20:46.441595 13095 ovspinning_linux.go:46] Starting OVS daemon CPU pinning I0715 18:20:46.464072 13095 ovspinning_linux.go:196] Setting CPU affinity of PID(13108) (ntasks=15) to 0, was 0-7 I0715 18:20:46.465325 13095 ovspinning_linux.go:196] Setting CPU affinity of PID(13107) (ntasks=1) to 0, was 0-7 I0715 18:20:46.484089 13095 ovspinning_linux_test.go:65] Test CPU Affinity [2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] I0715 18:20:46.502727 13095 ovspinning_linux.go:196] Setting CPU affinity of PID(13108) (ntasks=15) to 1, was 0 I0715 18:20:46.502910 13095 ovspinning_linux.go:196] Setting CPU affinity of PID(13107) (ntasks=1) to 1, was 0 ... 
I0715 18:20:46.602398 13095 ovspinning_linux.go:196] Setting CPU affinity of PID(13108) (ntasks=15) to 5, was 4 I0715 18:20:46.602929 13095 ovspinning_linux.go:196] Setting CPU affinity of PID(13107) (ntasks=1) to 5, was 4 I0715 18:20:46.626233 13095 ovspinning_linux_test.go:65] Test CPU Affinity [40 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] I0715 18:20:46.642720 13095 ovspinning_linux.go:196] Setting CPU affinity of PID(13108) (ntasks=15) to 6, was 5 ovspinning_linux_test.go:67: Error Trace: /builds/sdn/ovn-kubernetes/go-controller/pkg/node/ovspinning/ovspinning_linux_test.go:251 /builds/sdn/ovn-kubernetes/go-controller/pkg/node/ovspinning/ovspinning_linux_test.go:67 Error: Not equal: expected: unix.CPUSet{0x40, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} actual : unix.CPUSet{0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} Diff: --- Expected +++ Actual @@ -1,3 +1,3 @@ (unix.CPUSet) (len=16) { - (unix.cpuMask) 64, + (unix.cpuMask) 32, (unix.cpuMask) 0, Test: TestAlignCPUAffinity Messages: task[13219] of process[13108] Expected CPUSet [40 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] != Actual CPUSet [20 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] Signed-off-by: Yun Zhou --- .../pkg/node/ovspinning/ovspinning_linux_test.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/go-controller/pkg/node/ovspinning/ovspinning_linux_test.go b/go-controller/pkg/node/ovspinning/ovspinning_linux_test.go index 634878d55c..3d9606079f 100644 --- a/go-controller/pkg/node/ovspinning/ovspinning_linux_test.go +++ b/go-controller/pkg/node/ovspinning/ovspinning_linux_test.go @@ -246,10 +246,12 @@ func assertPIDHasSchedAffinity(t *testing.T, pid int, expectedCPUSet unix.CPUSet require.NoError(t, err) for _, task := range tasks { - err := unix.SchedGetaffinity(task, &actual) - require.NoError(t, err) - assert.Equal(t, expectedCPUSet, actual, - "task[%d] of process[%d] Expected CPUSet %0x != Actual CPUSet %0x", task, pid, expectedCPUSet, 
actual) + assert.Eventually(t, func() bool { + err := unix.SchedGetaffinity(task, &actual) + assert.NoError(t, err) + + return actual == expectedCPUSet + }, time.Second, 10*time.Millisecond, "task[%d] of process[%d] Expected CPUSet %0x != Actual CPUSet %0x", task, pid, expectedCPUSet, actual) } } From f09eca1cfaa23a691141b626fea8f1df498769a6 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Thu, 16 Jan 2025 11:38:00 +0100 Subject: [PATCH 101/181] Revert "Add option to disable udn-host-isolation." This reverts commit d87f4800b5593f3de5dce7467e68bea109b1ad85. Signed-off-by: Nadia Pinaeva --- .github/workflows/test.yml | 1 - dist/images/ovnkube.sh | 2 - go-controller/pkg/config/config.go | 27 ++--- .../node/default_node_network_controller.go | 2 +- test/e2e/network_segmentation.go | 98 +++++++++---------- test/e2e/util.go | 5 - 6 files changed, 56 insertions(+), 79 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 1782c16b65..480a6345b8 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -495,7 +495,6 @@ jobs: OVN_SECOND_BRIDGE: "${{ matrix.second-bridge == '2br' }}" ENABLE_MULTI_NET: "${{ matrix.target == 'multi-homing' || matrix.target == 'kv-live-migration' || matrix.target == 'network-segmentation' || matrix.target == 'tools' || matrix.target == 'multi-homing-helm' || matrix.target == 'traffic-flow-test-only' || matrix.routeadvertisements != '' }}" ENABLE_NETWORK_SEGMENTATION: "${{ matrix.target == 'network-segmentation' || matrix.network-segmentation == 'enable-network-segmentation' }}" - DISABLE_UDN_HOST_ISOLATION: "true" PLATFORM_IPV4_SUPPORT: "${{ matrix.ipfamily == 'IPv4' || matrix.ipfamily == 'dualstack' }}" PLATFORM_IPV6_SUPPORT: "${{ matrix.ipfamily == 'IPv6' || matrix.ipfamily == 'dualstack' }}" KIND_INSTALL_KUBEVIRT: "${{ matrix.target == 'kv-live-migration' }}" diff --git a/dist/images/ovnkube.sh b/dist/images/ovnkube.sh index 32d3347cd3..fc5ab6a9b6 100755 --- a/dist/images/ovnkube.sh +++ 
b/dist/images/ovnkube.sh @@ -2162,7 +2162,6 @@ ovnkube-controller-with-node() { --nodeport \ --ovn-metrics-bind-address ${ovn_metrics_bind_address} \ --pidfile ${OVN_RUNDIR}/ovnkube-controller-with-node.pid \ - --disable-udn-host-isolation \ --zone ${ovn_zone} & wait_for_event attempts=3 process_ready ovnkube-controller-with-node @@ -2814,7 +2813,6 @@ ovn-node() { --nodeport \ --ovn-metrics-bind-address ${ovn_metrics_bind_address} \ --pidfile ${OVN_RUNDIR}/ovnkube.pid \ - --disable-udn-host-isolation \ --zone ${ovn_zone} & wait_for_event attempts=3 process_ready ovnkube diff --git a/go-controller/pkg/config/config.go b/go-controller/pkg/config/config.go index 297f18b55f..89757b864b 100644 --- a/go-controller/pkg/config/config.go +++ b/go-controller/pkg/config/config.go @@ -425,18 +425,15 @@ type OVNKubernetesFeatureConfig struct { EnableNetworkSegmentation bool `gcfg:"enable-network-segmentation"` EnablePreconfiguredUDNAddresses bool `gcfg:"enable-preconfigured-udn-addresses"` EnableRouteAdvertisements bool `gcfg:"enable-route-advertisements"` - // This feature requires a kernel fix https://github.com/torvalds/linux/commit/7f3287db654395f9c5ddd246325ff7889f550286 - // to work on a kind cluster. Flag allows to disable it for current CI, will be turned on when github runners have this fix. 
- DisableUDNHostIsolation bool `gcfg:"disable-udn-host-isolation"` - EnableMultiNetworkPolicy bool `gcfg:"enable-multi-networkpolicy"` - EnableStatelessNetPol bool `gcfg:"enable-stateless-netpol"` - EnableInterconnect bool `gcfg:"enable-interconnect"` - EnableMultiExternalGateway bool `gcfg:"enable-multi-external-gateway"` - EnablePersistentIPs bool `gcfg:"enable-persistent-ips"` - EnableDNSNameResolver bool `gcfg:"enable-dns-name-resolver"` - EnableServiceTemplateSupport bool `gcfg:"enable-svc-template-support"` - EnableObservability bool `gcfg:"enable-observability"` - EnableNetworkQoS bool `gcfg:"enable-network-qos"` + EnableMultiNetworkPolicy bool `gcfg:"enable-multi-networkpolicy"` + EnableStatelessNetPol bool `gcfg:"enable-stateless-netpol"` + EnableInterconnect bool `gcfg:"enable-interconnect"` + EnableMultiExternalGateway bool `gcfg:"enable-multi-external-gateway"` + EnablePersistentIPs bool `gcfg:"enable-persistent-ips"` + EnableDNSNameResolver bool `gcfg:"enable-dns-name-resolver"` + EnableServiceTemplateSupport bool `gcfg:"enable-svc-template-support"` + EnableObservability bool `gcfg:"enable-observability"` + EnableNetworkQoS bool `gcfg:"enable-network-qos"` } // GatewayMode holds the node gateway mode @@ -1087,12 +1084,6 @@ var OVNK8sFeatureFlags = []cli.Flag{ Destination: &cliConfig.OVNKubernetesFeature.EnableMultiNetworkPolicy, Value: OVNKubernetesFeature.EnableMultiNetworkPolicy, }, - &cli.BoolFlag{ - Name: "disable-udn-host-isolation", - Usage: "Configure to disable UDN host isolation with ovn-kubernetes.", - Destination: &cliConfig.OVNKubernetesFeature.DisableUDNHostIsolation, - Value: OVNKubernetesFeature.DisableUDNHostIsolation, - }, &cli.BoolFlag{ Name: "enable-network-segmentation", Usage: "Configure to use network segmentation feature with ovn-kubernetes.", diff --git a/go-controller/pkg/node/default_node_network_controller.go b/go-controller/pkg/node/default_node_network_controller.go index 7a75c36984..a27a611688 100644 --- 
a/go-controller/pkg/node/default_node_network_controller.go +++ b/go-controller/pkg/node/default_node_network_controller.go @@ -150,7 +150,7 @@ func newDefaultNodeNetworkController(cnnci *CommonNodeNetworkControllerInfo, sto routeManager: routeManager, ovsClient: ovsClient, } - if util.IsNetworkSegmentationSupportEnabled() && !config.OVNKubernetesFeature.DisableUDNHostIsolation { + if util.IsNetworkSegmentationSupportEnabled() { c.udnHostIsolationManager = NewUDNHostIsolationManager(config.IPv4Mode, config.IPv6Mode, cnnci.watchFactory.PodCoreInformer(), cnnci.name, cnnci.recorder) } diff --git a/test/e2e/network_segmentation.go b/test/e2e/network_segmentation.go index 83fc059678..43d7d9fb5a 100644 --- a/test/e2e/network_segmentation.go +++ b/test/e2e/network_segmentation.go @@ -385,52 +385,50 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { }, 10*time.Second, 1*time.Second).Should(BeTrue()) Expect(udnPod.Status.ContainerStatuses[0].RestartCount).To(Equal(int32(0))) - if !isUDNHostIsolationDisabled() { - By("checking default network hostNetwork pod and non-kubelet host process can't reach the UDN pod") - hostNetPod, err := createPod(f, "host-net-pod", nodeName, - defaultNetNamespace, []string{}, nil, func(pod *v1.Pod) { - pod.Spec.HostNetwork = true - }) - Expect(err).NotTo(HaveOccurred()) + By("checking default network hostNetwork pod and non-kubelet host process can't reach the UDN pod") + hostNetPod, err := createPod(f, "host-net-pod", nodeName, + defaultNetNamespace, []string{}, nil, func(pod *v1.Pod) { + pod.Spec.HostNetwork = true + }) + Expect(err).NotTo(HaveOccurred()) - // positive check for reachable default network pod - for _, destIP := range []string{defaultIPv4, defaultIPv6} { - if destIP == "" { - continue - } - By("checking the default network hostNetwork can reach default pod on IP " + destIP) - Eventually(func() bool { - return connectToServer(podConfiguration{namespace: hostNetPod.Namespace, name: hostNetPod.Name}, 
destIP, podClusterNetDefaultPort) == nil - }).Should(BeTrue()) - By("checking the non-kubelet host process can reach default pod on IP " + destIP) - Eventually(func() bool { - _, err := infraprovider.Get().ExecK8NodeCommand(nodeName, []string{ - "curl", "--connect-timeout", "2", - net.JoinHostPort(destIP, fmt.Sprintf("%d", podClusterNetDefaultPort)), + // positive check for reachable default network pod + for _, destIP := range []string{defaultIPv4, defaultIPv6} { + if destIP == "" { + continue + } + By("checking the default network hostNetwork can reach default pod on IP " + destIP) + Eventually(func() bool { + return connectToServer(podConfiguration{namespace: hostNetPod.Namespace, name: hostNetPod.Name}, destIP, podClusterNetDefaultPort) == nil + }).Should(BeTrue()) + By("checking the non-kubelet host process can reach default pod on IP " + destIP) + Eventually(func() bool { + _, err := infraprovider.Get().ExecK8NodeCommand(nodeName, []string{ + "curl", "--connect-timeout", "2", + net.JoinHostPort(destIP, fmt.Sprintf("%d", podClusterNetDefaultPort)), }) - return err == nil - }).Should(BeTrue()) + return err == nil + }).Should(BeTrue()) + } + // negative check for UDN pod + for _, destIP := range []string{udnIPv4, udnIPv6} { + if destIP == "" { + continue } - // negative check for UDN pod - for _, destIP := range []string{udnIPv4, udnIPv6} { - if destIP == "" { - continue - } - By("checking the default network hostNetwork pod can't reach UDN pod on IP " + destIP) - Consistently(func() bool { - return connectToServer(podConfiguration{namespace: hostNetPod.Namespace, name: hostNetPod.Name}, destIP, podClusterNetPort) != nil - }, 5*time.Second).Should(BeTrue()) + By("checking the default network hostNetwork pod can't reach UDN pod on IP " + destIP) + Consistently(func() bool { + return connectToServer(podConfiguration{namespace: hostNetPod.Namespace, name: hostNetPod.Name}, destIP, podClusterNetPort) != nil + }, 5*time.Second).Should(BeTrue()) - By("checking the 
non-kubelet host process can't reach UDN pod on IP " + destIP) - Consistently(func() bool { - _, err := infraprovider.Get().ExecK8NodeCommand(nodeName, []string{ - "curl", "--connect-timeout", "2", - net.JoinHostPort(destIP, fmt.Sprintf("%d", podClusterNetPort)), + By("checking the non-kubelet host process can't reach UDN pod on IP " + destIP) + Consistently(func() bool { + _, err := infraprovider.Get().ExecK8NodeCommand(nodeName, []string{ + "curl", "--connect-timeout", "2", + net.JoinHostPort(destIP, fmt.Sprintf("%d", podClusterNetPort)), }) - return err != nil - }, 5*time.Second).Should(BeTrue()) - } + return err != nil + }, 5*time.Second).Should(BeTrue()) } By("asserting UDN pod can reach the kapi service in the default network") @@ -1645,12 +1643,10 @@ spec: return connectToServer(podConfiguration{namespace: defaultClientPod.Namespace, name: defaultClientPod.Name}, destIP, podClusterNetPort) != nil }, 5*time.Second).Should(BeTrue()) - if !isUDNHostIsolationDisabled() { - By("checking the default hostNetwork pod can't reach UDN pod on IP " + destIP) - Consistently(func() bool { - return connectToServer(podConfiguration{namespace: hostNetPod.Namespace, name: hostNetPod.Name}, destIP, podClusterNetPort) != nil - }, 5*time.Second).Should(BeTrue()) - } + By("checking the default hostNetwork pod can't reach UDN pod on IP " + destIP) + Consistently(func() bool { + return connectToServer(podConfiguration{namespace: hostNetPod.Namespace, name: hostNetPod.Name}, destIP, podClusterNetPort) != nil + }, 5*time.Second).Should(BeTrue()) } By("Open UDN pod port") @@ -1695,12 +1691,10 @@ spec: return connectToServer(podConfiguration{namespace: defaultClientPod.Namespace, name: defaultClientPod.Name}, destIP, podClusterNetPort) != nil }, 5*time.Second).Should(BeTrue()) - if !isUDNHostIsolationDisabled() { - By("checking the default hostNetwork pod can't reach UDN pod on IP " + destIP) - Eventually(func() bool { - return connectToServer(podConfiguration{namespace: 
hostNetPod.Namespace, name: hostNetPod.Name}, destIP, podClusterNetPort) != nil - }, 5*time.Second).Should(BeTrue()) - } + By("checking the default hostNetwork pod can't reach UDN pod on IP " + destIP) + Eventually(func() bool { + return connectToServer(podConfiguration{namespace: hostNetPod.Namespace, name: hostNetPod.Name}, destIP, podClusterNetPort) != nil + }, 5*time.Second).Should(BeTrue()) } By("Verify syntax error is reported via event") events, err := cs.CoreV1().Events(udnPod.Namespace).List(context.Background(), metav1.ListOptions{}) diff --git a/test/e2e/util.go b/test/e2e/util.go index aba6dcbc44..47d0ba4f91 100644 --- a/test/e2e/util.go +++ b/test/e2e/util.go @@ -1144,11 +1144,6 @@ func isInterconnectEnabled() bool { return present && val == "true" } -func isUDNHostIsolationDisabled() bool { - val, present := os.LookupEnv("DISABLE_UDN_HOST_ISOLATION") - return present && val == "true" -} - func isNetworkSegmentationEnabled() bool { val, present := os.LookupEnv("ENABLE_NETWORK_SEGMENTATION") return present && val == "true" From 99a7b11115e51b2aedd7aecf0e444883524e7c54 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Thu, 16 Jan 2025 16:14:09 +0100 Subject: [PATCH 102/181] Update CI runners to use ubuntu 24.04 Remove nonexistent packages Signed-off-by: Nadia Pinaeva --- .github/workflows/test.yml | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 480a6345b8..e2a0067ee6 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -38,7 +38,7 @@ jobs: # separate job for parallelism lint: name: Lint - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Check out code uses: actions/checkout@v4 @@ -63,7 +63,7 @@ jobs: build-master: name: Build-master - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: # Create a cache for the built master image - name: Restore master image cache @@ -156,7 +156,7 @@ jobs: build-pr: name: 
Build-PR - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: # Create a cache for the build PR image - name: Restore PR image cache @@ -271,7 +271,7 @@ jobs: ovn-upgrade-e2e: name: Upgrade OVN from Master to PR branch based image if: github.event_name != 'schedule' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 timeout-minutes: 120 needs: - build-master @@ -319,10 +319,9 @@ jobs: sudo rm -rf /usr/local/lib/android/sdk sudo apt-get update sudo eatmydata apt-get purge --auto-remove -y \ - azure-cli aspnetcore-* dotnet-* ghc-* firefox \ + azure-cli firefox \ google-chrome-stable \ - llvm-* microsoft-edge-stable mono-* \ - msbuild mysql-server-core-* php-* php7* \ + llvm-* microsoft-edge-stable \ powershell temurin-* zulu-* # clean unused packages sudo apt-get autoclean @@ -422,7 +421,7 @@ jobs: e2e: name: e2e - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 # 30 mins for kind, 180 mins for control-plane tests, 10 minutes for all other steps timeout-minutes: 220 strategy: @@ -525,10 +524,9 @@ jobs: sudo rm -rf /usr/local/lib/android/sdk sudo apt-get update sudo eatmydata apt-get purge --auto-remove -y \ - azure-cli aspnetcore-* dotnet-* ghc-* firefox \ + azure-cli firefox \ google-chrome-stable \ - llvm-* microsoft-edge-stable mono-* \ - msbuild mysql-server-core-* php-* php7* \ + llvm-* microsoft-edge-stable \ powershell temurin-* zulu-* # clean unused packages sudo apt-get autoclean @@ -712,7 +710,7 @@ jobs: e2e-dual-conversion: name: e2e-dual-conversion if: github.event_name != 'schedule' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 timeout-minutes: 60 strategy: fail-fast: false @@ -761,10 +759,9 @@ jobs: sudo rm -rf /usr/local/lib/android/sdk sudo apt-get update sudo eatmydata apt-get purge --auto-remove -y \ - azure-cli aspnetcore-* dotnet-* ghc-* firefox \ + azure-cli firefox \ google-chrome-stable \ - llvm-* microsoft-edge-stable mono-* \ - msbuild mysql-server-core-* php-* php7* \ + llvm-* microsoft-edge-stable \ powershell temurin-* zulu-* # clean unused 
packages sudo apt-get autoclean From 6b01b29b9feb90df4a1f695b455dd274cd9cf127 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Tue, 15 Jul 2025 17:33:10 +0200 Subject: [PATCH 103/181] [e2e] Change node ip replacement commands to work on ubuntu 24. If you add a second ip from the same subnet to an interface, it will be considered a secondary IP address and will be deleted together with the primary (aka old) IP. Therefore, remove the primary IP first, then add new one. Routes should be picked up just fine. Signed-off-by: Nadia Pinaeva --- test/e2e/node_ip_mac_migration.go | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/test/e2e/node_ip_mac_migration.go b/test/e2e/node_ip_mac_migration.go index a74d161c0d..0326c2c7b7 100644 --- a/test/e2e/node_ip_mac_migration.go +++ b/test/e2e/node_ip_mac_migration.go @@ -953,26 +953,31 @@ func migrateWorkerNodeIP(nodeName, fromIP, targetIP string, invertOrder bool) (e // Define a function to change the IP address for later use. changeIPAddress := func() error { - // Add new IP first - this will preserve the default route. newIPMask := targetIP + "/" + mask - framework.Logf("Adding new IP address %s to node %s", newIPMask, nodeName) - // Add cleanup command. - cleanupCmd := []string{"ip", "address", "del", newIPMask, "dev", iface} + + // Delete current IP address. If you add a second ip from the same subnet to an interface, it will + // be considered a secondary IP address and will be deleted together with the primary (aka old) IP. + framework.Logf("Deleting current IP address %s from node %s", parsedNetIPMask.String(), nodeName) + // Add cleanup command to add original IP back to the end of the cleanupCommands list. + // This way, we preserve first delete then add new IP sequence. + cleanupCmd := []string{"ip", "address", "add", parsedNetIPMask.String(), "dev", iface} cleanupCommands = append(cleanupCommands, cleanupCmd) // Run command. 
- _, err = infraprovider.Get().ExecK8NodeCommand(nodeName, []string{"ip", "address", "add", newIPMask, "dev", iface}) + _, err = infraprovider.Get().ExecK8NodeCommand(nodeName, []string{"ip", "address", "del", parsedNetIPMask.String(), "dev", iface}) if err != nil { - return fmt.Errorf("failed to add new IP %s to interface %s on node %s: %v", newIPMask, iface, nodeName, err) + return err } - // Delete current IP address. On rollback, first add the old IP and then delete the new one. - framework.Logf("Deleting current IP address %s from node %s", parsedNetIPMask.String(), nodeName) - // Add cleanup command. - cleanupCmd = []string{"ip", "address", "add", parsedNetIPMask.String(), "dev", iface} + + // Now add new IP. + framework.Logf("Adding new IP address %s to node %s", newIPMask, nodeName) + // Add cleanup command to remove the new IP address to the beginning of the cleanupCommands list. + cleanupCmd = []string{"ip", "address", "del", newIPMask, "dev", iface} cleanupCommands = append([][]string{cleanupCmd}, cleanupCommands...) + // Run command. - _, err = infraprovider.Get().ExecK8NodeCommand(nodeName, []string{"ip", "address", "del", parsedNetIPMask.String(), "dev", iface}) + _, err = infraprovider.Get().ExecK8NodeCommand(nodeName, []string{"ip", "address", "add", newIPMask, "dev", iface}) if err != nil { - return fmt.Errorf("failed to add new IP %s to interface %s on node %s: %v", newIPMask, iface, nodeName, err) } return nil } From ab8d473387ea2eced0bdbc432bbaf46fa988328e Mon Sep 17 00:00:00 2001 From: Enrique Llorente Date: Mon, 14 Jul 2025 09:54:20 +0200 Subject: [PATCH 104/181] virt: Delete LSP at external process killing VM When something like the OOM killer kills the running virt-launcher pod, the code enters [1]; this prevents ovn-kubernetes from deleting the LSP, so when the VM is restarted its traffic is blocked. This change just returns a nil virtual machine status, since there is no live migration going on. 
[1] https://github.com/openshift/ovn-kubernetes/blob/release-4.18/go-controller/pkg/kubevirt/pod.go#L475 Signed-off-by: Enrique Llorente --- go-controller/pkg/kubevirt/pod.go | 12 +++-- go-controller/pkg/kubevirt/pod_test.go | 11 ++-- go-controller/pkg/ovn/kubevirt_test.go | 10 ++-- test/e2e/kubevirt.go | 69 +++++++++++++++++++++----- test/e2e/kubevirt/pod.go | 17 +++++++ 5 files changed, 92 insertions(+), 27 deletions(-) diff --git a/go-controller/pkg/kubevirt/pod.go b/go-controller/pkg/kubevirt/pod.go index b0f43ffcbf..901d28ca74 100644 --- a/go-controller/pkg/kubevirt/pod.go +++ b/go-controller/pkg/kubevirt/pod.go @@ -470,11 +470,15 @@ func DiscoverLiveMigrationStatus(client *factory.WatchFactory, pod *corev1.Pod) targetPod := vmPods[len(vmPods)-1] livingPods := filterNotComplete(vmPods) + + // If there is no living pod we should state no live migration status + if len(livingPods) == 0 { + return nil, nil + } + + // There is a living pod but is not the target one so the migration + // has failed. if util.PodCompleted(targetPod) { - // if target pod failed, then there should be only one living source pod. 
- if len(livingPods) != 1 { - return nil, fmt.Errorf("unexpected live migration state: should have a single living pod") - } return &LiveMigrationStatus{ SourcePod: livingPods[0], TargetPod: targetPod, diff --git a/go-controller/pkg/kubevirt/pod_test.go b/go-controller/pkg/kubevirt/pod_test.go index 8db076019b..2bab9282f8 100644 --- a/go-controller/pkg/kubevirt/pod_test.go +++ b/go-controller/pkg/kubevirt/pod_test.go @@ -98,6 +98,11 @@ var _ = Describe("Kubevirt Pod", func() { pods: []corev1.Pod{successfullyMigratedKvSourcePod, failedMigrationKvTargetPod, successfulMigrationKvTargetPod}, }, ), + Entry("returns nil when there is all the pods are completed (not running vm after migration)", + testParams{ + pods: []corev1.Pod{completedKubevirtPod(t0), completedKubevirtPod(t1), completedKubevirtPod(t3)}, + }, + ), Entry("returns Migration in progress status when 2 pods are running, target pod is not yet ready", testParams{ pods: []corev1.Pod{runningKvSourcePod, duringMigrationKvTargetPod}, @@ -148,12 +153,6 @@ var _ = Describe("Kubevirt Pod", func() { }, }, ), - Entry("returns err when kubevirt VM has several living pods and target pod failed", - testParams{ - pods: []corev1.Pod{runningKvSourcePod, successfulMigrationKvTargetPod, anotherFailedMigrationKvTargetPod}, - expectedError: fmt.Errorf("unexpected live migration state: should have a single living pod"), - }, - ), Entry("returns err when kubevirt VM has several living pods", testParams{ pods: []corev1.Pod{runningKvSourcePod, duringMigrationKvTargetPod, yetAnotherDuringMigrationKvTargetPod}, diff --git a/go-controller/pkg/ovn/kubevirt_test.go b/go-controller/pkg/ovn/kubevirt_test.go index 71293a0763..342aad35d4 100644 --- a/go-controller/pkg/ovn/kubevirt_test.go +++ b/go-controller/pkg/ovn/kubevirt_test.go @@ -146,10 +146,6 @@ var _ = Describe("OVN Kubevirt Operations", func() { addressIPv6: "fd11::3", }, } - logicalSwitch *nbdb.LogicalSwitch - ovnClusterRouter *nbdb.LogicalRouter - logicalRouterPort 
*nbdb.LogicalRouterPort - migrationSourceLSRP, migrationTargetLSRP *nbdb.LogicalSwitchPort lrpIP = func(network string) string { return strings.Split(network, "/")[0] @@ -497,6 +493,12 @@ var _ = Describe("OVN Kubevirt Operations", func() { Context("during execution", func() { DescribeTable("reconcile migratable vm pods", func(t testData) { + var ( + logicalSwitch *nbdb.LogicalSwitch + ovnClusterRouter *nbdb.LogicalRouter + logicalRouterPort *nbdb.LogicalRouterPort + migrationSourceLSRP, migrationTargetLSRP *nbdb.LogicalSwitchPort + ) _, parsedClusterCIDRIPv4, err := net.ParseCIDR(clusterCIDRIPv4) Expect(err).ToNot(HaveOccurred()) diff --git a/test/e2e/kubevirt.go b/test/e2e/kubevirt.go index d6a774ec4d..4406a4d964 100644 --- a/test/e2e/kubevirt.go +++ b/test/e2e/kubevirt.go @@ -50,7 +50,7 @@ import ( e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" testutils "k8s.io/kubernetes/test/utils" utilnet "k8s.io/utils/net" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" crclient "sigs.k8s.io/controller-runtime/pkg/client" butaneconfig "github.com/coreos/butane/config" @@ -794,9 +794,9 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun }).WithPolling(time.Second).WithTimeout(time.Minute).Should(Succeed()) } - waitVirtualMachineInstanceReadiness = func(vmi *kubevirtv1.VirtualMachineInstance) { + waitVirtualMachineInstanceReadinessWith = func(vmi *kubevirtv1.VirtualMachineInstance, conditionStatus corev1.ConditionStatus) { GinkgoHelper() - By(fmt.Sprintf("Waiting for readiness at virtual machine %s", vmi.Name)) + By(fmt.Sprintf("Waiting for readiness=%q at virtual machine %s", conditionStatus, vmi.Name)) Eventually(func() []kubevirtv1.VirtualMachineInstanceCondition { err := crClient.Get(context.Background(), crclient.ObjectKeyFromObject(vmi), vmi) Expect(err).To(SatisfyAny( @@ -807,10 +807,20 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun }).WithPolling(time.Second).WithTimeout(5 * 
time.Minute).Should( ContainElement(SatisfyAll( HaveField("Type", kubevirtv1.VirtualMachineInstanceReady), - HaveField("Status", corev1.ConditionTrue), + HaveField("Status", conditionStatus), ))) } + waitVirtualMachineInstanceReadiness = func(vmi *kubevirtv1.VirtualMachineInstance) { + GinkgoHelper() + waitVirtualMachineInstanceReadinessWith(vmi, corev1.ConditionTrue) + } + + waitVirtualMachineInstanceFailed = func(vmi *kubevirtv1.VirtualMachineInstance) { + GinkgoHelper() + waitVirtualMachineInstanceReadinessWith(vmi, corev1.ConditionFalse) + } + waitVirtualMachineAddresses = func(vmi *kubevirtv1.VirtualMachineInstance) []kubevirt.Address { GinkgoHelper() step := by(vmi.Name, "Wait for virtual machine to receive IPv4 address from DHCP") @@ -903,7 +913,7 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun NetworkSource: networkSource, }, }, - TerminationGracePeriodSeconds: pointer.Int64(5), + TerminationGracePeriodSeconds: ptr.To(int64(5)), Volumes: []kubevirtv1.Volume{ { Name: "containerdisk", @@ -929,7 +939,7 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun GenerateName: vmi.GenerateName, }, Spec: kubevirtv1.VirtualMachineSpec{ - Running: pointer.Bool(true), + RunStrategy: ptr.To(kubevirtv1.RunStrategyAlways), Template: &kubevirtv1.VirtualMachineInstanceTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Annotations: vmi.Annotations, @@ -1414,8 +1424,8 @@ fi Name: "force-post-copy", }, Spec: kvmigrationsv1alpha1.MigrationPolicySpec{ - AllowPostCopy: pointer.Bool(true), - CompletionTimeoutPerGiB: pointer.Int64(1), + AllowPostCopy: ptr.To(true), + CompletionTimeoutPerGiB: ptr.To(int64(1)), BandwidthPerMigration: &bandwidthPerMigration, Selectors: &kvmigrationsv1alpha1.Selectors{ VirtualMachineInstanceSelector: kvmigrationsv1alpha1.LabelSelector{ @@ -2219,15 +2229,20 @@ chpasswd: { expire: False } networkData, err := staticIPsNetworkData(selectCIDRs(vmiIPv4, vmiIPv6)) Expect(err).NotTo(HaveOccurred()) - vmi 
:= fedoraWithTestToolingVMI(nil /*labels*/, nil /*annotations*/, nil /*nodeSelector*/, kubevirtv1.NetworkSource{ + vm := fedoraWithTestToolingVM(nil /*labels*/, nil /*annotations*/, nil /*nodeSelector*/, kubevirtv1.NetworkSource{ Multus: &kubevirtv1.MultusNetwork{ NetworkName: cudn.Name, }, }, userData, networkData) // Harcode mac address so it's the same after live migration - vmi.Spec.Domain.Devices.Interfaces[0].MacAddress = vmiMAC - createVirtualMachineInstance(vmi) - + vm.Spec.Template.Spec.Domain.Devices.Interfaces[0].MacAddress = vmiMAC + createVirtualMachine(vm) + vmi := &kubevirtv1.VirtualMachineInstance{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: vm.Name, + }, + } waitVirtualMachineInstanceReadiness(vmi) Expect(crClient.Get(context.TODO(), crclient.ObjectKeyFromObject(vmi), vmi)).To(Succeed()) @@ -2253,11 +2268,39 @@ chpasswd: { expire: False } by(vmi.Name, "Running live migration for virtual machine instance") td(vmi) - step = by(vmi.Name, fmt.Sprintf("Login to virtual machine after virtual machine instance live migration")) + // Update vmi status after live migration + Expect(crClient.Get(context.Background(), crclient.ObjectKeyFromObject(vmi), vmi)).To(Succeed()) + + step = by(vmi.Name, "Login to virtual machine after virtual machine instance live migration") Expect(kubevirt.LoginToFedora(vmi, "fedora", "fedora")).To(Succeed(), step) step = by(vmi.Name, "Check east/west traffic after virtual machine instance live migration") checkEastWestIperfTraffic(vmi, testPodsIPs, step) + + By("Stop iperf3 traffic before force killing vm, so iperf3 server do not get stuck") + output, err = kubevirt.RunCommand(vmi, "killall iperf3", 5*time.Second) + Expect(err).ToNot(HaveOccurred(), output) + + step = by(vmi.Name, fmt.Sprintf("Force kill qemu at node %q where VM is running on", vmi.Status.NodeName)) + Expect(kubevirt.ForceKillVirtLauncherAtNode(infraprovider.Get(), vmi.Status.NodeName, vmi.Namespace, vmi.Name)).To(Succeed()) + + step = 
by(vmi.Name, "Waiting for failed restarted VMI to reach ready state") + waitVirtualMachineInstanceFailed(vmi) + waitVirtualMachineInstanceReadiness(vmi) + Expect(crClient.Get(context.TODO(), crclient.ObjectKeyFromObject(vmi), vmi)).To(Succeed()) + + step = by(vmi.Name, "Login to virtual machine after virtual machine instance force killed") + Expect(kubevirt.LoginToFedora(vmi, "fedora", "fedora")).To(Succeed(), step) + + step = by(vmi.Name, "Restart iperf traffic after forcing a vm failure") + Expect(startEastWestIperfTraffic(vmi, testPodsIPs, step)).To(Succeed(), step) + checkEastWestIperfTraffic(vmi, testPodsIPs, step) + + by(vmi.Name, "Running live migration after forcing vm failure") + td(vmi) + + step = by(vmi.Name, "Check east/west traffic for failed virtual machine after live migration") + checkEastWestIperfTraffic(vmi, testPodsIPs, step) }, Entry("after succeeded live migration", liveMigrateSucceed), Entry("after failed live migration", liveMigrateFailed), diff --git a/test/e2e/kubevirt/pod.go b/test/e2e/kubevirt/pod.go index 5ad2011a1f..1293e1acc5 100644 --- a/test/e2e/kubevirt/pod.go +++ b/test/e2e/kubevirt/pod.go @@ -1,6 +1,9 @@ package kubevirt import ( + "fmt" + + infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" @@ -31,3 +34,17 @@ func GenerateFakeVirtLauncherPod(namespace, vmName string) *corev1.Pod { }, } } + +func ForceKillVirtLauncherAtNode(p infraapi.Provider, nodeName, vmNamespace, vmName string) error { + // /usr/bin/virt-launcher --qemu-timeout 312s --name worker-dcf9j --uid bcf975f4-7bdd-4264-948b-b6080320e38a --namespace kv-live-migration-2575 --kubevirt-share-dir /var/run/kubevirt --ephemeral-disk-dir /var/run/kubevirt-ephemeral-disks --container-disk-dir /var/run/kubevirt/container-disks --grace-period-seconds 20 --hook-sidecars 0 --ovmf-path /usr/share/OVMF --run-as-nonroot + killScript := fmt.Sprintf(` +pid=$(pgrep -f 
'virt-launcher .*--name %s.*--namespace %s'|grep -v $$) +ps aux |grep virt-launcher +kill -9 $pid +`, vmName, vmNamespace) + output, err := p.ExecK8NodeCommand(nodeName, []string{"bash", "-xe", "-c", killScript}) + if err != nil { + return fmt.Errorf("%s:%w", output, err) + } + return nil +} From 6018f47f7c229274a38d5697dc83e3b56e194ac2 Mon Sep 17 00:00:00 2001 From: Martin Kennelly Date: Fri, 2 May 2025 09:24:33 +0100 Subject: [PATCH 105/181] E2E: lookup support for IP families instead of using env var Using env vars isnt user friendly and it complicates testing downstream. Signed-off-by: Martin Kennelly --- test/e2e/egressip.go | 5 +- test/e2e/kubevirt.go | 26 ++-- test/e2e/multihoming_utils.go | 35 ++++-- test/e2e/network_segmentation.go | 115 +++++++++++------- ...work_segmentation_endpointslices_mirror.go | 14 ++- test/e2e/network_segmentation_localnet.go | 5 +- test/e2e/network_segmentation_policy.go | 8 +- test/e2e/network_segmentation_services.go | 5 +- test/e2e/pod.go | 6 +- test/e2e/route_advertisements.go | 79 ++++++------ test/e2e/util.go | 27 +++- 11 files changed, 193 insertions(+), 132 deletions(-) diff --git a/test/e2e/egressip.go b/test/e2e/egressip.go index 7faad7185e..d9d281aa7b 100644 --- a/test/e2e/egressip.go +++ b/test/e2e/egressip.go @@ -702,6 +702,7 @@ var _ = ginkgo.DescribeTableSubtree("e2e egress IP validation", feature.EgressIP if len(nodes.Items) < 3 { framework.Failf("Test requires >= 3 Ready nodes, but there are only %v nodes", len(nodes.Items)) } + netConfigParams.cidr = filterCIDRsAndJoin(f.ClientSet, netConfigParams.cidr) if isSupported, reason := isNetworkSupported(nodes, netConfigParams); !isSupported { ginkgo.Skip(reason) } @@ -3193,13 +3194,13 @@ spec: ginkgo.Entry("L3 Primary UDN", networkAttachmentConfigParams{ name: "l3primary", topology: types.Layer3Topology, - cidr: correctCIDRFamily("30.10.0.0/16", "2014:100:200::0/60"), + cidr: joinCIDRs("30.10.0.0/16", "2014:100:200::0/60"), role: "primary", }), ginkgo.Entry("L2 
Primary UDN", networkAttachmentConfigParams{ name: "l2primary", topology: types.Layer2Topology, - cidr: correctCIDRFamily("10.10.0.0/16", "2014:100:200::0/60"), + cidr: joinCIDRs("10.10.0.0/16", "2014:100:200::0/60"), role: "primary", }), ) diff --git a/test/e2e/kubevirt.go b/test/e2e/kubevirt.go index d6a774ec4d..30c5fd9546 100644 --- a/test/e2e/kubevirt.go +++ b/test/e2e/kubevirt.go @@ -1699,8 +1699,8 @@ write_files: namespace = fr.Namespace.Name networkName := "" - cidrs := generateL2Subnets(cidrIPv4, cidrIPv6) - cudn, networkName = kubevirt.GenerateCUDN(namespace, "net1", td.topology, td.role, cidrs) + dualCIDRs := filterDualStackCIDRs(fr.ClientSet, []udnv1.CIDR{udnv1.CIDR(cidrIPv4), udnv1.CIDR(cidrIPv6)}) + cudn, networkName = kubevirt.GenerateCUDN(namespace, "net1", td.topology, td.role, dualCIDRs) if td.topology == udnv1.NetworkTopologyLocalnet { By("setting up the localnet underlay") @@ -1809,7 +1809,7 @@ ip route add %[3]s via %[4]s // expect 2 addresses on dual-stack deployments; 1 on single-stack step = by(vmi.Name, "Wait for addresses at the virtual machine") - expectedNumberOfAddresses := len(cidrs) + expectedNumberOfAddresses := len(dualCIDRs) expectedAddreses := virtualMachineAddressesFromStatus(vmi, expectedNumberOfAddresses) expectedAddresesAtGuest := expectedAddreses testPodsIPs := podsMultusNetworkIPs(iperfServerTestPods, podNetworkStatusByNetConfigPredicate(namespace, cudn.Name, strings.ToLower(string(td.role)))) @@ -1836,7 +1836,7 @@ ip route add %[3]s via %[4]s checkEastWestIperfTraffic(vmi, testPodsIPs, step) if td.role == udnv1.NetworkRolePrimary { - if isIPv6Supported() && isInterconnectEnabled() { + if isIPv6Supported(fr.ClientSet) && isInterconnectEnabled() { step = by(vmi.Name, fmt.Sprintf("Checking IPv6 gateway before %s %s", td.resource.description, td.test.description)) nodeRunningVMI, err := fr.ClientSet.CoreV1().Nodes().Get(context.Background(), vmi.Status.NodeName, metav1.GetOptions{}) @@ -1906,7 +1906,7 @@ ip route add %[3]s via 
%[4]s } if td.role == udnv1.NetworkRolePrimary && td.test.description == liveMigrate.description && isInterconnectEnabled() { - if isIPv4Supported() { + if isIPv4Supported(fr.ClientSet) { step = by(vmi.Name, fmt.Sprintf("Checking IPv4 gateway cached mac after %s %s", td.resource.description, td.test.description)) Expect(crClient.Get(context.TODO(), crclient.ObjectKeyFromObject(vmi), vmi)).To(Succeed()) @@ -1923,7 +1923,7 @@ ip route add %[3]s via %[4]s WithPolling(time.Second). Should(Equal(expectedGatewayMAC), step) } - if isIPv6Supported() { + if isIPv6Supported(fr.ClientSet) { step = by(vmi.Name, fmt.Sprintf("Checking IPv6 gateway after %s %s", td.resource.description, td.test.description)) targetNode, err := fr.ClientSet.CoreV1().Nodes().Get(context.Background(), vmi.Status.MigrationState.TargetNode, metav1.GetOptions{}) @@ -2055,8 +2055,8 @@ ip route add %[3]s via %[4]s }) fr.Namespace = ns namespace = fr.Namespace.Name - cidrs := generateL2Subnets(cidrIPv4, cidrIPv6) - cudn, _ := kubevirt.GenerateCUDN(namespace, "net1", udnv1.NetworkTopologyLayer2, udnv1.NetworkRolePrimary, cidrs) + dualCIDRs := filterDualStackCIDRs(fr.ClientSet, []udnv1.CIDR{udnv1.CIDR(cidrIPv4), udnv1.CIDR(cidrIPv6)}) + cudn, _ := kubevirt.GenerateCUDN(namespace, "net1", udnv1.NetworkTopologyLayer2, udnv1.NetworkRolePrimary, dualCIDRs) cudn.Spec.Network.Layer2.MTU = 1300 createCUDN(cudn) @@ -2097,7 +2097,7 @@ ip route add %[3]s via %[4]s Get(context.Background(), config.Kubernetes.DNSServiceName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - if isIPv4Supported() { + if isIPv4Supported(fr.ClientSet) { expectedIP, err := matchIPv4StringFamily(primaryUDNNetworkStatus.IPs) Expect(err).NotTo(HaveOccurred()) @@ -2125,7 +2125,7 @@ ip route add %[3]s via %[4]s Expect(primaryUDNValueForDevice("GENERAL.MTU")).To(ConsistOf("1300")) } - if isIPv6Supported() { + if isIPv6Supported(fr.ClientSet) { expectedIP, err := matchIPv6StringFamily(primaryUDNNetworkStatus.IPs) 
Expect(err).NotTo(HaveOccurred()) Eventually(primaryUDNValueFor). @@ -2164,7 +2164,7 @@ ip route add %[3]s via %[4]s vmiIPv4 = "10.128.0.100/24" vmiIPv6 = "2010:100:200::100/60" vmiMAC = "0A:58:0A:80:00:64" - cidr = selectCIDRs(ipv4CIDR, ipv6CIDR) + cidrs = []string{ipv4CIDR, ipv6CIDR} staticIPsNetworkData = func(ips []string) (string, error) { type Ethernet struct { Addresses []string `json:"addresses,omitempty"` @@ -2213,10 +2213,10 @@ chpasswd: { expire: False } selectedNodes = workerNodeList.Items Expect(selectedNodes).NotTo(BeEmpty()) - iperfServerTestPods, err = createIperfServerPods(selectedNodes, cudn.Name, cudn.Spec.Network.Localnet.Role, cidr) + iperfServerTestPods, err = createIperfServerPods(selectedNodes, cudn.Name, cudn.Spec.Network.Localnet.Role, filterCIDRs(fr.ClientSet, cidrs...)) Expect(err).NotTo(HaveOccurred()) - networkData, err := staticIPsNetworkData(selectCIDRs(vmiIPv4, vmiIPv6)) + networkData, err := staticIPsNetworkData(filterCIDRs(fr.ClientSet, vmiIPv4, vmiIPv6)) Expect(err).NotTo(HaveOccurred()) vmi := fedoraWithTestToolingVMI(nil /*labels*/, nil /*annotations*/, nil /*nodeSelector*/, kubevirtv1.NetworkSource{ diff --git a/test/e2e/multihoming_utils.go b/test/e2e/multihoming_utils.go index 636ea78eba..2fb10354d4 100644 --- a/test/e2e/multihoming_utils.go +++ b/test/e2e/multihoming_utils.go @@ -29,24 +29,33 @@ func netCIDR(netCIDR string, netPrefixLengthPerNode int) string { return fmt.Sprintf("%s/%d", netCIDR, netPrefixLengthPerNode) } -// takes ipv4 and ipv6 cidrs and returns the correct type for the cluster under test -func correctCIDRFamily(ipv4CIDR, ipv6CIDR string) string { - return strings.Join(selectCIDRs(ipv4CIDR, ipv6CIDR), ",") +func joinCIDRs(cidrs ...string) string { + return strings.Join(cidrs, ",") } -// takes ipv4 and ipv6 cidrs and returns the correct type for the cluster under test -func selectCIDRs(ipv4CIDR, ipv6CIDR string) []string { - // dual stack cluster - if isIPv6Supported() && isIPv4Supported() { - return 
[]string{ipv4CIDR, ipv6CIDR} +func splitCIDRs(cidrs string) []string { + if cidrs == "" { + return []string{} } - // is an ipv6 only cluster - if isIPv6Supported() { - return []string{ipv6CIDR} + return strings.Split(cidrs, ",") +} + +func filterCIDRsAndJoin(cs clientset.Interface, cidrs string) string { + if cidrs == "" { + return "" // we may not always set CIDR - i.e. CDN } + return joinCIDRs(filterCIDRs(cs, splitCIDRs(cidrs)...)...) +} - //ipv4 only cluster - return []string{ipv4CIDR} +func filterCIDRs(cs clientset.Interface, cidrs ...string) []string { + var supportedCIDRs []string + for _, cidr := range cidrs { + if !isCIDRIPFamilySupported(cs, cidr) { + continue + } + supportedCIDRs = append(supportedCIDRs, cidr) + } + return supportedCIDRs } func getNetCIDRSubnet(netCIDR string) (string, error) { diff --git a/test/e2e/network_segmentation.go b/test/e2e/network_segmentation.go index 12c6da9ee9..4a9db3330d 100644 --- a/test/e2e/network_segmentation.go +++ b/test/e2e/network_segmentation.go @@ -10,6 +10,7 @@ import ( "strings" "time" + udnv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" @@ -90,6 +91,7 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { func(netConfig *networkAttachmentConfigParams) { By("creating the network") netConfig.namespace = f.Namespace.Name + netConfig.cidr = filterCIDRsAndJoin(f.ClientSet, netConfig.cidr) Expect(createNetworkFn(netConfig)).To(Succeed()) By("creating a pod on the udn namespace") @@ -126,7 +128,7 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { &networkAttachmentConfigParams{ name: nadName, topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, 
userDefinedNetworkIPv6Subnet), role: "primary", }, ), @@ -134,7 +136,7 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { &networkAttachmentConfigParams{ name: nadName, topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, ), @@ -157,6 +159,7 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { By("creating the network") netConfig.namespace = f.Namespace.Name + netConfig.cidr = filterCIDRsAndJoin(f.ClientSet, netConfig.cidr) Expect(createNetworkFn(netConfig)).To(Succeed()) By("creating client/server pods") @@ -198,7 +201,7 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { &networkAttachmentConfigParams{ name: nadName, topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, *podConfig( @@ -216,7 +219,7 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { &networkAttachmentConfigParams{ name: nadName, topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, *podConfig( @@ -268,6 +271,7 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { By("creating the network") netConfigParams.namespace = f.Namespace.Name + netConfigParams.cidr = filterCIDRsAndJoin(f.ClientSet, netConfigParams.cidr) Expect(createNetworkFn(netConfigParams)).To(Succeed()) udnPodConfig.namespace = f.Namespace.Name @@ -496,7 +500,7 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { &networkAttachmentConfigParams{ name: nadName, topology: "layer2", - cidr: 
correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, *podConfig( @@ -511,7 +515,7 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { &networkAttachmentConfigParams{ name: nadName, topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, *podConfig( @@ -565,11 +569,12 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { netConfig := &networkAttachmentConfigParams{ topology: topology, - cidr: correctCIDRFamily(userDefinedv4Subnet, userDefinedv6Subnet), + cidr: joinCIDRs(userDefinedv4Subnet, userDefinedv6Subnet), role: "primary", namespace: namespace, name: network, } + netConfig.cidr = filterCIDRsAndJoin(f.ClientSet, netConfig.cidr) Expect(createNetworkFn(netConfig)).To(Succeed()) // update the name because createNetworkFn may mutate the netConfig.name @@ -714,16 +719,18 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { name: "tenant-blue", namespace: f.Namespace.Name, topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", } + netConfig1.cidr = filterCIDRsAndJoin(f.ClientSet, netConfig1.cidr) netConfig2 := networkAttachmentConfigParams{ name: "blue", namespace: f.Namespace.Name + "-tenant", topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", } + netConfig2.cidr = filterCIDRsAndJoin(f.ClientSet, netConfig2.cidr) nodes, err := e2enode.GetBoundedReadySchedulableNodes(context.TODO(), cs, 2) framework.ExpectNoError(err) 
if len(nodes.Items) < 2 { @@ -836,6 +843,7 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { DescribeTable("should be able to send multicast UDP traffic between nodes", func(netConfigParams networkAttachmentConfigParams) { ginkgo.By("creating the attachment configuration") netConfigParams.namespace = f.Namespace.Name + netConfigParams.cidr = filterCIDRsAndJoin(cs, netConfigParams.cidr) netConfig := newNetworkAttachmentConfig(netConfigParams) _, err := nadClient.NetworkAttachmentDefinitions(f.Namespace.Name).Create( context.Background(), @@ -848,19 +856,20 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { ginkgo.Entry("with primary layer3 UDN", networkAttachmentConfigParams{ name: nadName, topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }), ginkgo.Entry("with primary layer2 UDN", networkAttachmentConfigParams{ name: nadName, topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }), ) DescribeTable("should be able to receive multicast IGMP query", func(netConfigParams networkAttachmentConfigParams) { ginkgo.By("creating the attachment configuration") netConfigParams.namespace = f.Namespace.Name + netConfigParams.cidr = filterCIDRsAndJoin(cs, netConfigParams.cidr) netConfig := newNetworkAttachmentConfig(netConfigParams) _, err := nadClient.NetworkAttachmentDefinitions(f.Namespace.Name).Create( context.Background(), @@ -873,14 +882,14 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { ginkgo.Entry("with primary layer3 UDN", networkAttachmentConfigParams{ name: nadName, topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + 
cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }), // TODO: this test is broken, see https://github.com/ovn-kubernetes/ovn-kubernetes/issues/5309 //ginkgo.Entry("with primary layer2 UDN", networkAttachmentConfigParams{ // name: nadName, // topology: "layer2", - // cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + // cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), // role: "primary", //}), ) @@ -910,7 +919,7 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { Expect(err).NotTo(HaveOccurred()) By("create tests UserDefinedNetwork") - cleanup, err := createManifest(defaultNetNamespace.Name, newPrimaryUserDefinedNetworkManifest(testUdnName)) + cleanup, err := createManifest(defaultNetNamespace.Name, newPrimaryUserDefinedNetworkManifest(cs, testUdnName)) DeferCleanup(cleanup) Expect(err).NotTo(HaveOccurred()) Eventually(userDefinedNetworkReadyFunc(f.DynamicClient, defaultNetNamespace.Name, testUdnName), 5*time.Second).Should(Not(Succeed())) @@ -1127,13 +1136,13 @@ spec: topology: "layer3", name: primaryNadName, networkName: primaryNadName, - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), })) _, err := nadClient.NetworkAttachmentDefinitions(f.Namespace.Name).Create(context.Background(), primaryNetNad, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) By("create primary network UserDefinedNetwork") - cleanup, err := createManifest(f.Namespace.Name, newPrimaryUserDefinedNetworkManifest(primaryUdnName)) + cleanup, err := createManifest(f.Namespace.Name, newPrimaryUserDefinedNetworkManifest(cs, primaryUdnName)) DeferCleanup(cleanup) Expect(err).NotTo(HaveOccurred()) @@ -1425,14 +1434,14 @@ spec: topology: "layer3", name: primaryNadName, networkName: primaryNadName, - cidr: 
correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), })) _, err := nadClient.NetworkAttachmentDefinitions(primaryNetTenantNs).Create(context.Background(), primaryNetNad, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) By("create primary Cluster UDN CR") cudnName := randomNetworkMetaName() - cleanup, err := createManifest(f.Namespace.Name, newPrimaryClusterUDNManifest(cudnName, testTenantNamespaces...)) + cleanup, err := createManifest(f.Namespace.Name, newPrimaryClusterUDNManifest(cs, cudnName, testTenantNamespaces...)) DeferCleanup(func() { cleanup() _, err := e2ekubectl.RunKubectl("", "delete", "clusteruserdefinednetwork", cudnName, "--wait", fmt.Sprintf("--timeout=%ds", 60)) @@ -1494,6 +1503,7 @@ spec: By("creating the network") netConfigParams.namespace = f.Namespace.Name + netConfigParams.cidr = filterCIDRsAndJoin(f.ClientSet, netConfigParams.cidr) Expect(createNetworkFn(netConfigParams)).To(Succeed()) By("instantiating the client pod") @@ -1521,15 +1531,15 @@ spec: Expect(err).NotTo(HaveOccurred()) framework.Logf("Client pod's annotation for network %s is %v", netConfigParams.name, podAnno) - Expect(podAnno.Routes).To(HaveLen(expectedNumberOfRoutes(*netConfigParams))) + Expect(podAnno.Routes).To(HaveLen(expectedNumberOfRoutes(cs, *netConfigParams))) - assertClientExternalConnectivity(clientPodConfig, externalContainer.GetIPv4(), externalContainer.GetIPv6(), externalContainer.GetPort()) + assertClientExternalConnectivity(cs, clientPodConfig, externalContainer.GetIPv4(), externalContainer.GetIPv6(), externalContainer.GetPort()) }, Entry("by one pod over a layer2 network", &networkAttachmentConfigParams{ name: userDefinedNetworkName, topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, 
*podConfig("client-pod"), @@ -1538,7 +1548,7 @@ spec: &networkAttachmentConfigParams{ name: userDefinedNetworkName, topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, *podConfig("client-pod"), @@ -1585,7 +1595,7 @@ spec: BeforeEach(func() { By("create tests UserDefinedNetwork") - cleanup, err := createManifest(f.Namespace.Name, newPrimaryUserDefinedNetworkManifest(testUdnName)) + cleanup, err := createManifest(f.Namespace.Name, newPrimaryUserDefinedNetworkManifest(cs, testUdnName)) DeferCleanup(cleanup) Expect(err).NotTo(HaveOccurred()) Eventually(userDefinedNetworkReadyFunc(f.DynamicClient, f.Namespace.Name, testUdnName), 5*time.Second, time.Second).Should(Succeed()) @@ -1732,6 +1742,7 @@ spec: } By("creating the network") netConfig.namespace = f.Namespace.Name + netConfig.cidr = filterCIDRsAndJoin(f.ClientSet, netConfig.cidr) udnManifest := generateUserDefinedNetworkManifest(&netConfig) cleanup, err := createManifest(netConfig.namespace, udnManifest) Expect(err).ShouldNot(HaveOccurred(), "creating manifest must succeed") @@ -1770,7 +1781,7 @@ spec: networkAttachmentConfigParams{ name: nadName, topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, *podConfig( @@ -1788,7 +1799,7 @@ spec: networkAttachmentConfigParams{ name: nadName, topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, *podConfig( @@ -2152,7 +2163,7 @@ spec: ` } -func newPrimaryClusterUDNManifest(name string, targetNamespaces ...string) string { +func newPrimaryClusterUDNManifest(cs clientset.Interface, name string, targetNamespaces ...string) 
string { targetNs := strings.Join(targetNamespaces, ",") return ` apiVersion: k8s.ovn.org/v1 @@ -2169,7 +2180,7 @@ spec: topology: Layer3 layer3: role: Primary - subnets: ` + generateCIDRforClusterUDN("10.20.100.0/16", "2014:100:200::0/60") + subnets: ` + generateCIDRforClusterUDN(cs, "10.20.100.0/16", "2014:100:200::0/60") } func newL2SecondaryUDNManifest(name string) string { @@ -2186,7 +2197,7 @@ spec: ` } -func newPrimaryUserDefinedNetworkManifest(name string) string { +func newPrimaryUserDefinedNetworkManifest(cs clientset.Interface, name string) string { return ` apiVersion: k8s.ovn.org/v1 kind: UserDefinedNetwork @@ -2196,19 +2207,19 @@ spec: topology: Layer3 layer3: role: Primary - subnets: ` + generateCIDRforUDN("10.20.100.0/16", "2014:100:200::0/60") + subnets: ` + generateCIDRforUDN(cs, "10.20.100.0/16", "2014:100:200::0/60") } -func generateCIDRforUDN(v4, v6 string) string { +func generateCIDRforUDN(cs clientset.Interface, v4, v6 string) string { cidr := ` - cidr: ` + v4 + ` ` - if isIPv6Supported() && isIPv4Supported() { + if isIPv6Supported(cs) && isIPv4Supported(cs) { cidr = ` - cidr: ` + v4 + ` - cidr: ` + v6 + ` ` - } else if isIPv6Supported() { + } else if isIPv6Supported(cs) { cidr = ` - cidr: ` + v6 + ` ` @@ -2216,11 +2227,33 @@ func generateCIDRforUDN(v4, v6 string) string { return cidr } -func generateCIDRforClusterUDN(v4, v6 string) string { +func filterDualStackCIDRs(cs clientset.Interface, cidrs udnv1.DualStackCIDRs) udnv1.DualStackCIDRs { + filteredCIDRs := make(udnv1.DualStackCIDRs, 0, len(cidrs)) + for _, cidr := range cidrs { + if !isCIDRIPFamilySupported(cs, string(cidr)) { + continue + } + filteredCIDRs = append(filteredCIDRs, cidr) + } + return filteredCIDRs +} + +func filterL3Subnets(cs clientset.Interface, l3Subnets []udnv1.Layer3Subnet) []udnv1.Layer3Subnet { + filteredL3Subnets := make([]udnv1.Layer3Subnet, 0, len(l3Subnets)) + for _, l3Subnet := range l3Subnets { + if !isCIDRIPFamilySupported(cs, string(l3Subnet.CIDR)) { + 
continue + } + filteredL3Subnets = append(filteredL3Subnets, l3Subnet) + } + return filteredL3Subnets +} + +func generateCIDRforClusterUDN(cs clientset.Interface, v4, v6 string) string { cidr := `[{cidr: ` + v4 + `}]` - if isIPv6Supported() && isIPv4Supported() { + if isIPv6Supported(cs) && isIPv4Supported(cs) { cidr = `[{cidr: ` + v4 + `},{cidr: ` + v6 + `}]` - } else if isIPv6Supported() { + } else if isIPv6Supported(cs) { cidr = `[{cidr: ` + v6 + `}]` } return cidr @@ -2344,15 +2377,15 @@ func connectToServerViaDefaultNetwork(clientPodConfig podConfiguration, serverIP } // assertClientExternalConnectivity checks if the client can connect to an externally created IP outside the cluster -func assertClientExternalConnectivity(clientPodConfig podConfiguration, externalIpv4 string, externalIpv6 string, port uint16) { - if isIPv4Supported() { +func assertClientExternalConnectivity(cs clientset.Interface, clientPodConfig podConfiguration, externalIpv4 string, externalIpv6 string, port uint16) { + if isIPv4Supported(cs) { By("asserting the *client* pod can contact the server's v4 IP located outside the cluster") Eventually(func() error { return connectToServer(clientPodConfig, externalIpv4, port) }, 2*time.Minute, 6*time.Second).Should(Succeed()) } - if isIPv6Supported() { + if isIPv6Supported(cs) { By("asserting the *client* pod can contact the server's v6 IP located outside the cluster") Eventually(func() error { return connectToServer(clientPodConfig, externalIpv6, port) @@ -2360,15 +2393,15 @@ func assertClientExternalConnectivity(clientPodConfig podConfiguration, external } } -func expectedNumberOfRoutes(netConfig networkAttachmentConfigParams) int { +func expectedNumberOfRoutes(cs clientset.Interface, netConfig networkAttachmentConfigParams) int { if netConfig.topology == "layer2" { - if isIPv6Supported() && isIPv4Supported() { + if isIPv6Supported(cs) && isIPv4Supported(cs) { return 4 // 2 routes per family } else { return 2 //one family supported } } - if 
isIPv6Supported() && isIPv4Supported() { + if isIPv6Supported(cs) && isIPv4Supported(cs) { return 6 // 3 v4 routes + 3 v6 routes for UDN } return 3 //only one family, each has 3 routes diff --git a/test/e2e/network_segmentation_endpointslices_mirror.go b/test/e2e/network_segmentation_endpointslices_mirror.go index 3790b2d568..fddadb1cea 100644 --- a/test/e2e/network_segmentation_endpointslices_mirror.go +++ b/test/e2e/network_segmentation_endpointslices_mirror.go @@ -60,6 +60,7 @@ var _ = Describe("Network Segmentation EndpointSlices mirroring", feature.Networ ) { By("creating the network") netConfig.namespace = f.Namespace.Name + netConfig.cidr = filterCIDRsAndJoin(f.ClientSet, netConfig.cidr) Expect(createNetworkFn(netConfig)).To(Succeed()) replicas := int32(3) @@ -125,7 +126,7 @@ var _ = Describe("Network Segmentation EndpointSlices mirroring", feature.Networ networkAttachmentConfigParams{ name: nadName, topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, false, @@ -135,7 +136,7 @@ var _ = Describe("Network Segmentation EndpointSlices mirroring", feature.Networ networkAttachmentConfigParams{ name: nadName, topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, false, @@ -145,7 +146,7 @@ var _ = Describe("Network Segmentation EndpointSlices mirroring", feature.Networ networkAttachmentConfigParams{ name: nadName, topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, true, @@ -155,7 +156,7 @@ var _ = Describe("Network Segmentation EndpointSlices mirroring", feature.Networ networkAttachmentConfigParams{ name: nadName, 
topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, true, @@ -195,6 +196,7 @@ var _ = Describe("Network Segmentation EndpointSlices mirroring", feature.Networ Expect(err).NotTo(HaveOccurred()) By("creating the network") netConfig.namespace = defaultNetNamespace.Name + netConfig.cidr = filterCIDRsAndJoin(f.ClientSet, netConfig.cidr) Expect(createNetworkFn(netConfig)).To(Succeed()) replicas := int32(3) @@ -233,7 +235,7 @@ var _ = Describe("Network Segmentation EndpointSlices mirroring", feature.Networ networkAttachmentConfigParams{ name: nadName, topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "secondary", }, ), @@ -242,7 +244,7 @@ var _ = Describe("Network Segmentation EndpointSlices mirroring", feature.Networ networkAttachmentConfigParams{ name: nadName, topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "secondary", }, ), diff --git a/test/e2e/network_segmentation_localnet.go b/test/e2e/network_segmentation_localnet.go index a6b68db97c..1647baa9fa 100644 --- a/test/e2e/network_segmentation_localnet.go +++ b/test/e2e/network_segmentation_localnet.go @@ -67,9 +67,10 @@ var _ = Describe("Network Segmentation: Localnet", func() { name: cudnName, physicalNetworkName: physicalNetworkName, vlanID: vlan, - cidr: correctCIDRFamily(subnetIPv4, subnetIPv6), - excludeCIDRs: selectCIDRs(excludeSubnetIPv4, excludeSubnetIPv6), + cidr: filterCIDRsAndJoin(f.ClientSet, joinCIDRs(subnetIPv4, subnetIPv6)), + excludeCIDRs: filterCIDRs(f.ClientSet, excludeSubnetIPv4, excludeSubnetIPv6), } + cudnYAML := newLocalnetCUDNYaml(netConf, nsBlue, nsRed) cleanup, 
err := createManifest("", cudnYAML) Expect(err).NotTo(HaveOccurred()) diff --git a/test/e2e/network_segmentation_policy.go b/test/e2e/network_segmentation_policy.go index f00dd63bec..ffcf5f728a 100644 --- a/test/e2e/network_segmentation_policy.go +++ b/test/e2e/network_segmentation_policy.go @@ -85,6 +85,7 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.Networ ginkgo.By("Creating the attachment configuration") netConfig := newNetworkAttachmentConfig(netConfigParams) netConfig.namespace = f.Namespace.Name + netConfig.cidr = filterCIDRsAndJoin(cs, netConfig.cidr) _, err := nadClient.NetworkAttachmentDefinitions(f.Namespace.Name).Create( context.Background(), generateNAD(netConfig), @@ -137,7 +138,7 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.Networ networkAttachmentConfigParams{ name: nadName, topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, *podConfig( @@ -157,7 +158,7 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.Networ networkAttachmentConfigParams{ name: nadName, topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, *podConfig( @@ -190,7 +191,7 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.Networ nad := networkAttachmentConfigParams{ topology: topology, - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: filterCIDRsAndJoin(cs, joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet)), // The yellow, blue and red namespaces are going to served by green network. // Use random suffix for the network name to avoid race between tests. 
networkName: fmt.Sprintf("%s-%s", "green", rand.String(randomStringLength)), @@ -202,6 +203,7 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.Networ for _, namespace := range []string{namespaceYellow, namespaceBlue} { ginkgo.By("creating the attachment configuration for " + netConfName + " in namespace " + namespace) netConfig := newNetworkAttachmentConfig(nad) + netConfig.cidr = filterCIDRsAndJoin(cs, netConfig.cidr) netConfig.namespace = namespace netConfig.name = netConfName diff --git a/test/e2e/network_segmentation_services.go b/test/e2e/network_segmentation_services.go index 6f0822064f..8d2678c178 100644 --- a/test/e2e/network_segmentation_services.go +++ b/test/e2e/network_segmentation_services.go @@ -113,6 +113,7 @@ var _ = Describe("Network Segmentation: services", feature.NetworkSegmentation, By("Creating the attachment configuration") netConfig := newNetworkAttachmentConfig(netConfigParams) netConfig.namespace = f.Namespace.Name + netConfig.cidr = filterCIDRsAndJoin(cs, netConfig.cidr) _, err = nadClient.NetworkAttachmentDefinitions(f.Namespace.Name).Create( context.Background(), generateNAD(netConfig), @@ -268,7 +269,7 @@ ips=$(ip -o addr show dev $iface| grep global |awk '{print $4}' | cut -d/ -f1 | networkAttachmentConfigParams{ name: nadName, topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, ), @@ -277,7 +278,7 @@ ips=$(ip -o addr show dev $iface| grep global |awk '{print $4}' | cut -d/ -f1 | networkAttachmentConfigParams{ name: nadName, topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, ), diff --git a/test/e2e/pod.go b/test/e2e/pod.go index f5b7b12aae..e43ecee03a 100644 --- a/test/e2e/pod.go +++ b/test/e2e/pod.go 
@@ -109,11 +109,11 @@ var _ = ginkgo.Describe("Pod to external server PMTUD", func() { ExtPort: externalContainerPort} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "failed to create external container (%s)", externalContainer) - if isIPv4Supported() { + if isIPv4Supported(f.ClientSet) { gomega.Expect(externalContainer.GetIPv4()).ToNot(gomega.BeEmpty()) externalContainerIPs = append(externalContainerIPs, externalContainer.GetIPv4()) } - if isIPv6Supported() { + if isIPv6Supported(f.ClientSet) { gomega.Expect(externalContainer.GetIPv6()).ToNot(gomega.BeEmpty()) externalContainerIPs = append(externalContainerIPs, fmt.Sprintf("[%s]", externalContainer.GetIPv6())) } @@ -155,7 +155,7 @@ var _ = ginkgo.Describe("Pod to external server PMTUD", func() { primaryInf, err := infraprovider.Get().GetK8NodeNetworkInterface(clientPodNodeName, providerPrimaryNetwork) framework.ExpectNoError(err, "failed to get provider primary network interface info") clientnodeIP := primaryInf.IPv4 - if IsIPv6Cluster(f.ClientSet) && isIPv6Supported() { + if IsIPv6Cluster(f.ClientSet) && isIPv6Supported(f.ClientSet) { clientnodeIP = fmt.Sprintf("[%s]", primaryInf.IPv6) } gomega.Expect(clientnodeIP).NotTo(gomega.BeEmpty()) diff --git a/test/e2e/route_advertisements.go b/test/e2e/route_advertisements.go index f6dcdfc800..56bb25ccc6 100644 --- a/test/e2e/route_advertisements.go +++ b/test/e2e/route_advertisements.go @@ -53,10 +53,10 @@ var _ = ginkgo.Describe("BGP: Pod to external server when default podNetwork is bgpServer := infraapi.ExternalContainer{Name: serverContainerName} networkInterface, err := infraprovider.Get().GetExternalContainerNetworkInterface(bgpServer, bgpNetwork) framework.ExpectNoError(err, "container %s attached to network %s must contain network info", serverContainerName, bgpExternalNetworkName) - if isIPv4Supported() && len(networkInterface.IPv4) > 0 { + if isIPv4Supported(f.ClientSet) && 
len(networkInterface.IPv4) > 0 { serverContainerIPs = append(serverContainerIPs, networkInterface.IPv4) } - if isIPv6Supported() && len(networkInterface.IPv6) > 0 { + if isIPv6Supported(f.ClientSet) && len(networkInterface.IPv6) > 0 { serverContainerIPs = append(serverContainerIPs, networkInterface.IPv6) } framework.Logf("The external server IPs are: %+v", serverContainerIPs) @@ -219,7 +219,7 @@ var _ = ginkgo.Describe("BGP: Pod to external server when default podNetwork is 60*time.Second) framework.ExpectNoError(err, fmt.Sprintf("Testing pod to external traffic failed: %v", err)) expectedPodIP := podv4IP - if isIPv6Supported() && utilnet.IsIPv6String(serverContainerIP) { + if isIPv6Supported(f.ClientSet) && utilnet.IsIPv6String(serverContainerIP) { expectedPodIP = podv6IP // For IPv6 addresses, need to handle the brackets in the output outputIP := strings.TrimPrefix(strings.Split(stdout, "]:")[0], "[") @@ -267,10 +267,10 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert bgpServer := infraapi.ExternalContainer{Name: serverContainerName} networkInterface, err := infraprovider.Get().GetExternalContainerNetworkInterface(bgpServer, bgpNetwork) framework.ExpectNoError(err, "container %s attached to network %s must contain network info", serverContainerName, bgpExternalNetworkName) - if isIPv4Supported() && len(networkInterface.IPv4) > 0 { + if isIPv4Supported(f.ClientSet) && len(networkInterface.IPv4) > 0 { serverContainerIPs = append(serverContainerIPs, networkInterface.IPv4) } - if isIPv6Supported() && len(networkInterface.IPv6) > 0 { + if isIPv6Supported(f.ClientSet) && len(networkInterface.IPv6) > 0 { serverContainerIPs = append(serverContainerIPs, networkInterface.IPv6) } gomega.Expect(len(serverContainerIPs)).Should(gomega.BeNumerically(">", 0), "failed to find external container IPs") @@ -308,6 +308,12 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert ginkgo.By("create ClusterUserDefinedNetwork") 
udnClient, err := udnclientset.NewForConfig(f.ClientConfig()) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if cudnTemplate.Spec.Network.Layer3 != nil { + cudnTemplate.Spec.Network.Layer3.Subnets = filterL3Subnets(f.ClientSet, cudnTemplate.Spec.Network.Layer3.Subnets) + } + if cudnTemplate.Spec.Network.Layer2 != nil { + cudnTemplate.Spec.Network.Layer2.Subnets = filterDualStackCIDRs(f.ClientSet, cudnTemplate.Spec.Network.Layer2.Subnets) + } cUDN, err := udnClient.K8sV1().ClusterUserDefinedNetworks().Create(context.Background(), cudnTemplate, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.DeferCleanup(func() { @@ -418,7 +424,7 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert framework.Poll, 60*time.Second) framework.ExpectNoError(err, fmt.Sprintf("Testing pod to external traffic failed: %v", err)) - if isIPv6Supported() && utilnet.IsIPv6String(serverContainerIP) { + if isIPv6Supported(f.ClientSet) && utilnet.IsIPv6String(serverContainerIP) { podIP, err = podIPsForUserDefinedPrimaryNetwork(f.ClientSet, f.Namespace.Name, clientPod.Name, namespacedName(f.Namespace.Name, cUDN.Name), 1) // For IPv6 addresses, need to handle the brackets in the output outputIP := strings.TrimPrefix(strings.Split(stdout, "]:")[0], "[") @@ -442,13 +448,13 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert Topology: udnv1.NetworkTopologyLayer3, Layer3: &udnv1.Layer3Config{ Role: "Primary", - Subnets: generateL3Subnets(udnv1.Layer3Subnet{ + Subnets: []udnv1.Layer3Subnet{{ CIDR: "103.103.0.0/16", HostSubnet: 24, - }, udnv1.Layer3Subnet{ + }, { CIDR: "2014:100:200::0/60", HostSubnet: 64, - }), + }}, }, }, }, @@ -487,7 +493,7 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert Topology: udnv1.NetworkTopologyLayer2, Layer2: &udnv1.Layer2Config{ Role: "Primary", - Subnets: generateL2Subnets("103.0.0.0/16", "2014:100::0/60"), + Subnets: 
udnv1.DualStackCIDRs{"103.0.0.0/16", "2014:100::0/60"}, }, }, }, @@ -572,6 +578,19 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" udnClient, err := udnclientset.NewForConfig(f.ClientConfig()) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if cudnATemplate.Spec.Network.Layer3 != nil { + cudnATemplate.Spec.Network.Layer3.Subnets = filterL3Subnets(f.ClientSet, cudnATemplate.Spec.Network.Layer3.Subnets) + } + if cudnATemplate.Spec.Network.Layer2 != nil { + cudnATemplate.Spec.Network.Layer2.Subnets = filterDualStackCIDRs(f.ClientSet, cudnATemplate.Spec.Network.Layer2.Subnets) + } + if cudnBTemplate.Spec.Network.Layer3 != nil { + cudnBTemplate.Spec.Network.Layer3.Subnets = filterL3Subnets(f.ClientSet, cudnBTemplate.Spec.Network.Layer3.Subnets) + } + if cudnBTemplate.Spec.Network.Layer2 != nil { + cudnBTemplate.Spec.Network.Layer2.Subnets = filterDualStackCIDRs(f.ClientSet, cudnBTemplate.Spec.Network.Layer2.Subnets) + } + cudnA, err = udnClient.K8sV1().ClusterUserDefinedNetworks().Create(context.Background(), cudnATemplate, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -798,7 +817,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" return fmt.Errorf("expected connectivity check to contain %q, got %q", expectedOutput, out) } } - if isIPv6Supported() && isIPv4Supported() { + if isIPv6Supported(f.ClientSet) && isIPv4Supported(f.ClientSet) { // use ipFamilyIndex of 1 to pick the IPv6 addresses clientName, clientNamespace, dst, expectedOutput, expectErr := connInfo(1) out, err := checkConnectivity(clientName, clientNamespace, dst) @@ -988,13 +1007,13 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" Topology: udnv1.NetworkTopologyLayer3, Layer3: &udnv1.Layer3Config{ Role: "Primary", - Subnets: generateL3Subnets(udnv1.Layer3Subnet{ + Subnets: []udnv1.Layer3Subnet{{ CIDR: "102.102.0.0/16", HostSubnet: 24, - }, udnv1.Layer3Subnet{ + }, { CIDR: 
"2013:100:200::0/60", HostSubnet: 64, - }), + }}, }, }, }, @@ -1008,13 +1027,13 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" Topology: udnv1.NetworkTopologyLayer3, Layer3: &udnv1.Layer3Config{ Role: "Primary", - Subnets: generateL3Subnets(udnv1.Layer3Subnet{ + Subnets: []udnv1.Layer3Subnet{{ CIDR: "103.103.0.0/16", HostSubnet: 24, - }, udnv1.Layer3Subnet{ + }, { CIDR: "2014:100:200::0/60", HostSubnet: 64, - }), + }}, }, }, }, @@ -1031,7 +1050,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" Topology: udnv1.NetworkTopologyLayer2, Layer2: &udnv1.Layer2Config{ Role: "Primary", - Subnets: generateL2Subnets("102.102.0.0/16", "2013:100:200::0/60"), + Subnets: udnv1.DualStackCIDRs{"102.102.0.0/16", "2013:100:200::0/60"}, }, }, }, @@ -1045,32 +1064,10 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" Topology: udnv1.NetworkTopologyLayer2, Layer2: &udnv1.Layer2Config{ Role: "Primary", - Subnets: generateL2Subnets("103.103.0.0/16", "2014:100:200::0/60"), + Subnets: udnv1.DualStackCIDRs{"103.103.0.0/16", "2014:100:200::0/60"}, }, }, }, }, ), ) - -func generateL3Subnets(v4, v6 udnv1.Layer3Subnet) []udnv1.Layer3Subnet { - var subnets []udnv1.Layer3Subnet - if isIPv4Supported() { - subnets = append(subnets, v4) - } - if isIPv6Supported() { - subnets = append(subnets, v6) - } - return subnets -} - -func generateL2Subnets(v4, v6 string) udnv1.DualStackCIDRs { - var subnets udnv1.DualStackCIDRs - if isIPv4Supported() { - subnets = append(subnets, udnv1.CIDR(v4)) - } - if isIPv6Supported() { - subnets = append(subnets, udnv1.CIDR(v6)) - } - return subnets -} diff --git a/test/e2e/util.go b/test/e2e/util.go index aba6dcbc44..2818f33016 100644 --- a/test/e2e/util.go +++ b/test/e2e/util.go @@ -1129,14 +1129,29 @@ func randStr(n int) string { return string(b) } -func isIPv4Supported() bool { - val, present := os.LookupEnv("PLATFORM_IPV4_SUPPORT") - return present && val == 
"true" +func isCIDRIPFamilySupported(cs clientset.Interface, cidr string) bool { + ginkgo.GinkgoHelper() + gomega.Expect(cidr).To(gomega.ContainSubstring("/")) + isIPv6 := utilnet.IsIPv6CIDRString(cidr) + return (isIPv4Supported(cs) && !isIPv6) || (isIPv6Supported(cs) && isIPv6) } -func isIPv6Supported() bool { - val, present := os.LookupEnv("PLATFORM_IPV6_SUPPORT") - return present && val == "true" +func isIPv4Supported(cs clientset.Interface) bool { + v4, _ := getSupportedIPFamilies(cs) + return v4 +} + +func isIPv6Supported(cs clientset.Interface) bool { + _, v6 := getSupportedIPFamilies(cs) + return v6 +} + +func getSupportedIPFamilies(cs clientset.Interface) (bool, bool) { + n, err := e2enode.GetRandomReadySchedulableNode(context.TODO(), cs) + framework.ExpectNoError(err, "must fetch a Ready Node") + v4NodeAddrs := e2enode.GetAddressesByTypeAndFamily(n, v1.NodeInternalIP, v1.IPv4Protocol) + v6NodeAddrs := e2enode.GetAddressesByTypeAndFamily(n, v1.NodeInternalIP, v1.IPv6Protocol) + return len(v4NodeAddrs) > 0, len(v6NodeAddrs) > 0 } func isInterconnectEnabled() bool { From b18165069af68d15dd27f38b3ce19d062a572100 Mon Sep 17 00:00:00 2001 From: Martin Kennelly Date: Fri, 27 Jun 2025 11:03:13 +0100 Subject: [PATCH 106/181] E2Es: remove additional references to KinD setup Signed-off-by: Martin Kennelly --- test/e2e/e2e.go | 19 ++++----- test/e2e/multi_node_zones_interconnect.go | 20 ++++----- test/e2e/multihoming.go | 51 ++++++++++++----------- test/e2e/route_advertisements.go | 2 +- 4 files changed, 46 insertions(+), 46 deletions(-) diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index ac9bc8fb3b..be1b46bf75 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -17,6 +17,8 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "github.com/ovn-org/ovn-kubernetes/test/e2e/containerengine" 
"github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" @@ -558,16 +560,13 @@ func getApiAddress() string { } // IsGatewayModeLocal returns true if the gateway mode is local -func IsGatewayModeLocal() bool { - anno, err := e2ekubectl.RunKubectl("default", "get", "node", "ovn-control-plane", "-o", "template", "--template={{.metadata.annotations}}") - if err != nil { - framework.Logf("Error getting annotations: %v", err) - return false - } - framework.Logf("Annotations received: %s", anno) - isLocal := strings.Contains(anno, "local") - framework.Logf("IsGatewayModeLocal returning: %v", isLocal) - return isLocal +func IsGatewayModeLocal(cs kubernetes.Interface) bool { + ginkgo.GinkgoHelper() + node, err := e2enode.GetRandomReadySchedulableNode(context.TODO(), cs) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + l3Config, err := util.ParseNodeL3GatewayAnnotation(node) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "must get node l3 gateway annotation") + return l3Config.Mode == config.GatewayModeLocal } // restartOVNKubeNodePod restarts the ovnkube-node pod from namespace, running on nodeName diff --git a/test/e2e/multi_node_zones_interconnect.go b/test/e2e/multi_node_zones_interconnect.go index 0a358cd7ea..d4cc5356b2 100644 --- a/test/e2e/multi_node_zones_interconnect.go +++ b/test/e2e/multi_node_zones_interconnect.go @@ -91,10 +91,8 @@ func checkPodsInterconnectivity(clientPod, serverPod *v1.Pod, namespace string, var _ = ginkgo.Describe("Multi node zones interconnect", feature.Interconnect, func() { const ( - serverPodNodeName = "ovn-control-plane" - serverPodName = "server-pod" - clientPodNodeName = "ovn-worker3" - clientPodName = "client-pod" + serverPodName = "server-pod" + clientPodName = "client-pod" ) fr := wrappedTestFramework("multi-node-zones") @@ -120,13 +118,13 @@ var _ = ginkgo.Describe("Multi node zones interconnect", feature.Interconnect, f len(nodes.Items)) } - serverPodNode, err = 
cs.CoreV1().Nodes().Get(context.TODO(), serverPodNodeName, metav1.GetOptions{}) + serverPodNode, err = cs.CoreV1().Nodes().Get(context.TODO(), nodes.Items[0].Name, metav1.GetOptions{}) if err != nil { e2eskipper.Skipf( "Test requires node with the name %s", serverPodName, ) } - clientPodNode, err = cs.CoreV1().Nodes().Get(context.TODO(), clientPodNodeName, metav1.GetOptions{}) + clientPodNode, err = cs.CoreV1().Nodes().Get(context.TODO(), nodes.Items[1].Name, metav1.GetOptions{}) if err != nil { e2eskipper.Skipf( "Test requires node with the name %s", clientPodName, @@ -141,7 +139,7 @@ var _ = ginkgo.Describe("Multi node zones interconnect", feature.Interconnect, f if serverPodNodeZone == clientPodNodeZone { e2eskipper.Skipf( - "Test requires nodes %s and %s are in different zones", serverPodNodeName, clientPodNodeName, + "Test requires nodes %s and %s are in different zones", nodes.Items[0].Name, nodes.Items[1].Name, ) } }) @@ -150,13 +148,13 @@ var _ = ginkgo.Describe("Multi node zones interconnect", feature.Interconnect, f // Create a server pod on zone - zone-1 cmd := httpServerContainerCmd(8000) serverPod := e2epod.NewAgnhostPod(fr.Namespace.Name, serverPodName, nil, nil, nil, cmd...) - serverPod.Spec.NodeName = serverPodNodeName + serverPod.Spec.NodeName = serverPodNode.Name e2epod.NewPodClient(fr).CreateSync(context.TODO(), serverPod) // Create a client pod on zone - zone-2 cmd = []string{} clientPod := e2epod.NewAgnhostPod(fr.Namespace.Name, clientPodName, nil, nil, nil, cmd...) 
- clientPod.Spec.NodeName = clientPodNodeName + clientPod.Spec.NodeName = clientPodNode.Name e2epod.NewPodClient(fr).CreateSync(context.TODO(), clientPod) ginkgo.By("asserting the *client* pod can contact the server pod exposed endpoint") @@ -164,7 +162,7 @@ var _ = ginkgo.Describe("Multi node zones interconnect", feature.Interconnect, f framework.ExpectNoError(err, "failed to check pods interconnectivity") // Change the zone of client-pod node to that of server-pod node - s := fmt.Sprintf("Changing the client-pod node %s zone from %s to %s", clientPodNodeName, clientPodNodeZone, serverPodNodeZone) + s := fmt.Sprintf("Changing the client-pod node %s zone from %s to %s", clientPodNode.Name, clientPodNodeZone, serverPodNodeZone) ginkgo.By(s) err = changeNodeZone(clientPodNode, serverPodNodeZone, cs) framework.ExpectNoError(err, "failed to change node zone") @@ -174,7 +172,7 @@ var _ = ginkgo.Describe("Multi node zones interconnect", feature.Interconnect, f framework.ExpectNoError(err, "failed to check pods interconnectivity") // Change back the zone of client-pod node - s = fmt.Sprintf("Changing back the client-pod node %s zone from %s to %s", clientPodNodeName, serverPodNodeZone, clientPodNodeZone) + s = fmt.Sprintf("Changing back the client-pod node %s zone from %s to %s", clientPodNode.Name, serverPodNodeZone, clientPodNodeZone) ginkgo.By(s) err = changeNodeZone(clientPodNode, clientPodNodeZone, cs) framework.ExpectNoError(err, "failed to change node zone") diff --git a/test/e2e/multihoming.go b/test/e2e/multihoming.go index e16fa151a0..46ad7eedc5 100644 --- a/test/e2e/multihoming.go +++ b/test/e2e/multihoming.go @@ -272,18 +272,26 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { ) const ( - clientPodName = "client-pod" - clientIPOffset = 100 - serverIPOffset = 102 - port = 9000 - workerOneNodeName = "ovn-worker" - workerTwoNodeName = "ovn-worker2" + clientPodName = "client-pod" + clientIPOffset = 100 + serverIPOffset = 102 + port = 9000 ) 
ginkgo.DescribeTable("attached to a localnet network mapped to breth0", - func(netConfigParams networkAttachmentConfigParams, clientPodConfig, serverPodConfig podConfiguration) { - + func(netConfigParams networkAttachmentConfigParams, clientPodConfig, serverPodConfig podConfiguration, isCollocatedPods bool) { + By("Get two scheduable nodes and ensure client and server are located on distinct Nodes") + nodes, err := e2enode.GetBoundedReadySchedulableNodes(context.Background(), f.ClientSet, 2) + framework.ExpectNoError(err, "2 scheduable nodes are required") + Expect(len(nodes.Items)).To(BeNumerically(">=", 1), "cluster should have at least 2 nodes") + if isCollocatedPods { + clientPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[0].GetName()} + serverPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[0].GetName()} + } else { + clientPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[0].GetName()} + serverPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[1].GetName()} + } netConfig := newNetworkAttachmentConfig(networkAttachmentConfigParams{ name: secondaryNetworkName, namespace: f.Namespace.Name, @@ -307,7 +315,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { nad := generateNAD(netConfig) By(fmt.Sprintf("creating the attachment configuration: %v\n", nad)) - _, err := nadClient.NetworkAttachmentDefinitions(f.Namespace.Name).Create( + _, err = nadClient.NetworkAttachmentDefinitions(f.Namespace.Name).Create( context.Background(), nad, metav1.CreateOptions{}, @@ -369,7 +377,6 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { }, podConfiguration{ // client on default network name: clientPodName, - nodeSelector: map[string]string{nodeHostnameKey: workerOneNodeName}, isPrivileged: true, }, podConfiguration{ // server attached to localnet secondary network @@ -378,9 +385,9 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { }}, name: 
podName, containerCmd: httpServerContainerCmd(port), - nodeSelector: map[string]string{nodeHostnameKey: workerTwoNodeName}, needsIPRequestFromHostSubnet: true, // will override attachments above with an IPRequest }, + false, // scheduled on distinct Nodes Label("BUG", "OCPBUGS-43004"), ), ginkgo.Entry( @@ -391,7 +398,6 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { }, podConfiguration{ // client on default network name: clientPodName + "-same-node", - nodeSelector: map[string]string{nodeHostnameKey: workerTwoNodeName}, isPrivileged: true, }, podConfiguration{ // server attached to localnet secondary network @@ -400,9 +406,9 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { }}, name: podName, containerCmd: httpServerContainerCmd(port), - nodeSelector: map[string]string{nodeHostnameKey: workerTwoNodeName}, needsIPRequestFromHostSubnet: true, }, + true, // collocated on same Node Label("BUG", "OCPBUGS-43004"), ), ginkgo.Entry( @@ -416,16 +422,15 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { Name: secondaryNetworkName, }}, name: clientPodName, - nodeSelector: map[string]string{nodeHostnameKey: workerOneNodeName}, isPrivileged: true, needsIPRequestFromHostSubnet: true, }, podConfiguration{ // server on default network, pod is host-networked name: podName, containerCmd: httpServerContainerCmd(port), - nodeSelector: map[string]string{nodeHostnameKey: workerTwoNodeName}, hostNetwork: true, }, + false, // not collocated on same node Label("STORY", "SDN-5345"), ), ginkgo.Entry( @@ -439,16 +444,15 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { Name: secondaryNetworkName, }}, name: clientPodName, - nodeSelector: map[string]string{nodeHostnameKey: workerTwoNodeName}, isPrivileged: true, needsIPRequestFromHostSubnet: true, }, podConfiguration{ // server on default network, pod is host-networked name: podName, containerCmd: httpServerContainerCmd(port), - nodeSelector: map[string]string{nodeHostnameKey: 
workerTwoNodeName}, hostNetwork: true, }, + true, // collocated on same node Label("STORY", "SDN-5345"), ), ) @@ -456,13 +460,11 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { Context("multiple pods connected to the same OVN-K secondary network", func() { const ( - workerOneNodeName = "ovn-worker" - workerTwoNodeName = "ovn-worker2" - clientPodName = "client-pod" - nodeHostnameKey = "kubernetes.io/hostname" - port = 9000 - clientIP = "192.168.200.10/24" - staticServerIP = "192.168.200.20/24" + clientPodName = "client-pod" + nodeHostnameKey = "kubernetes.io/hostname" + port = 9000 + clientIP = "192.168.200.10/24" + staticServerIP = "192.168.200.20/24" ) ginkgo.It("eventually configures pods that were added to an already existing network before the nad", func() { @@ -567,6 +569,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { By("Get two scheduable nodes and schedule client and server to be on distinct Nodes") nodes, err := e2enode.GetBoundedReadySchedulableNodes(context.Background(), f.ClientSet, 2) framework.ExpectNoError(err, "2 scheduable nodes are required") + Expect(len(nodes.Items)).To(BeNumerically(">=", 1), "cluster should have at least 2 nodes") clientPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[0].GetName()} serverPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[1].GetName()} diff --git a/test/e2e/route_advertisements.go b/test/e2e/route_advertisements.go index f6dcdfc800..2cc4c2318f 100644 --- a/test/e2e/route_advertisements.go +++ b/test/e2e/route_advertisements.go @@ -299,7 +299,7 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert Values: []string{f.Namespace.Name}, }}} - if IsGatewayModeLocal() && cudnTemplate.Spec.Network.Topology == udnv1.NetworkTopologyLayer2 { + if IsGatewayModeLocal(f.ClientSet) && cudnTemplate.Spec.Network.Topology == udnv1.NetworkTopologyLayer2 { e2eskipper.Skipf( "BGP for L2 networks on LGW is currently 
unsupported", ) From a6cac97aed379e36f8c5d152d96ca788991866c2 Mon Sep 17 00:00:00 2001 From: Alin Gabriel Serdean Date: Tue, 24 Jun 2025 08:20:37 +0000 Subject: [PATCH 107/181] ovnkube in dpu host mode: advanced gateway detection This patch introduces a new way to detect the gateway interface in the case ovnkube is running in DPU-HOST mode. Introduce the argument derive-from-mgmt-port, if this is specified as the gateway interface, we will identify the physical function of the device used as an management port accelerated interface. Signed-off-by: Alin Gabriel Serdean --- .../hardware-offload/derive-from-mgmt-port.md | 163 ++++++++++++++ .../hardware-offload/dpu-gateway-interface.md | 208 ++++++++++++++++++ docs/features/hardware-offload/dpu-support.md | 2 + docs/installation/ovn_k8s.conf.5 | 10 + .../node/default_node_network_controller.go | 30 +++ .../default_node_network_controller_test.go | 199 +++++++++++++++++ go-controller/pkg/types/const.go | 3 + 7 files changed, 615 insertions(+) create mode 100644 docs/features/hardware-offload/derive-from-mgmt-port.md create mode 100644 docs/features/hardware-offload/dpu-gateway-interface.md diff --git a/docs/features/hardware-offload/derive-from-mgmt-port.md b/docs/features/hardware-offload/derive-from-mgmt-port.md new file mode 100644 index 0000000000..e7d7f38194 --- /dev/null +++ b/docs/features/hardware-offload/derive-from-mgmt-port.md @@ -0,0 +1,163 @@ +# From PCI Address Gateway Interface Feature + +## Overview + +The "derive-from-mgmt-port" gateway interface feature is a new capability in OVN-Kubernetes that enables automatic gateway interface resolution in DPU (Data Processing Unit) host mode deployments. This feature automatically discovers and configures the appropriate Physical Function (PF) interface as the gateway interface based on the Virtual Function (VF) used for the management port. 
+ +## Problem Statement + +In DPU deployments, the host typically has access to Virtual Functions (VFs) for management purposes, while the Physical Functions (PFs) are used for external connectivity. Previously, administrators had to manually specify the gateway interface, which required: + +1. Knowledge of the hardware topology +2. Manual mapping of VF to PF relationships +3. Configuration updates when hardware changes +4. Potential for misconfiguration + +## Solution + +The "derive-from-mgmt-port" feature automates the gateway interface discovery process by: + +1. **Automatic Discovery**: Automatically finds the PF interface associated with the management port VF +2. **Hardware Abstraction**: Eliminates the need for manual hardware topology knowledge +3. **Dynamic Configuration**: Adapts to hardware changes automatically +4. **Reduced Configuration**: Simplifies deployment configuration + +## Benefits + +### For Administrators + +- **Simplified Configuration**: No need to manually specify gateway interfaces +- **Reduced Errors**: Eliminates manual mapping errors +- **Hardware Agnostic**: Works with any SR-IOV capable hardware +- **Dynamic Adaptation**: Automatically adapts to hardware changes + +### For Operations + +- **Faster Deployment**: Reduced configuration time +- **Consistent Setup**: Standardized gateway interface selection +- **Reduced Maintenance**: Less manual intervention required +- **Better Reliability**: Fewer configuration-related issues + +### For Development + +- **Cleaner Code**: Centralized gateway interface logic +- **Better Testing**: Comprehensive unit test coverage +- **Extensible Design**: Foundation for future enhancements + +## Technical Implementation + +### Code Changes + +1. **New Constant**: Added `DeriveFromMgmtPort = "derive-from-mgmt-port"` constant in `go-controller/pkg/types/const.go` +2. **Enhanced Logic**: Extended gateway initialization in `go-controller/pkg/node/default_node_network_controller.go` +3. 
**Comprehensive Testing**: Added unit tests covering success and failure scenarios + +### Key Functions + +- `getManagementPortNetDev()`: Resolves management port device name +- `GetPciFromNetDevice()`: Retrieves PCI address from network device +- `GetPfPciFromVfPci()`: Resolves PF PCI address from VF PCI address +- `GetNetDevicesFromPci()`: Discovers network devices associated with PCI address + +### Error Handling + +The implementation includes robust error handling for: +- Missing network devices +- PCI address resolution failures +- SR-IOV operation failures +- Hardware compatibility issues + +## Configuration Examples + +### Basic Configuration + +```bash +--ovnkube-node-mode=dpu-host +--ovnkube-node-mgmt-port-netdev=pf0vf0 +--gateway-interface=derive-from-mgmt-port +``` + +### Helm Configuration + +```yaml +ovnkube-node: + mode: dpu-host + mgmtPortNetdev: pf0vf0 + +gateway: + interface: derive-from-mgmt-port +``` + +### Configuration File + +```ini +[OvnKubeNode] +mode=dpu-host +mgmt-port-netdev=pf0vf0 + +[Gateway] +interface=derive-from-mgmt-port +``` + +## Migration Guide + +### From Manual Configuration + +**Before:** +```bash +--gateway-interface=eth0 +``` + +**After:** +```bash +--gateway-interface=derive-from-mgmt-port +``` + +### Verification Steps + +1. Verify SR-IOV configuration is correct +2. Ensure management port device is properly configured +3. Check that PF interfaces are available +4. 
Monitor logs for successful gateway interface resolution + +## Testing + +### Unit Tests + +Comprehensive unit tests cover: +- Successful gateway interface resolution +- Error handling for missing devices +- PCI address resolution failures +- Network device discovery failures + +### Integration Tests + +The feature integrates with existing: +- Gateway initialization +- DPU host mode functionality +- SR-IOV operations +- Network configuration + +## Future Enhancements + +Potential improvements include: +- Support for multiple gateway interfaces +- Enhanced device selection criteria +- Integration with device plugins +- Support for non-SR-IOV hardware +- Advanced error reporting and diagnostics + +## Related Documentation + +- [DPU Gateway Interface Configuration](dpu-gateway-interface.md) +- [DPU Support](dpu-support.md) +- [Gateway Accelerated Interface Configuration](../design/gateway-accelerated-interface-configuration.md) +- [Configuration Guide](../../getting-started/configuration.md) + +## Support + +For issues related to this feature: +1. Check the troubleshooting section in the DPU Gateway Interface Configuration guide +2. Verify SR-IOV hardware and driver support +3. Review error messages and logs +4. Consult the OVN-Kubernetes community for additional support \ No newline at end of file diff --git a/docs/features/hardware-offload/dpu-gateway-interface.md b/docs/features/hardware-offload/dpu-gateway-interface.md new file mode 100644 index 0000000000..49f3e6ccec --- /dev/null +++ b/docs/features/hardware-offload/dpu-gateway-interface.md @@ -0,0 +1,208 @@ +# DPU Gateway Interface Configuration + +## Overview + +In DPU (Data Processing Unit) host mode deployments, OVN-Kubernetes supports automatic gateway interface resolution from PCI address. This feature is particularly useful when the management port is a Virtual Function (VF) and you want to automatically select the corresponding Physical Function (PF) interface as the gateway. 
+ +## Background + +In DPU deployments, the host typically has access to Virtual Functions (VFs) for management purposes, while the Physical Functions (PFs) are used for external connectivity. The "derive-from-mgmt-port" feature allows OVN-Kubernetes to automatically discover and configure the appropriate PF interface as the gateway interface based on the VF used for the management port. + +## How It Works + +When configured with `--gateway-interface=derive-from-mgmt-port`, OVN-Kubernetes performs the following steps: + +1. **Management Port Resolution**: Gets the management port network device name (specified by `--ovnkube-node-mgmt-port-netdev`) +2. **VF PCI Address Retrieval**: Retrieves the PCI address of the management port device (VF) +3. **PF PCI Address Resolution**: Gets the Physical Function (PF) PCI address from the Virtual Function (VF) PCI address +4. **Network Device Discovery**: Retrieves all network devices associated with the PF PCI address +5. **Interface Selection**: Selects the first available network device as the gateway interface + +## Configuration + +### Command Line Options + +```bash +--ovnkube-node-mode=dpu-host +--ovnkube-node-mgmt-port-netdev=pf0vf0 +--gateway-interface=derive-from-mgmt-port +``` + +### Configuration File + +```ini +[OvnKubeNode] +mode=dpu-host +mgmt-port-netdev=pf0vf0 + +[Gateway] +interface=derive-from-mgmt-port +``` + +### Helm Configuration + +```yaml +ovnkube-node: + mode: dpu-host + mgmtPortNetdev: pf0vf0 + +gateway: + interface: derive-from-mgmt-port +``` + +## Example Scenario + +Consider a DPU setup with the following configuration: + +- **Management port device**: `pf0vf0` (Virtual Function) +- **VF PCI address**: `0000:01:02.3` +- **PF PCI address**: `0000:01:00.0` +- **Available PF interfaces**: `eth0`, `eth1` + +With `--gateway-interface=derive-from-mgmt-port`, OVN-Kubernetes will: + +1. Start with the management port device `pf0vf0` +2. Get its PCI address `0000:01:02.3` +3. 
Resolve the PF PCI address to `0000:01:00.0` +4. Find all network devices associated with PF `0000:01:00.0`: `eth0`, `eth1` +5. Select `eth0` (first device) as the gateway interface + +## Requirements + +### Hardware Requirements + +- SR-IOV capable network interface card +- Virtual Function (VF) and Physical Function (PF) setup +- Management port configured as a VF + +### Software Requirements + +- SR-IOV utilities available on the system +- OVN-Kubernetes running in DPU host mode +- Proper VF/PF driver support + +### Configuration Requirements + +- Must be used in DPU host mode (`--ovnkube-node-mode=dpu-host`) +- Management port netdev must be specified (`--ovnkube-node-mgmt-port-netdev`) +- Gateway interface must be set to `derive-from-mgmt-port` + +## Error Handling + +The system will return an error in the following scenarios: + +### No Network Devices Found + +``` +no netdevs found for pci address 0000:01:00.0 +``` + +**Cause**: The PF PCI address doesn't have any associated network devices. + +**Resolution**: Verify that the PF has network interfaces configured and are visible to the system. + +### PCI Address Resolution Failure + +``` +failed to get PCI address +``` + +**Cause**: Unable to retrieve the PCI address from the management port device. + +**Resolution**: Ensure the management port device exists and is properly configured. + +### PF PCI Address Resolution Failure + +``` +failed to get PF PCI address +``` + +**Cause**: Unable to resolve the PF PCI address from the VF PCI address. + +**Resolution**: Verify SR-IOV configuration and driver support. + +### Network Device Discovery Failure + +``` +failed to get network devices +``` + +**Cause**: Unable to retrieve network devices associated with the PF PCI address. + +**Resolution**: Check SR-IOV utilities and system configuration. 
+ +## Troubleshooting + +### Verify SR-IOV Configuration + +```bash +# Check if SR-IOV is enabled +lspci | grep -i ethernet + +# Check VF configuration +ip link show + +# Check PF/VF relationship +ls /sys/bus/pci/devices/*/virtfn* +``` + +### Verify Management Port Device + +```bash +# Check if management port device exists +ip link show pf0vf0 + +# Check PCI address +ethtool -i pf0vf0 | grep bus-info +``` + +### Debug PCI Address Resolution + +```bash +# Get VF PCI address +cat /sys/class/net/pf0vf0/device/address + +# Get PF PCI address (if available) +cat /sys/class/net/pf0vf0/device/physfn/address +``` + +## Integration with Existing Features + +### Gateway Accelerated Interface + +The "derive-from-mgmt-port" feature is used in conjunction with management interface to select the appropriate gateway accelerated interface. + +The management port can be specified through one of the following options: +``` + --ovnkube-node-mgmt-port-netdev) + OVNKUBE_NODE_MGMT_PORT_NETDEV=$VALUE +``` + +``` + --ovnkube-node-mgmt-port-dp-resource-name) + OVNKUBE_NODE_MGMT_PORT_DP_RESOURCE_NAME=$VALUE +``` + +OVNKUBE_NODE_MGMT_PORT_DP_RESOURCE_NAME has priority over OVNKUBE_NODE_MGMT_PORT_NETDEV and it is easier to use since it points to a SRIOV Device Plugin pool name. + +### Multiple Network Support + +This feature works with multiple network support and can be used in environments where pods have multiple interfaces connected to different networks. 
+ +## Limitations + +- Only available in DPU host mode +- Requires SR-IOV capable hardware +- Limited to the first available network device from the PF +- Depends on proper VF/PF driver support +- May not work with all SR-IOV implementations + +## Future Enhancements + +Potential improvements to this feature could include: + +- Support for selecting specific network devices based on criteria +- Integration with device plugin resources +- Support for multiple gateway interfaces +- Enhanced error reporting and diagnostics +- Support for non-SR-IOV hardware configurations \ No newline at end of file diff --git a/docs/features/hardware-offload/dpu-support.md b/docs/features/hardware-offload/dpu-support.md index bc9d731a39..6ac6a5ca7d 100644 --- a/docs/features/hardware-offload/dpu-support.md +++ b/docs/features/hardware-offload/dpu-support.md @@ -43,6 +43,8 @@ These aforementioned parts are expected to be deployed also on two different Kub #### OVN Kubernetes component on a DPU-Enabled Host - ovn-node +For detailed configuration of gateway interfaces in DPU host mode, see [DPU Gateway Interface Configuration](dpu-gateway-interface.md). + ### DPU Cluster --- diff --git a/docs/installation/ovn_k8s.conf.5 b/docs/installation/ovn_k8s.conf.5 index fc790db071..10f224b831 100644 --- a/docs/installation/ovn_k8s.conf.5 +++ b/docs/installation/ovn_k8s.conf.5 @@ -124,6 +124,16 @@ or set to "shared" (share a network interface) or "local" (use a NAT-ed virtual This interface will be used as the gateway interface in "shared" mode. If not specified the interface with the default route will be used. .TP +\fBinterface\fR=derive-from-mgmt-port +In DPU host mode, automatically resolve the gateway interface from PCI address. +This performs the following steps: +1. Get the management port network device name +2. Retrieve the PCI address of the management port device +3. Get the Physical Function (PF) PCI address from the Virtual Function (VF) PCI address +4. 
Retrieve all network devices associated with the PF PCI address +5. Select the first available network device as the gateway interface +This option requires SR-IOV capable hardware and must be used with DPU host mode. +.TP \fBnext-hop\fR=1.2.3.4 This is the gateway IP address of \fBinterface\fR to which traffic exiting the OVN logical network should be sent in "shared" mode. If not specified diff --git a/go-controller/pkg/node/default_node_network_controller.go b/go-controller/pkg/node/default_node_network_controller.go index 558f7be8c9..51dc1571e1 100644 --- a/go-controller/pkg/node/default_node_network_controller.go +++ b/go-controller/pkg/node/default_node_network_controller.go @@ -1028,6 +1028,36 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { // Complete gateway initialization if config.OvnKubeNode.Mode == types.NodeModeDPUHost { + // Resolve gateway interface from PCI address when configured as "derive-from-mgmt-port" + // This performs the following steps: + // Get the management port network device name + // Retrieve the PCI address of the management port device + // Get the Physical Function (PF) PCI address from the Virtual Function (VF) PCI address + // Retrieve all network devices associated with the PF PCI address + // Select the first available network device as the gateway interface + if config.Gateway.Interface == types.DeriveFromMgmtPort { + netdevName, err := getManagementPortNetDev(config.OvnKubeNode.MgmtPortNetdev) + if err != nil { + return err + } + pciAddr, err := util.GetSriovnetOps().GetPciFromNetDevice(netdevName) + if err != nil { + return err + } + pfPciAddr, err := util.GetSriovnetOps().GetPfPciFromVfPci(pciAddr) + if err != nil { + return err + } + netdevs, err := util.GetSriovnetOps().GetNetDevicesFromPci(pfPciAddr) + if err != nil { + return err + } + if len(netdevs) == 0 { + return fmt.Errorf("no netdevs found for pci address %s", pfPciAddr) + } + netdevName = netdevs[0] + config.Gateway.Interface = 
netdevName + } err = nc.initGatewayDPUHost(nc.nodeAddress) if err != nil { return err diff --git a/go-controller/pkg/node/default_node_network_controller_test.go b/go-controller/pkg/node/default_node_network_controller_test.go index cb7087ff1b..875b0da694 100644 --- a/go-controller/pkg/node/default_node_network_controller_test.go +++ b/go-controller/pkg/node/default_node_network_controller_test.go @@ -1560,5 +1560,204 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2002:db8:1::4 } }) }) + + Describe("derive-from-mgmt-port gateway interface resolution", func() { + var ( + kubeMock *mocks.Interface + sriovnetMock utilMocks.SriovnetOps + netlinkOpsMock *utilMocks.NetLinkOps + netlinkLinkMock *netlink_mocks.Link + ) + + const ( + nodeName = "test-node" + mgmtPortNetdev = "pf0vf0" + vfPciAddr = "0000:01:02.3" + pfPciAddr = "0000:01:00.0" + expectedGatewayIntf = "eth0" + ) + + BeforeEach(func() { + kubeMock = new(mocks.Interface) + sriovnetMock = utilMocks.SriovnetOps{} + netlinkOpsMock = new(utilMocks.NetLinkOps) + netlinkLinkMock = new(netlink_mocks.Link) + + util.SetSriovnetOpsInst(&sriovnetMock) + util.SetNetLinkOpMockInst(netlinkOpsMock) + + // Setup default node network controller + cnnci := &CommonNodeNetworkControllerInfo{ + name: nodeName, + Kube: kubeMock, + } + nc = &DefaultNodeNetworkController{ + BaseNodeNetworkController: BaseNodeNetworkController{ + CommonNodeNetworkControllerInfo: *cnnci, + ReconcilableNetInfo: &util.DefaultNetInfo{}, + }, + } + + // Set DPU host mode + config.OvnKubeNode.Mode = types.NodeModeDPUHost + config.OvnKubeNode.MgmtPortNetdev = mgmtPortNetdev + config.Gateway.Interface = types.DeriveFromMgmtPort + }) + + AfterEach(func() { + util.ResetNetLinkOpMockInst() + }) + + Context("when gateway interface is set to derive-from-mgmt-port", func() { + It("should resolve gateway interface from PCI address successfully", func() { + // Mock getManagementPortNetDev to return the management port device + 
netlinkOpsMock.On("LinkByName", mgmtPortNetdev).Return(netlinkLinkMock, nil) + netlinkLinkMock.On("Attrs").Return(&netlink.LinkAttrs{ + Name: mgmtPortNetdev, + }) + + // Mock GetPciFromNetDevice to return VF PCI address + sriovnetMock.On("GetPciFromNetDevice", mgmtPortNetdev).Return(vfPciAddr, nil) + + // Mock GetPfPciFromVfPci to return PF PCI address + sriovnetMock.On("GetPfPciFromVfPci", vfPciAddr).Return(pfPciAddr, nil) + + // Mock GetNetDevicesFromPci to return available network devices + sriovnetMock.On("GetNetDevicesFromPci", pfPciAddr).Return([]string{expectedGatewayIntf, "eth1"}, nil) + + // Execute the gateway interface resolution logic + // This simulates the logic in the Start() method + netdevName, err := getManagementPortNetDev(config.OvnKubeNode.MgmtPortNetdev) + Expect(err).NotTo(HaveOccurred()) + Expect(netdevName).To(Equal(mgmtPortNetdev)) + + pciAddr, err := util.GetSriovnetOps().GetPciFromNetDevice(netdevName) + Expect(err).NotTo(HaveOccurred()) + Expect(pciAddr).To(Equal(vfPciAddr)) + + pfPciAddr, err := util.GetSriovnetOps().GetPfPciFromVfPci(pciAddr) + Expect(err).NotTo(HaveOccurred()) + Expect(pfPciAddr).To(Equal(pfPciAddr)) + + netdevs, err := util.GetSriovnetOps().GetNetDevicesFromPci(pfPciAddr) + Expect(err).NotTo(HaveOccurred()) + Expect(netdevs).To(HaveLen(2)) + Expect(netdevs[0]).To(Equal(expectedGatewayIntf)) + + // Verify that the first device is selected as the gateway interface + selectedNetdev := netdevs[0] + Expect(selectedNetdev).To(Equal(expectedGatewayIntf)) + }) + + It("should return error when no network devices found for PCI address", func() { + // Mock getManagementPortNetDev to return the management port device + netlinkOpsMock.On("LinkByName", mgmtPortNetdev).Return(netlinkLinkMock, nil) + netlinkLinkMock.On("Attrs").Return(&netlink.LinkAttrs{ + Name: mgmtPortNetdev, + }) + + // Mock GetPciFromNetDevice to return VF PCI address + sriovnetMock.On("GetPciFromNetDevice", mgmtPortNetdev).Return(vfPciAddr, nil) + + // Mock 
GetPfPciFromVfPci to return PF PCI address + sriovnetMock.On("GetPfPciFromVfPci", vfPciAddr).Return(pfPciAddr, nil) + + // Mock GetNetDevicesFromPci to return empty list + sriovnetMock.On("GetNetDevicesFromPci", pfPciAddr).Return([]string{}, nil) + + // Execute the gateway interface resolution logic + netdevName, err := getManagementPortNetDev(config.OvnKubeNode.MgmtPortNetdev) + Expect(err).NotTo(HaveOccurred()) + + pciAddr, err := util.GetSriovnetOps().GetPciFromNetDevice(netdevName) + Expect(err).NotTo(HaveOccurred()) + + pfPciAddr, err := util.GetSriovnetOps().GetPfPciFromVfPci(pciAddr) + Expect(err).NotTo(HaveOccurred()) + + netdevs, err := util.GetSriovnetOps().GetNetDevicesFromPci(pfPciAddr) + Expect(err).NotTo(HaveOccurred()) + Expect(netdevs).To(BeEmpty()) + + // This should result in an error when no devices are found + Expect(netdevs).To(BeEmpty()) + }) + + It("should return error when GetPciFromNetDevice fails", func() { + // Mock getManagementPortNetDev to return the management port device + netlinkOpsMock.On("LinkByName", mgmtPortNetdev).Return(netlinkLinkMock, nil) + netlinkLinkMock.On("Attrs").Return(&netlink.LinkAttrs{ + Name: mgmtPortNetdev, + }) + + // Mock GetPciFromNetDevice to return error + sriovnetMock.On("GetPciFromNetDevice", mgmtPortNetdev).Return("", fmt.Errorf("failed to get PCI address")) + + // Execute the gateway interface resolution logic + netdevName, err := getManagementPortNetDev(config.OvnKubeNode.MgmtPortNetdev) + Expect(err).NotTo(HaveOccurred()) + + _, err = util.GetSriovnetOps().GetPciFromNetDevice(netdevName) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to get PCI address")) + }) + + It("should return error when GetPfPciFromVfPci fails", func() { + // Mock getManagementPortNetDev to return the management port device + netlinkOpsMock.On("LinkByName", mgmtPortNetdev).Return(netlinkLinkMock, nil) + netlinkLinkMock.On("Attrs").Return(&netlink.LinkAttrs{ + Name: mgmtPortNetdev, + }) + + // 
Mock GetPciFromNetDevice to return VF PCI address + sriovnetMock.On("GetPciFromNetDevice", mgmtPortNetdev).Return(vfPciAddr, nil) + + // Mock GetPfPciFromVfPci to return error + sriovnetMock.On("GetPfPciFromVfPci", vfPciAddr).Return("", fmt.Errorf("failed to get PF PCI address")) + + // Execute the gateway interface resolution logic + netdevName, err := getManagementPortNetDev(config.OvnKubeNode.MgmtPortNetdev) + Expect(err).NotTo(HaveOccurred()) + + pciAddr, err := util.GetSriovnetOps().GetPciFromNetDevice(netdevName) + Expect(err).NotTo(HaveOccurred()) + + _, err = util.GetSriovnetOps().GetPfPciFromVfPci(pciAddr) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to get PF PCI address")) + }) + + It("should return error when GetNetDevicesFromPci fails", func() { + // Mock getManagementPortNetDev to return the management port device + netlinkOpsMock.On("LinkByName", mgmtPortNetdev).Return(netlinkLinkMock, nil) + netlinkLinkMock.On("Attrs").Return(&netlink.LinkAttrs{ + Name: mgmtPortNetdev, + }) + + // Mock GetPciFromNetDevice to return VF PCI address + sriovnetMock.On("GetPciFromNetDevice", mgmtPortNetdev).Return(vfPciAddr, nil) + + // Mock GetPfPciFromVfPci to return PF PCI address + sriovnetMock.On("GetPfPciFromVfPci", vfPciAddr).Return(pfPciAddr, nil) + + // Mock GetNetDevicesFromPci to return error + sriovnetMock.On("GetNetDevicesFromPci", pfPciAddr).Return(nil, fmt.Errorf("failed to get network devices")) + + // Execute the gateway interface resolution logic + netdevName, err := getManagementPortNetDev(config.OvnKubeNode.MgmtPortNetdev) + Expect(err).NotTo(HaveOccurred()) + + pciAddr, err := util.GetSriovnetOps().GetPciFromNetDevice(netdevName) + Expect(err).NotTo(HaveOccurred()) + + pfPciAddr, err := util.GetSriovnetOps().GetPfPciFromVfPci(pciAddr) + Expect(err).NotTo(HaveOccurred()) + + _, err = util.GetSriovnetOps().GetNetDevicesFromPci(pfPciAddr) + Expect(err).To(HaveOccurred()) + 
Expect(err.Error()).To(ContainSubstring("failed to get network devices")) + }) + }) + }) }) }) diff --git a/go-controller/pkg/types/const.go b/go-controller/pkg/types/const.go index 452421d289..8ba7269cad 100644 --- a/go-controller/pkg/types/const.go +++ b/go-controller/pkg/types/const.go @@ -187,6 +187,9 @@ const ( NodeModeDPU = "dpu" NodeModeDPUHost = "dpu-host" + // Gateway interface configuration + DeriveFromMgmtPort = "derive-from-mgmt-port" + // Geneve header length for IPv4 (https://github.com/openshift/cluster-network-operator/pull/720#issuecomment-664020823) GeneveHeaderLengthIPv4 = 58 // Geneve header length for IPv6 (https://github.com/openshift/cluster-network-operator/pull/720#issuecomment-664020823) From 9e356ba505445561734660f7b22dfb10631a1aa3 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Thu, 17 Jul 2025 09:06:35 +0000 Subject: [PATCH 108/181] doc: Remove OKEP template from the list of rendered pages Signed-off-by: Ihar Hrachyshka --- mkdocs.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/mkdocs.yml b/mkdocs.yml index e5cd4a3dd6..f82f75c977 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -144,7 +144,6 @@ nav: - OVN observability: observability/ovn-observability.md - Enhancement Proposals: # - FeatureName: okeps/ - - Template: okeps/okep-4368-template.md - Localnet API: okeps/okep-5085-localnet-api.md - Network QoS: okeps/okep-4380-network-qos.md - User Defined Networks: okeps/okep-5193-user-defined-networks.md From 82850ed2b176cbef36419a92f718166a0b9f9d65 Mon Sep 17 00:00:00 2001 From: Patryk Diak Date: Fri, 4 Jul 2025 14:05:54 +0200 Subject: [PATCH 109/181] Enable Layer2 route advertisements on LGW Configure use_ct_inv_match=false on nbdb when route advertisements are enabled in LGW to support assymetric traffic on Layer2 advertised networks. 
Signed-off-by: Patryk Diak --- dist/images/ovnkube.sh | 5 ++ .../ovnkube-single-node-zone.yaml.j2 | 4 ++ .../routeadvertisements/controller.go | 4 -- test/e2e/route_advertisements.go | 52 +++++++++++-------- 4 files changed, 39 insertions(+), 26 deletions(-) diff --git a/dist/images/ovnkube.sh b/dist/images/ovnkube.sh index 192f342662..3931d4e180 100755 --- a/dist/images/ovnkube.sh +++ b/dist/images/ovnkube.sh @@ -989,6 +989,11 @@ local-nb-ovsdb() { wait_for_event attempts=3 process_ready ovnnb_db echo "=============== nb-ovsdb (unix sockets only) ========== RUNNING" + [[ "local" == "${OVN_GATEWAY_MODE}" && "true" == "${OVN_ROUTE_ADVERTISEMENTS_ENABLE}" ]] && { + ovn-nbctl set NB_Global . options:use_ct_inv_match=false + echo "=============== nb-ovsdb ========== reconfigured for route advertisements" + } + # Let ovn-northd sleep and not use so much CPU ovn-nbctl set NB_Global . options:northd-backoff-interval-ms=${ovn_northd_backoff_interval} echo "=============== nb-ovsdb ========== reconfigured for northd backoff" diff --git a/dist/templates/ovnkube-single-node-zone.yaml.j2 b/dist/templates/ovnkube-single-node-zone.yaml.j2 index df5533a668..ad4e4488f9 100644 --- a/dist/templates/ovnkube-single-node-zone.yaml.j2 +++ b/dist/templates/ovnkube-single-node-zone.yaml.j2 @@ -79,6 +79,10 @@ spec: value: "{{ ovn_loglevel_nb }}" - name: OVN_NORTHD_BACKOFF_INTERVAL value: "{{ ovn_northd_backoff_interval }}" + - name: OVN_GATEWAY_MODE + value: "{{ ovn_gateway_mode }}" + - name: OVN_ROUTE_ADVERTISEMENTS_ENABLE + value: "{{ ovn_route_advertisements_enable }}" - name: K8S_APISERVER valueFrom: configMapKeyRef: diff --git a/go-controller/pkg/clustermanager/routeadvertisements/controller.go b/go-controller/pkg/clustermanager/routeadvertisements/controller.go index 04daa6cde1..11f7eb79ab 100644 --- a/go-controller/pkg/clustermanager/routeadvertisements/controller.go +++ b/go-controller/pkg/clustermanager/routeadvertisements/controller.go @@ -373,10 +373,6 @@ func (c *Controller) 
generateFRRConfigurations(ra *ratypes.RouteAdvertisements) return nil, nil, fmt.Errorf("%w: selected network %q has unsupported topology %q", errConfig, networkName, network.TopologyType()) } - if config.Gateway.Mode == config.GatewayModeLocal && network.TopologyType() == types.Layer2Topology { - return nil, nil, fmt.Errorf("%w: BGP is currently not supported for Layer2 networks in local gateway mode, network: %s", errConfig, network.GetNetworkName()) - } - if advertisements.Has(ratypes.EgressIP) && network.TopologyType() == types.Layer2Topology { return nil, nil, fmt.Errorf("%w: EgressIP advertisement is currently not supported for Layer2 networks, network: %s", errConfig, network.GetNetworkName()) } diff --git a/test/e2e/route_advertisements.go b/test/e2e/route_advertisements.go index 2cc4c2318f..66c258eeae 100644 --- a/test/e2e/route_advertisements.go +++ b/test/e2e/route_advertisements.go @@ -299,11 +299,6 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert Values: []string{f.Namespace.Name}, }}} - if IsGatewayModeLocal(f.ClientSet) && cudnTemplate.Spec.Network.Topology == udnv1.NetworkTopologyLayer2 { - e2eskipper.Skipf( - "BGP for L2 networks on LGW is currently unsupported", - ) - } // Create CUDN ginkgo.By("create ClusterUserDefinedNetwork") udnClient, err := udnclientset.NewForConfig(f.ClientConfig()) @@ -521,6 +516,10 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks", func(cudnATemplate, cudnBTemplate *udnv1.ClusterUserDefinedNetwork) { const curlConnectionTimeoutCode = "28" + const ( + ipFamilyV4 = iota + ipFamilyV6 + ) f := wrappedTestFramework("bpp-network-isolation") f.SkipNamespaceCreation = true @@ -536,9 +535,6 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" var ra *rav1.RouteAdvertisements var hostNetworkPort int ginkgo.BeforeEach(func() { - if 
cudnATemplate.Spec.Network.Topology == udnv1.NetworkTopologyLayer2 && isLocalGWModeEnabled() { - e2eskipper.Skipf("Advertising Layer2 UDNs is not currently supported in LGW") - } ginkgo.By("Configuring primary UDN namespaces") var err error udnNamespaceA, err = f.CreateNamespace(context.TODO(), f.BaseName, map[string]string{ @@ -700,9 +696,6 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" }) ginkgo.AfterEach(func() { - if cudnATemplate.Spec.Network.Topology == udnv1.NetworkTopologyLayer2 && isLocalGWModeEnabled() { - return - } gomega.Expect(f.ClientSet.CoreV1().Pods(udnNamespaceA.Name).DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{})).To(gomega.Succeed()) gomega.Expect(f.ClientSet.CoreV1().Pods(udnNamespaceB.Name).DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{})).To(gomega.Succeed()) @@ -779,7 +772,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" framework.Logf("Connectivity check successful:'%s' -> %s", client, targetAddress) return out, nil } - clientName, clientNamespace, dst, expectedOutput, expectErr := connInfo(0) + clientName, clientNamespace, dst, expectedOutput, expectErr := connInfo(ipFamilyV4) asyncAssertion := gomega.Eventually timeout := time.Second * 30 @@ -800,7 +793,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" } if isIPv6Supported() && isIPv4Supported() { // use ipFamilyIndex of 1 to pick the IPv6 addresses - clientName, clientNamespace, dst, expectedOutput, expectErr := connInfo(1) + clientName, clientNamespace, dst, expectedOutput, expectErr := connInfo(ipFamilyV6) out, err := checkConnectivity(clientName, clientNamespace, dst) if expectErr != (err != nil) { return fmt.Errorf("expected connectivity check to return error(%t), got %v, output %v", expectErr, err, out) @@ -906,7 +899,16 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised 
networks" }), ginkgo.Entry("pod in the UDN should not be able to access a service in a different UDN", func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { - return podsNetA[0].Name, podsNetA[0].Namespace, net.JoinHostPort(svcNetB.Spec.ClusterIPs[ipFamilyIndex], "8080") + "/clientip", curlConnectionTimeoutCode, true + err := true + out := curlConnectionTimeoutCode + if cudnATemplate.Spec.Network.Topology == udnv1.NetworkTopologyLayer2 && isLocalGWModeEnabled() { + // FIXME: prevent looping of traffic in L2 UDNs + // bad behaviour: packet is looping from management port -> breth0 -> GR -> management port -> breth0 and so on + // which is a never ending loop + // this causes curl timeout with code 7 host unreachable instead of code 28 + out = "" + } + return podsNetA[0].Name, podsNetA[0].Namespace, net.JoinHostPort(svcNetB.Spec.ClusterIPs[ipFamilyIndex], "8080") + "/clientip", out, err }), ginkgo.Entry("host to a local UDN pod should not work", func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { @@ -963,14 +965,20 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" errBool := false out := "" if cudnATemplate.Spec.Network.Topology == udnv1.NetworkTopologyLayer2 { - // FIXME: fix assymmetry in L2 UDNs - // bad behaviour: packet is coming from other node -> entering eth0 -> bretho and here kernel drops the packet since - // rp_filter is set to 1 in breth0 and there is an iprule that sends the packet to mpX interface so kernel sees the packet - // having return path different from the incoming interface. - // The SNAT to nodeIP should fix this. - // this causes curl timeout with code 28 - errBool = true - out = curlConnectionTimeoutCode + // FIXME: this should be removed once we add the SNAT for pod->node traffic + // We now permit asymmetric traffic on LGW. 
This prevents the issue from occurring with IPv6. + // However, for IPv4 LGW rp_filter is still blocking the replies. + // The situation is different on SGW as we don't allow asymmetric traffic at all, which is why IPv6 traffic fails there too. + if ipFamilyIndex == ipFamilyV4 || !isLocalGWModeEnabled() { + // FIXME: fix assymmetry in L2 UDNs + // bad behaviour: packet is coming from other node -> entering eth0 -> bretho and here kernel drops the packet since + // rp_filter is set to 1 in breth0 and there is an iprule that sends the packet to mpX interface so kernel sees the packet + // having return path different from the incoming interface. + // The SNAT to nodeIP should fix this. + // this causes curl timeout with code 28 + errBool = true + out = curlConnectionTimeoutCode + } } return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(hostNetworkPort)) + "/hostname", out, errBool }), From dcc403c1ddf11e30e6990699616405f6dc47dd71 Mon Sep 17 00:00:00 2001 From: Patryk Diak Date: Wed, 16 Jul 2025 14:59:56 +0200 Subject: [PATCH 110/181] Fix UDN to alien ClusterIP looping issue The isolation rules originally added here: d63887ed167da260d3f26c71ec06e520d89a4b0f redirect the traffic originated from a UDN back to the same UDNs patchport. This causes a following traffic loop for advertised L2 networks in LGW: 1. A UDN pod sends traffic to a service IP outside the UDN. 2. Traffic exits through the `ovn-mpX` port and is routed to `breth0`. 3. The following OpenFlow rules redirect it back to the UDN patch port: - `table=0,priority=550,ip,in_port=LOCAL,nw_src=,nw_dst=,actions=ct(commit,table=2,zone=64001)` - `table=2,priority=200,ip,nw_src= actions=...,output:""` 4. A route on the L2 gateway router sends the traffic back to `ovn-mpX`, restarting the loop. To fix this, the rule is changed to drop the traffic directly instead of redirecting it. 
Although currently this change is required only for advertised L2 networks in LGW the rule is changed for all scenarios to avoid introducing use-case specific behavior. Additionally, the priority of the packet marking flow is adjusted to remove any potential ambiguity. While this change could affect future support for host-networked UDN pods accessing ClusterIP services, it should be possible to re-use the existing per-UDN pkt marking approach. Signed-off-by: Patryk Diak --- go-controller/pkg/node/gateway_shared_intf.go | 25 +++++++++++-------- go-controller/pkg/node/gateway_udn_test.go | 24 +++++++++--------- test/e2e/route_advertisements.go | 22 ++-------------- 3 files changed, 28 insertions(+), 43 deletions(-) diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index a8d3b81aa7..922a68a2bd 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -406,9 +406,9 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI // Add flows for default network services that are accessible from UDN networks if util.IsNetworkSegmentationSupportEnabled() { - // The flow added below has a higher priority than the per UDN service flow: - // priority=200, table=2, ip, ip_src=169.254.0., actions=set_field:->eth_dst,output: - // This ordering ensures that traffic to UDN allowed default services goes to the the default patch port. + // The flow added below has a higher priority than the per UDN service isolation flow: + // priority=200, table=2, ip, ip_src=169.254.0., actions=drop + // This ordering ensures that traffic to UDN allowed default services goes to the default patch port. 
if util.IsUDNEnabledService(ktypes.NamespacedName{Namespace: service.Namespace, Name: service.Name}.String()) { key = strings.Join([]string{"UDNAllowedSVC", service.Namespace, service.Name}, "_") @@ -1788,13 +1788,17 @@ func flowsForDefaultBridge(bridge *bridgeConfiguration, extraIPs []net.IP) ([]st // Use the filtered subnets for the flow compute instead of the masqueradeIP srcIPOrSubnet = matchingIPFamilySubnet.String() } + + // Drop traffic coming from the masquerade IP or the UDN subnet(for advertised UDNs) to ensure that + // isolation between networks is enforced. This handles the case where a pod on the UDN subnet is sending traffic to + // a service in another UDN. dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=200, table=2, ip, ip_src=%s, "+ - "actions=set_field:%s->eth_dst,output:%s", - defaultOpenFlowCookie, srcIPOrSubnet, - bridgeMacAddress, netConfig.ofPortPatch)) + "actions=drop", + defaultOpenFlowCookie, srcIPOrSubnet)) + dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, table=2, ip, pkt_mark=%s, "+ + fmt.Sprintf("cookie=%s, priority=250, table=2, ip, pkt_mark=%s, "+ "actions=set_field:%s->eth_dst,output:%s", defaultOpenFlowCookie, netConfig.pktMark, bridgeMacAddress, netConfig.ofPortPatch)) @@ -1825,11 +1829,10 @@ func flowsForDefaultBridge(bridge *bridgeConfiguration, extraIPs []net.IP) ([]st } dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=200, table=2, ip6, ipv6_src=%s, "+ - "actions=set_field:%s->eth_dst,output:%s", - defaultOpenFlowCookie, srcIPOrSubnet, - bridgeMacAddress, netConfig.ofPortPatch)) + "actions=drop", + defaultOpenFlowCookie, srcIPOrSubnet)) dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, table=2, ip6, pkt_mark=%s, "+ + fmt.Sprintf("cookie=%s, priority=250, table=2, ip6, pkt_mark=%s, "+ "actions=set_field:%s->eth_dst,output:%s", defaultOpenFlowCookie, netConfig.pktMark, bridgeMacAddress, netConfig.ofPortPatch)) diff --git a/go-controller/pkg/node/gateway_udn_test.go 
b/go-controller/pkg/node/gateway_udn_test.go index 4d73529c86..575d8bc9c8 100644 --- a/go-controller/pkg/node/gateway_udn_test.go +++ b/go-controller/pkg/node/gateway_udn_test.go @@ -280,7 +280,7 @@ func checkDefaultSvcIsolationOVSFlows(flows []string, defaultConfig *bridgeUDNCo Expect(nTable2Flows).To(Equal(1)) } -func checkAdvertisedUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeUDNConfiguration, netName, bridgeMAC string, svcCIDR *net.IPNet, expectedNFlows int) { +func checkAdvertisedUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeUDNConfiguration, netName string, svcCIDR *net.IPNet, expectedNFlows int) { By(fmt.Sprintf("Checking advertsised UDN %s service isolation flows for %s; expected %d flows", netName, svcCIDR.String(), expectedNFlows)) @@ -303,8 +303,8 @@ func checkAdvertisedUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeUDN var nFlows int for _, flow := range flows { - if strings.Contains(flow, fmt.Sprintf("priority=200, table=2, %s, %s_src=%s, actions=set_field:%s->eth_dst,output:%s", - protoPrefix, protoPrefix, matchingIPFamilySubnet, bridgeMAC, netConfig.ofPortPatch)) { + if strings.Contains(flow, fmt.Sprintf("priority=200, table=2, %s, %s_src=%s, actions=drop", + protoPrefix, protoPrefix, matchingIPFamilySubnet)) { nFlows++ } if strings.Contains(flow, fmt.Sprintf("priority=550, in_port=LOCAL, %s, %s_src=%s, %s_dst=%s, actions=ct(commit,zone=64001,table=2)", @@ -316,7 +316,7 @@ func checkAdvertisedUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeUDN Expect(nFlows).To(Equal(expectedNFlows)) } -func checkUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeUDNConfiguration, netName, bridgeMAC string, svcCIDR *net.IPNet, expectedNFlows int) { +func checkUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeUDNConfiguration, netName string, svcCIDR *net.IPNet, expectedNFlows int) { By(fmt.Sprintf("Checking UDN %s service isolation flows for %s; expected %d flows", netName, svcCIDR.String(), expectedNFlows)) @@ 
-332,8 +332,8 @@ func checkUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeUDNConfigurat var nFlows int for _, flow := range flows { - if strings.Contains(flow, fmt.Sprintf("priority=200, table=2, %s, %s_src=%s, actions=set_field:%s->eth_dst,output:%s", - protoPrefix, protoPrefix, mgmtMasqIP, bridgeMAC, netConfig.ofPortPatch)) { + if strings.Contains(flow, fmt.Sprintf("priority=200, table=2, %s, %s_src=%s, actions=drop", + protoPrefix, protoPrefix, mgmtMasqIP)) { nFlows++ } } @@ -797,7 +797,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect exactly one flow per UDN for table 2 for service isolation. - checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", bridgeMAC, svcCIDR, 1) + checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 1) } // The second call to checkPorts() will return no ofPort for the UDN - simulating a deletion that already was @@ -827,7 +827,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect no more flows per UDN for table 2 for service isolation. - checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", bridgeMAC, svcCIDR, 0) + checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 0) } return nil }) @@ -1028,7 +1028,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect exactly one flow per UDN for tables 0 and 2 for service isolation. 
- checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", bridgeMAC, svcCIDR, 1) + checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 1) } // The second call to checkPorts() will return no ofPort for the UDN - simulating a deletion that already was @@ -1058,7 +1058,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect no more flows per UDN for tables 0 and 2 for service isolation. - checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", bridgeMAC, svcCIDR, 0) + checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 0) } return nil }) @@ -1269,7 +1269,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect exactly one flow per advertised UDN for table 2 and table 0 for service isolation. - checkAdvertisedUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", bridgeMAC, svcCIDR, 2) + checkAdvertisedUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 2) } // The second call to checkPorts() will return no ofPort for the UDN - simulating a deletion that already was @@ -1299,7 +1299,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect no more flows per UDN for table 2 and table0 for service isolation. 
- checkAdvertisedUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", bridgeMAC, svcCIDR, 0) + checkAdvertisedUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 0) } return nil }) diff --git a/test/e2e/route_advertisements.go b/test/e2e/route_advertisements.go index 66c258eeae..efdbdb9451 100644 --- a/test/e2e/route_advertisements.go +++ b/test/e2e/route_advertisements.go @@ -882,16 +882,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" }), ginkgo.Entry("pod in the UDN should not be able to access a default network service", func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { - err := true - out := curlConnectionTimeoutCode - if cudnATemplate.Spec.Network.Topology == udnv1.NetworkTopologyLayer2 { - // FIXME: prevent looping of traffic in L2 UDNs - // bad behaviour: packet is looping from management port -> breth0 -> GR -> management port -> breth0 and so on - // which is a never ending loop - // this causes curl timeout with code 7 host unreachable instead of code 28 - out = "" - } - return podsNetA[0].Name, podsNetA[0].Namespace, net.JoinHostPort(svcNetDefault.Spec.ClusterIPs[ipFamilyIndex], "8080") + "/clientip", out, err + return podsNetA[0].Name, podsNetA[0].Namespace, net.JoinHostPort(svcNetDefault.Spec.ClusterIPs[ipFamilyIndex], "8080") + "/clientip", curlConnectionTimeoutCode, true }), ginkgo.Entry("pod in the UDN should be able to access kapi in default network service", func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { @@ -899,16 +890,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" }), ginkgo.Entry("pod in the UDN should not be able to access a service in a different UDN", func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { - err 
:= true - out := curlConnectionTimeoutCode - if cudnATemplate.Spec.Network.Topology == udnv1.NetworkTopologyLayer2 && isLocalGWModeEnabled() { - // FIXME: prevent looping of traffic in L2 UDNs - // bad behaviour: packet is looping from management port -> breth0 -> GR -> management port -> breth0 and so on - // which is a never ending loop - // this causes curl timeout with code 7 host unreachable instead of code 28 - out = "" - } - return podsNetA[0].Name, podsNetA[0].Namespace, net.JoinHostPort(svcNetB.Spec.ClusterIPs[ipFamilyIndex], "8080") + "/clientip", out, err + return podsNetA[0].Name, podsNetA[0].Namespace, net.JoinHostPort(svcNetB.Spec.ClusterIPs[ipFamilyIndex], "8080") + "/clientip", curlConnectionTimeoutCode, true }), ginkgo.Entry("host to a local UDN pod should not work", func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { From a0101b5635a5b4e626df7928883b8b8c8c35d79a Mon Sep 17 00:00:00 2001 From: Enrique Llorente Date: Fri, 25 Apr 2025 11:37:29 +0200 Subject: [PATCH 111/181] kv, e2e: Download virtctl at tests Signed-off-by: Enrique Llorente --- contrib/kind-common | 23 ------- test/e2e/kubevirt.go | 59 ++++++++-------- test/e2e/kubevirt/client.go | 128 +++++++++++++++++++++++++++++++++++ test/e2e/kubevirt/console.go | 32 ++++----- test/e2e/kubevirt/ip.go | 4 +- test/e2e/kubevirt/net.go | 8 +-- test/e2e/kubevirt/nmstate.go | 4 +- test/scripts/install-kind.sh | 3 - 8 files changed, 181 insertions(+), 80 deletions(-) create mode 100644 test/e2e/kubevirt/client.go diff --git a/contrib/kind-common b/contrib/kind-common index bbb7cda7e1..2a564dece0 100644 --- a/contrib/kind-common +++ b/contrib/kind-common @@ -388,30 +388,7 @@ install_kubevirt() { local kubevirt_stable_release_url=$(get_kubevirt_release_url "stable") kubectl -n kubevirt patch kubevirt kubevirt --type=json --patch 
'[{"op":"add","path":"/spec/configuration/network","value":{}},{"op":"add","path":"/spec/configuration/network/binding","value":{"l2bridge":{"domainAttachmentType":"managedTap","migration":{}}}}]' - - if [ ! -d "./bin" ] - then - mkdir -p ./bin - if_error_exit "Failed to create bin dir!" - fi - - if [[ "$OSTYPE" == "linux-gnu" ]]; then - OS_TYPE="linux" - elif [[ "$OSTYPE" == "darwin"* ]]; then - OS_TYPE="darwin" - fi - - pushd ./bin - if [ ! -f ./virtctl ]; then - kubevirt_stable_release_url=$(get_kubevirt_release_url "stable") - cli_name="virtctl-${kubevirt_stable_release_url##*/}-${OS_TYPE}-${ARCH}" - curl -LO "${kubevirt_stable_release_url}/${cli_name}" - mv ${cli_name} virtctl - if_error_exit "Failed to download virtctl!" - fi - popd - chmod +x ./bin/virtctl } install_cert_manager() { diff --git a/test/e2e/kubevirt.go b/test/e2e/kubevirt.go index 4ae12854a2..5c6adca101 100644 --- a/test/e2e/kubevirt.go +++ b/test/e2e/kubevirt.go @@ -8,7 +8,6 @@ import ( "net" "net/netip" "os" - "os/exec" "strings" "sync" "time" @@ -103,6 +102,7 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun fr = wrappedTestFramework("kv-live-migration") d = diagnostics.New(fr) crClient crclient.Client + virtClient *kubevirt.Client namespace string iperf3DefaultPort = int32(5201) tcpServerPort = int32(9900) @@ -300,7 +300,7 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun output := "" Eventually(func() error { var err error - output, err = kubevirt.RunCommand(vmi, fmt.Sprintf("curl http://%s", net.JoinHostPort(podIP, "8000")), polling) + output, err = virtClient.RunCommand(vmi, fmt.Sprintf("curl http://%s", net.JoinHostPort(podIP, "8000")), polling) return err }). WithPolling(polling). 
@@ -316,7 +316,7 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun polling := 15 * time.Second for podName, serverPodIPs := range serverPodIPsByName { for _, serverPodIP := range serverPodIPs { - output, err := kubevirt.RunCommand(vmi, fmt.Sprintf("iperf3 -t 0 -c %[2]s --logfile /tmp/%[1]s_%[2]s_iperf3.log &", podName, serverPodIP), polling) + output, err := virtClient.RunCommand(vmi, fmt.Sprintf("iperf3 -t 0 -c %[2]s --logfile /tmp/%[1]s_%[2]s_iperf3.log &", podName, serverPodIP), polling) if err != nil { return fmt.Errorf("%s: %w", output, err) } @@ -361,7 +361,7 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun for _, podIP := range podIPs { iperfLogFile := fmt.Sprintf("/tmp/%s_%s_iperf3.log", podName, podIP) execFn := func(cmd string) (string, error) { - return kubevirt.RunCommand(vmi, cmd, 2*time.Second) + return virtClient.RunCommand(vmi, cmd, 2*time.Second) } checkIperfTraffic(iperfLogFile, execFn, stage) } @@ -404,7 +404,7 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun startNorthSouthEgressIperfTraffic = func(vmi *kubevirtv1.VirtualMachineInstance, addresses []string, port int32, stage string) error { GinkgoHelper() execFn := func(cmd string) (string, error) { - return kubevirt.RunCommand(vmi, cmd, 5*time.Second) + return virtClient.RunCommand(vmi, cmd, 5*time.Second) } return startNorthSouthIperfTraffic(execFn, addresses, port, "egress", stage) } @@ -431,7 +431,7 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun for _, ip := range addresses { iperfLogFile := fmt.Sprintf("/tmp/egress_test_%s_%d_iperf3.log", ip, port) execFn := func(cmd string) (string, error) { - return kubevirt.RunCommand(vmi, cmd, 5*time.Second) + return virtClient.RunCommand(vmi, cmd, 5*time.Second) } checkIperfTraffic(iperfLogFile, execFn, stage) } @@ -446,7 +446,7 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun 
continue } cmd := fmt.Sprintf("ping -c 3 -W 2 %s", ip) - stdout, err := kubevirt.RunCommand(vmi, cmd, 5*time.Second) + stdout, err := virtClient.RunCommand(vmi, cmd, 5*time.Second) Expect(err).NotTo(HaveOccurred()) Expect(stdout).To(ContainSubstring(" 0% packet loss")) } @@ -514,7 +514,7 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun step = by(vmName, stage+": Check n/s tcp traffic") output := "" Eventually(func() error { - output, err = kubevirt.RunCommand(vmi, "curl -kL https://kubernetes.default.svc.cluster.local", polling) + output, err = virtClient.RunCommand(vmi, "curl -kL https://kubernetes.default.svc.cluster.local", polling) return err }). WithPolling(polling). @@ -730,7 +730,7 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun addressByFamily = func(familyFn func(iface kubevirt.Interface) []kubevirt.Address, vmi *kubevirtv1.VirtualMachineInstance) func() ([]kubevirt.Address, error) { return func() ([]kubevirt.Address, error) { - networkState, err := kubevirt.RetrieveNetworkState(vmi) + networkState, err := kubevirt.RetrieveNetworkState(virtClient, vmi) if err != nil { return nil, err } @@ -831,14 +831,14 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun addresses, err := addressByFamily(ipv4, vmi)() Expect(err).NotTo(HaveOccurred()) if isDualStack() { - output, err := kubevirt.RunCommand(vmi, `echo '{"interfaces":[{"name":"enp1s0","type":"ethernet","state":"up","ipv4":{"enabled":true,"dhcp":true},"ipv6":{"enabled":true,"dhcp":true,"autoconf":false}}],"routes":{"config":[{"destination":"::/0","next-hop-interface":"enp1s0","next-hop-address":"fe80::1"}]}}' |nmstatectl apply`, 5*time.Second) + output, err := virtClient.RunCommand(vmi, `echo 
'{"interfaces":[{"name":"enp1s0","type":"ethernet","state":"up","ipv4":{"enabled":true,"dhcp":true},"ipv6":{"enabled":true,"dhcp":true,"autoconf":false}}],"routes":{"config":[{"destination":"::/0","next-hop-interface":"enp1s0","next-hop-address":"fe80::1"}]}}' |nmstatectl apply`, 5*time.Second) Expect(err).NotTo(HaveOccurred(), output) step = by(vmi.Name, "Wait for virtual machine to receive IPv6 address from DHCP") Eventually(addressByFamily(ipv6, vmi)). WithPolling(time.Second). WithTimeout(5*time.Minute). Should(HaveLen(2), func() string { - output, _ := kubevirt.RunCommand(vmi, "journalctl -u nmstate", 2*time.Second) + output, _ := virtClient.RunCommand(vmi, "journalctl -u nmstate", 2*time.Second) return step + " -> journal nmstate: " + output }) ipv6Addresses, err := addressByFamily(ipv6, vmi)() @@ -1076,7 +1076,7 @@ passwd: } err := crClient.Get(context.TODO(), crclient.ObjectKeyFromObject(vmi), vmi) Expect(err).NotTo(HaveOccurred()) - Expect(kubevirt.LoginToFedora(vmi, "core", "fedora")).To(Succeed(), step) + Expect(virtClient.LoginToFedora(vmi, "core", "fedora")).To(Succeed(), step) waitVirtualMachineAddresses(vmi) @@ -1084,7 +1084,7 @@ passwd: svc, err := fr.ClientSet.CoreV1().Services(namespace).Create(context.TODO(), composeService("tcpserver", vm.Name, tcpServerPort), metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred(), step) defer func() { - output, err := kubevirt.RunCommand(vmi, "podman logs tcpserver", 10*time.Second) + output, err := virtClient.RunCommand(vmi, "podman logs tcpserver", 10*time.Second) Expect(err).NotTo(HaveOccurred()) fmt.Printf("%s tcpserver logs: %s", vmi.Name, output) }() @@ -1334,6 +1334,9 @@ fi var err error crClient, err = newControllerRuntimeClient() Expect(err).NotTo(HaveOccurred()) + + virtClient, err = kubevirt.NewClient("/tmp") + Expect(err).NotTo(HaveOccurred()) }) Context("with default pod network", Ordered, func() { @@ -1533,7 +1536,7 @@ fi description: "restart", cmd: func() { By("Restarting vm") - output, err := 
exec.Command("virtctl", "restart", "-n", namespace, vmi.Name).CombinedOutput() + output, err := virtClient.RestartVirtualMachine(vmi) Expect(err).NotTo(HaveOccurred(), output) By("Wait some time to vmi conditions to catch up after restart") @@ -1811,7 +1814,7 @@ ip route add %[3]s via %[4]s step := by(vmi.Name, "Login to virtual machine for the first time") Eventually(func() error { - return kubevirt.LoginToFedora(vmi, "fedora", "fedora") + return virtClient.LoginToFedora(vmi, "fedora", "fedora") }). WithTimeout(5*time.Second). WithPolling(time.Second). @@ -1836,7 +1839,7 @@ ip route add %[3]s via %[4]s Expect(testPodsIPs).NotTo(BeEmpty()) Eventually(kubevirt.RetrieveAllGlobalAddressesFromGuest). - WithArguments(vmi). + WithArguments(virtClient, vmi). WithTimeout(5*time.Second). WithPolling(time.Second). Should(ConsistOf(expectedAddresesAtGuest), step) @@ -1855,13 +1858,13 @@ ip route add %[3]s via %[4]s expectedIPv6GatewayPath, err := kubevirt.GenerateGatewayIPv6RouterLLA(nodeRunningVMI, networkName) Expect(err).NotTo(HaveOccurred()) Eventually(kubevirt.RetrieveIPv6Gateways). - WithArguments(vmi). + WithArguments(virtClient, vmi). WithTimeout(5*time.Second). WithPolling(time.Second). 
Should(Equal([]string{expectedIPv6GatewayPath}), "should filter remote ipv6 gateway nexthop") } step = by(vmi.Name, fmt.Sprintf("Check north/south traffic before %s %s", td.resource.description, td.test.description)) - output, err := kubevirt.RunCommand(vmi, "/tmp/iperf-server.sh", time.Minute) + output, err := virtClient.RunCommand(vmi, "/tmp/iperf-server.sh", time.Minute) Expect(err).NotTo(HaveOccurred(), step+": "+output) Expect(startNorthSouthIngressIperfTraffic(externalContainerName, serverIPs, serverPort, step)).To(Succeed()) checkNorthSouthIngressIperfTraffic(externalContainerName, serverIPs, serverPort, step) @@ -1884,13 +1887,13 @@ ip route add %[3]s via %[4]s td.test.cmd() step = by(vmi.Name, fmt.Sprintf("Login to virtual machine after %s %s", td.resource.description, td.test.description)) - Expect(kubevirt.LoginToFedora(vmi, "fedora", "fedora")).To(Succeed(), step) + Expect(virtClient.LoginToFedora(vmi, "fedora", "fedora")).To(Succeed(), step) obtainedAddresses := virtualMachineAddressesFromStatus(vmi, expectedNumberOfAddresses) Expect(obtainedAddresses).To(Equal(expectedAddreses)) Eventually(kubevirt.RetrieveAllGlobalAddressesFromGuest). - WithArguments(vmi). + WithArguments(virtClient, vmi). WithTimeout(5*time.Second). WithPolling(time.Second). Should(ConsistOf(expectedAddresesAtGuest), step) @@ -1900,7 +1903,7 @@ ip route add %[3]s via %[4]s // At restart we need re-connect Expect(startEastWestIperfTraffic(vmi, testPodsIPs, step)).To(Succeed(), step) if td.role == udnv1.NetworkRolePrimary { - output, err := kubevirt.RunCommand(vmi, "/tmp/iperf-server.sh &", time.Minute) + output, err := virtClient.RunCommand(vmi, "/tmp/iperf-server.sh &", time.Minute) Expect(err).NotTo(HaveOccurred(), step+": "+output) Expect(startNorthSouthIngressIperfTraffic(externalContainerName, serverIPs, serverPort, step)).To(Succeed()) } @@ -1928,7 +1931,7 @@ ip route add %[3]s via %[4]s Expect(err).NotTo(HaveOccurred(), step) Eventually(kubevirt.RetrieveCachedGatewayMAC). 
- WithArguments(vmi, "enp1s0", cidrIPv4). + WithArguments(virtClient, vmi, "enp1s0", cidrIPv4). WithTimeout(10*time.Second). WithPolling(time.Second). Should(Equal(expectedGatewayMAC), step) @@ -1942,7 +1945,7 @@ ip route add %[3]s via %[4]s targetNodeIPv6GatewayPath, err := kubevirt.GenerateGatewayIPv6RouterLLA(targetNode, networkName) Expect(err).NotTo(HaveOccurred()) Eventually(kubevirt.RetrieveIPv6Gateways). - WithArguments(vmi). + WithArguments(virtClient, vmi). WithTimeout(5*time.Second). WithPolling(time.Second). Should(Equal([]string{targetNodeIPv6GatewayPath}), "should reconcile ipv6 gateway nexthop after live migration") @@ -2248,14 +2251,14 @@ chpasswd: { expire: False } step := by(vmi.Name, "Login to virtual machine for the first time") Eventually(func() error { - return kubevirt.LoginToFedora(vmi, "fedora", "fedora") + return virtClient.LoginToFedora(vmi, "fedora", "fedora") }). WithTimeout(5*time.Second). WithPolling(time.Second). Should(Succeed(), step) step = by(vmi.Name, "Wait for cloud init to finish at first boot") - output, err := kubevirt.RunCommand(vmi, "cloud-init status --wait", time.Minute) + output, err := virtClient.RunCommand(vmi, "cloud-init status --wait", time.Minute) Expect(err).NotTo(HaveOccurred(), step+": "+output) testPodsIPs := podsMultusNetworkIPs(iperfServerTestPods, podNetworkStatusByNetConfigPredicate(namespace, cudn.Name, strings.ToLower(string(cudn.Spec.Network.Localnet.Role)))) @@ -2272,13 +2275,13 @@ chpasswd: { expire: False } Expect(crClient.Get(context.Background(), crclient.ObjectKeyFromObject(vmi), vmi)).To(Succeed()) step = by(vmi.Name, "Login to virtual machine after virtual machine instance live migration") - Expect(kubevirt.LoginToFedora(vmi, "fedora", "fedora")).To(Succeed(), step) + Expect(virtClient.LoginToFedora(vmi, "fedora", "fedora")).To(Succeed(), step) step = by(vmi.Name, "Check east/west traffic after virtual machine instance live migration") checkEastWestIperfTraffic(vmi, testPodsIPs, step) By("Stop 
iperf3 traffic before force killing vm, so iperf3 server do not get stuck") - output, err = kubevirt.RunCommand(vmi, "killall iperf3", 5*time.Second) + output, err = virtClient.RunCommand(vmi, "killall iperf3", 5*time.Second) Expect(err).ToNot(HaveOccurred(), output) step = by(vmi.Name, fmt.Sprintf("Force kill qemu at node %q where VM is running on", vmi.Status.NodeName)) @@ -2290,7 +2293,7 @@ chpasswd: { expire: False } Expect(crClient.Get(context.TODO(), crclient.ObjectKeyFromObject(vmi), vmi)).To(Succeed()) step = by(vmi.Name, "Login to virtual machine after virtual machine instance force killed") - Expect(kubevirt.LoginToFedora(vmi, "fedora", "fedora")).To(Succeed(), step) + Expect(virtClient.LoginToFedora(vmi, "fedora", "fedora")).To(Succeed(), step) step = by(vmi.Name, "Restart iperf traffic after forcing a vm failure") Expect(startEastWestIperfTraffic(vmi, testPodsIPs, step)).To(Succeed(), step) diff --git a/test/e2e/kubevirt/client.go b/test/e2e/kubevirt/client.go new file mode 100644 index 0000000000..60c2cbcc2f --- /dev/null +++ b/test/e2e/kubevirt/client.go @@ -0,0 +1,128 @@ +package kubevirt + +import ( + "fmt" + "io" + "net/http" + "os" + "os/exec" + "path/filepath" + "strings" + + retry "k8s.io/client-go/util/retry" + + kubevirtv1 "kubevirt.io/api/core/v1" +) + +type Client struct { + path string +} + +func NewClient(cliDir string) (*Client, error) { + // Ensure the virtctl directory exists. + if err := os.MkdirAll(cliDir, 0755); err != nil { + return nil, fmt.Errorf("failed to create virtctl directory %q: %w", cliDir, err) + } + + // Ensure the virtctl executable is present. 
+ if err := ensureVirtctl(cliDir); err != nil { + return nil, fmt.Errorf("failed to ensure virtctl: %w", err) + } + + return &Client{path: filepath.Join(cliDir, "virtctl")}, nil +} + +func (virtctl *Client) RestartVirtualMachine(vmi *kubevirtv1.VirtualMachineInstance) (string, error) { + output, err := exec.Command(virtctl.path, "restart", "-n", vmi.Namespace, vmi.Name).CombinedOutput() + if err != nil { + return "", fmt.Errorf("failed to restart VM: %w", err) + } + return string(output), nil +} + +func ensureVirtctl(cliDir string) error { + // Check if the "virtctl" executable exists in the specified path. + // If it does not exist, call the installVirtctl function. + if _, err := os.Stat(filepath.Join(cliDir, "virtctl")); os.IsNotExist(err) { + return installVirtctl(cliDir) + } else if err != nil { + return fmt.Errorf("error checking virtctl executable: %w", err) + } + return nil +} + +func downloadVirtctlBinary() (io.ReadCloser, error) { + // Fetch the latest stable version of KubeVirt from the stable.txt file. + stableResp, err := http.Get("https://storage.googleapis.com/kubevirt-prow/release/kubevirt/kubevirt/stable.txt") + if err != nil { + return nil, fmt.Errorf("failed to fetch stable version: %w", err) + } + defer stableResp.Body.Close() + + // Check if the HTTP response status is OK. + if stableResp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("failed to fetch stable version: received status code %d", stableResp.StatusCode) + } + + // Read the version from the response body. + versionBytes, err := io.ReadAll(stableResp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read stable version: %w", err) + } + version := strings.TrimSpace(string(versionBytes)) + + // Construct the download URL for the virtctl binary. + virtctlURL := fmt.Sprintf("https://github.com/kubevirt/kubevirt/releases/download/%s/virtctl-%s-linux-amd64", version, version) + + // Download the virtctl binary. 
+ virtctlResp, err := http.Get(virtctlURL) + if err != nil { + return nil, fmt.Errorf("failed to download virtctl: %w", err) + } + + // Check if the HTTP response status is OK. + if virtctlResp.StatusCode != http.StatusOK { + // Close the body on error to prevent resource leaks + virtctlResp.Body.Close() + return nil, fmt.Errorf("failed to download virtctl: received status code %d", virtctlResp.StatusCode) + } + + return virtctlResp.Body, nil +} + +func installVirtctl(cliDir string) error { + var virtctlBody io.ReadCloser + allErrors := func(err error) bool { + return true + } + err := retry.OnError(retry.DefaultRetry, allErrors, func() error { + var downloadErr error + virtctlBody, downloadErr = downloadVirtctlBinary() + return downloadErr // Return the error if download failed, nil otherwise. + }) + if err != nil { + // If err is not nil here, it means all retries failed. + return err + } + defer virtctlBody.Close() // Ensure the body is closed + + // Save the binary to the specified directory. + cliPath := filepath.Join(cliDir, "virtctl") + outFile, err := os.Create(cliPath) + if err != nil { + return fmt.Errorf("failed to create virtctl file at %s: %w", cliPath, err) + } + defer outFile.Close() + + _, err = io.Copy(outFile, virtctlBody) + if err != nil { + return fmt.Errorf("failed to save virtctl binary to %s: %w", cliPath, err) + } + + // Make the binary executable. + if err := os.Chmod(cliPath, 0755); err != nil { + return fmt.Errorf("failed to make virtctl executable at %s: %w", cliPath, err) + } + + return nil +} diff --git a/test/e2e/kubevirt/console.go b/test/e2e/kubevirt/console.go index 822bd04162..4ca7533be8 100644 --- a/test/e2e/kubevirt/console.go +++ b/test/e2e/kubevirt/console.go @@ -54,21 +54,12 @@ var ( shellFailRegexp = regexp.MustCompile(shellFail) ) -// SafeExpectBatch runs the batch from `expected`, connecting to a VMI's console and -// waiting `wait` seconds for the batch to return. -// It validates that the commands arrive to the console. 
-// NOTE: This functions heritage limitations from `expectBatchWithValidatedSend` refer to it to check them. -func safeExpectBatch(vmi *v1.VirtualMachineInstance, expected []expect.Batcher, timeout time.Duration) error { - _, err := safeExpectBatchWithResponse(vmi, expected, timeout) - return err -} - // safeExpectBatchWithResponse runs the batch from `expected`, connecting to a VMI's console and // waiting `wait` seconds for the batch to return with a response. // It validates that the commands arrive to the console. // NOTE: This functions inherits limitations from `expectBatchWithValidatedSend`, refer to it for more information. -func safeExpectBatchWithResponse(vmi *v1.VirtualMachineInstance, expected []expect.Batcher, timeout time.Duration) ([]expect.BatchRes, error) { - expecter, _, err := newExpecter(vmi, consoleConnectionTimeout, expect.Verbose(true), expect.VerboseWriter(GinkgoWriter)) +func safeExpectBatchWithResponse(virtctlPath string, vmi *v1.VirtualMachineInstance, expected []expect.Batcher, timeout time.Duration) ([]expect.BatchRes, error) { + expecter, _, err := newExpecter(virtctlPath, vmi, consoleConnectionTimeout, expect.Verbose(true), expect.VerboseWriter(GinkgoWriter)) if err != nil { return nil, err } @@ -81,8 +72,12 @@ func safeExpectBatchWithResponse(vmi *v1.VirtualMachineInstance, expected []expe return resp, err } -func RunCommand(vmi *v1.VirtualMachineInstance, command string, timeout time.Duration) (string, error) { - results, err := safeExpectBatchWithResponse(vmi, []expect.Batcher{ +func (virtctl *Client) RunCommand(vmi *v1.VirtualMachineInstance, command string, timeout time.Duration) (string, error) { + return runCommand(virtctl.path, vmi, command, timeout) +} + +func runCommand(virtctlPath string, vmi *v1.VirtualMachineInstance, command string, timeout time.Duration) (string, error) { + results, err := safeExpectBatchWithResponse(virtctlPath, vmi, []expect.Batcher{ &expect.BSnd{S: "\n"}, &expect.BExp{R: PromptExpression}, 
&expect.BSnd{S: command + "\n"}, @@ -114,10 +109,11 @@ func skipInput(scanner *bufio.Scanner) bool { // newExpecter will connect to an already logged in VMI console and return the generated expecter it will wait `timeout` for the connection. func newExpecter( + virtctlPath string, vmi *v1.VirtualMachineInstance, timeout time.Duration, opts ...expect.Option) (expect.Expecter, <-chan error, error) { - virtctlCmd := []string{"virtctl", "console", "-n", vmi.Namespace, vmi.Name} + virtctlCmd := []string{virtctlPath, "console", "-n", vmi.Namespace, vmi.Name} return expect.SpawnWithArgs(virtctlCmd, timeout, expect.SendTimeout(timeout), expect.Verbose(true), expect.VerboseWriter(GinkgoWriter)) } @@ -182,13 +178,13 @@ func expectBatchWithValidatedSend(expecter expect.Expecter, batch []expect.Batch return res, err } -func LoginToFedora(vmi *kubevirtv1.VirtualMachineInstance, user, password string) error { - return LoginToFedoraWithHostname(vmi, user, password, vmi.Name) +func (virtctl *Client) LoginToFedora(vmi *kubevirtv1.VirtualMachineInstance, user, password string) error { + return loginToFedoraWithHostname(virtctl.path, vmi, user, password, vmi.Name) } // LoginToFedora performs a console login to a Fedora base VM -func LoginToFedoraWithHostname(vmi *kubevirtv1.VirtualMachineInstance, user, password, hostname string) error { - expecter, _, err := newExpecter(vmi, consoleConnectionTimeout, expect.Verbose(true), expect.VerboseWriter(GinkgoWriter)) +func loginToFedoraWithHostname(virtctlPath string, vmi *kubevirtv1.VirtualMachineInstance, user, password, hostname string) error { + expecter, _, err := newExpecter(virtctlPath, vmi, consoleConnectionTimeout, expect.Verbose(true), expect.VerboseWriter(GinkgoWriter)) if err != nil { return err } diff --git a/test/e2e/kubevirt/ip.go b/test/e2e/kubevirt/ip.go index 180c7d252a..3e11bd9b92 100644 --- a/test/e2e/kubevirt/ip.go +++ b/test/e2e/kubevirt/ip.go @@ -8,7 +8,7 @@ import ( v1 "kubevirt.io/api/core/v1" ) -func 
RetrieveAllGlobalAddressesFromGuest(vmi *v1.VirtualMachineInstance) ([]string, error) { +func RetrieveAllGlobalAddressesFromGuest(cli *Client, vmi *v1.VirtualMachineInstance) ([]string, error) { ifaces := []struct { Name string `json:"ifname"` Addresses []struct { @@ -19,7 +19,7 @@ func RetrieveAllGlobalAddressesFromGuest(vmi *v1.VirtualMachineInstance) ([]stri } `json:"addr_info"` }{} - output, err := RunCommand(vmi, "ip -j a show", 2*time.Second) + output, err := cli.RunCommand(vmi, "ip -j a show", 2*time.Second) if err != nil { return nil, fmt.Errorf("failed retrieving adresses with ip command: %s: %w", output, err) } diff --git a/test/e2e/kubevirt/net.go b/test/e2e/kubevirt/net.go index 8c65118ae1..03b7e819ff 100644 --- a/test/e2e/kubevirt/net.go +++ b/test/e2e/kubevirt/net.go @@ -27,7 +27,7 @@ nmcli c mod %[1]s ipv4.addresses "" ipv6.addresses "" ipv4.gateway "" ipv6.gatew nmcli d reapply %[1]s`, iface) } -func RetrieveCachedGatewayMAC(vmi *kubevirtv1.VirtualMachineInstance, dev, cidr string) (string, error) { +func RetrieveCachedGatewayMAC(cli *Client, vmi *kubevirtv1.VirtualMachineInstance, dev, cidr string) (string, error) { _, ipNet, err := net.ParseCIDR(cidr) if err != nil { return "", err @@ -35,7 +35,7 @@ func RetrieveCachedGatewayMAC(vmi *kubevirtv1.VirtualMachineInstance, dev, cidr gatewayIP := util.GetNodeGatewayIfAddr(ipNet).IP.String() - output, err := RunCommand(vmi, fmt.Sprintf("ip neigh get %s dev %s", gatewayIP, dev), 2*time.Second) + output, err := cli.RunCommand(vmi, fmt.Sprintf("ip neigh get %s dev %s", gatewayIP, dev), 2*time.Second) if err != nil { return "", fmt.Errorf("%s: %v", output, err) } @@ -46,12 +46,12 @@ func RetrieveCachedGatewayMAC(vmi *kubevirtv1.VirtualMachineInstance, dev, cidr return outputSplit[4], nil } -func RetrieveIPv6Gateways(vmi *v1.VirtualMachineInstance) ([]string, error) { +func RetrieveIPv6Gateways(cli *Client, vmi *v1.VirtualMachineInstance) ([]string, error) { routes := []struct { Gateway string 
`json:"gateway"` }{} - output, err := RunCommand(vmi, "ip -6 -j route list default", 2*time.Second) + output, err := cli.RunCommand(vmi, "ip -6 -j route list default", 2*time.Second) if err != nil { return nil, fmt.Errorf("%s: %v", output, err) } diff --git a/test/e2e/kubevirt/nmstate.go b/test/e2e/kubevirt/nmstate.go index 10e8e34108..bd852ca794 100644 --- a/test/e2e/kubevirt/nmstate.go +++ b/test/e2e/kubevirt/nmstate.go @@ -27,8 +27,8 @@ type NetworkState struct { Interfaces []Interface `json:"interfaces"` } -func RetrieveNetworkState(vmi *v1.VirtualMachineInstance) (*NetworkState, error) { - output, err := RunCommand(vmi, "nmstatectl show --json", 2*time.Second) +func RetrieveNetworkState(cli *Client, vmi *v1.VirtualMachineInstance) (*NetworkState, error) { + output, err := cli.RunCommand(vmi, "nmstatectl show --json", 2*time.Second) if err != nil { return nil, fmt.Errorf("%s: %v", output, err) } diff --git a/test/scripts/install-kind.sh b/test/scripts/install-kind.sh index d7674159e1..1b41646c7e 100755 --- a/test/scripts/install-kind.sh +++ b/test/scripts/install-kind.sh @@ -78,8 +78,5 @@ else ./kind.sh fi -if [ "$KIND_INSTALL_KUBEVIRT" == true ]; then - sudo mv ./bin/virtctl /usr/local/bin/virtctl -fi popd # go our of $SCRIPT_DIR/../../contrib From b60dbcd9dfbd7dfe26fe082b0e62ccea8c0744a9 Mon Sep 17 00:00:00 2001 From: Enrique Llorente Date: Fri, 25 Apr 2025 11:40:48 +0200 Subject: [PATCH 112/181] kv, e2e: ensure there is no dots at podtest name Signed-off-by: Enrique Llorente --- test/e2e/kubevirt.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/test/e2e/kubevirt.go b/test/e2e/kubevirt.go index 5c6adca101..94e8ad7b31 100644 --- a/test/e2e/kubevirt.go +++ b/test/e2e/kubevirt.go @@ -1132,11 +1132,15 @@ passwd: g.Expect(pod.Status.PodIP).NotTo(BeEmpty(), "pod %s has no valid IP address yet", pod.Name) } + sanitizeNodeName = func(nodeName string) string { + return strings.ReplaceAll(nodeName, ".", "-") + } + createHTTPServerPods = 
func(annotations map[string]string) []*corev1.Pod { var pods []*corev1.Pod for _, selectedNode := range selectedNodes { pod := composeAgnhostPod( - "testpod-"+selectedNode.Name, + "testpod-"+sanitizeNodeName(selectedNode.Name), namespace, selectedNode.Name, "netexec", "--http-port", "8000") @@ -1206,7 +1210,7 @@ fi IPRequest: staticIPs, } } - pod, err := createPod(fr, "testpod-"+node.Name, node.Name, namespace, []string{"bash", "-c"}, map[string]string{}, func(pod *corev1.Pod) { + pod, err := createPod(fr, "testpod-"+sanitizeNodeName(node.Name), node.Name, namespace, []string{"bash", "-c"}, map[string]string{}, func(pod *corev1.Pod) { if nse != nil { pod.Annotations = networkSelectionElements(*nse) } From 318782be67d347c18ffebe21b6bf6c1fa42a2770 Mon Sep 17 00:00:00 2001 From: Enrique Llorente Date: Tue, 29 Apr 2025 12:26:54 +0200 Subject: [PATCH 113/181] kv, e2e: Use the ExternalContainer struct instead of name Signed-off-by: Enrique Llorente --- test/e2e/kubevirt.go | 47 ++++++++++++++++++++++---------------------- 1 file changed, 23 insertions(+), 24 deletions(-) diff --git a/test/e2e/kubevirt.go b/test/e2e/kubevirt.go index 94e8ad7b31..6a05bb688b 100644 --- a/test/e2e/kubevirt.go +++ b/test/e2e/kubevirt.go @@ -393,10 +393,10 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun return nil } - startNorthSouthIngressIperfTraffic = func(containerName string, addresses []string, port int32, stage string) error { + startNorthSouthIngressIperfTraffic = func(container infraapi.ExternalContainer, addresses []string, port int32, stage string) error { GinkgoHelper() execFn := func(cmd string) (string, error) { - return infraprovider.Get().ExecExternalContainerCommand(infraapi.ExternalContainer{Name: containerName}, []string{"bash", "-c", cmd}) + return infraprovider.Get().ExecExternalContainerCommand(container, []string{"bash", "-c", cmd}) } return startNorthSouthIperfTraffic(execFn, addresses, port, "ingress", stage) } @@ -409,13 +409,13 @@ 
var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun return startNorthSouthIperfTraffic(execFn, addresses, port, "egress", stage) } - checkNorthSouthIngressIperfTraffic = func(containerName string, addresses []string, port int32, stage string) { + checkNorthSouthIngressIperfTraffic = func(container infraapi.ExternalContainer, addresses []string, port int32, stage string) { GinkgoHelper() Expect(addresses).NotTo(BeEmpty()) for _, ip := range addresses { iperfLogFile := fmt.Sprintf("/tmp/ingress_test_%s_%d_iperf3.log", ip, port) execFn := func(cmd string) (string, error) { - return infraprovider.Get().ExecExternalContainerCommand(infraapi.ExternalContainer{Name: containerName}, []string{"bash", "-c", cmd}) + return infraprovider.Get().ExecExternalContainerCommand(container, []string{"bash", "-c", cmd}) } checkIperfTraffic(iperfLogFile, execFn, stage) } @@ -1762,23 +1762,22 @@ write_files: iperfServerTestPods, err = createIperfServerPods(selectedNodes, cudn.Name, td.role, []string{}) Expect(err).NotTo(HaveOccurred()) - network, err := infraprovider.Get().PrimaryNetwork() - Expect(err).ShouldNot(HaveOccurred(), "primary network must be available to attach containers") - if containerNetwork := containerNetwork(td); containerNetwork != network.Name() { - network, err = infraprovider.Get().GetNetwork(containerNetwork) - Expect(err).ShouldNot(HaveOccurred(), "must to get alternative network") - } - externalContainerPort := infraprovider.Get().GetExternalContainerPort() - externalContainerName := namespace + "-iperf" - externalContainerSpec := infraapi.ExternalContainer{ - Name: externalContainerName, - Image: images.IPerf3(), - Network: network, - Args: []string{"sleep infinity"}, - ExtPort: externalContainerPort, + var externalContainer infraapi.ExternalContainer + if td.role == udnv1.NetworkRolePrimary { + primaryProviderNetwork, err := infraprovider.Get().PrimaryNetwork() + Expect(err).ShouldNot(HaveOccurred(), "primary network must be 
available to attach containers") + externalContainerPort := infraprovider.Get().GetExternalContainerPort() + externalContainerName := namespace + "-iperf" + externalContainerSpec := infraapi.ExternalContainer{ + Name: externalContainerName, + Image: images.IPerf3(), + Network: primaryProviderNetwork, + Args: []string{"sleep infinity"}, + ExtPort: externalContainerPort, + } + externalContainer, err = providerCtx.CreateExternalContainer(externalContainerSpec) + Expect(err).ShouldNot(HaveOccurred(), "creation of external container is test dependency") } - externalContainer, err := providerCtx.CreateExternalContainer(externalContainerSpec) - Expect(err).ShouldNot(HaveOccurred(), "creation of external container is test dependency") var externalContainerIPs []string if externalContainer.IsIPv4() { @@ -1870,8 +1869,8 @@ ip route add %[3]s via %[4]s step = by(vmi.Name, fmt.Sprintf("Check north/south traffic before %s %s", td.resource.description, td.test.description)) output, err := virtClient.RunCommand(vmi, "/tmp/iperf-server.sh", time.Minute) Expect(err).NotTo(HaveOccurred(), step+": "+output) - Expect(startNorthSouthIngressIperfTraffic(externalContainerName, serverIPs, serverPort, step)).To(Succeed()) - checkNorthSouthIngressIperfTraffic(externalContainerName, serverIPs, serverPort, step) + Expect(startNorthSouthIngressIperfTraffic(externalContainer, serverIPs, serverPort, step)).To(Succeed()) + checkNorthSouthIngressIperfTraffic(externalContainer, serverIPs, serverPort, step) checkNorthSouthEgressICMPTraffic(vmi, externalContainerIPs, step) if td.ingress == "routed" { _, err := infraprovider.Get().ExecExternalContainerCommand(externalContainer, []string{"bash", "-c", iperfServerScript}) @@ -1909,13 +1908,13 @@ ip route add %[3]s via %[4]s if td.role == udnv1.NetworkRolePrimary { output, err := virtClient.RunCommand(vmi, "/tmp/iperf-server.sh &", time.Minute) Expect(err).NotTo(HaveOccurred(), step+": "+output) - 
Expect(startNorthSouthIngressIperfTraffic(externalContainerName, serverIPs, serverPort, step)).To(Succeed()) + Expect(startNorthSouthIngressIperfTraffic(externalContainer, serverIPs, serverPort, step)).To(Succeed()) } } checkEastWestIperfTraffic(vmi, testPodsIPs, step) if td.role == udnv1.NetworkRolePrimary { step = by(vmi.Name, fmt.Sprintf("Check north/south traffic after %s %s", td.resource.description, td.test.description)) - checkNorthSouthIngressIperfTraffic(externalContainerName, serverIPs, serverPort, step) + checkNorthSouthIngressIperfTraffic(externalContainer, serverIPs, serverPort, step) checkNorthSouthEgressICMPTraffic(vmi, externalContainerIPs, step) if td.ingress == "routed" { checkNorthSouthEgressIperfTraffic(vmi, externalContainerIPs, iperf3DefaultPort, step) From 7c1de13e6210cd2f5769f7d4094f168fc7f1b4ec Mon Sep 17 00:00:00 2001 From: Enrique Llorente Date: Tue, 29 Apr 2025 11:17:21 +0200 Subject: [PATCH 114/181] e2e: Remove harcoded breth0 Signed-off-by: Enrique Llorente --- test/e2e/kubevirt.go | 2 +- test/e2e/localnet-underlay.go | 13 ++++++------- test/e2e/multihoming.go | 7 ++++--- test/e2e/node_ip_mac_migration.go | 6 +++--- test/e2e/util.go | 2 +- test/scripts/e2e-cp.sh | 6 +++--- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/test/e2e/kubevirt.go b/test/e2e/kubevirt.go index 6a05bb688b..b687b1cc42 100644 --- a/test/e2e/kubevirt.go +++ b/test/e2e/kubevirt.go @@ -1422,7 +1422,7 @@ fi Expect(err).NotTo(HaveOccurred()) d.ConntrackDumpingDaemonSet() - d.OVSFlowsDumpingDaemonSet("breth0") + d.OVSFlowsDumpingDaemonSet(deploymentconfig.Get().ExternalBridgeName()) d.IPTablesDumpingDaemonSet() bandwidthPerMigration := resource.MustParse("40Mi") diff --git a/test/e2e/localnet-underlay.go b/test/e2e/localnet-underlay.go index 03649143dd..97c06d0ecc 100644 --- a/test/e2e/localnet-underlay.go +++ b/test/e2e/localnet-underlay.go @@ -17,15 +17,14 @@ import ( ) const ( - defaultOvsBridge = "breth0" - secondaryBridge = "ovsbr1" - add = 
"add-br" - del = "del-br" + secondaryBridge = "ovsbr1" + add = "add-br" + del = "del-br" ) func setupUnderlay(ovsPods []v1.Pod, bridgeName, portName, networkName string, vlanID int) error { for _, ovsPod := range ovsPods { - if bridgeName != defaultOvsBridge { + if bridgeName != deploymentconfig.Get().ExternalBridgeName() { if err := addOVSBridge(ovsPod.Namespace, ovsPod.Name, bridgeName); err != nil { return err } @@ -68,7 +67,7 @@ func ovsRemoveSwitchPort(ovsPods []v1.Pod, portName string, newVLANID int) error func teardownUnderlay(ovsPods []v1.Pod, bridgeName string) error { for _, ovsPod := range ovsPods { - if bridgeName != defaultOvsBridge { + if bridgeName != deploymentconfig.Get().ExternalBridgeName() { if err := removeOVSBridge(ovsPod.Namespace, ovsPod.Name, bridgeName); err != nil { return err } @@ -180,7 +179,7 @@ func configureBridgeMappings(podNamespace, podName string, mappings ...BridgeMap func defaultNetworkBridgeMapping() BridgeMapping { return BridgeMapping{ physnet: "physnet", - ovsBridge: "breth0", + ovsBridge: deploymentconfig.Get().ExternalBridgeName(), } } diff --git a/test/e2e/multihoming.go b/test/e2e/multihoming.go index 46ad7eedc5..3fb940ba3e 100644 --- a/test/e2e/multihoming.go +++ b/test/e2e/multihoming.go @@ -31,6 +31,7 @@ import ( ipgenerator "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/generator/ip" util "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" ) const ( @@ -278,7 +279,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { port = 9000 ) - ginkgo.DescribeTable("attached to a localnet network mapped to breth0", + ginkgo.DescribeTable("attached to a localnet network mapped to external primary interface bridge", //nolint:lll func(netConfigParams networkAttachmentConfigParams, clientPodConfig, serverPodConfig podConfiguration, isCollocatedPods bool) { By("Get two scheduable nodes and ensure client and server are located on distinct 
Nodes") @@ -309,9 +310,9 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { Expect(pods).NotTo(BeEmpty()) defer func() { By("tearing down the localnet underlay") - Expect(teardownUnderlay(pods, defaultOvsBridge)).To(Succeed()) + Expect(teardownUnderlay(pods, deploymentconfig.Get().ExternalBridgeName())).To(Succeed()) }() - Expect(setupUnderlay(pods, defaultOvsBridge, "", netConfig.networkName, netConfig.vlanID)).To(Succeed()) + Expect(setupUnderlay(pods, deploymentconfig.Get().ExternalBridgeName(), "", netConfig.networkName, netConfig.vlanID)).To(Succeed()) nad := generateNAD(netConfig) By(fmt.Sprintf("creating the attachment configuration: %v\n", nad)) diff --git a/test/e2e/node_ip_mac_migration.go b/test/e2e/node_ip_mac_migration.go index 0326c2c7b7..d7b12f4b24 100644 --- a/test/e2e/node_ip_mac_migration.go +++ b/test/e2e/node_ip_mac_migration.go @@ -454,7 +454,7 @@ spec: Expect(pods.Items).To(HaveLen(1)) ovnkPod = pods.Items[0] - cmd := "ovs-ofctl dump-flows breth0 table=0" + cmd := fmt.Sprintf("ovs-ofctl dump-flows %s table=0", deploymentconfig.Get().ExternalBridgeName()) err = wait.PollImmediate(framework.Poll, 30*time.Second, func() (bool, error) { stdout, err := e2epodoutput.RunHostCmdWithRetries(ovnkPod.Namespace, ovnkPod.Name, cmd, framework.Poll, 30*time.Second) if err != nil { @@ -515,7 +515,7 @@ spec: time.Sleep(time.Duration(settleTimeout) * time.Second) By(fmt.Sprintf("Checking nodeport flows have been updated to use new IP: %s", migrationWorkerNodeIP)) - cmd := "ovs-ofctl dump-flows breth0 table=0" + cmd := fmt.Sprintf("ovs-ofctl dump-flows %s table=0", deploymentconfig.Get().ExternalBridgeName()) err = wait.PollImmediate(framework.Poll, 30*time.Second, func() (bool, error) { stdout, err := e2epodoutput.RunHostCmdWithRetries(ovnkPod.Namespace, ovnkPod.Name, cmd, framework.Poll, 30*time.Second) if err != nil { @@ -628,7 +628,7 @@ func checkFlowsForMACPeriodically(ovnkPod v1.Pod, addr net.HardwareAddr, duratio } func 
checkFlowsForMAC(ovnkPod v1.Pod, mac net.HardwareAddr) error { - cmd := "ovs-ofctl dump-flows breth0" + cmd := fmt.Sprintf("ovs-ofctl dump-flows %s", deploymentconfig.Get().ExternalBridgeName()) flowOutput := e2epodoutput.RunHostCmdOrDie(ovnkPod.Namespace, ovnkPod.Name, cmd) lines := strings.Split(flowOutput, "\n") for _, line := range lines { diff --git a/test/e2e/util.go b/test/e2e/util.go index 89ab1e12c9..e31bffe724 100644 --- a/test/e2e/util.go +++ b/test/e2e/util.go @@ -1227,7 +1227,7 @@ func routeToNode(nodeName string, ips []string, mtu int, add bool) error { cmd = []string{"ip", "-6"} } var err error - cmd = append(cmd, "route", ipOp, fmt.Sprintf("%s/%d", ip, mask), "dev", "breth0") + cmd = append(cmd, "route", ipOp, fmt.Sprintf("%s/%d", ip, mask), "dev", deploymentconfig.Get().ExternalBridgeName()) if mtu != 0 { cmd = append(cmd, "mtu", strconv.Itoa(mtu)) } diff --git a/test/scripts/e2e-cp.sh b/test/scripts/e2e-cp.sh index 59fc1cd01a..cbccc5ee29 100755 --- a/test/scripts/e2e-cp.sh +++ b/test/scripts/e2e-cp.sh @@ -145,9 +145,9 @@ else # pod reached from default network through secondary interface, asymetric, configuration does not make sense # TODO: perhaps the secondary network attached pods should not be attached to default network - skip "Multi Homing A single pod with an OVN-K secondary network attached to a localnet network mapped to breth0 can be reached by a client pod in the default network on the same node" - skip "Multi Homing A single pod with an OVN-K secondary network attached to a localnet network mapped to breth0 can be reached by a client pod in the default network on a different node" - + skip "Multi Homing A single pod with an OVN-K secondary network attached to a localnet network mapped to external primary interface bridge can be reached by a client pod in the default network on the same node" + skip "Multi Homing A single pod with an OVN-K secondary network attached to a localnet network mapped to external primary interface bridge can 
be reached by a client pod in the default network on a different node" + # these tests require metallb but the configuration we do for it is not compatible with the configuration we do to advertise the default network # TODO: consolidate configuration skip "Load Balancer Service Tests with MetalLB" From f1c76a6197fc2c09da581a56f53ce2486dded360 Mon Sep 17 00:00:00 2001 From: Enrique Llorente Date: Tue, 29 Apr 2025 11:41:13 +0200 Subject: [PATCH 115/181] e2e, kv: Increase network status timeout Signed-off-by: Enrique Llorente --- test/e2e/kubevirt.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/kubevirt.go b/test/e2e/kubevirt.go index b687b1cc42..433749f5ba 100644 --- a/test/e2e/kubevirt.go +++ b/test/e2e/kubevirt.go @@ -472,7 +472,7 @@ var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, fun networkStatuses, err = podNetworkStatus(pod, networkStatusPredicate) return networkStatuses, err }). - WithTimeout(5 * time.Second). + WithTimeout(15 * time.Second). WithPolling(200 * time.Millisecond). Should(HaveLen(1)) for _, ip := range networkStatuses[0].IPs { From 9fed90c790221bd4b925ba2a4ad57784a1cbea3f Mon Sep 17 00:00:00 2001 From: Enrique Llorente Date: Wed, 30 Apr 2025 10:43:35 +0200 Subject: [PATCH 116/181] e2e: Use ovnk allocator and reserve IPs This change replaces the custom allocator with the ovnk allocator and also reserves the cluster IPs, so tests can just ask for another IP without the problem of cluster IP collision. 
Signed-off-by: Enrique Llorente --- test/e2e/ipalloc/ipalloc.go | 47 ------- test/e2e/ipalloc/primaryipalloc.go | 166 +++++------------------- test/e2e/ipalloc/primaryipalloc_test.go | 90 +++++-------- 3 files changed, 65 insertions(+), 238 deletions(-) delete mode 100644 test/e2e/ipalloc/ipalloc.go diff --git a/test/e2e/ipalloc/ipalloc.go b/test/e2e/ipalloc/ipalloc.go deleted file mode 100644 index 7decbaa0a1..0000000000 --- a/test/e2e/ipalloc/ipalloc.go +++ /dev/null @@ -1,47 +0,0 @@ -package ipalloc - -import ( - "fmt" - "math/big" - "net" -) - -type ipAllocator struct { - net *net.IPNet - // base is a cached version of the start IP in the CIDR range as a *big.Int - base *big.Int - // max is the maximum size of the usable addresses in the range - max int - count int -} - -func newIPAllocator(cidr *net.IPNet) *ipAllocator { - return &ipAllocator{net: cidr, base: getBaseInt(cidr.IP), max: limit(cidr)} -} - -func (n *ipAllocator) AllocateNextIP() (net.IP, error) { - if n.count >= n.max { - return net.IP{}, fmt.Errorf("limit of %d reached", n.max) - } - n.base.Add(n.base, big.NewInt(1)) - n.count += 1 - b := n.base.Bytes() - b = append(make([]byte, 16), b...) 
- return b[len(b)-16:], nil -} - -func getBaseInt(ip net.IP) *big.Int { - return big.NewInt(0).SetBytes(ip.To16()) -} - -func limit(subnet *net.IPNet) int { - ones, bits := subnet.Mask.Size() - if bits == 32 && (bits-ones) >= 31 || bits == 128 && (bits-ones) >= 127 { - return 0 - } - // limit to 2^8 (256) IPs for e2es - if bits == 128 && (bits-ones) >= 8 { - return int(1) << uint(8) - } - return int(1) << uint(bits-ones) -} diff --git a/test/e2e/ipalloc/primaryipalloc.go b/test/e2e/ipalloc/primaryipalloc.go index 79a0ae5010..1e7c34bb87 100644 --- a/test/e2e/ipalloc/primaryipalloc.go +++ b/test/e2e/ipalloc/primaryipalloc.go @@ -3,19 +3,20 @@ package ipalloc import ( "context" "fmt" + "net" + "sync" + + ipallocator "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/ip" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/client-go/kubernetes/typed/core/v1" - "net" - "sync" ) // primaryIPAllocator attempts to allocate an IP in the same subnet as a nodes primary network type primaryIPAllocator struct { mu *sync.Mutex - v4 *ipAllocator - v6 *ipAllocator + v4 *ipallocator.Range + v6 *ipallocator.Range nodeClient v1.NodeInterface } @@ -47,91 +48,37 @@ func newPrimaryIPAllocator(nodeClient v1.NodeInterface) (*primaryIPAllocator, er if len(nodes.Items) == 0 { return ipa, fmt.Errorf("expected at least one node but found zero") } - // FIXME: the approach taken here to find the first node IP+mask and then to increment the second last octet wont work in - // all scenarios (node with /24). We should generate an EgressIP compatible with a Node providers primary network and then take care its unique globally. - // The approach here is to grab initial starting IP from first node found, increment the second last octet. - // Approach taken here won't work for Nodes handed /24 subnets. 
- nodePrimaryIPs, err := util.ParseNodePrimaryIfAddr(&nodes.Items[0]) - if err != nil { - return ipa, fmt.Errorf("failed to parse node primary interface address from Node object: %v", err) - } - if nodePrimaryIPs.V4.IP != nil { - // should be ok with /16 and /64 node primary provider subnets - // TODO; fixme; what about /24 subnet Nodes like GCP - nodePrimaryIPs.V4.IP[len(nodePrimaryIPs.V4.IP)-2]++ - ipa.v4 = newIPAllocator(&net.IPNet{IP: nodePrimaryIPs.V4.IP, Mask: nodePrimaryIPs.V4.Net.Mask}) - } - if nodePrimaryIPs.V6.IP != nil { - nodePrimaryIPs.V6.IP[len(nodePrimaryIPs.V6.IP)-2]++ - ipa.v6 = newIPAllocator(&net.IPNet{IP: nodePrimaryIPs.V6.IP, Mask: nodePrimaryIPs.V6.Net.Mask}) - } - // verify the new starting base IP is within all Nodes subnets - if nodePrimaryIPs.V4.IP != nil { - ipNets, err := getNodePrimaryProviderIPs(nodes.Items, false) - if err != nil { - return ipa, err - } - nextIP, err := ipa.v4.AllocateNextIP() - if err != nil { - return ipa, err - } - if !isIPWithinAllSubnets(ipNets, nextIP) { - return ipa, fmt.Errorf("IP %s is not within all Node subnets", nextIP) - } - } - if nodePrimaryIPs.V6.IP != nil { - ipNets, err := getNodePrimaryProviderIPs(nodes.Items, true) - if err != nil { - return ipa, err - } - nextIP, err := ipa.v6.AllocateNextIP() - if err != nil { - return ipa, err - } - if !isIPWithinAllSubnets(ipNets, nextIP) { - return ipa, fmt.Errorf("IP %s is not within all Node subnets", nextIP) - } - } - - return ipa, nil -} - -func getNodePrimaryProviderIPs(nodes []corev1.Node, isIPv6 bool) ([]*net.IPNet, error) { - ipNets := make([]*net.IPNet, 0, len(nodes)) - for _, node := range nodes { + for _, node := range nodes.Items { nodePrimaryIPs, err := util.ParseNodePrimaryIfAddr(&node) if err != nil { - return nil, fmt.Errorf("failed to parse node primary interface address from Node %s object: %v", node.Name, err) + return ipa, fmt.Errorf("failed to parse node primary interface address from Node %s object: %v", node.Name, err) + } + if 
nodePrimaryIPs.V4.IP != nil { + if ipa.v4 == nil { + ipa.v4, err = ipallocator.NewCIDRRange(nodePrimaryIPs.V4.Net) + if err != nil { + return ipa, fmt.Errorf("failed to create new CIDR range for IPv4: %v", err) + } + } + if err := ipa.v4.Allocate(nodePrimaryIPs.V4.IP); err != nil { + return ipa, fmt.Errorf("failed to allocate IPv4 %s: %v", nodePrimaryIPs.V4.IP, err) + } + } + if nodePrimaryIPs.V6.IP != nil { + if ipa.v6 == nil { + ipa.v6, err = ipallocator.NewCIDRRange(nodePrimaryIPs.V6.Net) + if err != nil { + return ipa, fmt.Errorf("failed to create new CIDR range for IPv6: %v", err) + } + } + if err := ipa.v6.Allocate(nodePrimaryIPs.V6.IP); err != nil { + return ipa, fmt.Errorf("failed to allocate IPv6 %s: %v", nodePrimaryIPs.V6.IP, err) + } } - var mask net.IPMask - var ip net.IP - if isIPv6 { - ip = nodePrimaryIPs.V6.IP - mask = nodePrimaryIPs.V6.Net.Mask - } else { - ip = nodePrimaryIPs.V4.IP - mask = nodePrimaryIPs.V4.Net.Mask - } - if len(ip) == 0 || len(mask) == 0 { - return nil, fmt.Errorf("failed to find Node %s primary Node IP and/or mask", node.Name) - } - ipNets = append(ipNets, &net.IPNet{IP: ip, Mask: mask}) } - return ipNets, nil -} - -func isIPWithinAllSubnets(ipNets []*net.IPNet, ip net.IP) bool { - if len(ipNets) == 0 { - return false - } - for _, ipNet := range ipNets { - if !ipNet.Contains(ip) { - return false - } - } - return true + return ipa, nil } func (pia *primaryIPAllocator) IncrementAndGetNextV4(times int) (net.IP, error) { @@ -148,12 +95,9 @@ func (pia *primaryIPAllocator) AllocateNextV4() (net.IP, error) { if pia.v4 == nil { return nil, fmt.Errorf("IPv4 is not enable ") } - if pia.v4.net == nil { - return nil, fmt.Errorf("IPv4 is not enabled but Allocation request was called") - } pia.mu.Lock() defer pia.mu.Unlock() - return allocateIP(pia.nodeClient, pia.v4.AllocateNextIP) + return pia.v4.AllocateNext() } func (pia *primaryIPAllocator) IncrementAndGetNextV6(times int) (net.IP, error) { @@ -170,51 +114,7 @@ func (pia 
primaryIPAllocator) AllocateNextV6() (net.IP, error) { if pia.v6 == nil { return nil, fmt.Errorf("IPv6 is not enabled but Allocation request was called") } - if pia.v6.net == nil { - return nil, fmt.Errorf("ipv6 network is not set") - } pia.mu.Lock() defer pia.mu.Unlock() - return allocateIP(pia.nodeClient, pia.v6.AllocateNextIP) -} - -type allocNextFn func() (net.IP, error) - -func allocateIP(nodeClient v1.NodeInterface, allocateFn allocNextFn) (net.IP, error) { - nodeList, err := nodeClient.List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return nil, fmt.Errorf("failed to list nodes: %v", err) - } - for { - nextIP, err := allocateFn() - if err != nil { - return nil, fmt.Errorf("failed to allocated next IP address: %v", err) - } - firstOctet := nextIP[len(nextIP)-1] - // skip 0 and 1 - if firstOctet == 0 || firstOctet == 1 { - continue - } - isConflict, err := isConflictWithExistingHostIPs(nodeList.Items, nextIP) - if err != nil { - return nil, fmt.Errorf("failed to determine if IP conflicts with existing IPs: %v", err) - } - if !isConflict { - return nextIP, nil - } - } -} - -func isConflictWithExistingHostIPs(nodes []corev1.Node, ip net.IP) (bool, error) { - ipStr := ip.String() - for _, node := range nodes { - nodeIPsSet, err := util.ParseNodeHostCIDRsDropNetMask(&node) - if err != nil { - return false, fmt.Errorf("failed to parse node %s primary annotation info: %v", node.Name, err) - } - if nodeIPsSet.Has(ipStr) { - return true, nil - } - } - return false, nil + return pia.v6.AllocateNext() } diff --git a/test/e2e/ipalloc/primaryipalloc_test.go b/test/e2e/ipalloc/primaryipalloc_test.go index 815915b7ea..1702afe545 100644 --- a/test/e2e/ipalloc/primaryipalloc_test.go +++ b/test/e2e/ipalloc/primaryipalloc_test.go @@ -15,7 +15,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/fake" - utilsnet "k8s.io/utils/net" ) func TestUtilSuite(t *testing.T) { @@ -23,40 +22,6 @@ func 
TestUtilSuite(t *testing.T) { ginkgo.RunSpecs(t, "node ip alloc suite") } -func TestAllocateNext(t *testing.T) { - tests := []struct { - desc string - input *net.IPNet - output []net.IP - }{ - { - desc: "increments IPv4 address", - input: mustParseCIDRIncIP("192.168.1.5/16"), // mask /24 would fail - output: []net.IP{net.ParseIP("192.168.1.6"), net.ParseIP("192.168.1.7"), net.ParseIP("192.168.1.8")}, - }, - { - desc: "increments IPv6 address", - input: mustParseCIDRIncIP("fc00:f853:ccd:e793::6/64"), - output: []net.IP{net.ParseIP("fc00:f853:ccd:e793::7"), net.ParseIP("fc00:f853:ccd:e793::8"), net.ParseIP("fc00:f853:ccd:e793::9")}, - }, - } - - for i, tc := range tests { - t.Run(fmt.Sprintf("%d:%s", i, tc.desc), func(t *testing.T) { - nodeIPAlloc := newIPAllocator(tc.input) - for _, expectedIP := range tc.output { - allocatedIP, err := nodeIPAlloc.AllocateNextIP() - if err != nil { - t.Errorf("failed to allocated next IP: %v", err) - } - if !allocatedIP.Equal(expectedIP) { - t.Errorf("Expected IP %q, but got %q", expectedIP.String(), allocatedIP.String()) - } - } - }) - } -} - // mustParseCIDRIncIP parses the IP and CIDR. It adds the IP to the returned IPNet. 
func mustParseCIDRIncIP(cidr string) *net.IPNet { ip, ipNet, err := net.ParseCIDR(cidr) @@ -78,20 +43,19 @@ type node struct { } func TestIPAlloc(t *testing.T) { + g := gomega.NewWithT(t) + tests := []struct { - desc string - existingPrimaryNodeIPs []node - expectedFromAllocateNext []string + desc string + existingPrimaryNodeIPs []node }{ { - desc: "IPv4", - existingPrimaryNodeIPs: []node{{v4: network{ip: "192.168.1.1", mask: "16"}}, {v4: network{ip: "192.168.1.2", mask: "16"}}}, - expectedFromAllocateNext: []string{"192.168.2.3", "192.168.2.4"}, + desc: "IPv4", + existingPrimaryNodeIPs: []node{{v4: network{ip: "192.168.1.1", mask: "16"}}, {v4: network{ip: "192.168.1.2", mask: "16"}}}, }, { - desc: "IPv6", - existingPrimaryNodeIPs: []node{{v4: network{ip: "fc00:f853:ccd:e793::5", mask: "64"}}, {v4: network{ip: "fc00:f853:ccd:e793::6", mask: "64"}}}, - expectedFromAllocateNext: []string{"fc00:f853:ccd:e793::8", "fc00:f853:ccd:e793::9"}, + desc: "IPv6", + existingPrimaryNodeIPs: []node{{v6: network{ip: "fc00:f853:ccd:e793::5", mask: "64"}}, {v6: network{ip: "fc00:f853:ccd:e793::6", mask: "64"}}}, }, } @@ -103,23 +67,33 @@ func TestIPAlloc(t *testing.T) { t.Errorf(err.Error()) return } - for _, expectedIPStr := range tc.expectedFromAllocateNext { - expectedIP := net.ParseIP(expectedIPStr) - var nextIP net.IP - var err error - if utilsnet.IsIPv6(expectedIP) { - nextIP, err = pipa.AllocateNextV6() - } else { - nextIP, err = pipa.AllocateNextV4() - } - if err != nil || nextIP == nil { - t.Errorf("failed to allocated next IPv4 or IPv6 address. 
err %v", err) - return + existingIPv4IPs := []string{} + existingIPv6IPs := []string{} + allocatedIPv4IPs := []string{} + allocatedIPv6IPs := []string{} + for _, existingPrimaryNodeIP := range tc.existingPrimaryNodeIPs { + if existingPrimaryNodeIP.v4.ip != "" { + existingIPv4IPs = append(existingIPv4IPs, existingPrimaryNodeIP.v4.ip) + nextIPv4, err := pipa.AllocateNextV4() + g.Expect(err).ToNot(gomega.HaveOccurred(), "should succeed in allocating the next IPv4 address") + g.Expect(nextIPv4).ToNot(gomega.BeNil(), "should allocate next IPv4 address") + allocatedIPv4IPs = append(allocatedIPv4IPs, nextIPv4.String()) } - if !nextIP.Equal(expectedIP) { - t.Errorf("expected IP %q, but found %q", expectedIP, nextIP) + + if existingPrimaryNodeIP.v6.ip != "" { + existingIPv6IPs = append(existingIPv6IPs, existingPrimaryNodeIP.v6.ip) + nextIPv6, err := pipa.AllocateNextV6() + g.Expect(err).ToNot(gomega.HaveOccurred(), "should succeed in allocating the next IPv6 address") + g.Expect(nextIPv6).ToNot(gomega.BeNil(), "should allocate next IPv6 address") + allocatedIPv6IPs = append(allocatedIPv6IPs, nextIPv6.String()) } } + if len(existingIPv4IPs) > 0 { + g.Expect(allocatedIPv4IPs).NotTo(gomega.ContainElements(existingIPv4IPs)) + } + if len(existingIPv6IPs) > 0 { + g.Expect(allocatedIPv6IPs).NotTo(gomega.ContainElements(existingIPv6IPs)) + } }) } From 1870116b0bb2b4810441e33d1b63e1886d320bd7 Mon Sep 17 00:00:00 2001 From: Enrique Llorente Date: Wed, 28 May 2025 11:39:39 +0200 Subject: [PATCH 117/181] e2e, kv: Use bgpnet for external container network Signed-off-by: Enrique Llorente --- test/e2e/kubevirt.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/e2e/kubevirt.go b/test/e2e/kubevirt.go index 433749f5ba..55e7309cf3 100644 --- a/test/e2e/kubevirt.go +++ b/test/e2e/kubevirt.go @@ -1764,14 +1764,14 @@ write_files: var externalContainer infraapi.ExternalContainer if td.role == udnv1.NetworkRolePrimary { - primaryProviderNetwork, err := 
infraprovider.Get().PrimaryNetwork() + providerNetwork, err := infraprovider.Get().GetNetwork(containerNetwork(td)) Expect(err).ShouldNot(HaveOccurred(), "primary network must be available to attach containers") externalContainerPort := infraprovider.Get().GetExternalContainerPort() externalContainerName := namespace + "-iperf" externalContainerSpec := infraapi.ExternalContainer{ Name: externalContainerName, Image: images.IPerf3(), - Network: primaryProviderNetwork, + Network: providerNetwork, Args: []string{"sleep infinity"}, ExtPort: externalContainerPort, } From ae5b6387fc94f6f860895ee69c643b16225a9731 Mon Sep 17 00:00:00 2001 From: Enrique Llorente Date: Tue, 29 Apr 2025 12:53:13 +0200 Subject: [PATCH 118/181] e2e: Move underlay setup to providers Signed-off-by: Enrique Llorente --- test/e2e/infraprovider/api/api.go | 18 ++ test/e2e/infraprovider/providers/kind/kind.go | 79 ++++++++ test/e2e/infraprovider/providers/kind/ovs.go | 93 +++++++++ test/e2e/kubevirt.go | 24 +-- test/e2e/localnet-underlay.go | 184 ------------------ test/e2e/multihoming.go | 133 +++++-------- test/e2e/network_segmentation_localnet.go | 38 ++-- 7 files changed, 263 insertions(+), 306 deletions(-) create mode 100644 test/e2e/infraprovider/providers/kind/ovs.go diff --git a/test/e2e/infraprovider/api/api.go b/test/e2e/infraprovider/api/api.go index 5ef104b7f3..545313ce8c 100644 --- a/test/e2e/infraprovider/api/api.go +++ b/test/e2e/infraprovider/api/api.go @@ -4,6 +4,8 @@ import ( "errors" "fmt" "strings" + + "k8s.io/kubernetes/test/e2e/framework" ) // Provider represents the infrastructure provider @@ -37,6 +39,21 @@ type Provider interface { GetK8HostPort() uint16 // supported K8 host ports } +// Underlay represents the configuration for an underlay network. +// Note: The physical network referenced by PhysicalNetworkName must be pre-created and available. +type Underlay struct { + // PhysicalNetworkName is the name of the pre-created physical network to use. 
+ PhysicalNetworkName string + // LogicalNetworkName is the logical network name to be used. + LogicalNetworkName string + // BridgeName is the name of the bridge associated with the underlay. + BridgeName string + // PortName is the name of the port on the bridge. + PortName string + // VlanID is the VLAN identifier for the underlay network. + VlanID int +} + type Context interface { CreateExternalContainer(container ExternalContainer) (ExternalContainer, error) DeleteExternalContainer(container ExternalContainer) error @@ -46,6 +63,7 @@ type Context interface { AttachNetwork(network Network, instance string) (NetworkInterface, error) DetachNetwork(network Network, instance string) error GetAttachedNetworks() (Networks, error) + SetupUnderlay(f *framework.Framework, underlay Underlay) error AddCleanUpFn(func() error) } diff --git a/test/e2e/infraprovider/providers/kind/kind.go b/test/e2e/infraprovider/providers/kind/kind.go index f58a5bc746..ff5d1bdd45 100644 --- a/test/e2e/infraprovider/providers/kind/kind.go +++ b/test/e2e/infraprovider/providers/kind/kind.go @@ -13,10 +13,12 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/ovn-org/ovn-kubernetes/test/e2e/containerengine" + "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/portalloc" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" utilnet "k8s.io/utils/net" @@ -359,6 +361,83 @@ func (c *contextKind) getAttachedNetworks() (api.Networks, error) { return attachedNetworks, nil } +func (c *contextKind) SetupUnderlay(f *framework.Framework, underlay api.Underlay) error { + if underlay.LogicalNetworkName == "" { + return fmt.Errorf("underlay logical network name must be set") + } + + if underlay.PhysicalNetworkName == "" { + underlay.PhysicalNetworkName = 
"underlay" + } + + if underlay.BridgeName == "" { + underlay.BridgeName = secondaryBridge + } + + const ( + ovsKubeNodeLabel = "app=ovnkube-node" + ) + + ovsPodList, err := f.ClientSet.CoreV1().Pods(deploymentconfig.Get().OVNKubernetesNamespace()).List( + context.Background(), + metav1.ListOptions{LabelSelector: ovsKubeNodeLabel}, + ) + if err != nil { + return fmt.Errorf("failed to list OVS pods with label %q at namespace %q: %w", ovsKubeNodeLabel, deploymentconfig.Get().OVNKubernetesNamespace(), err) + } + + if len(ovsPodList.Items) == 0 { + return fmt.Errorf("no pods with label %q in namespace %q", ovsKubeNodeLabel, deploymentconfig.Get().OVNKubernetesNamespace()) + } + for _, ovsPod := range ovsPodList.Items { + if underlay.BridgeName != deploymentconfig.Get().ExternalBridgeName() { + underlayInterface, err := getNetworkInterface(ovsPod.Spec.NodeName, underlay.PhysicalNetworkName) + if err != nil { + return fmt.Errorf("failed to get underlay interface for network %s on node %s: %w", underlay.PhysicalNetworkName, ovsPod.Spec.NodeName, err) + } + c.AddCleanUpFn(func() error { + if err := removeOVSBridge(ovsPod.Namespace, ovsPod.Name, underlay.BridgeName); err != nil { + return fmt.Errorf("failed to remove OVS bridge %s for pod %s/%s during cleanup: %w", underlay.BridgeName, ovsPod.Namespace, ovsPod.Name, err) + } + return nil + }) + if err := ensureOVSBridge(ovsPod.Namespace, ovsPod.Name, underlay.BridgeName); err != nil { + return fmt.Errorf("failed to add OVS bridge %s for pod %s/%s: %w", underlay.BridgeName, ovsPod.Namespace, ovsPod.Name, err) + } + + if err := ovsAttachPortToBridge(ovsPod.Namespace, ovsPod.Name, underlay.BridgeName, underlayInterface.InfName); err != nil { + return fmt.Errorf("failed to attach port %s to bridge %s for pod %s/%s: %w", underlayInterface.InfName, underlay.BridgeName, ovsPod.Namespace, ovsPod.Name, err) + } + if underlay.VlanID > 0 { + if err := ovsEnableVLANAccessPort(ovsPod.Namespace, ovsPod.Name, underlay.BridgeName, 
underlayInterface.InfName, underlay.VlanID); err != nil { + return fmt.Errorf("failed to enable VLAN %d on port %s for bridge %s for pod %s/%s: %w", underlay.VlanID, underlayInterface.InfName, underlay.BridgeName, ovsPod.Namespace, ovsPod.Name, err) + } + } + } + c.AddCleanUpFn(func() error { + if err := configureBridgeMappings( + ovsPod.Namespace, + ovsPod.Name, + defaultNetworkBridgeMapping(), + ); err != nil { + return fmt.Errorf("failed to restore default bridge mappings for pod %s/%s during cleanup: %w", ovsPod.Namespace, ovsPod.Name, err) + } + return nil + }) + + if err := configureBridgeMappings( + ovsPod.Namespace, + ovsPod.Name, + defaultNetworkBridgeMapping(), + bridgeMapping(underlay.LogicalNetworkName, underlay.BridgeName), + ); err != nil { + return fmt.Errorf("failed to configure bridge mappings for pod %s/%s for logical network %s to bridge %s: %w", ovsPod.Namespace, ovsPod.Name, underlay.LogicalNetworkName, underlay.BridgeName, err) + } + } + return nil + +} + func (c *contextKind) AddCleanUpFn(cleanUpFn func() error) { c.Lock() defer c.Unlock() diff --git a/test/e2e/infraprovider/providers/kind/ovs.go b/test/e2e/infraprovider/providers/kind/ovs.go new file mode 100644 index 0000000000..337ae4e702 --- /dev/null +++ b/test/e2e/infraprovider/providers/kind/ovs.go @@ -0,0 +1,93 @@ +package kind + +import ( + "fmt" + "strings" + "time" + + "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + + e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" +) + +const ( + secondaryBridge = "ovsbr1" +) + +func ensureOVSBridge(podNamespace, podName string, bridgeName string) error { + cmd := fmt.Sprintf("ovs-vsctl br-exists %[1]s || ovs-vsctl add-br %[1]s", bridgeName) + if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { + return fmt.Errorf("failed to add ovs bridge %q: %v", bridgeName, err) + } + return nil +} + +func removeOVSBridge(podNamespace, podName string, bridgeName 
string) error {
+ cmd := fmt.Sprintf("if ovs-vsctl br-exists %[1]s; then ovs-vsctl del-br %[1]s; fi", bridgeName)
+ if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil {
+ return fmt.Errorf("failed to remove ovs bridge %q: %v", bridgeName, err)
+ }
+ return nil
+}
+
+func ovsAttachPortToBridge(podNamespace, podName string, bridgeName string, portName string) error {
+ cmd := fmt.Sprintf("ovs-vsctl list port %[2]s || ovs-vsctl add-port %[1]s %[2]s", bridgeName, portName)
+ if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil {
+ return fmt.Errorf("failed to add port %s to OVS bridge %s: %v", portName, bridgeName, err)
+ }
+ return nil
+}
+
+func ovsEnableVLANAccessPort(podNamespace, podName string, bridgeName string, portName string, vlanID int) error {
+ cmd := fmt.Sprintf("ovs-vsctl set port %[1]s tag=%[2]d vlan_mode=access", portName, vlanID)
+ if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil {
+ return fmt.Errorf("failed to enable vlan access on port %s of OVS bridge %s: %v", portName, bridgeName, err)
+ }
+ return nil
+}
+
+type BridgeMapping struct {
+ physnet string
+ ovsBridge string
+}
+
+func (bm BridgeMapping) String() string {
+ return fmt.Sprintf("%s:%s", bm.physnet, bm.ovsBridge)
+}
+
+type BridgeMappings []BridgeMapping
+
+func (bms BridgeMappings) String() string {
+ return strings.Join(Map(bms, func(bm BridgeMapping) string { return bm.String() }), ",")
+}
+
+func Map[T, V any](items []T, fn func(T) V) []V {
+ result := make([]V, len(items))
+ for i, t := range items {
+ result[i] = fn(t)
+ }
+ return result
+}
+
+func configureBridgeMappings(podNamespace, podName string, mappings ...BridgeMapping) error {
+ mappingsString := fmt.Sprintf("external_ids:ovn-bridge-mappings=%s", BridgeMappings(mappings).String())
+ cmd := strings.Join([]string{"ovs-vsctl", 
"set", "open", ".", mappingsString}, " ") + if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { + return fmt.Errorf("failed to configure bridge mappings %q: %v", mappingsString, err) + } + return nil +} + +func defaultNetworkBridgeMapping() BridgeMapping { + return BridgeMapping{ + physnet: "physnet", + ovsBridge: deploymentconfig.Get().ExternalBridgeName(), + } +} + +func bridgeMapping(physnet, ovsBridge string) BridgeMapping { + return BridgeMapping{ + physnet: physnet, + ovsBridge: ovsBridge, + } +} diff --git a/test/e2e/kubevirt.go b/test/e2e/kubevirt.go index 55e7309cf3..6410f16087 100644 --- a/test/e2e/kubevirt.go +++ b/test/e2e/kubevirt.go @@ -1721,17 +1721,7 @@ write_files: if td.topology == udnv1.NetworkTopologyLocalnet { By("setting up the localnet underlay") - nodes := ovsPods(clientSet) - Expect(nodes).NotTo(BeEmpty()) - DeferCleanup(func() { - if e2eframework.TestContext.DeleteNamespace && (e2eframework.TestContext.DeleteNamespaceOnFailure || !CurrentSpecReport().Failed()) { - By("tearing down the localnet underlay") - Expect(teardownUnderlay(nodes, secondaryBridge)).To(Succeed()) - } - }) - - const secondaryInterfaceName = "eth1" - Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, networkName, 0 /*vlanID*/)).To(Succeed()) + Expect(providerCtx.SetupUnderlay(fr, infraapi.Underlay{LogicalNetworkName: networkName})).To(Succeed()) } createCUDN(cudn) @@ -2209,20 +2199,10 @@ chpasswd: { expire: False } ) DescribeTable("should maintain tcp connection with minimal downtime", func(td func(vmi *kubevirtv1.VirtualMachineInstance)) { By("setting up the localnet underlay") - nodes := ovsPods(clientSet) - Expect(nodes).NotTo(BeEmpty()) - DeferCleanup(func() { - if e2eframework.TestContext.DeleteNamespace && (e2eframework.TestContext.DeleteNamespaceOnFailure || !CurrentSpecReport().Failed()) { - By("tearing down the localnet underlay") - Expect(teardownUnderlay(nodes, 
secondaryBridge)).To(Succeed()) - } - }) - cudn, networkName := kubevirt.GenerateCUDN(namespace, "net1", udnv1.NetworkTopologyLocalnet, udnv1.NetworkRoleSecondary, udnv1.DualStackCIDRs{}) createCUDN(cudn) - const secondaryInterfaceName = "eth1" - Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, networkName, 0 /*vlanID*/)).To(Succeed()) + Expect(providerCtx.SetupUnderlay(fr, infraapi.Underlay{LogicalNetworkName: networkName})).To(Succeed()) workerNodeList, err := fr.ClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{LabelSelector: labels.FormatLabels(map[string]string{"node-role.kubernetes.io/worker": ""})}) Expect(err).NotTo(HaveOccurred()) diff --git a/test/e2e/localnet-underlay.go b/test/e2e/localnet-underlay.go index 97c06d0ecc..8beed9c1ba 100644 --- a/test/e2e/localnet-underlay.go +++ b/test/e2e/localnet-underlay.go @@ -1,195 +1,11 @@ package e2e import ( - "context" "fmt" "os" "os/exec" - "strings" - "time" - - "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/client-go/kubernetes" - e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" ) -const ( - secondaryBridge = "ovsbr1" - add = "add-br" - del = "del-br" -) - -func setupUnderlay(ovsPods []v1.Pod, bridgeName, portName, networkName string, vlanID int) error { - for _, ovsPod := range ovsPods { - if bridgeName != deploymentconfig.Get().ExternalBridgeName() { - if err := addOVSBridge(ovsPod.Namespace, ovsPod.Name, bridgeName); err != nil { - return err - } - - if vlanID > 0 { - if err := ovsEnableVLANAccessPort(ovsPod.Namespace, ovsPod.Name, bridgeName, portName, vlanID); err != nil { - return err - } - } else { - if err := ovsAttachPortToBridge(ovsPod.Namespace, ovsPod.Name, bridgeName, portName); err != nil { - return err - } - } - } - if err := configureBridgeMappings( - ovsPod.Namespace, - ovsPod.Name, - defaultNetworkBridgeMapping(), - 
bridgeMapping(networkName, bridgeName), - ); err != nil { - return err - } - } - return nil -} - -func ovsRemoveSwitchPort(ovsPods []v1.Pod, portName string, newVLANID int) error { - for _, ovsPod := range ovsPods { - if err := ovsRemoveVLANAccessPort(ovsPod.Namespace, ovsPod.Name, secondaryBridge, portName); err != nil { - return fmt.Errorf("failed to remove old VLAN port: %v", err) - } - - if err := ovsEnableVLANAccessPort(ovsPod.Namespace, ovsPod.Name, secondaryBridge, portName, newVLANID); err != nil { - return fmt.Errorf("failed to add new VLAN port: %v", err) - } - } - - return nil -} - -func teardownUnderlay(ovsPods []v1.Pod, bridgeName string) error { - for _, ovsPod := range ovsPods { - if bridgeName != deploymentconfig.Get().ExternalBridgeName() { - if err := removeOVSBridge(ovsPod.Namespace, ovsPod.Name, bridgeName); err != nil { - return err - } - } - // restore default bridge mapping - if err := configureBridgeMappings( - ovsPod.Namespace, - ovsPod.Name, - defaultNetworkBridgeMapping(), - ); err != nil { - return err - } - } - return nil -} - -func ovsPods(clientSet clientset.Interface) []v1.Pod { - const ( - ovsNodeLabel = "app=ovs-node" - ) - pods, err := clientSet.CoreV1().Pods(deploymentconfig.Get().OVNKubernetesNamespace()).List( - context.Background(), - metav1.ListOptions{LabelSelector: ovsNodeLabel}, - ) - if err != nil { - return nil - } - return pods.Items -} - -func addOVSBridge(podNamespace, podName string, bridgeName string) error { - cmd := strings.Join([]string{"ovs-vsctl", add, bridgeName}, " ") - if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { - return fmt.Errorf("failed to add ovs bridge %q: %v", bridgeName, err) - } - return nil -} - -func removeOVSBridge(podNamespace, podName string, bridgeName string) error { - cmd := strings.Join([]string{"ovs-vsctl", del, bridgeName}, " ") - if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, 
time.Second, time.Second*5); err != nil { - return fmt.Errorf("failed to add ovs bridge %q: %v", bridgeName, err) - } - return nil -} - -func ovsAttachPortToBridge(podNamespace, podName string, bridgeName string, portName string) error { - cmd := strings.Join([]string{ - "ovs-vsctl", "add-port", bridgeName, portName, - }, " ") - if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { - return fmt.Errorf("failed to remove port %s from OVS bridge %s: %v", portName, bridgeName, err) - } - return nil -} - -func ovsEnableVLANAccessPort(podNamespace, podName string, bridgeName string, portName string, vlanID int) error { - cmd := strings.Join([]string{ - "ovs-vsctl", "add-port", bridgeName, portName, fmt.Sprintf("tag=%d", vlanID), "vlan_mode=access", - }, " ") - if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { - return fmt.Errorf("failed to remove port %s from OVS bridge %s: %v", portName, bridgeName, err) - } - return nil -} - -func ovsRemoveVLANAccessPort(podNamespace, podName string, bridgeName string, portName string) error { - cmd := strings.Join([]string{ - "ovs-vsctl", "del-port", bridgeName, portName, - }, " ") - if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { - return fmt.Errorf("failed to remove port %s from OVS bridge %s: %v", portName, bridgeName, err) - } - return nil -} - -type BridgeMapping struct { - physnet string - ovsBridge string -} - -func (bm BridgeMapping) String() string { - return fmt.Sprintf("%s:%s", bm.physnet, bm.ovsBridge) -} - -type BridgeMappings []BridgeMapping - -func (bms BridgeMappings) String() string { - return strings.Join(Map(bms, func(bm BridgeMapping) string { return bm.String() }), ",") -} - -func Map[T, V any](items []T, fn func(T) V) []V { - result := make([]V, len(items)) - for i, t := range items { - result[i] = fn(t) - } - 
return result -} - -func configureBridgeMappings(podNamespace, podName string, mappings ...BridgeMapping) error { - mappingsString := fmt.Sprintf("external_ids:ovn-bridge-mappings=%s", BridgeMappings(mappings).String()) - cmd := strings.Join([]string{"ovs-vsctl", "set", "open", ".", mappingsString}, " ") - if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { - return fmt.Errorf("failed to configure bridge mappings %q: %v", mappingsString, err) - } - return nil -} - -func defaultNetworkBridgeMapping() BridgeMapping { - return BridgeMapping{ - physnet: "physnet", - ovsBridge: deploymentconfig.Get().ExternalBridgeName(), - } -} - -func bridgeMapping(physnet, ovsBridge string) BridgeMapping { - return BridgeMapping{ - physnet: physnet, - ovsBridge: ovsBridge, - } -} - // TODO: make this function idempotent; use golang netlink instead func createVLANInterface(deviceName string, vlanID string, ipAddress *string) error { vlan := vlanName(deviceName, vlanID) diff --git a/test/e2e/multihoming.go b/test/e2e/multihoming.go index 3fb940ba3e..be949e799e 100644 --- a/test/e2e/multihoming.go +++ b/test/e2e/multihoming.go @@ -32,6 +32,8 @@ import ( ipgenerator "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/generator/ip" util "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" + infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" ) const ( @@ -56,9 +58,10 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { f := wrappedTestFramework("multi-homing") var ( - cs clientset.Interface - nadClient nadclient.K8sCniCncfIoV1Interface - mnpClient mnpclient.K8sCniCncfIoV1beta1Interface + cs clientset.Interface + nadClient nadclient.K8sCniCncfIoV1Interface + mnpClient mnpclient.K8sCniCncfIoV1beta1Interface + providerCtx infraapi.Context ) BeforeEach(func() { @@ 
-69,6 +72,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { Expect(err).NotTo(HaveOccurred()) mnpClient, err = mnpclient.NewForConfig(f.ClientConfig()) Expect(err).NotTo(HaveOccurred()) + providerCtx = infraprovider.Get().NewTestContext() }) Context("A single pod with an OVN-K secondary network", func() { @@ -80,8 +84,10 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { if netConfig.topology == "localnet" { By("applying ovs bridge mapping") - Expect(setBridgeMappings(cs, defaultNetworkBridgeMapping(), bridgeMapping(netConfig.networkName, secondaryBridge))).NotTo(HaveOccurred()) - ginkgo.DeferCleanup(setBridgeMappings, cs, defaultNetworkBridgeMapping()) + Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ + LogicalNetworkName: netConfig.networkName, + VlanID: netConfig.vlanID, + })).To(Succeed()) } By("creating the attachment configuration") @@ -306,13 +312,10 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { } By("setting up the localnet underlay") - pods := ovsPods(cs) - Expect(pods).NotTo(BeEmpty()) - defer func() { - By("tearing down the localnet underlay") - Expect(teardownUnderlay(pods, deploymentconfig.Get().ExternalBridgeName())).To(Succeed()) - }() - Expect(setupUnderlay(pods, deploymentconfig.Get().ExternalBridgeName(), "", netConfig.networkName, netConfig.vlanID)).To(Succeed()) + Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ + BridgeName: deploymentconfig.Get().ExternalBridgeName(), + LogicalNetworkName: netConfig.networkName, + })).To(Succeed()) nad := generateNAD(netConfig) By(fmt.Sprintf("creating the attachment configuration: %v\n", nad)) @@ -547,16 +550,10 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { serverPodConfig.namespace = f.Namespace.Name if netConfig.topology == "localnet" { - By("setting up the localnet underlay") - nodes := ovsPods(cs) - Expect(nodes).NotTo(BeEmpty()) - defer func() { - By("tearing down the localnet underlay") - Expect(teardownUnderlay(nodes, 
secondaryBridge)).To(Succeed()) - }() - - const secondaryInterfaceName = "eth1" - Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, netConfig.networkName, netConfig.vlanID)).To(Succeed()) + Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ + LogicalNetworkName: netConfig.networkName, + VlanID: netConfig.vlanID, + })).To(Succeed()) } By("creating the attachment configuration") @@ -902,17 +899,15 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { Context("localnet OVN-K secondary network", func() { const ( - clientPodName = "client-pod" - nodeHostnameKey = "kubernetes.io/hostname" - servicePort uint16 = 9000 - dockerNetworkName = "underlay" - underlayServiceIP = "60.128.0.1" - secondaryInterfaceName = "eth1" - expectedOriginalMTU = 1200 + clientPodName = "client-pod" + nodeHostnameKey = "kubernetes.io/hostname" + servicePort uint16 = 9000 + dockerNetworkName = "underlay" + underlayServiceIP = "60.128.0.1" + expectedOriginalMTU = 1200 ) var netConfig networkAttachmentConfig - var nodes []v1.Pod var underlayBridgeName string var cmdWebServer *exec.Cmd @@ -931,9 +926,10 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { }) By("setting up the localnet underlay") - nodes = ovsPods(cs) - Expect(nodes).NotTo(BeEmpty()) - Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, netConfig.networkName, netConfig.vlanID)).To(Succeed()) + Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ + LogicalNetworkName: netConfig.networkName, + VlanID: netConfig.vlanID, + })).To(Succeed()) }) BeforeEach(func() { @@ -982,11 +978,6 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { Expect(err).NotTo(HaveOccurred()) }) - AfterEach(func() { - By("tearing down the localnet underlay") - Expect(teardownUnderlay(nodes, secondaryBridge)).To(Succeed()) - }) - It("correctly sets the MTU on the pod", func() { Eventually(func() error { clientPodConfig := podConfiguration{ @@ -1114,7 +1105,10 @@ var _ = Describe("Multi 
Homing", feature.MultiHoming, func() { Context("and the service connected to the underlay is reconfigured to connect to the new VLAN-ID", func() { BeforeEach(func() { - Expect(ovsRemoveSwitchPort(nodes, secondaryInterfaceName, newLocalnetVLANID)).To(Succeed()) + Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ + LogicalNetworkName: netConfig.networkName, + VlanID: newLocalnetVLANID, + })).To(Succeed(), "configuring the OVS bridge with new localnet vlan id") }) It("can now communicate over a localnet secondary network from pod to the underlay service", func() { @@ -1304,9 +1298,6 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { Context("with a trunked configuration", func() { const vlanID = 20 BeforeEach(func() { - nodes = ovsPods(cs) - Expect(nodes).NotTo(BeEmpty()) - // we are setting up the bridge in trunked mode by not // specifying a particular VLAN ID on the network conf netConfig = newNetworkAttachmentConfig( @@ -1319,7 +1310,10 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { }) By("setting up the localnet underlay with a trunked configuration") - Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, netConfig.networkName, netConfig.vlanID)).To(Succeed(), "configuring the OVS bridge") + Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ + LogicalNetworkName: netConfig.networkName, + VlanID: netConfig.vlanID, + })).To(Succeed(), "configuring the OVS bridge") By(fmt.Sprintf("creating a VLAN interface on top of the bridge connecting the cluster nodes with IP: %s", underlayIP)) cli, err := client.NewClientWithOpts(client.FromEnv) @@ -1344,7 +1338,6 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { AfterEach(func() { Expect(cmdWebServer.Process.Kill()).NotTo(HaveOccurred(), "kill the python webserver") Expect(deleteVLANInterface(underlayBridgeName, strconv.Itoa(vlanID))).NotTo(HaveOccurred(), "remove the underlay physical configuration") - Expect(teardownUnderlay(nodes, 
secondaryBridge)).To(Succeed(), "tear down the localnet underlay") }) It("the same bridge mapping can be shared by a separate VLAN by using the physical network name attribute", func() { @@ -1424,15 +1417,10 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { if netConfig.topology == "localnet" { By("setting up the localnet underlay") - nodes := ovsPods(cs) - Expect(nodes).NotTo(BeEmpty()) - defer func() { - By("tearing down the localnet underlay") - Expect(teardownUnderlay(nodes, secondaryBridge)).To(Succeed()) - }() - - const secondaryInterfaceName = "eth1" - Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, netConfig.networkName, netConfig.vlanID)).To(Succeed()) + Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ + LogicalNetworkName: netConfig.networkName, + VlanID: netConfig.vlanID, + })).To(Succeed()) } Expect(createNads(f, nadClient, extraNamespace, netConfig)).NotTo(HaveOccurred()) @@ -1851,14 +1839,10 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { netConfig := newNetworkAttachmentConfig(netConfigParams) By("setting up the localnet underlay") - nodes := ovsPods(cs) - Expect(nodes).NotTo(BeEmpty()) - defer func() { - By("tearing down the localnet underlay") - Expect(teardownUnderlay(nodes, secondaryBridge)).To(Succeed()) - }() - const secondaryInterfaceName = "eth1" - Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, netConfig.networkName, netConfig.vlanID)).To(Succeed()) + Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ + LogicalNetworkName: netConfig.networkName, + VlanID: netConfig.vlanID, + })).To(Succeed()) Expect(createNads(f, nadClient, extraNamespace, netConfig)).NotTo(HaveOccurred()) @@ -1983,14 +1967,10 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { netConfig := newNetworkAttachmentConfig(netConfigParams) By("setting up the localnet underlay") - nodes := ovsPods(cs) - Expect(nodes).NotTo(BeEmpty()) - defer func() { - By("tearing down the localnet 
underlay") - Expect(teardownUnderlay(nodes, secondaryBridge)).To(Succeed()) - }() - const secondaryInterfaceName = "eth1" - Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, netConfig.networkName, netConfig.vlanID)).To(Succeed()) + Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ + LogicalNetworkName: netConfig.networkName, + VlanID: netConfig.vlanID, + })).To(Succeed()) Expect(createNads(f, nadClient, extraNamespace, netConfig)).NotTo(HaveOccurred()) @@ -2285,18 +2265,3 @@ func addIPRequestToPodConfig(cs clientset.Interface, podConfig *podConfiguration } return nil } - -func setBridgeMappings(cs clientset.Interface, mappings ...BridgeMapping) error { - pods := ovsPods(cs) - if len(pods) == 0 { - return fmt.Errorf("pods list is empty") - } - - for _, pods := range pods { - if err := configureBridgeMappings(pods.Namespace, pods.Name, mappings...); err != nil { - return err - } - } - - return nil -} diff --git a/test/e2e/network_segmentation_localnet.go b/test/e2e/network_segmentation_localnet.go index 1647baa9fa..3acd6b1c20 100644 --- a/test/e2e/network_segmentation_localnet.go +++ b/test/e2e/network_segmentation_localnet.go @@ -9,6 +9,8 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" + infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -21,20 +23,26 @@ import ( ) var _ = Describe("Network Segmentation: Localnet", func() { - f := wrappedTestFramework("network-segmentation-localnet") + var ( + f = wrappedTestFramework("network-segmentation-localnet") + providerCtx infraapi.Context + ) f.SkipNamespaceCreation = true + BeforeEach(func() { + providerCtx = infraprovider.Get().NewTestContext() + }) + It("using ClusterUserDefinedNetwork CR, pods in different namespaces, should communicate over localnet topology", func() { const ( - vlan = 200 - testPort = 9000 - subnetIPv4 = "192.168.100.0/24" - subnetIPv6 = "2001:dbb::/64" - excludeSubnetIPv4 = "192.168.100.0/29" - excludeSubnetIPv6 = "2001:dbb::/120" - secondaryIfaceName = "eth1" - ovsBrName = "ovsbr-eth1" + vlan = 200 + testPort = 9000 + subnetIPv4 = "192.168.100.0/24" + subnetIPv6 = "2001:dbb::/64" + excludeSubnetIPv4 = "192.168.100.0/29" + excludeSubnetIPv6 = "2001:dbb::/120" ) + ovsBrName := "ovsbr-udn" // use unique names to avoid conflicts with tests running in parallel nsBlue := uniqueMetaName("blue") nsRed := uniqueMetaName("red") @@ -42,14 +50,12 @@ var _ = Describe("Network Segmentation: Localnet", func() { physicalNetworkName := uniqueMetaName("localnet1") By("setup the localnet underlay") - ovsPods := ovsPods(f.ClientSet) - Expect(ovsPods).NotTo(BeEmpty()) - DeferCleanup(func() { - By("teardown the localnet underlay") - Expect(teardownUnderlay(ovsPods, ovsBrName)).To(Succeed()) - }) c := networkAttachmentConfig{networkAttachmentConfigParams: networkAttachmentConfigParams{networkName: physicalNetworkName, vlanID: vlan}} - Expect(setupUnderlay(ovsPods, ovsBrName, secondaryIfaceName, c.networkName, c.vlanID)).To(Succeed()) + Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ + BridgeName: ovsBrName, + 
LogicalNetworkName: c.networkName, + VlanID: c.vlanID, + })).To(Succeed()) By("create test namespaces") _, err := f.ClientSet.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: nsRed}}, metav1.CreateOptions{}) From 115b25a3e4ee0cc4e9d352474492552db390ed08 Mon Sep 17 00:00:00 2001 From: Enrique Llorente Date: Mon, 16 Jun 2025 10:36:22 +0200 Subject: [PATCH 119/181] e2e: Move http servers to external container Signed-off-by: Enrique Llorente --- test/e2e/infraprovider/api/api.go | 15 +-- test/e2e/infraprovider/providers/kind/kind.go | 3 + test/e2e/localnet-underlay.go | 51 --------- test/e2e/multihoming.go | 105 ++++++++---------- 4 files changed, 55 insertions(+), 119 deletions(-) diff --git a/test/e2e/infraprovider/api/api.go b/test/e2e/infraprovider/api/api.go index 545313ce8c..c654f798c3 100644 --- a/test/e2e/infraprovider/api/api.go +++ b/test/e2e/infraprovider/api/api.go @@ -182,13 +182,14 @@ func (n NetworkInterface) GetMAC() string { } type ExternalContainer struct { - Name string - Image string - Network Network - Args []string - ExtPort uint16 - IPv4 string - IPv6 string + Name string + Image string + Network Network + Entrypoint string + Args []string + ExtPort uint16 + IPv4 string + IPv6 string } func (ec ExternalContainer) GetName() string { diff --git a/test/e2e/infraprovider/providers/kind/kind.go b/test/e2e/infraprovider/providers/kind/kind.go index ff5d1bdd45..4d0dc6a226 100644 --- a/test/e2e/infraprovider/providers/kind/kind.go +++ b/test/e2e/infraprovider/providers/kind/kind.go @@ -147,6 +147,9 @@ func (c *contextKind) createExternalContainer(container api.ExternalContainer) ( return container, fmt.Errorf("container %s already exists", container.Name) } cmd := []string{"run", "-itd", "--privileged", "--name", container.Name, "--network", container.Network.Name(), "--hostname", container.Name} + if container.Entrypoint != "" { + cmd = append(cmd, "--entrypoint", container.Entrypoint) + } cmd = 
append(cmd, container.Image) if len(container.Args) > 0 { cmd = append(cmd, container.Args...) diff --git a/test/e2e/localnet-underlay.go b/test/e2e/localnet-underlay.go index 8beed9c1ba..df8caf702f 100644 --- a/test/e2e/localnet-underlay.go +++ b/test/e2e/localnet-underlay.go @@ -1,52 +1 @@ package e2e - -import ( - "fmt" - "os" - "os/exec" -) - -// TODO: make this function idempotent; use golang netlink instead -func createVLANInterface(deviceName string, vlanID string, ipAddress *string) error { - vlan := vlanName(deviceName, vlanID) - cmd := exec.Command("sudo", "ip", "link", "add", "link", deviceName, "name", vlan, "type", "vlan", "id", vlanID) - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - return fmt.Errorf("failed to create vlan interface %s: %v", vlan, err) - } - - cmd = exec.Command("sudo", "ip", "link", "set", "dev", vlan, "up") - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - return fmt.Errorf("failed to enable vlan interface %s: %v", vlan, err) - } - - if ipAddress != nil { - cmd = exec.Command("sudo", "ip", "addr", "add", *ipAddress, "dev", vlan) - cmd.Stderr = os.Stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("failed to define the vlan interface %q IP Address %s: %v", vlan, *ipAddress, err) - } - } - return nil -} - -// TODO: make this function idempotent; use golang netlink instead -func deleteVLANInterface(deviceName string, vlanID string) error { - vlan := vlanName(deviceName, vlanID) - cmd := exec.Command("sudo", "ip", "link", "del", vlan) - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - return fmt.Errorf("failed to delete vlan interface %s: %v", vlan, err) - } - return nil -} - -func vlanName(deviceName string, vlanID string) string { - // MAX IFSIZE 16; got to truncate it to add the vlan suffix - if len(deviceName)+len(vlanID)+1 > 16 { - deviceName = deviceName[:len(deviceName)-len(vlanID)-1] - } - return fmt.Sprintf("%s.%s", deviceName, vlanID) -} diff --git a/test/e2e/multihoming.go 
b/test/e2e/multihoming.go index be949e799e..3ad1dd46e7 100644 --- a/test/e2e/multihoming.go +++ b/test/e2e/multihoming.go @@ -5,9 +5,6 @@ import ( "errors" "fmt" "net/netip" - "os" - "os/exec" - "strconv" "strings" "time" @@ -16,7 +13,6 @@ import ( . "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" - "github.com/docker/docker/client" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,6 +28,7 @@ import ( ipgenerator "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/generator/ip" util "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" ) @@ -907,9 +904,9 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { expectedOriginalMTU = 1200 ) - var netConfig networkAttachmentConfig - var underlayBridgeName string - var cmdWebServer *exec.Cmd + var ( + netConfig networkAttachmentConfig + ) underlayIP := underlayServiceIP + "/24" Context("with a service running on the underlay", func() { @@ -932,28 +929,23 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { })).To(Succeed()) }) - BeforeEach(func() { - By("adding IP to the underlay docker bridge") - cli, err := client.NewClientWithOpts(client.FromEnv) - Expect(err).NotTo(HaveOccurred()) - - gatewayIP, err := getNetworkGateway(cli, dockerNetworkName) - Expect(err).NotTo(HaveOccurred()) - - underlayBridgeName, err = findInterfaceByIP(gatewayIP) - Expect(err).NotTo(HaveOccurred()) - - cmd := exec.Command("sudo", "ip", "addr", "add", underlayIP, "dev", underlayBridgeName) - cmd.Stderr = os.Stderr - err = cmd.Run() - Expect(err).NotTo(HaveOccurred()) - }) - BeforeEach(func() { By("starting a service, connected to the underlay") - cmdWebServer = 
exec.Command("python3", "-m", "http.server", "--bind", underlayServiceIP, strconv.Itoa(int(servicePort))) - cmdWebServer.Stderr = os.Stderr - Expect(cmdWebServer.Start()).NotTo(HaveOccurred(), "failed to create web server, port might be busy") + providerCtx = infraprovider.Get().NewTestContext() + + underlayNetwork, err := infraprovider.Get().GetNetwork(dockerNetworkName) + Expect(err).NotTo(HaveOccurred(), "must get underlay network") + externalContainerName := f.Namespace.Name + "-web-server" + serviceContainerSpec := infraapi.ExternalContainer{ + Name: externalContainerName, + Image: images.AgnHost(), + Network: underlayNetwork, + Entrypoint: "bash", + Args: []string{"-c", fmt.Sprintf("ip a add %s/24 dev eth0 && ./agnhost netexec --http-port=%d", underlayServiceIP, servicePort)}, + ExtPort: servicePort, + } + _, err = providerCtx.CreateExternalContainer(serviceContainerSpec) + Expect(err).NotTo(HaveOccurred(), "must create external container 1") }) BeforeEach(func() { @@ -966,18 +958,6 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { Expect(err).NotTo(HaveOccurred()) }) - AfterEach(func() { - err := cmdWebServer.Process.Kill() - Expect(err).NotTo(HaveOccurred()) - }) - - AfterEach(func() { - cmd := exec.Command("sudo", "ip", "addr", "del", underlayIP, "dev", underlayBridgeName) - cmd.Stderr = os.Stderr - err := cmd.Run() - Expect(err).NotTo(HaveOccurred()) - }) - It("correctly sets the MTU on the pod", func() { Eventually(func() error { clientPodConfig := podConfiguration{ @@ -1008,6 +988,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { By("asserting the *client* pod can contact the underlay service") Expect(connectToServer(clientPodConfig, underlayServiceIP, servicePort)).To(Succeed()) + }) Context("and networkAttachmentDefinition is modified", func() { @@ -1315,29 +1296,30 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { VlanID: netConfig.vlanID, })).To(Succeed(), "configuring the OVS bridge") - 
By(fmt.Sprintf("creating a VLAN interface on top of the bridge connecting the cluster nodes with IP: %s", underlayIP)) - cli, err := client.NewClientWithOpts(client.FromEnv) - Expect(err).NotTo(HaveOccurred()) - - gatewayIP, err := getNetworkGateway(cli, dockerNetworkName) - Expect(err).NotTo(HaveOccurred()) - - underlayBridgeName, err = findInterfaceByIP(gatewayIP) - Expect(err).NotTo(HaveOccurred()) - Expect(createVLANInterface(underlayBridgeName, strconv.Itoa(vlanID), &underlayIP)).To( - Succeed(), - "create a VLAN interface on the bridge interconnecting the cluster nodes", - ) - - By("starting a service, connected to the underlay") - cmdWebServer = exec.Command("python3", "-m", "http.server", "--bind", underlayServiceIP, strconv.Itoa(port)) - cmdWebServer.Stderr = os.Stderr - Expect(cmdWebServer.Start()).NotTo(HaveOccurred(), "failed to create web server, port might be busy") - }) + By("starting a service, connected to the underlay over a VLAN") + providerCtx = infraprovider.Get().NewTestContext() + + ifName := "eth0" + vlanName := fmt.Sprintf("%s.%d", ifName, vlanID) + underlayNetwork, err := infraprovider.Get().GetNetwork(dockerNetworkName) + Expect(err).NotTo(HaveOccurred(), "must get underlay network") + externalContainerName := f.Namespace.Name + "-web-server" + serviceContainerSpec := infraapi.ExternalContainer{ + Name: externalContainerName, + Image: images.AgnHost(), + Network: underlayNetwork, + Entrypoint: "bash", + ExtPort: servicePort, + Args: []string{"-c", fmt.Sprintf(` +ip link add link %[1]s name %[2]s type vlan id %[3]d +ip link set dev %[2]s up +ip a add %[4]s/24 dev %[2]s +./agnhost netexec --http-port=%[5]d +`, ifName, vlanName, vlanID, underlayServiceIP, servicePort)}, + } + _, err = providerCtx.CreateExternalContainer(serviceContainerSpec) + Expect(err).NotTo(HaveOccurred(), "must create external container 1") - AfterEach(func() { - Expect(cmdWebServer.Process.Kill()).NotTo(HaveOccurred(), "kill the python webserver") - 
Expect(deleteVLANInterface(underlayBridgeName, strconv.Itoa(vlanID))).NotTo(HaveOccurred(), "remove the underlay physical configuration") }) It("the same bridge mapping can be shared by a separate VLAN by using the physical network name attribute", func() { @@ -1370,6 +1352,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { By(fmt.Sprintf("asserting the *client* pod can contact the underlay service with IP %q on the separate vlan", underlayIP)) Expect(connectToServer(clientPodConfig, underlayServiceIP, servicePort)).To(Succeed()) + }) }) }) From 956981a0f102ab6d34b32a9e3b9aed1c5d18f05f Mon Sep 17 00:00:00 2001 From: Enrique Llorente Date: Wed, 25 Jun 2025 12:12:39 +0200 Subject: [PATCH 120/181] kv, e2e: Use PrimaryNetwork() Signed-off-by: Enrique Llorente --- test/e2e/kubevirt.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/e2e/kubevirt.go b/test/e2e/kubevirt.go index 6410f16087..839301ae11 100644 --- a/test/e2e/kubevirt.go +++ b/test/e2e/kubevirt.go @@ -1671,11 +1671,11 @@ write_files: ingress string } var ( - containerNetwork = func(td testData) string { + containerNetwork = func(td testData) (infraapi.Network, error) { if td.ingress == "routed" { - return "bgpnet" + return infraprovider.Get().GetNetwork("bgpnet") } - return "kind" + return infraprovider.Get().PrimaryNetwork() } exposeVMIperfServer = func(td testData, vmi *kubevirtv1.VirtualMachineInstance, vmiAddresses []string) ([]string, int32) { GinkgoHelper() @@ -1754,7 +1754,7 @@ write_files: var externalContainer infraapi.ExternalContainer if td.role == udnv1.NetworkRolePrimary { - providerNetwork, err := infraprovider.Get().GetNetwork(containerNetwork(td)) + providerNetwork, err := containerNetwork(td) Expect(err).ShouldNot(HaveOccurred(), "primary network must be available to attach containers") externalContainerPort := infraprovider.Get().GetExternalContainerPort() externalContainerName := namespace + "-iperf" @@ -1780,8 +1780,8 @@ write_files: if 
td.ingress == "routed" { // pre=created test dependency and therefore we dont delete frrExternalContainer := infraapi.ExternalContainer{Name: "frr"} - frrNetwork, err := infraprovider.Get().GetNetwork(containerNetwork(td)) - Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed to fetch network %q: %v", containerNetwork(td), err)) + frrNetwork, err := containerNetwork(td) + Expect(err).NotTo(HaveOccurred()) frrExternalContainerInterface, err := infraprovider.Get().GetExternalContainerNetworkInterface(frrExternalContainer, frrNetwork) Expect(err).NotTo(HaveOccurred(), "must fetch FRR container network interface attached to secondary network") From f1a4b4b0fd9b4ce643aaf1f8edb4ed960d46499a Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Wed, 2 Jul 2025 12:10:39 +0200 Subject: [PATCH 121/181] [node/egressipgw] Move egressIP functionality to its own package. Make required objects public Signed-off-by: Nadia Pinaeva --- .../node/{ => egressip}/gateway_egressip.go | 60 +++++++++---------- .../egressip/gateway_egressip_suite_test.go | 13 ++++ .../{ => egressip}/gateway_egressip_test.go | 36 +++++------ go-controller/pkg/node/gateway.go | 15 ++--- go-controller/pkg/node/gateway_shared_intf.go | 3 +- 5 files changed, 71 insertions(+), 56 deletions(-) rename go-controller/pkg/node/{ => egressip}/gateway_egressip.go (91%) create mode 100644 go-controller/pkg/node/egressip/gateway_egressip_suite_test.go rename go-controller/pkg/node/{ => egressip}/gateway_egressip_test.go (95%) diff --git a/go-controller/pkg/node/gateway_egressip.go b/go-controller/pkg/node/egressip/gateway_egressip.go similarity index 91% rename from go-controller/pkg/node/gateway_egressip.go rename to go-controller/pkg/node/egressip/gateway_egressip.go index 13e41c4542..38bd2b058e 100644 --- a/go-controller/pkg/node/gateway_egressip.go +++ b/go-controller/pkg/node/egressip/gateway_egressip.go @@ -1,4 +1,4 @@ -package node +package egressip import ( "encoding/json" @@ -75,15 +75,15 @@ func (e markIPs) 
containsIP(ip net.IP) bool { return false } -type markIPsCache struct { +type MarkIPsCache struct { mu sync.Mutex hasSyncOnce bool markToIPs markIPs IPToMark map[string]int } -func newMarkIPsCache() *markIPsCache { - return &markIPsCache{ +func NewMarkIPsCache() *MarkIPsCache { + return &MarkIPsCache{ mu: sync.Mutex{}, markToIPs: markIPs{ v4: make(map[int]string), @@ -93,7 +93,7 @@ func newMarkIPsCache() *markIPsCache { } } -func (mic *markIPsCache) IsIPPresent(ip net.IP) bool { +func (mic *MarkIPsCache) IsIPPresent(ip net.IP) bool { mic.mu.Lock() defer mic.mu.Unlock() if ip == nil { @@ -103,7 +103,7 @@ func (mic *markIPsCache) IsIPPresent(ip net.IP) bool { return isFound } -func (mic *markIPsCache) insertMarkIP(pktMark util.EgressIPMark, ip net.IP) { +func (mic *MarkIPsCache) insertMarkIP(pktMark util.EgressIPMark, ip net.IP) { mic.mu.Lock() defer mic.mu.Unlock() if ip == nil { @@ -113,7 +113,7 @@ func (mic *markIPsCache) insertMarkIP(pktMark util.EgressIPMark, ip net.IP) { mic.IPToMark[ip.String()] = pktMark.ToInt() } -func (mic *markIPsCache) deleteMarkIP(pktMark util.EgressIPMark, ip net.IP) { +func (mic *MarkIPsCache) deleteMarkIP(pktMark util.EgressIPMark, ip net.IP) { mic.mu.Lock() defer mic.mu.Unlock() if ip == nil { @@ -123,7 +123,7 @@ func (mic *markIPsCache) deleteMarkIP(pktMark util.EgressIPMark, ip net.IP) { delete(mic.IPToMark, ip.String()) } -func (mic *markIPsCache) replaceAll(markIPs markIPs) { +func (mic *MarkIPsCache) replaceAll(markIPs markIPs) { mic.mu.Lock() mic.markToIPs = markIPs for mark, ipv4 := range markIPs.v4 { @@ -135,7 +135,7 @@ func (mic *markIPsCache) replaceAll(markIPs markIPs) { mic.mu.Unlock() } -func (mic *markIPsCache) GetIPv4() map[int]string { +func (mic *MarkIPsCache) GetIPv4() map[int]string { mic.mu.Lock() defer mic.mu.Unlock() dupe := make(map[int]string) @@ -148,7 +148,7 @@ func (mic *markIPsCache) GetIPv4() map[int]string { return dupe } -func (mic *markIPsCache) GetIPv6() map[int]string { +func (mic *MarkIPsCache) 
GetIPv6() map[int]string { mic.mu.Lock() defer mic.mu.Unlock() dupe := make(map[int]string) @@ -161,19 +161,19 @@ func (mic *markIPsCache) GetIPv6() map[int]string { return dupe } -func (mic *markIPsCache) HasSyncdOnce() bool { +func (mic *MarkIPsCache) HasSyncdOnce() bool { mic.mu.Lock() defer mic.mu.Unlock() return mic.hasSyncOnce } -func (mic *markIPsCache) setSyncdOnce() { +func (mic *MarkIPsCache) setSyncdOnce() { mic.mu.Lock() mic.hasSyncOnce = true mic.mu.Unlock() } -type bridgeEIPAddrManager struct { +type BridgeEIPAddrManager struct { nodeName string bridgeName string nodeAnnotationMu sync.Mutex @@ -182,18 +182,18 @@ type bridgeEIPAddrManager struct { nodeLister corev1listers.NodeLister kube kube.Interface addrManager *linkmanager.Controller - cache *markIPsCache + cache *MarkIPsCache } -// newBridgeEIPAddrManager manages EgressIP IPs that must be added to ovs bridges to support EgressIP feature for user +// NewBridgeEIPAddrManager manages EgressIP IPs that must be added to ovs bridges to support EgressIP feature for user // defined networks. It saves the assigned IPs to its respective Node annotation in-order to understand which IPs it assigned // prior to restarting. // It provides the assigned IPs info node IP handler. Node IP handler must not consider assigned EgressIP IPs as possible node IPs. // Openflow manager must generate the SNAT openflow conditional on packet marks and therefore needs access to EIP IPs and associated packet marks. -// bridgeEIPAddrManager must be able to force Openflow manager to resync if EgressIP assignment for the node changes. -func newBridgeEIPAddrManager(nodeName, bridgeName string, linkManager *linkmanager.Controller, - kube kube.Interface, eIPInformer egressipinformers.EgressIPInformer, nodeInformer corev1informers.NodeInformer) *bridgeEIPAddrManager { - return &bridgeEIPAddrManager{ +// BridgeEIPAddrManager must be able to force Openflow manager to resync if EgressIP assignment for the node changes. 
+func NewBridgeEIPAddrManager(nodeName, bridgeName string, linkManager *linkmanager.Controller, + kube kube.Interface, eIPInformer egressipinformers.EgressIPInformer, nodeInformer corev1informers.NodeInformer) *BridgeEIPAddrManager { + return &BridgeEIPAddrManager{ nodeName: nodeName, // k8 node name bridgeName: bridgeName, // bridge name for which EIP IPs are managed nodeAnnotationMu: sync.Mutex{}, // mu for updating Node annotation @@ -202,15 +202,15 @@ func newBridgeEIPAddrManager(nodeName, bridgeName string, linkManager *linkmanag nodeLister: nodeInformer.Lister(), kube: kube, addrManager: linkManager, - cache: newMarkIPsCache(), // cache to store pkt mark -> EIP IP. + cache: NewMarkIPsCache(), // cache to store pkt mark -> EIP IP. } } -func (g *bridgeEIPAddrManager) GetCache() *markIPsCache { +func (g *BridgeEIPAddrManager) GetCache() *MarkIPsCache { return g.cache } -func (g *bridgeEIPAddrManager) addEgressIP(eip *egressipv1.EgressIP) (bool, error) { +func (g *BridgeEIPAddrManager) AddEgressIP(eip *egressipv1.EgressIP) (bool, error) { var isUpdated bool if !util.IsEgressIPMarkSet(eip.Annotations) { return isUpdated, nil @@ -237,7 +237,7 @@ func (g *bridgeEIPAddrManager) addEgressIP(eip *egressipv1.EgressIP) (bool, erro return isUpdated, nil } -func (g *bridgeEIPAddrManager) updateEgressIP(oldEIP, newEIP *egressipv1.EgressIP) (bool, error) { +func (g *BridgeEIPAddrManager) UpdateEgressIP(oldEIP, newEIP *egressipv1.EgressIP) (bool, error) { var isUpdated bool // at most, one status item for this node will be found. 
for _, oldStatus := range oldEIP.Status.Items { @@ -293,7 +293,7 @@ func (g *bridgeEIPAddrManager) updateEgressIP(oldEIP, newEIP *egressipv1.EgressI return isUpdated, nil } -func (g *bridgeEIPAddrManager) deleteEgressIP(eip *egressipv1.EgressIP) (bool, error) { +func (g *BridgeEIPAddrManager) DeleteEgressIP(eip *egressipv1.EgressIP) (bool, error) { var isUpdated bool if !util.IsEgressIPMarkSet(eip.Annotations) { return isUpdated, nil @@ -322,7 +322,7 @@ func (g *bridgeEIPAddrManager) deleteEgressIP(eip *egressipv1.EgressIP) (bool, e return isUpdated, nil } -func (g *bridgeEIPAddrManager) syncEgressIP(objs []interface{}) error { +func (g *BridgeEIPAddrManager) SyncEgressIP(objs []interface{}) error { // caller must synchronise annotIPs, err := g.getAnnotationIPs() if err != nil { @@ -380,7 +380,7 @@ func (g *bridgeEIPAddrManager) syncEgressIP(objs []interface{}) error { // addIPToAnnotation adds an address to the collection of existing addresses stored in the nodes annotation. Caller // may repeat addition of addresses without care for duplicate addresses being added. -func (g *bridgeEIPAddrManager) addIPToAnnotation(candidateIP net.IP) error { +func (g *BridgeEIPAddrManager) addIPToAnnotation(candidateIP net.IP) error { g.nodeAnnotationMu.Lock() defer g.nodeAnnotationMu.Unlock() return retry.RetryOnConflict(retry.DefaultRetry, func() error { @@ -412,7 +412,7 @@ func (g *bridgeEIPAddrManager) addIPToAnnotation(candidateIP net.IP) error { // deleteIPsFromAnnotation deletes address from annotation. If multiple users, callers must synchronise. // deletion of address that doesn't exist will not cause an error. 
-func (g *bridgeEIPAddrManager) deleteIPsFromAnnotation(candidateIPs ...net.IP) error { +func (g *BridgeEIPAddrManager) deleteIPsFromAnnotation(candidateIPs ...net.IP) error { g.nodeAnnotationMu.Lock() defer g.nodeAnnotationMu.Unlock() return retry.RetryOnConflict(retry.DefaultRetry, func() error { @@ -446,7 +446,7 @@ func (g *bridgeEIPAddrManager) deleteIPsFromAnnotation(candidateIPs ...net.IP) e }) } -func (g *bridgeEIPAddrManager) addIPBridge(ip net.IP) error { +func (g *BridgeEIPAddrManager) addIPBridge(ip net.IP) error { link, err := util.GetNetLinkOps().LinkByName(g.bridgeName) if err != nil { return fmt.Errorf("failed to get link obj by name %s: %v", g.bridgeName, err) @@ -454,7 +454,7 @@ func (g *bridgeEIPAddrManager) addIPBridge(ip net.IP) error { return g.addrManager.AddAddress(getEIPBridgeNetlinkAddress(ip, link.Attrs().Index)) } -func (g *bridgeEIPAddrManager) deleteIPBridge(ip net.IP) error { +func (g *BridgeEIPAddrManager) deleteIPBridge(ip net.IP) error { link, err := util.GetNetLinkOps().LinkByName(g.bridgeName) if err != nil { return fmt.Errorf("failed to get link obj by name %s: %v", g.bridgeName, err) @@ -464,7 +464,7 @@ func (g *bridgeEIPAddrManager) deleteIPBridge(ip net.IP) error { // getAnnotationIPs retrieves the egress IP annotation from the current node Nodes object. If multiple users, callers must synchronise. 
// if annotation isn't present, empty set is returned -func (g *bridgeEIPAddrManager) getAnnotationIPs() ([]net.IP, error) { +func (g *BridgeEIPAddrManager) getAnnotationIPs() ([]net.IP, error) { node, err := g.nodeLister.Get(g.nodeName) if err != nil { return nil, fmt.Errorf("failed to get node %s from lister: %v", g.nodeName, err) diff --git a/go-controller/pkg/node/egressip/gateway_egressip_suite_test.go b/go-controller/pkg/node/egressip/gateway_egressip_suite_test.go new file mode 100644 index 0000000000..d9d627c882 --- /dev/null +++ b/go-controller/pkg/node/egressip/gateway_egressip_suite_test.go @@ -0,0 +1,13 @@ +package egressip + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestNodeSuite(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Node Gateway EgressIP Suite") +} diff --git a/go-controller/pkg/node/gateway_egressip_test.go b/go-controller/pkg/node/egressip/gateway_egressip_test.go similarity index 95% rename from go-controller/pkg/node/gateway_egressip_test.go rename to go-controller/pkg/node/egressip/gateway_egressip_test.go index db43f7450a..07a03a87b6 100644 --- a/go-controller/pkg/node/gateway_egressip_test.go +++ b/go-controller/pkg/node/egressip/gateway_egressip_test.go @@ -1,4 +1,4 @@ -package node +package egressip import ( "fmt" @@ -67,7 +67,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, emptyAnnotation) defer stopFn() eip := getEIPAssignedToNode(nodeName, mark, ipV4Addr) - isUpdated, err := addrMgr.addEgressIP(eip) + isUpdated, err := addrMgr.AddEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) node, err := addrMgr.nodeLister.Get(nodeName) @@ -82,7 +82,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, emptyAnnotation) defer stopFn() eip 
:= getEIPAssignedToNode(nodeName, "", ipV4Addr) - isUpdated, err := addrMgr.addEgressIP(eip) + isUpdated, err := addrMgr.AddEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeFalse()) node, err := addrMgr.nodeLister.Get(nodeName) @@ -97,7 +97,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, emptyAnnotation) defer stopFn() eip := getEIPAssignedToNode(nodeName, "not-an-integer", ipV4Addr) - isUpdated, err := addrMgr.addEgressIP(eip) + isUpdated, err := addrMgr.AddEgressIP(eip) gomega.Expect(err).Should(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeFalse()) node, err := addrMgr.nodeLister.Get(nodeName) @@ -117,7 +117,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, generateAnnotFromIPs(ipV4Addr2)) defer stopFn() eip := getEIPAssignedToNode(nodeName, mark, ipV4Addr) - isUpdated, err := addrMgr.addEgressIP(eip) + isUpdated, err := addrMgr.AddEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) node, err := addrMgr.nodeLister.Get(nodeName) @@ -140,7 +140,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { defer stopFn() assignedEIP := getEIPAssignedToNode(nodeName, mark, ipV4Addr) unassignedEIP := getEIPNotAssignedToNode(mark, ipV4Addr) - isUpdated, err := addrMgr.updateEgressIP(unassignedEIP, assignedEIP) + isUpdated, err := addrMgr.UpdateEgressIP(unassignedEIP, assignedEIP) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) node, err := addrMgr.nodeLister.Get(nodeName) @@ -162,10 +162,10 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { defer stopFn() assignedEIP := 
getEIPAssignedToNode(nodeName, mark, ipV4Addr) unassignedEIP := getEIPNotAssignedToNode(mark, ipV4Addr) - isUpdated, err := addrMgr.updateEgressIP(unassignedEIP, assignedEIP) + isUpdated, err := addrMgr.UpdateEgressIP(unassignedEIP, assignedEIP) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) - isUpdated, err = addrMgr.updateEgressIP(assignedEIP, unassignedEIP) + isUpdated, err = addrMgr.UpdateEgressIP(assignedEIP, unassignedEIP) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) node, err := addrMgr.nodeLister.Get(nodeName) @@ -191,10 +191,10 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { unassignedEIP := getEIPNotAssignedToNode(mark, ipV4Addr) assignedEIP1 := getEIPAssignedToNode(nodeName, mark, ipV4Addr) assignedEIP2 := getEIPAssignedToNode(nodeName, mark2, ipV4Addr2) - isUpdated, err := addrMgr.updateEgressIP(unassignedEIP, assignedEIP1) + isUpdated, err := addrMgr.UpdateEgressIP(unassignedEIP, assignedEIP1) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) - isUpdated, err = addrMgr.updateEgressIP(assignedEIP1, assignedEIP2) + isUpdated, err = addrMgr.UpdateEgressIP(assignedEIP1, assignedEIP2) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) node, err := addrMgr.nodeLister.Get(nodeName) @@ -221,10 +221,10 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, emptyAnnotation) defer stopFn() eip := getEIPAssignedToNode(nodeName, mark, ipV4Addr) - isUpdated, err := addrMgr.addEgressIP(eip) + isUpdated, err := addrMgr.AddEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") 
gomega.Expect(isUpdated).Should(gomega.BeTrue()) - isUpdated, err = addrMgr.deleteEgressIP(eip) + isUpdated, err = addrMgr.DeleteEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) node, err := addrMgr.nodeLister.Get(nodeName) @@ -240,7 +240,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, generateAnnotFromIPs(ipV4Addr2)) defer stopFn() eip := getEIPNotAssignedToNode(mark, ipV4Addr) - isUpdated, err := addrMgr.deleteEgressIP(eip) + isUpdated, err := addrMgr.DeleteEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeFalse()) node, err := addrMgr.nodeLister.Get(nodeName) @@ -265,7 +265,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { eipAssigned1 := getEIPAssignedToNode(nodeName, mark, ipV4Addr) eipAssigned2 := getEIPAssignedToNode(nodeName, mark2, ipV4Addr2) eipUnassigned3 := getEIPNotAssignedToNode(mark3, ipV4Addr3) - err := addrMgr.syncEgressIP([]interface{}{eipAssigned1, eipAssigned2, eipUnassigned3}) + err := addrMgr.SyncEgressIP([]interface{}{eipAssigned1, eipAssigned2, eipUnassigned3}) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process valid EgressIPs") node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") @@ -289,7 +289,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { defer stopFn() eipAssigned1 := getEIPAssignedToNode(nodeName, mark, ipV4Addr) eipAssigned2 := getEIPAssignedToNode(nodeName, mark2, ipV4Addr2) - err := addrMgr.syncEgressIP([]interface{}{eipAssigned1, eipAssigned2}) + err := addrMgr.SyncEgressIP([]interface{}{eipAssigned1, eipAssigned2}) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process valid EgressIPs") node, err := addrMgr.nodeLister.Get(nodeName) 
gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") @@ -306,7 +306,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, emptyAnnotation) // previously configured IP defer stopFn() eipAssigned := getEIPAssignedToNode(nodeName, "", ipV4Addr) - err := addrMgr.syncEgressIP([]interface{}{eipAssigned}) + err := addrMgr.SyncEgressIP([]interface{}{eipAssigned}) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process valid EgressIPs") node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") @@ -315,7 +315,7 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { }) }) -func initBridgeEIPAddrManager(nodeName, bridgeName string, bridgeEIPAnnot string) (*bridgeEIPAddrManager, func()) { +func initBridgeEIPAddrManager(nodeName, bridgeName string, bridgeEIPAnnot string) (*BridgeEIPAddrManager, func()) { node := &corev1.Node{ ObjectMeta: metav1.ObjectMeta{Name: nodeName, Annotations: map[string]string{}}, } @@ -327,7 +327,7 @@ func initBridgeEIPAddrManager(nodeName, bridgeName string, bridgeEIPAnnot string gomega.Expect(watchFactory.Start()).Should(gomega.Succeed(), "watch factory should start") gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "watch factory creation must succeed") linkManager := linkmanager.NewController(nodeName, true, true, nil) - return newBridgeEIPAddrManager(nodeName, bridgeName, linkManager, &kube.Kube{KClient: client}, watchFactory.EgressIPInformer(), watchFactory.NodeCoreInformer()), + return NewBridgeEIPAddrManager(nodeName, bridgeName, linkManager, &kube.Kube{KClient: client}, watchFactory.EgressIPInformer(), watchFactory.NodeCoreInformer()), watchFactory.Shutdown } diff --git a/go-controller/pkg/node/gateway.go b/go-controller/pkg/node/gateway.go index db1bcae279..cae74284b7 100644 --- a/go-controller/pkg/node/gateway.go +++ 
b/go-controller/pkg/node/gateway.go @@ -17,6 +17,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/informer" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/egressip" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/retry" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -49,7 +50,7 @@ type gateway struct { nodePortWatcher informer.ServiceAndEndpointsEventHandler openflowManager *openflowManager nodeIPManager *addressManager - bridgeEIPAddrManager *bridgeEIPAddrManager + bridgeEIPAddrManager *egressip.BridgeEIPAddrManager initFunc func() error readyFunc func() (bool, error) @@ -233,7 +234,7 @@ func (g *gateway) AddEgressIP(eip *egressipv1.EgressIP) error { if !util.IsNetworkSegmentationSupportEnabled() || !config.OVNKubernetesFeature.EnableInterconnect || config.Gateway.Mode == config.GatewayModeDisabled { return nil } - isSyncRequired, err := g.bridgeEIPAddrManager.addEgressIP(eip) + isSyncRequired, err := g.bridgeEIPAddrManager.AddEgressIP(eip) if err != nil { return err } @@ -249,7 +250,7 @@ func (g *gateway) UpdateEgressIP(oldEIP, newEIP *egressipv1.EgressIP) error { if !util.IsNetworkSegmentationSupportEnabled() || !config.OVNKubernetesFeature.EnableInterconnect || config.Gateway.Mode == config.GatewayModeDisabled { return nil } - isSyncRequired, err := g.bridgeEIPAddrManager.updateEgressIP(oldEIP, newEIP) + isSyncRequired, err := g.bridgeEIPAddrManager.UpdateEgressIP(oldEIP, newEIP) if err != nil { return err } @@ -265,7 +266,7 @@ func (g *gateway) DeleteEgressIP(eip *egressipv1.EgressIP) error { if !util.IsNetworkSegmentationSupportEnabled() || !config.OVNKubernetesFeature.EnableInterconnect || config.Gateway.Mode == config.GatewayModeDisabled { return nil } - isSyncRequired, err := g.bridgeEIPAddrManager.deleteEgressIP(eip) + 
isSyncRequired, err := g.bridgeEIPAddrManager.DeleteEgressIP(eip) if err != nil { return err } @@ -281,7 +282,7 @@ func (g *gateway) SyncEgressIP(eips []interface{}) error { if !util.IsNetworkSegmentationSupportEnabled() || !config.OVNKubernetesFeature.EnableInterconnect || config.Gateway.Mode == config.GatewayModeDisabled { return nil } - if err := g.bridgeEIPAddrManager.syncEgressIP(eips); err != nil { + if err := g.bridgeEIPAddrManager.SyncEgressIP(eips); err != nil { return err } if err := g.Reconcile(); err != nil { @@ -552,7 +553,7 @@ type bridgeConfiguration struct { ofPortPhys string ofPortHost string netConfig map[string]*bridgeUDNConfiguration - eipMarkIPs *markIPsCache + eipMarkIPs *egressip.MarkIPsCache nextHops []net.IP } @@ -606,7 +607,7 @@ func bridgeForInterface(intfName, nodeName, netConfig: map[string]*bridgeUDNConfiguration{ types.DefaultNetworkName: defaultNetConfig, }, - eipMarkIPs: newMarkIPsCache(), + eipMarkIPs: egressip.NewMarkIPsCache(), } if len(gwNextHops) > 0 { res.nextHops = gwNextHops diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index 922a68a2bd..bc8317c8de 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -26,6 +26,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/egressip" nodeipt "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/iptables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/linkmanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/managementport" @@ -2470,7 +2471,7 @@ func newGateway( } } if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && config.Gateway.Mode != config.GatewayModeDisabled { - 
gw.bridgeEIPAddrManager = newBridgeEIPAddrManager(nodeName, gwBridge.bridgeName, linkManager, kube, watchFactory.EgressIPInformer(), watchFactory.NodeCoreInformer()) + gw.bridgeEIPAddrManager = egressip.NewBridgeEIPAddrManager(nodeName, gwBridge.bridgeName, linkManager, kube, watchFactory.EgressIPInformer(), watchFactory.NodeCoreInformer()) + gwBridge.eipMarkIPs = gw.bridgeEIPAddrManager.GetCache() } gw.nodeIPManager = newAddressManager(nodeName, kube, mgmtPort, watchFactory, gwBridge) From b65a01efe4b96ae36c9256c175e6c25d5c903b76 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Wed, 2 Jul 2025 12:57:30 +0200 Subject: [PATCH 122/181] [node/bridgeconfig] move [udn]bridgeconfig to its own package. Make all fields and methods public for now. Create node/util package for shared functions. Signed-off-by: Nadia Pinaeva --- go-controller/.golangci.yml | 4 + .../pkg/node/bridgeconfig/bridgeconfig.go | 423 ++++++++++++++++++ .../default_node_network_controller_test.go | 49 +- go-controller/pkg/node/gateway.go | 234 +--------- go-controller/pkg/node/gateway_init.go | 143 +----- .../pkg/node/gateway_init_linux_test.go | 43 +- .../pkg/node/gateway_localnet_linux_test.go | 11 +- go-controller/pkg/node/gateway_nftables.go | 11 +- go-controller/pkg/node/gateway_shared_intf.go | 327 +++++++------- go-controller/pkg/node/gateway_udn.go | 150 +------ go-controller/pkg/node/gateway_udn_test.go | 73 +-- go-controller/pkg/node/helper_linux.go | 17 - .../pkg/node/node_ip_handler_linux.go | 13 +- .../pkg/node/node_ip_handler_linux_test.go | 3 +- go-controller/pkg/node/openflow_manager.go | 53 +-- go-controller/pkg/node/types/const.go | 6 + go-controller/pkg/node/util/util.go | 92 ++++ .../pkg/node/util/util_suite_test.go | 13 + go-controller/pkg/node/util/util_test.go | 57 +++ 19 files changed, 905 insertions(+), 817 deletions(-) create mode 100644 go-controller/pkg/node/bridgeconfig/bridgeconfig.go create mode 100644 go-controller/pkg/node/types/const.go create mode 100644 
go-controller/pkg/node/util/util.go create mode 100644 go-controller/pkg/node/util/util_suite_test.go create mode 100644 go-controller/pkg/node/util/util_test.go diff --git a/go-controller/.golangci.yml b/go-controller/.golangci.yml index d381676a37..91be64adc3 100644 --- a/go-controller/.golangci.yml +++ b/go-controller/.golangci.yml @@ -60,6 +60,10 @@ linters-settings: # Other frequently used deps - pkg: github.com/ovn-kubernetes/libovsdb/ovsdb alias: "" + - pkg: github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/util + alias: nodeutil + - pkg: github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types + alias: nodetypes revive: rules: diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go new file mode 100644 index 0000000000..c3f3beae32 --- /dev/null +++ b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go @@ -0,0 +1,423 @@ +package bridgeconfig + +import ( + "fmt" + "net" + "strings" + "sync" + "sync/atomic" + + corev1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/generator/udn" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/egressip" + nodetypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types" + nodeutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/util" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" +) + +// BridgeUDNConfiguration holds the patchport and ctMark +// information for a given network +type BridgeUDNConfiguration struct { + PatchPort string + OfPortPatch string + MasqCTMark string + PktMark string + V4MasqIPs *udn.MasqueradeIPs + V6MasqIPs *udn.MasqueradeIPs + Subnets []config.CIDRNetworkEntry + NodeSubnets []*net.IPNet + Advertised atomic.Bool +} + +func (netConfig *BridgeUDNConfiguration) ShallowCopy() *BridgeUDNConfiguration { + copy := 
&BridgeUDNConfiguration{ + PatchPort: netConfig.PatchPort, + OfPortPatch: netConfig.OfPortPatch, + MasqCTMark: netConfig.MasqCTMark, + PktMark: netConfig.PktMark, + V4MasqIPs: netConfig.V4MasqIPs, + V6MasqIPs: netConfig.V6MasqIPs, + Subnets: netConfig.Subnets, + NodeSubnets: netConfig.NodeSubnets, + } + netConfig.Advertised.Store(netConfig.Advertised.Load()) + return copy +} + +func (netConfig *BridgeUDNConfiguration) IsDefaultNetwork() bool { + return netConfig.MasqCTMark == nodetypes.CtMarkOVN +} + +func (netConfig *BridgeUDNConfiguration) SetBridgeNetworkOfPortsInternal() error { + ofportPatch, stderr, err := util.GetOVSOfPort("get", "Interface", netConfig.PatchPort, "ofport") + if err != nil { + return fmt.Errorf("failed while waiting on patch port %q to be created by ovn-controller and "+ + "while getting ofport. stderr: %v, error: %v", netConfig.PatchPort, stderr, err) + } + netConfig.OfPortPatch = ofportPatch + return nil +} + +type BridgeConfiguration struct { + sync.Mutex + NodeName string + BridgeName string + UplinkName string + GwIface string + GwIfaceRep string + Ips []*net.IPNet + InterfaceID string + MacAddress net.HardwareAddr + OfPortPhys string + OfPortHost string + NetConfig map[string]*BridgeUDNConfiguration + EipMarkIPs *egressip.MarkIPsCache + NextHops []net.IP +} + +func (b *BridgeConfiguration) GetGatewayIface() string { + // If GwIface is set, then accelerated GW interface is present and we use it. Otherwise, use the external bridge instead.
+ if b.GwIface != "" { + return b.GwIface + } + return b.BridgeName +} + +// UpdateInterfaceIPAddresses sets and returns the bridge's current ips +func (b *BridgeConfiguration) UpdateInterfaceIPAddresses(node *corev1.Node) ([]*net.IPNet, error) { + b.Lock() + defer b.Unlock() + ifAddrs, err := nodeutil.GetNetworkInterfaceIPAddresses(b.GetGatewayIface()) + if err != nil { + return nil, err + } + + // For DPU, here we need to use the DPU host's IP address which is the tenant cluster's + // host internal IP address instead of the DPU's external bridge IP address. + if config.OvnKubeNode.Mode == types.NodeModeDPU { + nodeAddrStr, err := util.GetNodePrimaryIP(node) + if err != nil { + return nil, err + } + nodeAddr := net.ParseIP(nodeAddrStr) + if nodeAddr == nil { + return nil, fmt.Errorf("failed to parse node IP address. %v", nodeAddrStr) + } + ifAddrs, err = nodeutil.GetDPUHostPrimaryIPAddresses(nodeAddr, ifAddrs) + if err != nil { + return nil, err + } + } + + b.Ips = ifAddrs + return ifAddrs, nil +} + +func BridgeForInterface(intfName, nodeName, + physicalNetworkName string, + nodeSubnets, gwIPs []*net.IPNet, + gwNextHops []net.IP, + advertised bool) (*BridgeConfiguration, error) { + var intfRep string + var err error + isGWAcclInterface := false + gwIntf := intfName + + defaultNetConfig := &BridgeUDNConfiguration{ + MasqCTMark: nodetypes.CtMarkOVN, + Subnets: config.Default.ClusterSubnets, + NodeSubnets: nodeSubnets, + } + res := BridgeConfiguration{ + NodeName: nodeName, + NetConfig: map[string]*BridgeUDNConfiguration{ + types.DefaultNetworkName: defaultNetConfig, + }, + EipMarkIPs: egressip.NewMarkIPsCache(), + } + if len(gwNextHops) > 0 { + res.NextHops = gwNextHops + } + res.NetConfig[types.DefaultNetworkName].Advertised.Store(advertised) + + if config.Gateway.GatewayAcceleratedInterface != "" { + // Try to get representor for the specified gateway device. 
+ // If function succeeds, then it is either a valid switchdev VF or SF, and we can use this accelerated device + // for node IP, Host Ofport for Openflow etc. + // If failed - error for improper configuration option + intfRep, err = getRepresentor(config.Gateway.GatewayAcceleratedInterface) + if err != nil { + return nil, fmt.Errorf("gateway accelerated interface %s is not valid: %w", config.Gateway.GatewayAcceleratedInterface, err) + } + gwIntf = config.Gateway.GatewayAcceleratedInterface + isGWAcclInterface = true + klog.Infof("For gateway accelerated interface %s representor: %s", config.Gateway.GatewayAcceleratedInterface, intfRep) + } else { + intfRep, err = getRepresentor(gwIntf) + if err == nil { + isGWAcclInterface = true + } + } + + if isGWAcclInterface { + bridgeName, _, err := util.RunOVSVsctl("port-to-br", intfRep) + if err != nil { + return nil, fmt.Errorf("failed to find bridge that has port %s: %w", intfRep, err) + } + link, err := util.GetNetLinkOps().LinkByName(gwIntf) + if err != nil { + return nil, fmt.Errorf("failed to get netdevice link for %s: %w", gwIntf, err) + } + uplinkName, err := util.GetNicName(bridgeName) + if err != nil { + return nil, fmt.Errorf("failed to find nic name for bridge %s: %w", bridgeName, err) + } + res.BridgeName = bridgeName + res.UplinkName = uplinkName + res.GwIfaceRep = intfRep + res.GwIface = gwIntf + res.MacAddress = link.Attrs().HardwareAddr + } else if bridgeName, _, err := util.RunOVSVsctl("port-to-br", intfName); err == nil { + // This is an OVS bridge's internal port + uplinkName, err := util.GetNicName(bridgeName) + if err != nil { + return nil, fmt.Errorf("failed to find nic name for bridge %s: %w", bridgeName, err) + } + res.BridgeName = bridgeName + res.GwIface = bridgeName + res.UplinkName = uplinkName + gwIntf = bridgeName + } else if _, _, err := util.RunOVSVsctl("br-exists", intfName); err != nil { + // This is not a OVS bridge. 
We need to create an OVS bridge + // and add cluster.GatewayIntf as a port of that bridge. + bridgeName, err := util.NicToBridge(intfName) + if err != nil { + return nil, fmt.Errorf("nicToBridge failed for %s: %w", intfName, err) + } + res.BridgeName = bridgeName + res.GwIface = bridgeName + res.UplinkName = intfName + gwIntf = bridgeName + } else { + // gateway interface is an OVS bridge + uplinkName, err := getIntfName(intfName) + if err != nil { + if config.Gateway.Mode == config.GatewayModeLocal && config.Gateway.AllowNoUplink { + klog.Infof("Could not find uplink for %s, setup gateway bridge with no uplink port, egress IP and egress GW will not work", intfName) + } else { + return nil, fmt.Errorf("failed to find intfName for %s: %w", intfName, err) + } + } else { + res.UplinkName = uplinkName + } + res.BridgeName = intfName + res.GwIface = intfName + } + // Now, we get IP addresses for the bridge + if len(gwIPs) > 0 { + // use gwIPs if provided + res.Ips = gwIPs + } else { + // get IP addresses from OVS bridge. If IP does not exist, + // error out. 
+ res.Ips, err = nodeutil.GetNetworkInterfaceIPAddresses(gwIntf) + if err != nil { + return nil, fmt.Errorf("failed to get interface details for %s: %w", gwIntf, err) + } + } + + if !isGWAcclInterface { // We do not have an accelerated device for Gateway interface + res.MacAddress, err = util.GetOVSPortMACAddress(gwIntf) + if err != nil { + return nil, fmt.Errorf("failed to get MAC address for ovs port %s: %w", gwIntf, err) + } + } + + res.InterfaceID, err = bridgedGatewayNodeSetup(nodeName, res.BridgeName, physicalNetworkName) + if err != nil { + return nil, fmt.Errorf("failed to set up shared interface gateway: %v", err) + } + + // the name of the patch port created by ovn-controller is of the form + // patch--to-br-int + defaultNetConfig.PatchPort = (&util.DefaultNetInfo{}).GetNetworkScopedPatchPortName(res.BridgeName, nodeName) + + // for DPU we use the host MAC address for the Gateway configuration + if config.OvnKubeNode.Mode == types.NodeModeDPU { + hostRep, err := util.GetDPUHostInterface(res.BridgeName) + if err != nil { + return nil, err + } + res.MacAddress, err = util.GetSriovnetOps().GetRepresentorPeerMacAddress(hostRep) + if err != nil { + return nil, err + } + } + return &res, nil +} + +func getRepresentor(intfName string) (string, error) { + deviceID, err := util.GetDeviceIDFromNetdevice(intfName) + if err != nil { + return "", err + } + + return util.GetFunctionRepresentorName(deviceID) +} + +// GetBridgePortConfigurations returns a slice of Network port configurations along with the +// uplinkName and physical port's ofport value +func (b *BridgeConfiguration) GetBridgePortConfigurations() ([]*BridgeUDNConfiguration, string, string) { + b.Lock() + defer b.Unlock() + var netConfigs []*BridgeUDNConfiguration + for _, netConfig := range b.NetConfig { + netConfigs = append(netConfigs, netConfig.ShallowCopy()) + } + return netConfigs, b.UplinkName, b.OfPortPhys +} + +// AddNetworkBridgeConfig adds the patchport and ctMark value for the provided netInfo 
into the bridge configuration cache +func (b *BridgeConfiguration) AddNetworkBridgeConfig( + nInfo util.NetInfo, + nodeSubnets []*net.IPNet, + masqCTMark, pktMark uint, + v6MasqIPs, v4MasqIPs *udn.MasqueradeIPs) error { + b.Lock() + defer b.Unlock() + + netName := nInfo.GetNetworkName() + patchPort := nInfo.GetNetworkScopedPatchPortName(b.BridgeName, b.NodeName) + + _, found := b.NetConfig[netName] + if !found { + netConfig := &BridgeUDNConfiguration{ + PatchPort: patchPort, + MasqCTMark: fmt.Sprintf("0x%x", masqCTMark), + PktMark: fmt.Sprintf("0x%x", pktMark), + V4MasqIPs: v4MasqIPs, + V6MasqIPs: v6MasqIPs, + Subnets: nInfo.Subnets(), + NodeSubnets: nodeSubnets, + } + netConfig.Advertised.Store(util.IsPodNetworkAdvertisedAtNode(nInfo, b.NodeName)) + + b.NetConfig[netName] = netConfig + } else { + klog.Warningf("Trying to update bridge config for network %s which already"+ + "exists in cache...networks are not mutable...ignoring update", nInfo.GetNetworkName()) + } + return nil +} + +// DelNetworkBridgeConfig deletes the provided netInfo from the bridge configuration cache +func (b *BridgeConfiguration) DelNetworkBridgeConfig(nInfo util.NetInfo) { + b.Lock() + defer b.Unlock() + + delete(b.NetConfig, nInfo.GetNetworkName()) +} + +func (b *BridgeConfiguration) GetNetworkBridgeConfig(networkName string) *BridgeUDNConfiguration { + b.Lock() + defer b.Unlock() + return b.NetConfig[networkName] +} + +// GetActiveNetworkBridgeConfigCopy returns a shallow copy of the network configuration corresponding to the +// provided netInfo. +// +// NOTE: if the network configuration can't be found or if the network is not patched by OVN +// yet this returns nil. 
+func (b *BridgeConfiguration) GetActiveNetworkBridgeConfigCopy(networkName string) *BridgeUDNConfiguration { + b.Lock() + defer b.Unlock() + + if netConfig, found := b.NetConfig[networkName]; found && netConfig.OfPortPatch != "" { + return netConfig.ShallowCopy() + } + return nil +} + +func (b *BridgeConfiguration) PatchedNetConfigs() []*BridgeUDNConfiguration { + result := make([]*BridgeUDNConfiguration, 0, len(b.NetConfig)) + for _, netConfig := range b.NetConfig { + if netConfig.OfPortPatch == "" { + continue + } + result = append(result, netConfig) + } + return result +} + +func getIntfName(gatewayIntf string) (string, error) { + // The given (or autodetected) interface is an OVS bridge and this could be + // created by us using util.NicToBridge() or it was pre-created by the user. + + // Is intfName a port of gatewayIntf? + intfName, err := util.GetNicName(gatewayIntf) + if err != nil { + return "", err + } + _, stderr, err := util.RunOVSVsctl("get", "interface", intfName, "ofport") + if err != nil { + return "", fmt.Errorf("failed to get ofport of %s, stderr: %q, error: %v", + intfName, stderr, err) + } + return intfName, nil +} + +// bridgedGatewayNodeSetup enables forwarding on bridge interface, sets up the physical network name mappings for the bridge, +// and returns an ifaceID created from the bridge name and the node name +func bridgedGatewayNodeSetup(nodeName, bridgeName, physicalNetworkName string) (string, error) { + // IPv6 forwarding is enabled globally + if config.IPv4Mode { + // we use forward slash as path separator to allow dotted bridgeName e.g. 
foo.200 + stdout, stderr, err := util.RunSysctl("-w", fmt.Sprintf("net/ipv4/conf/%s/forwarding=1", bridgeName)) + // systctl output enforces dot as path separator + if err != nil || stdout != fmt.Sprintf("net.ipv4.conf.%s.forwarding = 1", strings.ReplaceAll(bridgeName, ".", "/")) { + return "", fmt.Errorf("could not set the correct forwarding value for interface %s: stdout: %v, stderr: %v, err: %v", + bridgeName, stdout, stderr, err) + } + } + + // ovn-bridge-mappings maps a physical network name to a local ovs bridge + // that provides connectivity to that network. It is in the form of physnet1:br1,physnet2:br2. + // Note that there may be multiple ovs bridge mappings, be sure not to override + // the mappings for the other physical network + stdout, stderr, err := util.RunOVSVsctl("--if-exists", "get", "Open_vSwitch", ".", + "external_ids:ovn-bridge-mappings") + if err != nil { + return "", fmt.Errorf("failed to get ovn-bridge-mappings stderr:%s (%v)", stderr, err) + } + // skip the existing mapping setting for the specified physicalNetworkName + mapString := "" + bridgeMappings := strings.Split(stdout, ",") + for _, bridgeMapping := range bridgeMappings { + m := strings.Split(bridgeMapping, ":") + if network := m[0]; network != physicalNetworkName { + if len(mapString) != 0 { + mapString += "," + } + mapString += bridgeMapping + } + } + if len(mapString) != 0 { + mapString += "," + } + mapString += physicalNetworkName + ":" + bridgeName + + _, stderr, err = util.RunOVSVsctl("set", "Open_vSwitch", ".", + fmt.Sprintf("external_ids:ovn-bridge-mappings=%s", mapString)) + if err != nil { + return "", fmt.Errorf("failed to set ovn-bridge-mappings for ovs bridge %s"+ + ", stderr:%s (%v)", bridgeName, stderr, err) + } + + ifaceID := bridgeName + "_" + nodeName + return ifaceID, nil +} diff --git a/go-controller/pkg/node/default_node_network_controller_test.go b/go-controller/pkg/node/default_node_network_controller_test.go index 875b0da694..de35b39e8d 100644 --- 
a/go-controller/pkg/node/default_node_network_controller_test.go +++ b/go-controller/pkg/node/default_node_network_controller_test.go @@ -22,6 +22,7 @@ import ( adminpolicybasedrouteclient "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned/fake" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube/mocks" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/managementport" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" @@ -809,14 +810,14 @@ var _ = Describe("Node", func() { Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) - defaultNetConfig := &bridgeUDNConfiguration{ - ofPortPatch: "patch-breth0_ov", + defaultNetConfig := &bridgeconfig.BridgeUDNConfiguration{ + OfPortPatch: "patch-breth0_ov", } nc.Gateway = &gateway{ openflowManager: &openflowManager{ flowCache: map[string][]string{}, - defaultBridge: &bridgeConfiguration{ - netConfig: map[string]*bridgeUDNConfiguration{ + defaultBridge: &bridgeconfig.BridgeConfiguration{ + NetConfig: map[string]*bridgeconfig.BridgeUDNConfiguration{ types.DefaultNetworkName: defaultNetConfig, }, }, @@ -921,14 +922,14 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { 169.254.254.61 } Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) - defaultNetConfig := &bridgeUDNConfiguration{ - ofPortPatch: "patch-breth0_ov", + defaultNetConfig := &bridgeconfig.BridgeUDNConfiguration{ + OfPortPatch: "patch-breth0_ov", } nc.Gateway = &gateway{ openflowManager: &openflowManager{ flowCache: map[string][]string{}, - defaultBridge: &bridgeConfiguration{ - netConfig: map[string]*bridgeUDNConfiguration{ + defaultBridge: 
&bridgeconfig.BridgeConfiguration{ + NetConfig: map[string]*bridgeconfig.BridgeUDNConfiguration{ types.DefaultNetworkName: defaultNetConfig, }, }, @@ -1075,14 +1076,14 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { 169.254.253.61 } Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) - defaultNetConfig := &bridgeUDNConfiguration{ - ofPortPatch: "patch-breth0_ov", + defaultNetConfig := &bridgeconfig.BridgeUDNConfiguration{ + OfPortPatch: "patch-breth0_ov", } nc.Gateway = &gateway{ openflowManager: &openflowManager{ flowCache: map[string][]string{}, - defaultBridge: &bridgeConfiguration{ - netConfig: map[string]*bridgeUDNConfiguration{ + defaultBridge: &bridgeconfig.BridgeConfiguration{ + NetConfig: map[string]*bridgeconfig.BridgeUDNConfiguration{ types.DefaultNetworkName: defaultNetConfig, }, }, @@ -1186,14 +1187,14 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2001:db8:1::4 } Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) - defaultNetConfig := &bridgeUDNConfiguration{ - ofPortPatch: "patch-breth0_ov", + defaultNetConfig := &bridgeconfig.BridgeUDNConfiguration{ + OfPortPatch: "patch-breth0_ov", } nc.Gateway = &gateway{ openflowManager: &openflowManager{ flowCache: map[string][]string{}, - defaultBridge: &bridgeConfiguration{ - netConfig: map[string]*bridgeUDNConfiguration{ + defaultBridge: &bridgeconfig.BridgeConfiguration{ + NetConfig: map[string]*bridgeconfig.BridgeUDNConfiguration{ types.DefaultNetworkName: defaultNetConfig, }, }, @@ -1354,14 +1355,14 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2002:db8:1::4 } Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) - defaultNetConfig := &bridgeUDNConfiguration{ - ofPortPatch: "patch-breth0_ov", + defaultNetConfig := &bridgeconfig.BridgeUDNConfiguration{ + OfPortPatch: "patch-breth0_ov", } nc.Gateway = &gateway{ openflowManager: 
&openflowManager{ flowCache: map[string][]string{}, - defaultBridge: &bridgeConfiguration{ - netConfig: map[string]*bridgeUDNConfiguration{ + defaultBridge: &bridgeconfig.BridgeConfiguration{ + NetConfig: map[string]*bridgeconfig.BridgeUDNConfiguration{ types.DefaultNetworkName: defaultNetConfig, }, }, @@ -1482,14 +1483,14 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2002:db8:1::4 } Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) - defaultNetConfig := &bridgeUDNConfiguration{ - ofPortPatch: "patch-breth0_ov", + defaultNetConfig := &bridgeconfig.BridgeUDNConfiguration{ + OfPortPatch: "patch-breth0_ov", } nc.Gateway = &gateway{ openflowManager: &openflowManager{ flowCache: map[string][]string{}, - defaultBridge: &bridgeConfiguration{ - netConfig: map[string]*bridgeUDNConfiguration{ + defaultBridge: &bridgeconfig.BridgeConfiguration{ + NetConfig: map[string]*bridgeconfig.BridgeUDNConfiguration{ types.DefaultNetworkName: defaultNetConfig, }, }, diff --git a/go-controller/pkg/node/gateway.go b/go-controller/pkg/node/gateway.go index cae74284b7..a617249c52 100644 --- a/go-controller/pkg/node/gateway.go +++ b/go-controller/pkg/node/gateway.go @@ -17,6 +17,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/informer" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/egressip" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/retry" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" @@ -355,14 +356,14 @@ func setupUDPAggregationUplink(ifname string) error { func gatewayInitInternal(nodeName, gwIntf, egressGatewayIntf string, gwNextHops []net.IP, nodeSubnets, gwIPs []*net.IPNet, advertised bool, nodeAnnotator kube.Annotator) ( - *bridgeConfiguration, *bridgeConfiguration, error) { - 
gatewayBridge, err := bridgeForInterface(gwIntf, nodeName, types.PhysicalNetworkName, nodeSubnets, gwIPs, gwNextHops, advertised) + *bridgeconfig.BridgeConfiguration, *bridgeconfig.BridgeConfiguration, error) { + gatewayBridge, err := bridgeconfig.BridgeForInterface(gwIntf, nodeName, types.PhysicalNetworkName, nodeSubnets, gwIPs, gwNextHops, advertised) if err != nil { return nil, nil, fmt.Errorf("bridge for interface failed for %s: %w", gwIntf, err) } - var egressGWBridge *bridgeConfiguration + var egressGWBridge *bridgeconfig.BridgeConfiguration if egressGatewayIntf != "" { - egressGWBridge, err = bridgeForInterface(egressGatewayIntf, nodeName, types.PhysicalNetworkExGwName, nodeSubnets, nil, nil, false) + egressGWBridge, err = bridgeconfig.BridgeForInterface(egressGatewayIntf, nodeName, types.PhysicalNetworkExGwName, nodeSubnets, nil, nil, false) if err != nil { return nil, nil, fmt.Errorf("bridge for interface failed for %s: %w", egressGatewayIntf, err) } @@ -381,7 +382,7 @@ func gatewayInitInternal(nodeName, gwIntf, egressGatewayIntf string, gwNextHops "IP fragmentation or large TCP/UDP payloads may not be forwarded correctly.") enableGatewayMTU = false } else { - chkPktLengthSupported, err := util.DetectCheckPktLengthSupport(gatewayBridge.bridgeName) + chkPktLengthSupported, err := util.DetectCheckPktLengthSupport(gatewayBridge.BridgeName) if err != nil { return nil, nil, err } @@ -415,9 +416,9 @@ func gatewayInitInternal(nodeName, gwIntf, egressGatewayIntf string, gwNextHops } if config.Default.EnableUDPAggregation { - err = setupUDPAggregationUplink(gatewayBridge.uplinkName) + err = setupUDPAggregationUplink(gatewayBridge.UplinkName) if err == nil && egressGWBridge != nil { - err = setupUDPAggregationUplink(egressGWBridge.uplinkName) + err = setupUDPAggregationUplink(egressGWBridge.UplinkName) } if err != nil { klog.Warningf("Could not enable UDP packet aggregation on uplink interface (aggregation will be disabled): %v", err) @@ -433,18 +434,18 @@ func 
gatewayInitInternal(nodeName, gwIntf, egressGatewayIntf string, gwNextHops l3GwConfig := util.L3GatewayConfig{ Mode: config.Gateway.Mode, ChassisID: chassisID, - BridgeID: gatewayBridge.bridgeName, - InterfaceID: gatewayBridge.interfaceID, - MACAddress: gatewayBridge.macAddress, - IPAddresses: gatewayBridge.ips, + BridgeID: gatewayBridge.BridgeName, + InterfaceID: gatewayBridge.InterfaceID, + MACAddress: gatewayBridge.MacAddress, + IPAddresses: gatewayBridge.Ips, NextHops: gwNextHops, NodePortEnable: config.Gateway.NodeportEnable, VLANID: &config.Gateway.VLANID, } if egressGWBridge != nil { - l3GwConfig.EgressGWInterfaceID = egressGWBridge.interfaceID - l3GwConfig.EgressGWMACAddress = egressGWBridge.macAddress - l3GwConfig.EgressGWIPAddresses = egressGWBridge.ips + l3GwConfig.EgressGWInterfaceID = egressGWBridge.InterfaceID + l3GwConfig.EgressGWMACAddress = egressGWBridge.MacAddress + l3GwConfig.EgressGWIPAddresses = egressGWBridge.Ips } err = util.SetL3GatewayConfig(nodeAnnotator, &l3GwConfig) @@ -466,7 +467,7 @@ func (g *gateway) GetGatewayBridgeIface() string { } func (g *gateway) GetGatewayIface() string { - return g.openflowManager.defaultBridge.gwIface + return g.openflowManager.defaultBridge.GetGatewayIface() } // getMaxFrameLength returns the maximum frame size (ignoring VLAN header) that a gateway can handle @@ -481,11 +482,11 @@ func (g *gateway) SetDefaultGatewayBridgeMAC(macAddr net.HardwareAddr) { } func (g *gateway) SetDefaultPodNetworkAdvertised(isPodNetworkAdvertised bool) { - g.openflowManager.defaultBridge.netConfig[types.DefaultNetworkName].advertised.Store(isPodNetworkAdvertised) + g.openflowManager.defaultBridge.NetConfig[types.DefaultNetworkName].Advertised.Store(isPodNetworkAdvertised) } func (g *gateway) GetDefaultPodNetworkAdvertised() bool { - return g.openflowManager.defaultBridge.netConfig[types.DefaultNetworkName].advertised.Load() + return g.openflowManager.defaultBridge.NetConfig[types.DefaultNetworkName].Advertised.Load() } // 
Reconcile handles triggering updates to different components of a gateway, like OFM, Services @@ -539,202 +540,3 @@ func (g *gateway) updateSNATRules() error { return addLocalGatewayPodSubnetNATRules(subnets...) } - -type bridgeConfiguration struct { - sync.Mutex - nodeName string - bridgeName string - uplinkName string - gwIface string - gwIfaceRep string - ips []*net.IPNet - interfaceID string - macAddress net.HardwareAddr - ofPortPhys string - ofPortHost string - netConfig map[string]*bridgeUDNConfiguration - eipMarkIPs *egressip.MarkIPsCache - nextHops []net.IP -} - -// updateInterfaceIPAddresses sets and returns the bridge's current ips -func (b *bridgeConfiguration) updateInterfaceIPAddresses(node *corev1.Node) ([]*net.IPNet, error) { - b.Lock() - defer b.Unlock() - ifAddrs, err := getNetworkInterfaceIPAddresses(b.gwIface) - if err != nil { - return nil, err - } - - // For DPU, here we need to use the DPU host's IP address which is the tenant cluster's - // host internal IP address instead of the DPU's external bridge IP address. - if config.OvnKubeNode.Mode == types.NodeModeDPU { - nodeAddrStr, err := util.GetNodePrimaryIP(node) - if err != nil { - return nil, err - } - nodeAddr := net.ParseIP(nodeAddrStr) - if nodeAddr == nil { - return nil, fmt.Errorf("failed to parse node IP address. 
%v", nodeAddrStr) - } - ifAddrs, err = getDPUHostPrimaryIPAddresses(nodeAddr, ifAddrs) - if err != nil { - return nil, err - } - } - - b.ips = ifAddrs - return ifAddrs, nil -} - -func bridgeForInterface(intfName, nodeName, - physicalNetworkName string, - nodeSubnets, gwIPs []*net.IPNet, - gwNextHops []net.IP, - advertised bool) (*bridgeConfiguration, error) { - var intfRep string - var err error - isGWAcclInterface := false - gwIntf := intfName - - defaultNetConfig := &bridgeUDNConfiguration{ - masqCTMark: ctMarkOVN, - subnets: config.Default.ClusterSubnets, - nodeSubnets: nodeSubnets, - } - res := bridgeConfiguration{ - nodeName: nodeName, - netConfig: map[string]*bridgeUDNConfiguration{ - types.DefaultNetworkName: defaultNetConfig, - }, - eipMarkIPs: egressip.NewMarkIPsCache(), - } - if len(gwNextHops) > 0 { - res.nextHops = gwNextHops - } - res.netConfig[types.DefaultNetworkName].advertised.Store(advertised) - - if config.Gateway.GatewayAcceleratedInterface != "" { - // Try to get representor for the specified gateway device. - // If function succeeds, then it is either a valid switchdev VF or SF, and we can use this accelerated device - // for node IP, Host Ofport for Openflow etc. 
- // If failed - error for improper configuration option - intfRep, err = getRepresentor(config.Gateway.GatewayAcceleratedInterface) - if err != nil { - return nil, fmt.Errorf("gateway accelerated interface %s is not valid: %w", config.Gateway.GatewayAcceleratedInterface, err) - } - gwIntf = config.Gateway.GatewayAcceleratedInterface - isGWAcclInterface = true - klog.Infof("For gateway accelerated interface %s representor: %s", config.Gateway.GatewayAcceleratedInterface, intfRep) - } else { - intfRep, err = getRepresentor(gwIntf) - if err == nil { - isGWAcclInterface = true - } - } - - if isGWAcclInterface { - bridgeName, _, err := util.RunOVSVsctl("port-to-br", intfRep) - if err != nil { - return nil, fmt.Errorf("failed to find bridge that has port %s: %w", intfRep, err) - } - link, err := util.GetNetLinkOps().LinkByName(gwIntf) - if err != nil { - return nil, fmt.Errorf("failed to get netdevice link for %s: %w", gwIntf, err) - } - uplinkName, err := util.GetNicName(bridgeName) - if err != nil { - return nil, fmt.Errorf("failed to find nic name for bridge %s: %w", bridgeName, err) - } - res.bridgeName = bridgeName - res.uplinkName = uplinkName - res.gwIfaceRep = intfRep - res.gwIface = gwIntf - res.macAddress = link.Attrs().HardwareAddr - } else if bridgeName, _, err := util.RunOVSVsctl("port-to-br", intfName); err == nil { - // This is an OVS bridge's internal port - uplinkName, err := util.GetNicName(bridgeName) - if err != nil { - return nil, fmt.Errorf("failed to find nic name for bridge %s: %w", bridgeName, err) - } - res.bridgeName = bridgeName - res.gwIface = bridgeName - res.uplinkName = uplinkName - gwIntf = bridgeName - } else if _, _, err := util.RunOVSVsctl("br-exists", intfName); err != nil { - // This is not a OVS bridge. We need to create a OVS bridge - // and add cluster.GatewayIntf as a port of that bridge. 
- bridgeName, err := util.NicToBridge(intfName) - if err != nil { - return nil, fmt.Errorf("nicToBridge failed for %s: %w", intfName, err) - } - res.bridgeName = bridgeName - res.gwIface = bridgeName - res.uplinkName = intfName - gwIntf = bridgeName - } else { - // gateway interface is an OVS bridge - uplinkName, err := getIntfName(intfName) - if err != nil { - if config.Gateway.Mode == config.GatewayModeLocal && config.Gateway.AllowNoUplink { - klog.Infof("Could not find uplink for %s, setup gateway bridge with no uplink port, egress IP and egress GW will not work", intfName) - } else { - return nil, fmt.Errorf("failed to find intfName for %s: %w", intfName, err) - } - } else { - res.uplinkName = uplinkName - } - res.bridgeName = intfName - res.gwIface = intfName - } - // Now, we get IP addresses for the bridge - if len(gwIPs) > 0 { - // use gwIPs if provided - res.ips = gwIPs - } else { - // get IP addresses from OVS bridge. If IP does not exist, - // error out. - res.ips, err = getNetworkInterfaceIPAddresses(gwIntf) - if err != nil { - return nil, fmt.Errorf("failed to get interface details for %s: %w", gwIntf, err) - } - } - - if !isGWAcclInterface { // We do not have an accelerated device for Gateway interface - res.macAddress, err = util.GetOVSPortMACAddress(gwIntf) - if err != nil { - return nil, fmt.Errorf("failed to get MAC address for ovs port %s: %w", gwIntf, err) - } - } - - res.interfaceID, err = bridgedGatewayNodeSetup(nodeName, res.bridgeName, physicalNetworkName) - if err != nil { - return nil, fmt.Errorf("failed to set up shared interface gateway: %v", err) - } - - // the name of the patch port created by ovn-controller is of the form - // patch--to-br-int - defaultNetConfig.patchPort = (&util.DefaultNetInfo{}).GetNetworkScopedPatchPortName(res.bridgeName, nodeName) - - // for DPU we use the host MAC address for the Gateway configuration - if config.OvnKubeNode.Mode == types.NodeModeDPU { - hostRep, err := util.GetDPUHostInterface(res.bridgeName) - 
if err != nil { - return nil, err - } - res.macAddress, err = util.GetSriovnetOps().GetRepresentorPeerMacAddress(hostRep) - if err != nil { - return nil, err - } - } - return &res, nil -} - -func getRepresentor(intfName string) (string, error) { - deviceID, err := util.GetDeviceIDFromNetdevice(intfName) - if err != nil { - return "", err - } - - return util.GetFunctionRepresentorName(deviceID) -} diff --git a/go-controller/pkg/node/gateway_init.go b/go-controller/pkg/node/gateway_init.go index 28e0fa669b..4fe0b244fd 100644 --- a/go-controller/pkg/node/gateway_init.go +++ b/go-controller/pkg/node/gateway_init.go @@ -18,96 +18,11 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/managementport" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" + nodeutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) -// bridgedGatewayNodeSetup enables forwarding on bridge interface, sets up the physical network name mappings for the bridge, -// and returns an ifaceID created from the bridge name and the node name -func bridgedGatewayNodeSetup(nodeName, bridgeName, physicalNetworkName string) (string, error) { - // IPv6 forwarding is enabled globally - if config.IPv4Mode { - // we use forward slash as path separator to allow dotted bridgeName e.g. 
foo.200 - stdout, stderr, err := util.RunSysctl("-w", fmt.Sprintf("net/ipv4/conf/%s/forwarding=1", bridgeName)) - // systctl output enforces dot as path separator - if err != nil || stdout != fmt.Sprintf("net.ipv4.conf.%s.forwarding = 1", strings.ReplaceAll(bridgeName, ".", "/")) { - return "", fmt.Errorf("could not set the correct forwarding value for interface %s: stdout: %v, stderr: %v, err: %v", - bridgeName, stdout, stderr, err) - } - } - - // ovn-bridge-mappings maps a physical network name to a local ovs bridge - // that provides connectivity to that network. It is in the form of physnet1:br1,physnet2:br2. - // Note that there may be multiple ovs bridge mappings, be sure not to override - // the mappings for the other physical network - stdout, stderr, err := util.RunOVSVsctl("--if-exists", "get", "Open_vSwitch", ".", - "external_ids:ovn-bridge-mappings") - if err != nil { - return "", fmt.Errorf("failed to get ovn-bridge-mappings stderr:%s (%v)", stderr, err) - } - // skip the existing mapping setting for the specified physicalNetworkName - mapString := "" - bridgeMappings := strings.Split(stdout, ",") - for _, bridgeMapping := range bridgeMappings { - m := strings.Split(bridgeMapping, ":") - if network := m[0]; network != physicalNetworkName { - if len(mapString) != 0 { - mapString += "," - } - mapString += bridgeMapping - } - } - if len(mapString) != 0 { - mapString += "," - } - mapString += physicalNetworkName + ":" + bridgeName - - _, stderr, err = util.RunOVSVsctl("set", "Open_vSwitch", ".", - fmt.Sprintf("external_ids:ovn-bridge-mappings=%s", mapString)) - if err != nil { - return "", fmt.Errorf("failed to set ovn-bridge-mappings for ovs bridge %s"+ - ", stderr:%s (%v)", bridgeName, stderr, err) - } - - ifaceID := bridgeName + "_" + nodeName - return ifaceID, nil -} - -// getNetworkInterfaceIPAddresses returns the IP addresses for the network interface 'iface'. 
-func getNetworkInterfaceIPAddresses(iface string) ([]*net.IPNet, error) { - allIPs, err := util.GetFilteredInterfaceV4V6IPs(iface) - if err != nil { - return nil, fmt.Errorf("could not find IP addresses: %v", err) - } - - var ips []*net.IPNet - var foundIPv4 bool - var foundIPv6 bool - for _, ip := range allIPs { - if utilnet.IsIPv6CIDR(ip) { - if config.IPv6Mode && !foundIPv6 { - // For IPv6 addresses with 128 prefix, let's try to find an appropriate subnet - // in the routing table - subnetIP, err := util.GetIPv6OnSubnet(iface, ip) - if err != nil { - return nil, fmt.Errorf("could not find IPv6 address on subnet: %v", err) - } - ips = append(ips, subnetIP) - foundIPv6 = true - } - } else if config.IPv4Mode && !foundIPv4 { - ips = append(ips, ip) - foundIPv4 = true - } - } - if config.IPv4Mode && !foundIPv4 { - return nil, fmt.Errorf("failed to find IPv4 address on interface %s", iface) - } else if config.IPv6Mode && !foundIPv6 { - return nil, fmt.Errorf("failed to find IPv6 address on interface %s", iface) - } - return ips, nil -} - func getGatewayNextHops() ([]net.IP, string, error) { var gatewayNextHops []net.IP var needIPv4NextHop bool @@ -218,52 +133,6 @@ func getGatewayNextHops() ([]net.IP, string, error) { return gatewayNextHops, gatewayIntf, nil } -// getDPUHostPrimaryIPAddresses returns the DPU host IP/Network based on K8s Node IP -// and DPU IP subnet overriden by config config.Gateway.RouterSubnet -func getDPUHostPrimaryIPAddresses(k8sNodeIP net.IP, ifAddrs []*net.IPNet) ([]*net.IPNet, error) { - // Note(adrianc): No Dual-Stack support at this point as we rely on k8s node IP to derive gateway information - // for each node. 
- var gwIps []*net.IPNet - isIPv4 := utilnet.IsIPv4(k8sNodeIP) - - // override subnet mask via config - if config.Gateway.RouterSubnet != "" { - _, addr, err := net.ParseCIDR(config.Gateway.RouterSubnet) - if err != nil { - return nil, err - } - if utilnet.IsIPv4CIDR(addr) != isIPv4 { - return nil, fmt.Errorf("unexpected gateway router subnet provided (%s). "+ - "does not match Node IP address format", config.Gateway.RouterSubnet) - } - if !addr.Contains(k8sNodeIP) { - return nil, fmt.Errorf("unexpected gateway router subnet provided (%s). "+ - "subnet does not contain Node IP address (%s)", config.Gateway.RouterSubnet, k8sNodeIP) - } - addr.IP = k8sNodeIP - gwIps = append(gwIps, addr) - } else { - // Assume Host and DPU share the same subnet - // in this case just update the matching IPNet with the Host's IP address - for _, addr := range ifAddrs { - if utilnet.IsIPv4CIDR(addr) != isIPv4 { - continue - } - // expect k8s Node IP to be contained in the given subnet - if !addr.Contains(k8sNodeIP) { - continue - } - newAddr := *addr - newAddr.IP = k8sNodeIP - gwIps = append(gwIps, &newAddr) - } - if len(gwIps) == 0 { - return nil, fmt.Errorf("could not find subnet on DPU matching node IP %s", k8sNodeIP) - } - } - return gwIps, nil -} - // getInterfaceByIP retrieves Interface that has `ip` assigned to it func getInterfaceByIP(ip net.IP) (string, error) { links, err := util.GetNetLinkOps().LinkList() @@ -345,7 +214,7 @@ func (nc *DefaultNodeNetworkController) initGatewayPreStart( egressGWInterface = interfaceForEXGW(config.Gateway.EgressGWInterface) } - ifAddrs, err = getNetworkInterfaceIPAddresses(gatewayIntf) + ifAddrs, err = nodeutil.GetNetworkInterfaceIPAddresses(gatewayIntf) if err != nil { return nil, err } @@ -353,7 +222,7 @@ func (nc *DefaultNodeNetworkController) initGatewayPreStart( // For DPU need to use the host IP addr which currently is assumed to be K8s Node cluster // internal IP address. 
if config.OvnKubeNode.Mode == types.NodeModeDPU { - ifAddrs, err = getDPUHostPrimaryIPAddresses(kubeNodeIP, ifAddrs) + ifAddrs, err = nodeutil.GetDPUHostPrimaryIPAddresses(kubeNodeIP, ifAddrs) if err != nil { return nil, err } @@ -474,7 +343,7 @@ func (nc *DefaultNodeNetworkController) initGatewayMainStart(gw *gateway, waiter // interfaceForEXGW takes the interface requested to act as exgw bridge // and returns the name of the bridge if exists, or the interface itself -// if the bridge needs to be created. In this last scenario, bridgeForInterface +// if the bridge needs to be created. In this last scenario, BridgeForInterface // will create the bridge. func interfaceForEXGW(intfName string) string { if _, _, err := util.RunOVSVsctl("br-exists", intfName); err == nil { @@ -510,7 +379,7 @@ func (nc *DefaultNodeNetworkController) initGatewayDPUHost(kubeNodeIP net.IP) er return err } - ifAddrs, err := getNetworkInterfaceIPAddresses(gatewayIntf) + ifAddrs, err := nodeutil.GetNetworkInterfaceIPAddresses(gatewayIntf) if err != nil { return err } @@ -605,7 +474,7 @@ func CleanupClusterNode(name string) error { func (nc *DefaultNodeNetworkController) updateGatewayMAC(link netlink.Link) error { // TBD-merge for dpu-host mode: if interface mac of the dpu-host interface that connects to the // gateway bridge on the dpu changes, we need to update dpu's gatewayBridge.macAddress L3 gateway - // annotation (see bridgeForInterface) + // annotation (see BridgeForInterface) if config.OvnKubeNode.Mode != types.NodeModeFull { return nil } diff --git a/go-controller/pkg/node/gateway_init_linux_test.go b/go-controller/pkg/node/gateway_init_linux_test.go index 8bc38dcbf7..9e1fc9213c 100644 --- a/go-controller/pkg/node/gateway_init_linux_test.go +++ b/go-controller/pkg/node/gateway_init_linux_test.go @@ -564,7 +564,7 @@ func shareGatewayInterfaceDPUTest(app *cli.App, testNS ns.NetNS, // exec Mocks fexec := ovntest.NewLooseCompareFakeExec() // gatewayInitInternal - // bridgeForInterface + 
// BridgeForInterface fexec.AddFakeCmd(&ovntest.ExpectedCmd{ Cmd: "ovs-vsctl --timeout=15 port-to-br " + brphys, Err: fmt.Errorf(""), @@ -1655,47 +1655,6 @@ var _ = Describe("Gateway unit tests", func() { util.SetNetLinkOpMockInst(origNetlinkInst) }) - Context("getDPUHostPrimaryIPAddresses", func() { - - It("returns Gateway IP/Subnet for kubernetes node IP", func() { - _, dpuSubnet, _ := net.ParseCIDR("10.0.0.101/24") - nodeIP := net.ParseIP("10.0.0.11") - expectedGwSubnet := []*net.IPNet{ - {IP: nodeIP, Mask: net.CIDRMask(24, 32)}, - } - gwSubnet, err := getDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) - Expect(err).ToNot(HaveOccurred()) - Expect(gwSubnet).To(Equal(expectedGwSubnet)) - }) - - It("Fails if node IP is not in host subnets", func() { - _, dpuSubnet, _ := net.ParseCIDR("10.0.0.101/24") - nodeIP := net.ParseIP("10.0.1.11") - _, err := getDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) - Expect(err).To(HaveOccurred()) - }) - - It("returns node IP with config.Gateway.RouterSubnet subnet", func() { - config.Gateway.RouterSubnet = "10.1.0.0/16" - _, dpuSubnet, _ := net.ParseCIDR("10.0.0.101/24") - nodeIP := net.ParseIP("10.1.0.11") - expectedGwSubnet := []*net.IPNet{ - {IP: nodeIP, Mask: net.CIDRMask(16, 32)}, - } - gwSubnet, err := getDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) - Expect(err).ToNot(HaveOccurred()) - Expect(gwSubnet).To(Equal(expectedGwSubnet)) - }) - - It("Fails if node IP is not in config.Gateway.RouterSubnet subnet", func() { - config.Gateway.RouterSubnet = "10.1.0.0/16" - _, dpuSubnet, _ := net.ParseCIDR("10.0.0.101/24") - nodeIP := net.ParseIP("10.0.0.11") - _, err := getDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) - Expect(err).To(HaveOccurred()) - }) - }) - Context("getInterfaceByIP", func() { It("Finds correct interface", func() { lnk := &linkMock.Link{} diff --git a/go-controller/pkg/node/gateway_localnet_linux_test.go b/go-controller/pkg/node/gateway_localnet_linux_test.go index 
013234e1b1..87ef3aa72c 100644 --- a/go-controller/pkg/node/gateway_localnet_linux_test.go +++ b/go-controller/pkg/node/gateway_localnet_linux_test.go @@ -21,6 +21,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/retry" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" @@ -56,8 +57,8 @@ func initFakeNodePortWatcher(iptV4, iptV6 util.IPTablesHelper) *nodePortWatcher gwMACParsed, _ := net.ParseMAC(gwMAC) - defaultNetConfig := &bridgeUDNConfiguration{ - ofPortPatch: "patch-breth0_ov", + defaultNetConfig := &bridgeconfig.BridgeUDNConfiguration{ + OfPortPatch: "patch-breth0_ov", } fNPW := nodePortWatcher{ @@ -67,9 +68,9 @@ func initFakeNodePortWatcher(iptV4, iptV6 util.IPTablesHelper) *nodePortWatcher serviceInfo: make(map[k8stypes.NamespacedName]*serviceConfig), ofm: &openflowManager{ flowCache: map[string][]string{}, - defaultBridge: &bridgeConfiguration{ - macAddress: gwMACParsed, - netConfig: map[string]*bridgeUDNConfiguration{ + defaultBridge: &bridgeconfig.BridgeConfiguration{ + MacAddress: gwMACParsed, + NetConfig: map[string]*bridgeconfig.BridgeUDNConfiguration{ types.DefaultNetworkName: defaultNetConfig, }, }, diff --git a/go-controller/pkg/node/gateway_nftables.go b/go-controller/pkg/node/gateway_nftables.go index 6e341466ab..842bb417d1 100644 --- a/go-controller/pkg/node/gateway_nftables.go +++ b/go-controller/pkg/node/gateway_nftables.go @@ -12,6 +12,7 @@ import ( utilnet "k8s.io/utils/net" "sigs.k8s.io/knftables" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -68,10 +69,10 @@ func getNoSNATLoadBalancerIPRules(svcPort corev1.ServicePort, localEndpoints []s // getUDNNodePortMarkNFTRule returns a verdict map element (nftablesUDNMarkNodePortsMap) // with a key composed of the svcPort protocol and port. // The value is a jump to the UDN chain mark if netInfo is provided, or nil that is useful for map entry removal. -func getUDNNodePortMarkNFTRule(svcPort corev1.ServicePort, netInfo *bridgeUDNConfiguration) *knftables.Element { +func getUDNNodePortMarkNFTRule(svcPort corev1.ServicePort, netInfo *bridgeconfig.BridgeUDNConfiguration) *knftables.Element { var val []string if netInfo != nil { - val = []string{fmt.Sprintf("jump %s", GetUDNMarkChain(netInfo.pktMark))} + val = []string{fmt.Sprintf("jump %s", GetUDNMarkChain(netInfo.PktMark))} } return &knftables.Element{ Map: nftablesUDNMarkNodePortsMap, @@ -84,12 +85,12 @@ func getUDNNodePortMarkNFTRule(svcPort corev1.ServicePort, netInfo *bridgeUDNCon // getUDNExternalIPsMarkNFTRules returns a verdict map elements (nftablesUDNMarkExternalIPsV4Map or nftablesUDNMarkExternalIPsV6Map) // with a key composed of the external IP, svcPort protocol and port. // The value is a jump to the UDN chain mark if netInfo is provided, or nil that is useful for map entry removal. 
-func getUDNExternalIPsMarkNFTRules(svcPort corev1.ServicePort, externalIPs []string, netInfo *bridgeUDNConfiguration) []*knftables.Element { +func getUDNExternalIPsMarkNFTRules(svcPort corev1.ServicePort, externalIPs []string, netInfo *bridgeconfig.BridgeUDNConfiguration) []*knftables.Element { var nftRules []*knftables.Element var val []string if netInfo != nil { - val = []string{fmt.Sprintf("jump %s", GetUDNMarkChain(netInfo.pktMark))} + val = []string{fmt.Sprintf("jump %s", GetUDNMarkChain(netInfo.PktMark))} } for _, externalIP := range externalIPs { mapName := nftablesUDNMarkExternalIPsV4Map @@ -175,7 +176,7 @@ func getGatewayNFTRules(service *corev1.Service, localEndpoints []string, svcHas // getUDNNFTRules generates nftables rules for a UDN service. // If netConfig is nil, the resulting map elements will have empty values, // suitable only for entry removal. -func getUDNNFTRules(service *corev1.Service, netConfig *bridgeUDNConfiguration) []*knftables.Element { +func getUDNNFTRules(service *corev1.Service, netConfig *bridgeconfig.BridgeUDNConfiguration) []*knftables.Element { rules := make([]*knftables.Element, 0) for _, svcPort := range service.Spec.Ports { if util.ServiceTypeHasNodePort(service) { diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index bc8317c8de..6d467a1285 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -26,12 +26,14 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/egressip" nodeipt "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/iptables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/linkmanager" 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/managementport" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" + nodetypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" utilerrors "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/errors" @@ -50,8 +52,7 @@ const ( pmtudOpenFlowCookie = "0x0304" // ovsLocalPort is the name of the OVS bridge local port ovsLocalPort = "LOCAL" - // ctMarkOVN is the conntrack mark value for OVN traffic - ctMarkOVN = "0x1" + // ctMarkHost is the conntrack mark value for host traffic ctMarkHost = "0x2" // ovnKubeNodeSNATMark is used to mark packets that need to be SNAT-ed to nodeIP for @@ -235,7 +236,7 @@ type cidrAndFlags struct { func (npw *nodePortWatcher) updateGatewayIPs(addressManager *addressManager) { // Get Physical IPs of Node, Can be IPV4 IPV6 or both addressManager.gatewayBridge.Lock() - gatewayIPv4, gatewayIPv6 := getGatewayFamilyAddrs(addressManager.gatewayBridge.ips) + gatewayIPv4, gatewayIPv6 := getGatewayFamilyAddrs(addressManager.gatewayBridge.Ips) addressManager.gatewayBridge.Unlock() npw.gatewayIPLock.Lock() @@ -265,7 +266,7 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI return nil } - var netConfig *bridgeUDNConfiguration + var netConfig *bridgeconfig.BridgeUDNConfiguration var actions string if add { @@ -273,7 +274,7 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI if netConfig == nil { return fmt.Errorf("failed to get active network config for network %s", netInfo.GetNetworkName()) } - actions = fmt.Sprintf("output:%s", netConfig.ofPortPatch) + actions = fmt.Sprintf("output:%s", netConfig.OfPortPatch) } // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is 
ofPortPhys, ensure @@ -353,7 +354,7 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI // table=0, matches on return traffic from service nodePort and sends it out to primary node interface (br-ex) fmt.Sprintf("cookie=%s, priority=110, in_port=%s, dl_src=%s, %s, tp_src=%d, "+ "actions=output:%s", - cookie, netConfig.ofPortPatch, npw.ofm.getDefaultBridgeMAC(), flowProtocol, svcPort.NodePort, npw.ofportPhys)}) + cookie, netConfig.OfPortPatch, npw.ofm.getDefaultBridgeMAC(), flowProtocol, svcPort.NodePort, npw.ofportPhys)}) } } } @@ -423,14 +424,14 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI ipPrefix = "ipv6" } // table 2, user-defined network host -> OVN towards default cluster network services - defaultNetConfig := npw.ofm.defaultBridge.getActiveNetworkBridgeConfigCopy(types.DefaultNetworkName) + defaultNetConfig := npw.ofm.defaultBridge.GetActiveNetworkBridgeConfigCopy(types.DefaultNetworkName) // sample flow: cookie=0xdeff105, duration=2319.685s, table=2, n_packets=496, n_bytes=67111, priority=300, // ip,nw_dst=10.96.0.1 actions=mod_dl_dst:02:42:ac:12:00:03,output:"patch-breth0_ov" // This flow is used for UDNs and advertised UDNs to be able to reach kapi and dns services alone on default network flows := []string{fmt.Sprintf("cookie=%s, priority=300, table=2, %s, %s_dst=%s, "+ "actions=set_field:%s->eth_dst,output:%s", defaultOpenFlowCookie, ipPrefix, ipPrefix, service.Spec.ClusterIP, - npw.ofm.getDefaultBridgeMAC().String(), defaultNetConfig.ofPortPatch)} + npw.ofm.getDefaultBridgeMAC().String(), defaultNetConfig.OfPortPatch)} if util.IsRouteAdvertisementsEnabled() { // if the network is advertised, then for the reply from kapi and dns services to go back // into the UDN's VRF we need flows that statically send this to the local port @@ -443,7 +444,7 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI // sample flow for non-advertised UDNs: cookie=0xdeff105, 
duration=684.087s, table=0, n_packets=0, n_bytes=0, // idle_age=684, priority=500,ip,in_port=2,nw_src=10.96.0.0/16,nw_dst=169.254.0.0/17 actions=ct(table=3,zone=64001,nat) flows = append(flows, fmt.Sprintf("cookie=%s, priority=490, in_port=%s, ip, ip_src=%s,actions=ct(zone=%d,nat,table=3)", - defaultOpenFlowCookie, defaultNetConfig.ofPortPatch, service.Spec.ClusterIP, config.Default.HostMasqConntrackZone)) + defaultOpenFlowCookie, defaultNetConfig.OfPortPatch, service.Spec.ClusterIP, config.Default.HostMasqConntrackZone)) } npw.ofm.updateFlowCacheEntry(key, flows) } @@ -470,7 +471,7 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI // `actions`: "send to patchport" // `externalIPOrLBIngressIP` is either externalIP.IP or LB.status.ingress.IP // `ipType` is either "External" or "Ingress" -func (npw *nodePortWatcher) createLbAndExternalSvcFlows(service *corev1.Service, netConfig *bridgeUDNConfiguration, svcPort *corev1.ServicePort, add bool, +func (npw *nodePortWatcher) createLbAndExternalSvcFlows(service *corev1.Service, netConfig *bridgeconfig.BridgeUDNConfiguration, svcPort *corev1.ServicePort, add bool, hasLocalHostNetworkEp bool, protocol string, actions string, externalIPOrLBIngressIPs []string, ipType string, ofPorts []string) error { for _, externalIPOrLBIngressIP := range externalIPOrLBIngressIPs { @@ -501,7 +502,7 @@ func (npw *nodePortWatcher) createLbAndExternalSvcFlows(service *corev1.Service, continue } // add the ARP bypass flow regardless of service type or gateway modes since its applicable in all scenarios. - arpFlow := npw.generateARPBypassFlow(ofPorts, netConfig.ofPortPatch, externalIPOrLBIngressIP, cookie) + arpFlow := npw.generateARPBypassFlow(ofPorts, netConfig.OfPortPatch, externalIPOrLBIngressIP, cookie) externalIPFlows = append(externalIPFlows, arpFlow) // This allows external traffic ingress when the svc's ExternalTrafficPolicy is // set to Local, and the backend pod is HostNetworked. 
We need to add @@ -538,7 +539,7 @@ func (npw *nodePortWatcher) createLbAndExternalSvcFlows(service *corev1.Service, etpSvcOpenFlowCookie, npw.ofportPhys)) } else if config.Gateway.Mode == config.GatewayModeShared { // add the ICMP Fragmentation flow for shared gateway mode. - icmpFlow := generateICMPFragmentationFlow(externalIPOrLBIngressIP, netConfig.ofPortPatch, npw.ofportPhys, cookie, 110) + icmpFlow := generateICMPFragmentationFlow(externalIPOrLBIngressIP, netConfig.OfPortPatch, npw.ofportPhys, cookie, 110) externalIPFlows = append(externalIPFlows, icmpFlow) // case2 (see function description for details) externalIPFlows = append(externalIPFlows, @@ -549,7 +550,7 @@ func (npw *nodePortWatcher) createLbAndExternalSvcFlows(service *corev1.Service, // table=0, matches on return traffic from service externalIP or LB ingress and sends it out to primary node interface (br-ex) fmt.Sprintf("cookie=%s, priority=110, in_port=%s, dl_src=%s, %s, %s=%s, tp_src=%d, "+ "actions=output:%s", - cookie, netConfig.ofPortPatch, npw.ofm.getDefaultBridgeMAC(), flowProtocol, nwSrc, externalIPOrLBIngressIP, svcPort.Port, npw.ofportPhys)) + cookie, netConfig.OfPortPatch, npw.ofm.getDefaultBridgeMAC(), flowProtocol, nwSrc, externalIPOrLBIngressIP, svcPort.Port, npw.ofportPhys)) } npw.ofm.updateFlowCacheEntry(key, externalIPFlows) } @@ -707,7 +708,7 @@ func addServiceRules(service *corev1.Service, netInfo util.NetInfo, localEndpoin // For dpu or Full mode var err error var errors []error - var activeNetwork *bridgeUDNConfiguration + var activeNetwork *bridgeconfig.BridgeUDNConfiguration if npw != nil { if err = npw.updateServiceFlowCache(service, netInfo, true, svcHasLocalHostNetEndPnt); err != nil { errors = append(errors, err) @@ -1452,14 +1453,14 @@ func (npwipt *nodePortWatcherIptables) SyncServices(services []interface{}) erro return utilerrors.Join(errors...) 
} -func flowsForDefaultBridge(bridge *bridgeConfiguration, extraIPs []net.IP) ([]string, error) { +func flowsForDefaultBridge(bridge *bridgeconfig.BridgeConfiguration, extraIPs []net.IP) ([]string, error) { // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure // that dl_src is included in match criteria! - ofPortPhys := bridge.ofPortPhys - bridgeMacAddress := bridge.macAddress.String() - ofPortHost := bridge.ofPortHost - bridgeIPs := bridge.ips + ofPortPhys := bridge.OfPortPhys + bridgeMacAddress := bridge.MacAddress.String() + ofPortHost := bridge.OfPortHost + bridgeIPs := bridge.Ips var dftFlows []string // 14 bytes of overhead for ethernet header (does not include VLAN) @@ -1496,12 +1497,12 @@ func flowsForDefaultBridge(bridge *bridgeConfiguration, extraIPs []net.IP) ([]st if err != nil { return nil, fmt.Errorf("unable to determine IPv4 physical IP of host: %v", err) } - for _, netConfig := range bridge.patchedNetConfigs() { + for _, netConfig := range bridge.PatchedNetConfigs() { // table 0, SVC Hairpin from OVN destined to local host, DNAT and go to table 4 dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s, ip_src=%s,"+ "actions=ct(commit,zone=%d,nat(dst=%s),table=4)", - defaultOpenFlowCookie, netConfig.ofPortPatch, config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String(), physicalIP.IP, + defaultOpenFlowCookie, netConfig.OfPortPatch, config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String(), physicalIP.IP, config.Default.HostMasqConntrackZone, physicalIP.IP)) } @@ -1520,11 +1521,11 @@ func flowsForDefaultBridge(bridge *bridgeConfiguration, extraIPs []net.IP) ([]st continue } - for _, netConfig := range bridge.patchedNetConfigs() { + for _, netConfig := range bridge.PatchedNetConfigs() { dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s, ip_src=%s,"+ "actions=ct(commit,zone=%d,table=4)", - defaultOpenFlowCookie, 
netConfig.ofPortPatch, ip.String(), physicalIP.IP, + defaultOpenFlowCookie, netConfig.OfPortPatch, ip.String(), physicalIP.IP, config.Default.HostMasqConntrackZone)) } } @@ -1559,11 +1560,11 @@ func flowsForDefaultBridge(bridge *bridgeConfiguration, extraIPs []net.IP) ([]st return nil, fmt.Errorf("unable to determine IPv6 physical IP of host: %v", err) } // table 0, SVC Hairpin from OVN destined to local host, DNAT to host, send to table 4 - for _, netConfig := range bridge.patchedNetConfigs() { + for _, netConfig := range bridge.PatchedNetConfigs() { dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s, ipv6_src=%s,"+ "actions=ct(commit,zone=%d,nat(dst=%s),table=4)", - defaultOpenFlowCookie, netConfig.ofPortPatch, config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String(), physicalIP.IP, + defaultOpenFlowCookie, netConfig.OfPortPatch, config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String(), physicalIP.IP, config.Default.HostMasqConntrackZone, physicalIP.IP)) } @@ -1582,11 +1583,11 @@ func flowsForDefaultBridge(bridge *bridgeConfiguration, extraIPs []net.IP) ([]st continue } - for _, netConfig := range bridge.patchedNetConfigs() { + for _, netConfig := range bridge.PatchedNetConfigs() { dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s, ipv6_src=%s,"+ "actions=ct(commit,zone=%d,table=4)", - defaultOpenFlowCookie, netConfig.ofPortPatch, ip.String(), physicalIP.IP, + defaultOpenFlowCookie, netConfig.OfPortPatch, ip.String(), physicalIP.IP, config.Default.HostMasqConntrackZone)) } } @@ -1633,13 +1634,13 @@ func flowsForDefaultBridge(bridge *bridgeConfiguration, extraIPs []net.IP) ([]st // we match on the UDNPodSubnet itself and we also don't SNAT to 169.254.0.2 // sample flow: cookie=0xdeff105, duration=1472.742s, table=0, n_packets=9, n_bytes=666, priority=550 // ip,in_port=LOCAL,nw_src=103.103.0.0/16,nw_dst=10.96.0.0/16 actions=ct(commit,table=2,zone=64001) - for _, netConfig := 
range bridge.patchedNetConfigs() { - if netConfig.isDefaultNetwork() { + for _, netConfig := range bridge.PatchedNetConfigs() { + if netConfig.IsDefaultNetwork() { continue } - if netConfig.advertised.Load() { + if netConfig.Advertised.Load() { var udnAdvertisedSubnets []*net.IPNet - for _, clusterEntry := range netConfig.subnets { + for _, clusterEntry := range netConfig.Subnets { udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) } // Filter subnets based on the clusterIP service family @@ -1666,19 +1667,19 @@ func flowsForDefaultBridge(bridge *bridgeConfiguration, extraIPs []net.IP) ([]st // In UDN match on the whole masquerade subnet to handle replies from UDN enabled services masqDst = masqSubnet } - for _, netConfig := range bridge.patchedNetConfigs() { + for _, netConfig := range bridge.PatchedNetConfigs() { // table 0, Reply hairpin traffic to host, coming from OVN, unSNAT dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_src=%s, %s_dst=%s,"+ "actions=ct(zone=%d,nat,table=3)", - defaultOpenFlowCookie, netConfig.ofPortPatch, protoPrefix, protoPrefix, svcCIDR, + defaultOpenFlowCookie, netConfig.OfPortPatch, protoPrefix, protoPrefix, svcCIDR, protoPrefix, masqDst, config.Default.HostMasqConntrackZone)) // table 0, Reply traffic coming from OVN to outside, drop it if the DNAT wasn't done either // at the GR load balancer or switch load balancer. It means the correct port wasn't provided. 
// nodeCIDR->serviceCIDR traffic flow is internal and it shouldn't be carried to outside the cluster dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=115, in_port=%s, %s, %s_dst=%s,"+ - "actions=drop", defaultOpenFlowCookie, netConfig.ofPortPatch, protoPrefix, protoPrefix, svcCIDR)) + "actions=drop", defaultOpenFlowCookie, netConfig.OfPortPatch, protoPrefix, protoPrefix, svcCIDR)) } } @@ -1689,10 +1690,10 @@ func flowsForDefaultBridge(bridge *bridgeConfiguration, extraIPs []net.IP) ([]st dftFlows = append(dftFlows, reassemblyFlows...) } if ofPortPhys != "" { - for _, netConfig := range bridge.patchedNetConfigs() { + for _, netConfig := range bridge.PatchedNetConfigs() { var actions string if config.Gateway.Mode != config.GatewayModeLocal || config.Gateway.DisablePacketMTUCheck { - actions = fmt.Sprintf("output:%s", netConfig.ofPortPatch) + actions = fmt.Sprintf("output:%s", netConfig.OfPortPatch) } else { // packets larger than known acceptable MTU need to go to kernel for // potential fragmentation @@ -1702,26 +1703,26 @@ func flowsForDefaultBridge(bridge *bridgeConfiguration, extraIPs []net.IP) ([]st } if config.IPv4Mode { - // table 1, established and related connections in zone 64000 with ct_mark ctMarkOVN go to OVN + // table 1, established and related connections in zone 64000 with ct_mark CtMarkOVN go to OVN dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=100, table=1, ip, ct_state=+trk+est, ct_mark=%s, "+ - "actions=%s", defaultOpenFlowCookie, netConfig.masqCTMark, actions)) + "actions=%s", defaultOpenFlowCookie, netConfig.MasqCTMark, actions)) dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=100, table=1, ip, ct_state=+trk+rel, ct_mark=%s, "+ - "actions=%s", defaultOpenFlowCookie, netConfig.masqCTMark, actions)) + "actions=%s", defaultOpenFlowCookie, netConfig.MasqCTMark, actions)) } if config.IPv6Mode { - // table 1, established and related connections in zone 64000 with ct_mark ctMarkOVN go to OVN + // table 1, 
established and related connections in zone 64000 with ct_mark CtMarkOVN go to OVN dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=100, table=1, ipv6, ct_state=+trk+est, ct_mark=%s, "+ - "actions=%s", defaultOpenFlowCookie, netConfig.masqCTMark, actions)) + "actions=%s", defaultOpenFlowCookie, netConfig.MasqCTMark, actions)) dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=100, table=1, ipv6, ct_state=+trk+rel, ct_mark=%s, "+ - "actions=%s", defaultOpenFlowCookie, netConfig.masqCTMark, actions)) + "actions=%s", defaultOpenFlowCookie, netConfig.MasqCTMark, actions)) } } if config.IPv4Mode { @@ -1757,25 +1758,25 @@ func flowsForDefaultBridge(bridge *bridgeConfiguration, extraIPs []net.IP) ([]st defaultOpenFlowCookie, match_vlan, bridgeMacAddress, strip_vlan, ofPortHost)) } - defaultNetConfig := bridge.netConfig[types.DefaultNetworkName] + defaultNetConfig := bridge.NetConfig[types.DefaultNetworkName] // table 2, dispatch from Host -> OVN dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=100, table=2, "+ "actions=set_field:%s->eth_dst,%soutput:%s", defaultOpenFlowCookie, - bridgeMacAddress, mod_vlan_id, defaultNetConfig.ofPortPatch)) + bridgeMacAddress, mod_vlan_id, defaultNetConfig.OfPortPatch)) // table 2, priority 200, dispatch from UDN -> Host -> OVN. These packets have // already been SNATed to the UDN's masq IP or have been marked with the UDN's packet mark. 
if config.IPv4Mode { - for _, netConfig := range bridge.patchedNetConfigs() { - if netConfig.isDefaultNetwork() { + for _, netConfig := range bridge.PatchedNetConfigs() { + if netConfig.IsDefaultNetwork() { continue } - srcIPOrSubnet := netConfig.v4MasqIPs.ManagementPort.IP.String() - if util.IsRouteAdvertisementsEnabled() && netConfig.advertised.Load() { + srcIPOrSubnet := netConfig.V4MasqIPs.ManagementPort.IP.String() + if util.IsRouteAdvertisementsEnabled() && netConfig.Advertised.Load() { var udnAdvertisedSubnets []*net.IPNet - for _, clusterEntry := range netConfig.subnets { + for _, clusterEntry := range netConfig.Subnets { udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) } // Filter subnets based on the clusterIP service family @@ -1801,20 +1802,20 @@ func flowsForDefaultBridge(bridge *bridgeConfiguration, extraIPs []net.IP) ([]st dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=250, table=2, ip, pkt_mark=%s, "+ "actions=set_field:%s->eth_dst,output:%s", - defaultOpenFlowCookie, netConfig.pktMark, - bridgeMacAddress, netConfig.ofPortPatch)) + defaultOpenFlowCookie, netConfig.PktMark, + bridgeMacAddress, netConfig.OfPortPatch)) } } if config.IPv6Mode { - for _, netConfig := range bridge.patchedNetConfigs() { - if netConfig.isDefaultNetwork() { + for _, netConfig := range bridge.PatchedNetConfigs() { + if netConfig.IsDefaultNetwork() { continue } - srcIPOrSubnet := netConfig.v6MasqIPs.ManagementPort.IP.String() - if util.IsRouteAdvertisementsEnabled() && netConfig.advertised.Load() { + srcIPOrSubnet := netConfig.V6MasqIPs.ManagementPort.IP.String() + if util.IsRouteAdvertisementsEnabled() && netConfig.Advertised.Load() { var udnAdvertisedSubnets []*net.IPNet - for _, clusterEntry := range netConfig.subnets { + for _, clusterEntry := range netConfig.Subnets { udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) } // Filter subnets based on the clusterIP service family @@ -1835,8 +1836,8 @@ func 
flowsForDefaultBridge(bridge *bridgeConfiguration, extraIPs []net.IP) ([]st dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=250, table=2, ip6, pkt_mark=%s, "+ "actions=set_field:%s->eth_dst,output:%s", - defaultOpenFlowCookie, netConfig.pktMark, - bridgeMacAddress, netConfig.ofPortPatch)) + defaultOpenFlowCookie, netConfig.PktMark, + bridgeMacAddress, netConfig.OfPortPatch)) } } @@ -1876,13 +1877,13 @@ func flowsForDefaultBridge(bridge *bridgeConfiguration, extraIPs []net.IP) ([]st return dftFlows, nil } -func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]string, error) { +func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeconfig.BridgeConfiguration) ([]string, error) { // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure // that dl_src is included in match criteria! - ofPortPhys := bridge.ofPortPhys - bridgeMacAddress := bridge.macAddress.String() - ofPortHost := bridge.ofPortHost - bridgeIPs := bridge.ips + ofPortPhys := bridge.OfPortPhys + bridgeMacAddress := bridge.MacAddress.String() + ofPortHost := bridge.OfPortHost + bridgeIPs := bridge.Ips var dftFlows []string @@ -1898,8 +1899,8 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin if ofPortPhys != "" { // table 0, we check to see if this dest mac is the shared mac, if so flood to all ports actions := "" - for _, netConfig := range bridge.patchedNetConfigs() { - actions += "output:" + netConfig.ofPortPatch + "," + for _, netConfig := range bridge.PatchedNetConfigs() { + actions += "output:" + netConfig.OfPortPatch + "," } actions += strip_vlan + "NORMAL" dftFlows = append(dftFlows, @@ -1909,13 +1910,13 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin // table 0, check packets coming from OVN have the correct mac address. 
Low priority flows that are a catch all // for non-IP packets that would normally be forwarded with NORMAL action (table 0, priority 0 flow). - for _, netConfig := range bridge.patchedNetConfigs() { + for _, netConfig := range bridge.PatchedNetConfigs() { dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=10, table=0, in_port=%s, dl_src=%s, actions=output:NORMAL", - defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress)) + defaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress)) dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=9, table=0, in_port=%s, actions=drop", - defaultOpenFlowCookie, netConfig.ofPortPatch)) + defaultOpenFlowCookie, netConfig.OfPortPatch)) } if config.IPv4Mode { @@ -1924,7 +1925,7 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin return nil, fmt.Errorf("unable to determine IPv4 physical IP of host: %v", err) } if ofPortPhys != "" { - for _, netConfig := range bridge.patchedNetConfigs() { + for _, netConfig := range bridge.PatchedNetConfigs() { // table0, packets coming from egressIP pods that have mark 1008 on them // will be SNAT-ed a final time into nodeIP to maintain consistency in traffic even if the GR // SNATs these into egressIP prior to reaching external bridge. @@ -1933,32 +1934,32 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ip, pkt_mark=%s "+ "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)),output:%s", - defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, ovnKubeNodeSNATMark, - config.Default.ConntrackZone, physicalIP.IP, netConfig.masqCTMark, ofPortPhys)) + defaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, ovnKubeNodeSNATMark, + config.Default.ConntrackZone, physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) // table 0, packets coming from egressIP pods only from user defined networks. 
If an egressIP is assigned to // this node, then all networks get a flow even if no pods on that network were selected for by this egressIP. if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && - config.Gateway.Mode != config.GatewayModeDisabled && bridge.eipMarkIPs != nil { - if netConfig.masqCTMark != ctMarkOVN { - for mark, eip := range bridge.eipMarkIPs.GetIPv4() { + config.Gateway.Mode != config.GatewayModeDisabled && bridge.EipMarkIPs != nil { + if netConfig.MasqCTMark != nodetypes.CtMarkOVN { + for mark, eip := range bridge.EipMarkIPs.GetIPv4() { dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ip, pkt_mark=%d, "+ "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", - defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, mark, - config.Default.ConntrackZone, eip, netConfig.masqCTMark, ofPortPhys)) + defaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, mark, + config.Default.ConntrackZone, eip, netConfig.MasqCTMark, ofPortPhys)) } } } - // table 0, packets coming from pods headed externally. Commit connections with ct_mark ctMarkOVN + // table 0, packets coming from pods headed externally. Commit connections with ct_mark CtMarkOVN // so that reverse direction goes back to the pods. 
- if netConfig.isDefaultNetwork() { + if netConfig.IsDefaultNetwork() { dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ip, "+ "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", - defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, config.Default.ConntrackZone, - netConfig.masqCTMark, ofPortPhys)) + defaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, config.Default.ConntrackZone, + netConfig.MasqCTMark, ofPortPhys)) // Allow (a) OVN->host traffic on the same node // (b) host->host traffic on the same node @@ -1970,8 +1971,8 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ip, ip_src=%s, "+ "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", - defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, netConfig.v4MasqIPs.GatewayRouter.IP, config.Default.ConntrackZone, - physicalIP.IP, netConfig.masqCTMark, ofPortPhys)) + defaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, netConfig.V4MasqIPs.GatewayRouter.IP, config.Default.ConntrackZone, + physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) } } @@ -1983,26 +1984,26 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin defaultOpenFlowCookie, ofPortHost, config.Default.ConntrackZone, ctMarkHost, mod_vlan_id, ofPortPhys)) } if config.Gateway.Mode == config.GatewayModeLocal { - for _, netConfig := range bridge.patchedNetConfigs() { + for _, netConfig := range bridge.PatchedNetConfigs() { // table 0, any packet coming from OVN send to host in LGW mode, host will take care of sending it outside if needed. // exceptions are traffic for egressIP and egressGW features and ICMP related traffic which will hit the priority 100 flow instead of this. 
dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=175, in_port=%s, tcp, nw_src=%s, "+ "actions=ct(table=4,zone=%d)", - defaultOpenFlowCookie, netConfig.ofPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + defaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=175, in_port=%s, udp, nw_src=%s, "+ "actions=ct(table=4,zone=%d)", - defaultOpenFlowCookie, netConfig.ofPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + defaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=175, in_port=%s, sctp, nw_src=%s, "+ "actions=ct(table=4,zone=%d)", - defaultOpenFlowCookie, netConfig.ofPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + defaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) // We send BFD traffic coming from OVN to outside directly using a higher priority flow if ofPortPhys != "" { dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=650, table=0, in_port=%s, dl_src=%s, udp, tp_dst=3784, actions=output:%s", - defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, ofPortPhys)) + defaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, ofPortPhys)) } } } @@ -2023,7 +2024,7 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin return nil, fmt.Errorf("unable to determine IPv6 physical IP of host: %v", err) } if ofPortPhys != "" { - for _, netConfig := range bridge.patchedNetConfigs() { + for _, netConfig := range bridge.PatchedNetConfigs() { // table0, packets coming from egressIP pods that have mark 1008 on them // will be DNAT-ed a final time into nodeIP to maintain consistency in traffic even if the GR // DNATs these into egressIP prior to reaching external bridge. 
@@ -2032,31 +2033,31 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ipv6, pkt_mark=%s "+ "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)),output:%s", - defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, ovnKubeNodeSNATMark, - config.Default.ConntrackZone, physicalIP.IP, netConfig.masqCTMark, ofPortPhys)) + defaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, ovnKubeNodeSNATMark, + config.Default.ConntrackZone, physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) // table 0, packets coming from egressIP pods only from user defined networks. If an egressIP is assigned to // this node, then all networks get a flow even if no pods on that network were selected for by this egressIP. if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && - config.Gateway.Mode != config.GatewayModeDisabled && bridge.eipMarkIPs != nil { - if netConfig.masqCTMark != ctMarkOVN { - for mark, eip := range bridge.eipMarkIPs.GetIPv6() { + config.Gateway.Mode != config.GatewayModeDisabled && bridge.EipMarkIPs != nil { + if netConfig.MasqCTMark != nodetypes.CtMarkOVN { + for mark, eip := range bridge.EipMarkIPs.GetIPv6() { dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ipv6, pkt_mark=%d, "+ "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", - defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, mark, - config.Default.ConntrackZone, eip, netConfig.masqCTMark, ofPortPhys)) + defaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, mark, + config.Default.ConntrackZone, eip, netConfig.MasqCTMark, ofPortPhys)) } } } - // table 0, packets coming from pods headed externally. Commit connections with ct_mark ctMarkOVN + // table 0, packets coming from pods headed externally. 
Commit connections with ct_mark CtMarkOVN // so that reverse direction goes back to the pods. - if netConfig.isDefaultNetwork() { + if netConfig.IsDefaultNetwork() { dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ipv6, "+ "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", - defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, config.Default.ConntrackZone, netConfig.masqCTMark, ofPortPhys)) + defaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, config.Default.ConntrackZone, netConfig.MasqCTMark, ofPortPhys)) // Allow (a) OVN->host traffic on the same node // (b) host->host traffic on the same node @@ -2068,8 +2069,8 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ipv6, ipv6_src=%s, "+ "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", - defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, netConfig.v6MasqIPs.GatewayRouter.IP, config.Default.ConntrackZone, - physicalIP.IP, netConfig.masqCTMark, ofPortPhys)) + defaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, netConfig.V6MasqIPs.GatewayRouter.IP, config.Default.ConntrackZone, + physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) } } @@ -2082,26 +2083,26 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin } if config.Gateway.Mode == config.GatewayModeLocal { - for _, netConfig := range bridge.patchedNetConfigs() { + for _, netConfig := range bridge.PatchedNetConfigs() { // table 0, any packet coming from OVN send to host in LGW mode, host will take care of sending it outside if needed. // exceptions are traffic for egressIP and egressGW features and ICMP related traffic which will hit the priority 100 flow instead of this. 
dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=175, in_port=%s, tcp6, ipv6_src=%s, "+ "actions=ct(table=4,zone=%d)", - defaultOpenFlowCookie, netConfig.ofPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + defaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=175, in_port=%s, udp6, ipv6_src=%s, "+ "actions=ct(table=4,zone=%d)", - defaultOpenFlowCookie, netConfig.ofPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + defaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=175, in_port=%s, sctp6, ipv6_src=%s, "+ "actions=ct(table=4,zone=%d)", - defaultOpenFlowCookie, netConfig.ofPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + defaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) if ofPortPhys != "" { // We send BFD traffic coming from OVN to outside directly using a higher priority flow dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=650, table=0, in_port=%s, dl_src=%s, udp6, tp_dst=3784, actions=output:%s", - defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, ofPortPhys)) + defaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, ofPortPhys)) } } } @@ -2117,7 +2118,7 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin // Due to the fact that ovn-controllers on different nodes apply the changes independently, // there is a chance that the pod traffic will reach the egress node before it configures the SNAT flows. 
// Drop pod traffic that is not SNATed, excluding local pods(required for ICNIv2) - defaultNetConfig := bridge.netConfig[types.DefaultNetworkName] + defaultNetConfig := bridge.NetConfig[types.DefaultNetworkName] if config.OVNKubernetesFeature.EnableEgressIP { for _, clusterEntry := range config.Default.ClusterSubnets { cidr := clusterEntry.CIDR @@ -2125,9 +2126,9 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin // table 0, drop packets coming from pods headed externally that were not SNATed. dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=104, in_port=%s, %s, %s_src=%s, actions=drop", - defaultOpenFlowCookie, defaultNetConfig.ofPortPatch, ipv, ipv, cidr)) + defaultOpenFlowCookie, defaultNetConfig.OfPortPatch, ipv, ipv, cidr)) } - for _, subnet := range defaultNetConfig.nodeSubnets { + for _, subnet := range defaultNetConfig.NodeSubnets { ipv := getIPv(subnet) if ofPortPhys != "" { // table 0, commit connections from local pods. @@ -2135,21 +2136,21 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=109, in_port=%s, dl_src=%s, %s, %s_src=%s"+ "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", - defaultOpenFlowCookie, defaultNetConfig.ofPortPatch, bridgeMacAddress, ipv, ipv, subnet, - config.Default.ConntrackZone, ctMarkOVN, ofPortPhys)) + defaultOpenFlowCookie, defaultNetConfig.OfPortPatch, bridgeMacAddress, ipv, ipv, subnet, + config.Default.ConntrackZone, nodetypes.CtMarkOVN, ofPortPhys)) } } } if ofPortPhys != "" { - for _, netConfig := range bridge.patchedNetConfigs() { - isNetworkAdvertised := netConfig.advertised.Load() + for _, netConfig := range bridge.PatchedNetConfigs() { + isNetworkAdvertised := netConfig.Advertised.Load() // disableSNATMultipleGWs only applies to default network - disableSNATMultipleGWs := netConfig.isDefaultNetwork() && config.Gateway.DisableSNATMultipleGWs + 
disableSNATMultipleGWs := netConfig.IsDefaultNetwork() && config.Gateway.DisableSNATMultipleGWs if !disableSNATMultipleGWs && !isNetworkAdvertised { continue } - output := netConfig.ofPortPatch + output := netConfig.OfPortPatch if isNetworkAdvertised && config.Gateway.Mode == config.GatewayModeLocal { // except if advertised through BGP, go to kernel // TODO: MEG enabled pods should still go through the patch port @@ -2158,7 +2159,7 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin // are assuming MEG & BGP are not used together output = ovsLocalPort } - for _, clusterEntry := range netConfig.subnets { + for _, clusterEntry := range netConfig.Subnets { cidr := clusterEntry.CIDR ipv := getIPv(cidr) dftFlows = append(dftFlows, @@ -2166,9 +2167,9 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin "actions=output:%s", defaultOpenFlowCookie, ipv, ipv, cidr, output)) } - if output == netConfig.ofPortPatch { + if output == netConfig.OfPortPatch { // except node management traffic - for _, subnet := range netConfig.nodeSubnets { + for _, subnet := range netConfig.NodeSubnets { mgmtIP := util.GetNodeManagementIfAddr(subnet) ipv := getIPv(mgmtIP) dftFlows = append(dftFlows, @@ -2197,7 +2198,7 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin // We send BFD traffic both on the host and in ovn dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=13, table=1, in_port=%s, udp6, tp_dst=3784, actions=output:%s,output:%s", - defaultOpenFlowCookie, ofPortPhys, defaultNetConfig.ofPortPatch, ofPortHost)) + defaultOpenFlowCookie, ofPortPhys, defaultNetConfig.OfPortPatch, ofPortHost)) } } @@ -2206,7 +2207,7 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin // We send BFD traffic both on the host and in ovn dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=13, table=1, in_port=%s, udp, tp_dst=3784, actions=output:%s,output:%s", - 
defaultOpenFlowCookie, ofPortPhys, defaultNetConfig.ofPortPatch, ofPortHost)) + defaultOpenFlowCookie, ofPortPhys, defaultNetConfig.OfPortPatch, ofPortHost)) } } @@ -2220,17 +2221,17 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin "actions=output:%s", defaultOpenFlowCookie, ofPortHost)) // Send UDN destined traffic to right patch port - for _, netConfig := range bridge.patchedNetConfigs() { - if netConfig.masqCTMark != ctMarkOVN { + for _, netConfig := range bridge.PatchedNetConfigs() { + if netConfig.MasqCTMark != nodetypes.CtMarkOVN { dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=5, table=11, ct_mark=%s, "+ - "actions=output:%s", defaultOpenFlowCookie, netConfig.masqCTMark, netConfig.ofPortPatch)) + "actions=output:%s", defaultOpenFlowCookie, netConfig.MasqCTMark, netConfig.OfPortPatch)) } } dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=1, table=11, "+ - "actions=output:%s", defaultOpenFlowCookie, defaultNetConfig.ofPortPatch)) + "actions=output:%s", defaultOpenFlowCookie, defaultNetConfig.OfPortPatch)) } // table 1, all other connections do normal processing @@ -2241,15 +2242,15 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]strin return dftFlows, nil } -func pmtudDropFlows(bridge *bridgeConfiguration, ipAddrs []string) []string { +func pmtudDropFlows(bridge *bridgeconfig.BridgeConfiguration, ipAddrs []string) []string { var flows []string if config.Gateway.Mode != config.GatewayModeShared { return nil } for _, addr := range ipAddrs { - for _, netConfig := range bridge.patchedNetConfigs() { + for _, netConfig := range bridge.PatchedNetConfigs() { flows = append(flows, - generateICMPFragmentationFlow(addr, outputPortDrop, netConfig.ofPortPatch, pmtudOpenFlowCookie, 700)) + generateICMPFragmentationFlow(addr, outputPortDrop, netConfig.OfPortPatch, pmtudOpenFlowCookie, 700)) } } @@ -2262,7 +2263,7 @@ func pmtudDropFlows(bridge *bridgeConfiguration, ipAddrs []string) 
[]string { // when the localnet is mapped to breth0. // The expected srcMAC is the MAC address of breth0 and the expected hostSubnets is the host subnets found on the node // primary interface. -func hostNetworkNormalActionFlows(netConfig *bridgeUDNConfiguration, srcMAC string, hostSubnets []*net.IPNet, isV6 bool) []string { +func hostNetworkNormalActionFlows(netConfig *bridgeconfig.BridgeUDNConfiguration, srcMAC string, hostSubnets []*net.IPNet, isV6 bool) []string { var flows []string var ipFamily, ipFamilyDest string @@ -2296,7 +2297,7 @@ func hostNetworkNormalActionFlows(netConfig *bridgeUDNConfiguration, srcMAC stri if utilnet.IsIPv6(hostSubnet.IP) != isV6 { continue } - flows = append(flows, formatFlow(netConfig.ofPortPatch, hostSubnet.String(), netConfig.masqCTMark)) + flows = append(flows, formatFlow(netConfig.OfPortPatch, hostSubnet.String(), netConfig.MasqCTMark)) } } @@ -2329,7 +2330,7 @@ func hostNetworkNormalActionFlows(netConfig *bridgeUDNConfiguration, srcMAC stri // Traffic path (a) for ICMP: OVN-> localnet for shared gw mode if config.Gateway.Mode == config.GatewayModeShared { flows = append(flows, - formatICMPFlow(netConfig.ofPortPatch, netConfig.masqCTMark, icmpType)) + formatICMPFlow(netConfig.OfPortPatch, netConfig.MasqCTMark, icmpType)) } // Traffic path (a) for ICMP: OVN->localnet for local gw mode @@ -2340,48 +2341,48 @@ func hostNetworkNormalActionFlows(netConfig *bridgeUDNConfiguration, srcMAC stri return flows } -func setBridgeOfPorts(bridge *bridgeConfiguration) error { +func setBridgeOfPorts(bridge *bridgeconfig.BridgeConfiguration) error { bridge.Lock() defer bridge.Unlock() // Get ofport of patchPort - for _, netConfig := range bridge.netConfig { - if err := netConfig.setBridgeNetworkOfPortsInternal(); err != nil { - return fmt.Errorf("error setting bridge openflow ports for network with patchport %v: err: %v", netConfig.patchPort, err) + for _, netConfig := range bridge.NetConfig { + if err := 
netConfig.SetBridgeNetworkOfPortsInternal(); err != nil { + return fmt.Errorf("error setting bridge openflow ports for network with patchport %v: err: %v", netConfig.PatchPort, err) } } - if bridge.uplinkName != "" { + if bridge.UplinkName != "" { // Get ofport of physical interface - ofportPhys, stderr, err := util.GetOVSOfPort("get", "interface", bridge.uplinkName, "ofport") + ofportPhys, stderr, err := util.GetOVSOfPort("get", "interface", bridge.UplinkName, "ofport") if err != nil { return fmt.Errorf("failed to get ofport of %s, stderr: %q, error: %v", - bridge.uplinkName, stderr, err) + bridge.UplinkName, stderr, err) } - bridge.ofPortPhys = ofportPhys + bridge.OfPortPhys = ofportPhys } // Get ofport representing the host. That is, host representor port in case of DPUs, ovsLocalPort otherwise. if config.OvnKubeNode.Mode == types.NodeModeDPU { var stderr string - hostRep, err := util.GetDPUHostInterface(bridge.bridgeName) + hostRep, err := util.GetDPUHostInterface(bridge.BridgeName) if err != nil { return err } - bridge.ofPortHost, stderr, err = util.RunOVSVsctl("get", "interface", hostRep, "ofport") + bridge.OfPortHost, stderr, err = util.RunOVSVsctl("get", "interface", hostRep, "ofport") if err != nil { return fmt.Errorf("failed to get ofport of host interface %s, stderr: %q, error: %v", hostRep, stderr, err) } } else { var err error - if bridge.gwIfaceRep != "" { - bridge.ofPortHost, _, err = util.RunOVSVsctl("get", "interface", bridge.gwIfaceRep, "ofport") + if bridge.GwIfaceRep != "" { + bridge.OfPortHost, _, err = util.RunOVSVsctl("get", "interface", bridge.GwIfaceRep, "ofport") if err != nil { - return fmt.Errorf("failed to get ofport of bypass rep %s, error: %v", bridge.gwIfaceRep, err) + return fmt.Errorf("failed to get ofport of bypass rep %s, error: %v", bridge.GwIfaceRep, err) } } else { - bridge.ofPortHost = ovsLocalPort + bridge.OfPortHost = ovsLocalPort } } @@ -2422,8 +2423,8 @@ func newGateway( if exGwBridge != nil { gw.readyFunc = func() (bool, 
error) { gwBridge.Lock() - for _, netConfig := range gwBridge.netConfig { - ready, err := gatewayReady(netConfig.patchPort) + for _, netConfig := range gwBridge.NetConfig { + ready, err := gatewayReady(netConfig.PatchPort) if err != nil || !ready { gwBridge.Unlock() return false, err @@ -2431,8 +2432,8 @@ func newGateway( } gwBridge.Unlock() exGwBridge.Lock() - for _, netConfig := range exGwBridge.netConfig { - exGWReady, err := gatewayReady(netConfig.patchPort) + for _, netConfig := range exGwBridge.NetConfig { + exGWReady, err := gatewayReady(netConfig.PatchPort) if err != nil || !exGWReady { exGwBridge.Unlock() return false, err @@ -2444,8 +2445,8 @@ func newGateway( } else { gw.readyFunc = func() (bool, error) { gwBridge.Lock() - for _, netConfig := range gwBridge.netConfig { - ready, err := gatewayReady(netConfig.patchPort) + for _, netConfig := range gwBridge.NetConfig { + ready, err := gatewayReady(netConfig.PatchPort) if err != nil || !ready { gwBridge.Unlock() return false, err @@ -2471,8 +2472,8 @@ func newGateway( } } if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && config.Gateway.Mode != config.GatewayModeDisabled { - gw.bridgeEIPAddrManager = egressip.NewBridgeEIPAddrManager(nodeName, gwBridge.bridgeName, linkManager, kube, watchFactory.EgressIPInformer(), watchFactory.NodeCoreInformer()) - gwBridge.eipMarkIPs = gw.bridgeEIPAddrManager.GetCache() + gw.bridgeEIPAddrManager = egressip.NewBridgeEIPAddrManager(nodeName, gwBridge.BridgeName, linkManager, kube, watchFactory.EgressIPInformer(), watchFactory.NodeCoreInformer()) + gwBridge.EipMarkIPs = gw.bridgeEIPAddrManager.GetCache() } gw.nodeIPManager = newAddressManager(nodeName, kube, mgmtPort, watchFactory, gwBridge) @@ -2480,15 +2481,15 @@ func newGateway( // Delete stale masquerade resources if there are any. 
This is to make sure that there // are no Linux resources with IP from old masquerade subnet when masquerade subnet // gets changed as part of day2 operation. - if err := deleteStaleMasqueradeResources(gwBridge.gwIface, nodeName, watchFactory); err != nil { + if err := deleteStaleMasqueradeResources(gwBridge.GetGatewayIface(), nodeName, watchFactory); err != nil { return fmt.Errorf("failed to remove stale masquerade resources: %w", err) } - if err := setNodeMasqueradeIPOnExtBridge(gwBridge.gwIface); err != nil { - return fmt.Errorf("failed to set the node masquerade IP on the ext bridge %s: %v", gwBridge.gwIface, err) + if err := setNodeMasqueradeIPOnExtBridge(gwBridge.GetGatewayIface()); err != nil { + return fmt.Errorf("failed to set the node masquerade IP on the ext bridge %s: %v", gwBridge.GetGatewayIface(), err) } - if err := addMasqueradeRoute(routeManager, gwBridge.gwIface, nodeName, gwIPs, watchFactory); err != nil { + if err := addMasqueradeRoute(routeManager, gwBridge.GetGatewayIface(), nodeName, gwIPs, watchFactory); err != nil { return fmt.Errorf("failed to set the node masquerade route to OVN: %v", err) } @@ -2535,7 +2536,7 @@ func newGateway( gw.openflowManager.requestFlowSync() } - if err := addHostMACBindings(gwBridge.gwIface); err != nil { + if err := addHostMACBindings(gwBridge.GetGatewayIface()); err != nil { return fmt.Errorf("failed to add MAC bindings for service routing: %w", err) } @@ -2547,7 +2548,7 @@ func newGateway( } func newNodePortWatcher( - gwBridge *bridgeConfiguration, + gwBridge *bridgeconfig.BridgeConfiguration, ofm *openflowManager, nodeIPManager *addressManager, watchFactory factory.NodeWatchFactory, @@ -2556,10 +2557,10 @@ func newNodePortWatcher( // Get ofport of physical interface ofportPhys, stderr, err := util.GetOVSOfPort("--if-exists", "get", - "interface", gwBridge.uplinkName, "ofport") + "interface", gwBridge.UplinkName, "ofport") if err != nil { return nil, fmt.Errorf("failed to get ofport of %s, stderr: %q, error: 
%v", - gwBridge.uplinkName, stderr, err) + gwBridge.UplinkName, stderr, err) } // In the shared gateway mode, the NodePort service is handled by the OpenFlow flows configured @@ -2597,11 +2598,11 @@ func newNodePortWatcher( subnets = append(subnets, config.Kubernetes.ServiceCIDRs...) if config.Gateway.DisableForwarding { if err := initExternalBridgeServiceForwardingRules(subnets); err != nil { - return nil, fmt.Errorf("failed to add accept rules in forwarding table for bridge %s: err %v", gwBridge.gwIface, err) + return nil, fmt.Errorf("failed to add accept rules in forwarding table for bridge %s: err %v", gwBridge.GetGatewayIface(), err) } } else { if err := delExternalBridgeServiceForwardingRules(subnets); err != nil { - return nil, fmt.Errorf("failed to delete accept rules in forwarding table for bridge %s: err %v", gwBridge.gwIface, err) + return nil, fmt.Errorf("failed to delete accept rules in forwarding table for bridge %s: err %v", gwBridge.GetGatewayIface(), err) } } @@ -2612,14 +2613,14 @@ func newNodePortWatcher( } // Get Physical IPs of Node, Can be IPV4 IPV6 or both - gatewayIPv4, gatewayIPv6 := getGatewayFamilyAddrs(gwBridge.ips) + gatewayIPv4, gatewayIPv6 := getGatewayFamilyAddrs(gwBridge.Ips) npw := &nodePortWatcher{ dpuMode: dpuMode, gatewayIPv4: gatewayIPv4, gatewayIPv6: gatewayIPv6, ofportPhys: ofportPhys, - gwBridge: gwBridge.bridgeName, + gwBridge: gwBridge.GetGatewayIface(), serviceInfo: make(map[ktypes.NamespacedName]*serviceConfig), nodeIPManager: nodeIPManager, ofm: ofm, diff --git a/go-controller/pkg/node/gateway_udn.go b/go-controller/pkg/node/gateway_udn.go index d991fc74eb..04a5493f79 100644 --- a/go-controller/pkg/node/gateway_udn.go +++ b/go-controller/pkg/node/gateway_udn.go @@ -6,7 +6,6 @@ import ( "net" "slices" "strings" - "sync/atomic" "time" "github.com/vishvananda/netlink" @@ -24,6 +23,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/generator/udn" 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/iprulemanager" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/vrfmanager" @@ -92,147 +92,19 @@ type UserDefinedNetworkGateway struct { gwInterfaceIndex int } -// UTILS Needed for UDN (also leveraged for default netInfo) in bridgeConfiguration +// UTILS Needed for UDN (also leveraged for default netInfo) in BridgeConfiguration -// getBridgePortConfigurations returns a slice of Network port configurations along with the -// uplinkName and physical port's ofport value -func (b *bridgeConfiguration) getBridgePortConfigurations() ([]*bridgeUDNConfiguration, string, string) { - b.Lock() - defer b.Unlock() - var netConfigs []*bridgeUDNConfiguration - for _, netConfig := range b.netConfig { - netConfigs = append(netConfigs, netConfig.shallowCopy()) - } - return netConfigs, b.uplinkName, b.ofPortPhys -} - -// addNetworkBridgeConfig adds the patchport and ctMark value for the provided netInfo into the bridge configuration cache -func (b *bridgeConfiguration) addNetworkBridgeConfig( - nInfo util.NetInfo, - nodeSubnets []*net.IPNet, - masqCTMark, pktMark uint, - v6MasqIPs, v4MasqIPs *udn.MasqueradeIPs) error { - b.Lock() - defer b.Unlock() - - netName := nInfo.GetNetworkName() - patchPort := nInfo.GetNetworkScopedPatchPortName(b.bridgeName, b.nodeName) - - _, found := b.netConfig[netName] - if !found { - netConfig := &bridgeUDNConfiguration{ - patchPort: patchPort, - masqCTMark: fmt.Sprintf("0x%x", masqCTMark), - pktMark: fmt.Sprintf("0x%x", pktMark), - v4MasqIPs: v4MasqIPs, - v6MasqIPs: v6MasqIPs, - subnets: nInfo.Subnets(), - nodeSubnets: nodeSubnets, - } - netConfig.advertised.Store(util.IsPodNetworkAdvertisedAtNode(nInfo, b.nodeName)) - - b.netConfig[netName] = netConfig - } else { - 
klog.Warningf("Trying to update bridge config for network %s which already"+ - "exists in cache...networks are not mutable...ignoring update", nInfo.GetNetworkName()) - } - return nil -} - -// delNetworkBridgeConfig deletes the provided netInfo from the bridge configuration cache -func (b *bridgeConfiguration) delNetworkBridgeConfig(nInfo util.NetInfo) { - b.Lock() - defer b.Unlock() - - delete(b.netConfig, nInfo.GetNetworkName()) -} - -func (b *bridgeConfiguration) getNetworkBridgeConfig(networkName string) *bridgeUDNConfiguration { - b.Lock() - defer b.Unlock() - return b.netConfig[networkName] -} - -// getActiveNetworkBridgeConfigCopy returns a shallow copy of the network configuration corresponding to the -// provided netInfo. -// -// NOTE: if the network configuration can't be found or if the network is not patched by OVN -// yet this returns nil. -func (b *bridgeConfiguration) getActiveNetworkBridgeConfigCopy(networkName string) *bridgeUDNConfiguration { - b.Lock() - defer b.Unlock() - - if netConfig, found := b.netConfig[networkName]; found && netConfig.ofPortPatch != "" { - return netConfig.shallowCopy() - } - return nil -} - -func (b *bridgeConfiguration) patchedNetConfigs() []*bridgeUDNConfiguration { - result := make([]*bridgeUDNConfiguration, 0, len(b.netConfig)) - for _, netConfig := range b.netConfig { - if netConfig.ofPortPatch == "" { - continue - } - result = append(result, netConfig) - } - return result -} - -// END UDN UTILs for bridgeConfiguration - -// bridgeUDNConfiguration holds the patchport and ctMark -// information for a given network -type bridgeUDNConfiguration struct { - patchPort string - ofPortPatch string - masqCTMark string - pktMark string - v4MasqIPs *udn.MasqueradeIPs - v6MasqIPs *udn.MasqueradeIPs - subnets []config.CIDRNetworkEntry - nodeSubnets []*net.IPNet - advertised atomic.Bool -} - -func (netConfig *bridgeUDNConfiguration) shallowCopy() *bridgeUDNConfiguration { - copy := &bridgeUDNConfiguration{ - patchPort: 
netConfig.patchPort, - ofPortPatch: netConfig.ofPortPatch, - masqCTMark: netConfig.masqCTMark, - pktMark: netConfig.pktMark, - v4MasqIPs: netConfig.v4MasqIPs, - v6MasqIPs: netConfig.v6MasqIPs, - subnets: netConfig.subnets, - nodeSubnets: netConfig.nodeSubnets, - } - netConfig.advertised.Store(netConfig.advertised.Load()) - return copy -} - -func (netConfig *bridgeUDNConfiguration) isDefaultNetwork() bool { - return netConfig.masqCTMark == ctMarkOVN -} - -func (netConfig *bridgeUDNConfiguration) setBridgeNetworkOfPortsInternal() error { - ofportPatch, stderr, err := util.GetOVSOfPort("get", "Interface", netConfig.patchPort, "ofport") - if err != nil { - return fmt.Errorf("failed while waiting on patch port %q to be created by ovn-controller and "+ - "while getting ofport. stderr: %v, error: %v", netConfig.patchPort, stderr, err) - } - netConfig.ofPortPatch = ofportPatch - return nil -} +// END UDN UTILs for BridgeConfiguration -func setBridgeNetworkOfPorts(bridge *bridgeConfiguration, netName string) error { +func setBridgeNetworkOfPorts(bridge *bridgeconfig.BridgeConfiguration, netName string) error { bridge.Lock() defer bridge.Unlock() - netConfig, found := bridge.netConfig[netName] + netConfig, found := bridge.NetConfig[netName] if !found { - return fmt.Errorf("failed to find network %s configuration on bridge %s", netName, bridge.bridgeName) + return fmt.Errorf("failed to find network %s configuration on bridge %s", netName, bridge.BridgeName) } - return netConfig.setBridgeNetworkOfPortsInternal() + return netConfig.SetBridgeNetworkOfPortsInternal() } func NewUserDefinedNetworkGateway(netInfo util.NetInfo, node *corev1.Node, nodeLister listers.NodeLister, @@ -268,7 +140,7 @@ func NewUserDefinedNetworkGateway(netInfo util.NetInfo, node *corev1.Node, nodeL if gw.openflowManager == nil { return nil, fmt.Errorf("openflow manager has not been provided for network: %s", netInfo.GetNetworkName()) } - intfName := gw.openflowManager.defaultBridge.gwIface + intfName := 
gw.openflowManager.defaultBridge.GetGatewayIface() link, err := util.GetNetLinkOps().LinkByName(intfName) if err != nil { return nil, fmt.Errorf("unable to get link for %s, error: %v", intfName, err) @@ -743,7 +615,7 @@ func (udng *UserDefinedNetworkGateway) getDefaultRoute(isNetworkAdvertised bool) var retVal []netlink.Route var defaultAnyCIDR *net.IPNet - for _, nextHop := range udng.gateway.openflowManager.defaultBridge.nextHops { + for _, nextHop := range udng.gateway.openflowManager.defaultBridge.NextHops { isV6 := utilnet.IsIPv6(nextHop) _, defaultAnyCIDR, _ = net.ParseCIDR("0.0.0.0/0") if isV6 { @@ -935,11 +807,11 @@ func (udng *UserDefinedNetworkGateway) doReconcile() error { // update bridge configuration isNetworkAdvertised := util.IsPodNetworkAdvertisedAtNode(udng.NetInfo, udng.node.Name) - netConfig := udng.openflowManager.defaultBridge.getNetworkBridgeConfig(udng.GetNetworkName()) + netConfig := udng.openflowManager.defaultBridge.GetNetworkBridgeConfig(udng.GetNetworkName()) if netConfig == nil { return fmt.Errorf("missing bridge configuration for network %s", udng.GetNetworkName()) } - netConfig.advertised.Store(isNetworkAdvertised) + netConfig.Advertised.Store(isNetworkAdvertised) if err := udng.updateUDNVRFIPRules(isNetworkAdvertised); err != nil { return fmt.Errorf("error while updating ip rule for UDN %s: %s", udng.GetNetworkName(), err) diff --git a/go-controller/pkg/node/gateway_udn_test.go b/go-controller/pkg/node/gateway_udn_test.go index 575d8bc9c8..d26cf16910 100644 --- a/go-controller/pkg/node/gateway_udn_test.go +++ b/go-controller/pkg/node/gateway_udn_test.go @@ -32,6 +32,7 @@ import ( factoryMocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory/mocks" kubemocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube/mocks" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/iprulemanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/managementport" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" @@ -237,12 +238,12 @@ func openflowManagerCheckPorts(ofMgr *openflowManager) { GinkgoHelper() netConfigs, uplink, ofPortPhys := ofMgr.getDefaultBridgePortConfigurations() sort.SliceStable(netConfigs, func(i, j int) bool { - return netConfigs[i].patchPort < netConfigs[j].patchPort + return netConfigs[i].PatchPort < netConfigs[j].PatchPort }) Expect(checkPorts(netConfigs, uplink, ofPortPhys)).To(Succeed()) } -func checkDefaultSvcIsolationOVSFlows(flows []string, defaultConfig *bridgeUDNConfiguration, ofPortHost, bridgeMAC string, svcCIDR *net.IPNet) { +func checkDefaultSvcIsolationOVSFlows(flows []string, defaultConfig *bridgeconfig.BridgeUDNConfiguration, ofPortHost, bridgeMAC string, svcCIDR *net.IPNet) { By(fmt.Sprintf("Checking default service isolation flows for %s", svcCIDR.String())) var masqIP string @@ -270,7 +271,7 @@ func checkDefaultSvcIsolationOVSFlows(flows []string, defaultConfig *bridgeUDNCo ofPortHost, protoPrefix, protoPrefix, masqSubnet, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone)) { nTable0UDNMasqFlows++ } else if strings.Contains(flow, fmt.Sprintf("priority=100, table=2, actions=set_field:%s->eth_dst,output:%s", - bridgeMAC, defaultConfig.ofPortPatch)) { + bridgeMAC, defaultConfig.OfPortPatch)) { nTable2Flows++ } } @@ -280,7 +281,7 @@ func checkDefaultSvcIsolationOVSFlows(flows []string, defaultConfig *bridgeUDNCo Expect(nTable2Flows).To(Equal(1)) } -func checkAdvertisedUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeUDNConfiguration, netName string, svcCIDR *net.IPNet, expectedNFlows int) { +func checkAdvertisedUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeconfig.BridgeUDNConfiguration, netName string, svcCIDR *net.IPNet, expectedNFlows int) { By(fmt.Sprintf("Checking advertsised UDN %s 
service isolation flows for %s; expected %d flows", netName, svcCIDR.String(), expectedNFlows)) @@ -288,7 +289,7 @@ func checkAdvertisedUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeUDN var protoPrefix string var udnAdvertisedSubnets []*net.IPNet var err error - for _, clusterEntry := range netConfig.subnets { + for _, clusterEntry := range netConfig.Subnets { udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) } if utilnet.IsIPv4CIDR(svcCIDR) { @@ -316,17 +317,17 @@ func checkAdvertisedUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeUDN Expect(nFlows).To(Equal(expectedNFlows)) } -func checkUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeUDNConfiguration, netName string, svcCIDR *net.IPNet, expectedNFlows int) { +func checkUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeconfig.BridgeUDNConfiguration, netName string, svcCIDR *net.IPNet, expectedNFlows int) { By(fmt.Sprintf("Checking UDN %s service isolation flows for %s; expected %d flows", netName, svcCIDR.String(), expectedNFlows)) var mgmtMasqIP string var protoPrefix string if utilnet.IsIPv4CIDR(svcCIDR) { - mgmtMasqIP = netConfig.v4MasqIPs.ManagementPort.IP.String() + mgmtMasqIP = netConfig.V4MasqIPs.ManagementPort.IP.String() protoPrefix = "ip" } else { - mgmtMasqIP = netConfig.v6MasqIPs.ManagementPort.IP.String() + mgmtMasqIP = netConfig.V6MasqIPs.ManagementPort.IP.String() protoPrefix = "ip6" } @@ -342,9 +343,9 @@ func checkUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeUDNConfigurat } func getDummyOpenflowManager() *openflowManager { - gwBridge := &bridgeConfiguration{ - gwIface: "breth0", - bridgeName: "breth0", + gwBridge := &bridgeconfig.BridgeConfiguration{ + GwIface: "breth0", + BridgeName: "breth0", } ofm := &openflowManager{ defaultBridge: gwBridge, @@ -769,22 +770,22 @@ var _ = Describe("UserDefinedNetworkGateway", func() { } } Expect(udnFlows).To(Equal(0)) - Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(1)) 
// only default network + Expect(udnGateway.openflowManager.defaultBridge.NetConfig).To(HaveLen(1)) // only default network Expect(udnGateway.AddNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache Expect(flowMap["DEFAULT"]).To(HaveLen(64)) // 18 UDN Flows are added by default - Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(2)) // default network + UDN network - defaultUdnConfig := udnGateway.openflowManager.defaultBridge.netConfig["default"] - bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.netConfig["bluenet"] - bridgeMAC := udnGateway.openflowManager.defaultBridge.macAddress.String() - ofPortHost := udnGateway.openflowManager.defaultBridge.ofPortHost + Expect(udnGateway.openflowManager.defaultBridge.NetConfig).To(HaveLen(2)) // default network + UDN network + defaultUdnConfig := udnGateway.openflowManager.defaultBridge.NetConfig["default"] + bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.NetConfig["bluenet"] + bridgeMAC := udnGateway.openflowManager.defaultBridge.MacAddress.String() + ofPortHost := udnGateway.openflowManager.defaultBridge.OfPortHost for _, flows := range flowMap { for _, flow := range flows { if strings.Contains(flow, fmt.Sprintf("0x%x", udnGateway.masqCTMark)) { // UDN Flow udnFlows++ - } else if strings.Contains(flow, fmt.Sprintf("in_port=%s", bridgeUdnConfig.ofPortPatch)) { + } else if strings.Contains(flow, fmt.Sprintf("in_port=%s", bridgeUdnConfig.OfPortPatch)) { udnFlows++ } } @@ -810,7 +811,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { Expect(udnGateway.DelNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present - Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(1)) // default network only + Expect(udnGateway.openflowManager.defaultBridge.NetConfig).To(HaveLen(1)) // default network only udnFlows = 0 for _, flows := range 
flowMap { for _, flow := range flows { @@ -1000,22 +1001,22 @@ var _ = Describe("UserDefinedNetworkGateway", func() { } } Expect(udnFlows).To(Equal(0)) - Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(1)) // only default network + Expect(udnGateway.openflowManager.defaultBridge.NetConfig).To(HaveLen(1)) // only default network Expect(udnGateway.AddNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache Expect(flowMap["DEFAULT"]).To(HaveLen(64)) // 18 UDN Flows are added by default - Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(2)) // default network + UDN network - defaultUdnConfig := udnGateway.openflowManager.defaultBridge.netConfig["default"] - bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.netConfig["bluenet"] - bridgeMAC := udnGateway.openflowManager.defaultBridge.macAddress.String() - ofPortHost := udnGateway.openflowManager.defaultBridge.ofPortHost + Expect(udnGateway.openflowManager.defaultBridge.NetConfig).To(HaveLen(2)) // default network + UDN network + defaultUdnConfig := udnGateway.openflowManager.defaultBridge.NetConfig["default"] + bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.NetConfig["bluenet"] + bridgeMAC := udnGateway.openflowManager.defaultBridge.MacAddress.String() + ofPortHost := udnGateway.openflowManager.defaultBridge.OfPortHost for _, flows := range flowMap { for _, flow := range flows { if strings.Contains(flow, fmt.Sprintf("0x%x", udnGateway.masqCTMark)) { // UDN Flow udnFlows++ - } else if strings.Contains(flow, fmt.Sprintf("in_port=%s", bridgeUdnConfig.ofPortPatch)) { + } else if strings.Contains(flow, fmt.Sprintf("in_port=%s", bridgeUdnConfig.OfPortPatch)) { udnFlows++ } } @@ -1041,7 +1042,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { Expect(udnGateway.DelNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present - 
Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(1)) // default network only + Expect(udnGateway.openflowManager.defaultBridge.NetConfig).To(HaveLen(1)) // default network only udnFlows = 0 for _, flows := range flowMap { for _, flow := range flows { @@ -1241,22 +1242,22 @@ var _ = Describe("UserDefinedNetworkGateway", func() { } } Expect(udnFlows).To(Equal(0)) - Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(1)) // only default network + Expect(udnGateway.openflowManager.defaultBridge.NetConfig).To(HaveLen(1)) // only default network Expect(udnGateway.AddNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache Expect(flowMap["DEFAULT"]).To(HaveLen(69)) // 18 UDN Flows and 5 advertisedUDN flows are added by default - Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(2)) // default network + UDN network - defaultUdnConfig := udnGateway.openflowManager.defaultBridge.netConfig["default"] - bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.netConfig["bluenet"] - bridgeMAC := udnGateway.openflowManager.defaultBridge.macAddress.String() - ofPortHost := udnGateway.openflowManager.defaultBridge.ofPortHost + Expect(udnGateway.openflowManager.defaultBridge.NetConfig).To(HaveLen(2)) // default network + UDN network + defaultUdnConfig := udnGateway.openflowManager.defaultBridge.NetConfig["default"] + bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.NetConfig["bluenet"] + bridgeMAC := udnGateway.openflowManager.defaultBridge.MacAddress.String() + ofPortHost := udnGateway.openflowManager.defaultBridge.OfPortHost for _, flows := range flowMap { for _, flow := range flows { if strings.Contains(flow, fmt.Sprintf("0x%x", udnGateway.masqCTMark)) { // UDN Flow udnFlows++ - } else if strings.Contains(flow, fmt.Sprintf("in_port=%s", bridgeUdnConfig.ofPortPatch)) { + } else if strings.Contains(flow, fmt.Sprintf("in_port=%s", bridgeUdnConfig.OfPortPatch)) { udnFlows++ } } @@ -1282,7 
+1283,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { Expect(udnGateway.DelNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present - Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(1)) // default network only + Expect(udnGateway.openflowManager.defaultBridge.NetConfig).To(HaveLen(1)) // default network only udnFlows = 0 for _, flows := range flowMap { for _, flow := range flows { @@ -1482,7 +1483,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { err = testNS.Do(func(ns.NetNS) error { defer GinkgoRecover() ofm := getDummyOpenflowManager() - ofm.defaultBridge.nextHops = ovntest.MustParseIPs(config.Gateway.NextHop) + ofm.defaultBridge.NextHops = ovntest.MustParseIPs(config.Gateway.NextHop) udnGateway, err := NewUserDefinedNetworkGateway(mutableNetInfo, node, nil, nil, vrf, nil, &gateway{openflowManager: ofm}) Expect(err).NotTo(HaveOccurred()) mplink, err := netlink.LinkByName(mgtPort) diff --git a/go-controller/pkg/node/helper_linux.go b/go-controller/pkg/node/helper_linux.go index 5e55173a4a..8b46f05315 100644 --- a/go-controller/pkg/node/helper_linux.go +++ b/go-controller/pkg/node/helper_linux.go @@ -153,23 +153,6 @@ func getDefaultGatewayInterfaceByFamily(family int, gwIface string) (string, net return "", net.IP{}, nil } -func getIntfName(gatewayIntf string) (string, error) { - // The given (or autodetected) interface is an OVS bridge and this could be - // created by us using util.NicToBridge() or it was pre-created by the user. - - // Is intfName a port of gatewayIntf? 
- intfName, err := util.GetNicName(gatewayIntf) - if err != nil { - return "", err - } - _, stderr, err := util.RunOVSVsctl("get", "interface", intfName, "ofport") - if err != nil { - return "", fmt.Errorf("failed to get ofport of %s, stderr: %q, error: %v", - intfName, stderr, err) - } - return intfName, nil -} - // filterRoutesByIfIndex is a helper function that will sieve the provided routes and check // if they match the provided index. This used to be implemented with netlink.RT_FILTER_OIF, // however the problem is that this filtered out MultiPath IPv6 routes which have a LinkIndex of 0. diff --git a/go-controller/pkg/node/node_ip_handler_linux.go b/go-controller/pkg/node/node_ip_handler_linux.go index a0c5ab21e8..a6945531e4 100644 --- a/go-controller/pkg/node/node_ip_handler_linux.go +++ b/go-controller/pkg/node/node_ip_handler_linux.go @@ -20,6 +20,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/managementport" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -37,21 +38,21 @@ type addressManager struct { syncPeriod time.Duration // compare node primary IP change nodePrimaryAddr net.IP - gatewayBridge *bridgeConfiguration + gatewayBridge *bridgeconfig.BridgeConfiguration OnChanged func() sync.Mutex } // initializes a new address manager which will hold all the IPs on a node -func newAddressManager(nodeName string, k kube.Interface, mgmtPort managementport.Interface, watchFactory factory.NodeWatchFactory, gwBridge *bridgeConfiguration) *addressManager { +func newAddressManager(nodeName string, k kube.Interface, mgmtPort managementport.Interface, watchFactory factory.NodeWatchFactory, gwBridge 
*bridgeconfig.BridgeConfiguration) *addressManager { return newAddressManagerInternal(nodeName, k, mgmtPort, watchFactory, gwBridge, true) } // newAddressManagerInternal creates a new address manager; this function is // only expose for testcases to disable netlink subscription to ensure // reproducibility of unit tests. -func newAddressManagerInternal(nodeName string, k kube.Interface, mgmtPort managementport.Interface, watchFactory factory.NodeWatchFactory, gwBridge *bridgeConfiguration, useNetlink bool) *addressManager { +func newAddressManagerInternal(nodeName string, k kube.Interface, mgmtPort managementport.Interface, watchFactory factory.NodeWatchFactory, gwBridge *bridgeconfig.BridgeConfiguration, useNetlink bool) *addressManager { mgr := &addressManager{ nodeName: nodeName, watchFactory: watchFactory, @@ -74,7 +75,7 @@ func newAddressManagerInternal(nodeName string, k kube.Interface, mgmtPort manag } if useNetlink { // get updated interface IP addresses for the gateway bridge - ifAddrs, err = gwBridge.updateInterfaceIPAddresses(node) + ifAddrs, err = gwBridge.UpdateInterfaceIPAddresses(node) if err != nil { klog.Errorf("Failed to obtain interface IP addresses for node %s: %v", nodeName, err) return nil @@ -278,7 +279,7 @@ func (c *addressManager) updateNodeAddressAnnotations() error { if c.useNetlink { // get updated interface IP addresses for the gateway bridge - ifAddrs, err = c.gatewayBridge.updateInterfaceIPAddresses(node) + ifAddrs, err = c.gatewayBridge.UpdateInterfaceIPAddresses(node) if err != nil { return err } @@ -437,7 +438,7 @@ func (c *addressManager) isValidNodeIP(addr net.IP, linkIndex int) bool { if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && config.Gateway.Mode != config.GatewayModeDisabled { // Two methods to lookup EIPs assigned to the gateway bridge. Fast path from a shared cache or slow path from node annotations. 
// At startup, gateway bridge cache gets sync - if c.gatewayBridge.eipMarkIPs != nil && c.gatewayBridge.eipMarkIPs.HasSyncdOnce() && c.gatewayBridge.eipMarkIPs.IsIPPresent(addr) { + if c.gatewayBridge.EipMarkIPs != nil && c.gatewayBridge.EipMarkIPs.HasSyncdOnce() && c.gatewayBridge.EipMarkIPs.IsIPPresent(addr) { return false } else { if eipAddresses, err := c.getPrimaryHostEgressIPs(); err != nil { diff --git a/go-controller/pkg/node/node_ip_handler_linux_test.go b/go-controller/pkg/node/node_ip_handler_linux_test.go index ee10bbfc41..35264a1288 100644 --- a/go-controller/pkg/node/node_ip_handler_linux_test.go +++ b/go-controller/pkg/node/node_ip_handler_linux_test.go @@ -21,6 +21,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" nodemocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/mocks/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node" @@ -401,7 +402,7 @@ func configureKubeOVNContext(nodeName string, useNetlink bool) *testCtx { mpmock := &nodemocks.ManagementPort{} mpmock.On("GetAddresses").Return([]*net.IPNet{tc.mgmtPortIP4, tc.mgmtPortIP6}) - fakeBridgeConfiguration := &bridgeConfiguration{bridgeName: "breth0", gwIface: "breth0"} + fakeBridgeConfiguration := &bridgeconfig.BridgeConfiguration{BridgeName: "breth0", GwIface: "breth0"} k := &kube.Kube{KClient: tc.fakeClient} tc.ipManager = newAddressManagerInternal(nodeName, k, mpmock, tc.watchFactory, fakeBridgeConfiguration, useNetlink) diff --git a/go-controller/pkg/node/openflow_manager.go b/go-controller/pkg/node/openflow_manager.go index 96b55a52e1..3eaa8a298f 100644 --- a/go-controller/pkg/node/openflow_manager.go +++ 
b/go-controller/pkg/node/openflow_manager.go @@ -13,13 +13,14 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/generator/udn" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) type openflowManager struct { - defaultBridge *bridgeConfiguration - externalGatewayBridge *bridgeConfiguration + defaultBridge *bridgeconfig.BridgeConfiguration + externalGatewayBridge *bridgeconfig.BridgeConfiguration // flow cache, use map instead of array for readability when debugging flowCache map[string][]string flowMutex sync.Mutex @@ -31,20 +32,20 @@ type openflowManager struct { // UTILs Needed for UDN (also leveraged for default netInfo) in openflowmanager -func (c *openflowManager) getDefaultBridgePortConfigurations() ([]*bridgeUDNConfiguration, string, string) { - return c.defaultBridge.getBridgePortConfigurations() +func (c *openflowManager) getDefaultBridgePortConfigurations() ([]*bridgeconfig.BridgeUDNConfiguration, string, string) { + return c.defaultBridge.GetBridgePortConfigurations() } -func (c *openflowManager) getExGwBridgePortConfigurations() ([]*bridgeUDNConfiguration, string, string) { - return c.externalGatewayBridge.getBridgePortConfigurations() +func (c *openflowManager) getExGwBridgePortConfigurations() ([]*bridgeconfig.BridgeUDNConfiguration, string, string) { + return c.externalGatewayBridge.GetBridgePortConfigurations() } func (c *openflowManager) addNetwork(nInfo util.NetInfo, nodeSubnets []*net.IPNet, masqCTMark, pktMark uint, v6MasqIPs, v4MasqIPs *udn.MasqueradeIPs) error { - if err := c.defaultBridge.addNetworkBridgeConfig(nInfo, nodeSubnets, masqCTMark, pktMark, v6MasqIPs, v4MasqIPs); err != nil { + if err := c.defaultBridge.AddNetworkBridgeConfig(nInfo, nodeSubnets, masqCTMark, pktMark, v6MasqIPs, v4MasqIPs); err != nil { return err } if 
c.externalGatewayBridge != nil { - if err := c.externalGatewayBridge.addNetworkBridgeConfig(nInfo, nodeSubnets, masqCTMark, pktMark, v6MasqIPs, v4MasqIPs); err != nil { + if err := c.externalGatewayBridge.AddNetworkBridgeConfig(nInfo, nodeSubnets, masqCTMark, pktMark, v6MasqIPs, v4MasqIPs); err != nil { return err } } @@ -52,14 +53,14 @@ func (c *openflowManager) addNetwork(nInfo util.NetInfo, nodeSubnets []*net.IPNe } func (c *openflowManager) delNetwork(nInfo util.NetInfo) { - c.defaultBridge.delNetworkBridgeConfig(nInfo) + c.defaultBridge.DelNetworkBridgeConfig(nInfo) if c.externalGatewayBridge != nil { - c.externalGatewayBridge.delNetworkBridgeConfig(nInfo) + c.externalGatewayBridge.DelNetworkBridgeConfig(nInfo) } } -func (c *openflowManager) getActiveNetwork(nInfo util.NetInfo) *bridgeUDNConfiguration { - return c.defaultBridge.getActiveNetworkBridgeConfigCopy(nInfo.GetNetworkName()) +func (c *openflowManager) getActiveNetwork(nInfo util.NetInfo) *bridgeconfig.BridgeUDNConfiguration { + return c.defaultBridge.GetActiveNetworkBridgeConfigCopy(nInfo.GetNetworkName()) } // END UDN UTILs @@ -67,19 +68,19 @@ func (c *openflowManager) getActiveNetwork(nInfo util.NetInfo) *bridgeUDNConfigu func (c *openflowManager) getDefaultBridgeName() string { c.defaultBridge.Lock() defer c.defaultBridge.Unlock() - return c.defaultBridge.bridgeName + return c.defaultBridge.BridgeName } func (c *openflowManager) getDefaultBridgeMAC() net.HardwareAddr { c.defaultBridge.Lock() defer c.defaultBridge.Unlock() - return c.defaultBridge.macAddress + return c.defaultBridge.MacAddress } func (c *openflowManager) setDefaultBridgeMAC(macAddr net.HardwareAddr) { c.defaultBridge.Lock() defer c.defaultBridge.Unlock() - c.defaultBridge.macAddress = macAddr + c.defaultBridge.MacAddress = macAddr } func (c *openflowManager) updateFlowCacheEntry(key string, flows []string) { @@ -128,7 +129,7 @@ func (c *openflowManager) syncFlows() { flows = append(flows, entry...) 
} - _, stderr, err := util.ReplaceOFFlows(c.defaultBridge.bridgeName, flows) + _, stderr, err := util.ReplaceOFFlows(c.defaultBridge.BridgeName, flows) if err != nil { klog.Errorf("Failed to add flows, error: %v, stderr, %s, flows: %s", err, stderr, c.flowCache) } @@ -145,7 +146,7 @@ func (c *openflowManager) syncFlows() { flows = append(flows, entry...) } - _, stderr, err := util.ReplaceOFFlows(c.externalGatewayBridge.bridgeName, flows) + _, stderr, err := util.ReplaceOFFlows(c.externalGatewayBridge.BridgeName, flows) if err != nil { klog.Errorf("Failed to add flows, error: %v, stderr, %s, flows: %s", err, stderr, c.exGWFlowCache) } @@ -160,7 +161,7 @@ func (c *openflowManager) syncFlows() { // // -- to handle host -> service access, via masquerading from the host to OVN GR // -- to handle external -> service(ExternalTrafficPolicy: Local) -> host access without SNAT -func newGatewayOpenFlowManager(gwBridge, exGWBridge *bridgeConfiguration) (*openflowManager, error) { +func newGatewayOpenFlowManager(gwBridge, exGWBridge *bridgeconfig.BridgeConfiguration) (*openflowManager, error) { // add health check function to check default OpenFlow flows are on the shared gateway bridge ofm := &openflowManager{ defaultBridge: gwBridge, @@ -262,25 +263,25 @@ func (c *openflowManager) updateBridgeFlowCache(hostIPs []net.IP, hostSubnets [] return nil } -func checkPorts(netConfigs []*bridgeUDNConfiguration, physIntf, ofPortPhys string) error { +func checkPorts(netConfigs []*bridgeconfig.BridgeUDNConfiguration, physIntf, ofPortPhys string) error { // it could be that the ovn-controller recreated the patch between the host OVS bridge and // the integration bridge, as a result the ofport number changed for that patch interface for _, netConfig := range netConfigs { - if netConfig.ofPortPatch == "" { + if netConfig.OfPortPatch == "" { continue } - curOfportPatch, stderr, err := util.GetOVSOfPort("--if-exists", "get", "Interface", netConfig.patchPort, "ofport") + curOfportPatch, stderr, 
err := util.GetOVSOfPort("--if-exists", "get", "Interface", netConfig.PatchPort, "ofport") if err != nil { - return fmt.Errorf("failed to get ofport of %s, stderr: %q: %w", netConfig.patchPort, stderr, err) + return fmt.Errorf("failed to get ofport of %s, stderr: %q: %w", netConfig.PatchPort, stderr, err) } - if netConfig.ofPortPatch != curOfportPatch { - if netConfig.isDefaultNetwork() { + if netConfig.OfPortPatch != curOfportPatch { + if netConfig.IsDefaultNetwork() { klog.Errorf("Fatal error: patch port %s ofport changed from %s to %s", - netConfig.patchPort, netConfig.ofPortPatch, curOfportPatch) + netConfig.PatchPort, netConfig.OfPortPatch, curOfportPatch) os.Exit(1) } else { - klog.Warningf("UDN patch port %s changed for existing network from %v to %v. Expecting bridge config update.", netConfig.patchPort, netConfig.ofPortPatch, curOfportPatch) + klog.Warningf("UDN patch port %s changed for existing network from %v to %v. Expecting bridge config update.", netConfig.PatchPort, netConfig.OfPortPatch, curOfportPatch) } } } diff --git a/go-controller/pkg/node/types/const.go b/go-controller/pkg/node/types/const.go new file mode 100644 index 0000000000..64f4f15cf6 --- /dev/null +++ b/go-controller/pkg/node/types/const.go @@ -0,0 +1,6 @@ +package types + +const ( + // CtMarkOVN is the conntrack mark value for OVN traffic + CtMarkOVN = "0x1" +) diff --git a/go-controller/pkg/node/util/util.go b/go-controller/pkg/node/util/util.go new file mode 100644 index 0000000000..9ad21a9a8e --- /dev/null +++ b/go-controller/pkg/node/util/util.go @@ -0,0 +1,92 @@ +package util + +import ( + "fmt" + "net" + + net2 "k8s.io/utils/net" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + pkgutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" +) + +// GetNetworkInterfaceIPAddresses returns the IP addresses for the network interface 'iface'. 
+func GetNetworkInterfaceIPAddresses(iface string) ([]*net.IPNet, error) { + allIPs, err := pkgutil.GetFilteredInterfaceV4V6IPs(iface) + if err != nil { + return nil, fmt.Errorf("could not find IP addresses: %v", err) + } + + var ips []*net.IPNet + var foundIPv4 bool + var foundIPv6 bool + for _, ip := range allIPs { + if net2.IsIPv6CIDR(ip) { + if config.IPv6Mode && !foundIPv6 { + // For IPv6 addresses with 128 prefix, let's try to find an appropriate subnet + // in the routing table + subnetIP, err := pkgutil.GetIPv6OnSubnet(iface, ip) + if err != nil { + return nil, fmt.Errorf("could not find IPv6 address on subnet: %v", err) + } + ips = append(ips, subnetIP) + foundIPv6 = true + } + } else if config.IPv4Mode && !foundIPv4 { + ips = append(ips, ip) + foundIPv4 = true + } + } + if config.IPv4Mode && !foundIPv4 { + return nil, fmt.Errorf("failed to find IPv4 address on interface %s", iface) + } else if config.IPv6Mode && !foundIPv6 { + return nil, fmt.Errorf("failed to find IPv6 address on interface %s", iface) + } + return ips, nil +} + +// GetDPUHostPrimaryIPAddresses returns the DPU host IP/Network based on K8s Node IP +// and DPU IP subnet overriden by config config.Gateway.RouterSubnet +func GetDPUHostPrimaryIPAddresses(k8sNodeIP net.IP, ifAddrs []*net.IPNet) ([]*net.IPNet, error) { + // Note(adrianc): No Dual-Stack support at this point as we rely on k8s node IP to derive gateway information + // for each node. + var gwIps []*net.IPNet + isIPv4 := net2.IsIPv4(k8sNodeIP) + + // override subnet mask via config + if config.Gateway.RouterSubnet != "" { + _, addr, err := net.ParseCIDR(config.Gateway.RouterSubnet) + if err != nil { + return nil, err + } + if net2.IsIPv4CIDR(addr) != isIPv4 { + return nil, fmt.Errorf("unexpected gateway router subnet provided (%s). "+ + "does not match Node IP address format", config.Gateway.RouterSubnet) + } + if !addr.Contains(k8sNodeIP) { + return nil, fmt.Errorf("unexpected gateway router subnet provided (%s). 
"+ + "subnet does not contain Node IP address (%s)", config.Gateway.RouterSubnet, k8sNodeIP) + } + addr.IP = k8sNodeIP + gwIps = append(gwIps, addr) + } else { + // Assume Host and DPU share the same subnet + // in this case just update the matching IPNet with the Host's IP address + for _, addr := range ifAddrs { + if net2.IsIPv4CIDR(addr) != isIPv4 { + continue + } + // expect k8s Node IP to be contained in the given subnet + if !addr.Contains(k8sNodeIP) { + continue + } + newAddr := *addr + newAddr.IP = k8sNodeIP + gwIps = append(gwIps, &newAddr) + } + if len(gwIps) == 0 { + return nil, fmt.Errorf("could not find subnet on DPU matching node IP %s", k8sNodeIP) + } + } + return gwIps, nil +} diff --git a/go-controller/pkg/node/util/util_suite_test.go b/go-controller/pkg/node/util/util_suite_test.go new file mode 100644 index 0000000000..dc2d625792 --- /dev/null +++ b/go-controller/pkg/node/util/util_suite_test.go @@ -0,0 +1,13 @@ +package util + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestNodeSuite(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Node util Suite") +} diff --git a/go-controller/pkg/node/util/util_test.go b/go-controller/pkg/node/util/util_test.go new file mode 100644 index 0000000000..5ca6cc80a3 --- /dev/null +++ b/go-controller/pkg/node/util/util_test.go @@ -0,0 +1,57 @@ +package util + +import ( + "net" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("node util tests", func() { + BeforeEach(func() { + Expect(config.PrepareTestConfig()).To(Succeed()) + }) + + Context("GetDPUHostPrimaryIPAddresses", func() { + + It("returns Gateway IP/Subnet for kubernetes node IP", func() { + _, dpuSubnet, _ := net.ParseCIDR("10.0.0.101/24") + nodeIP := net.ParseIP("10.0.0.11") + expectedGwSubnet := []*net.IPNet{ + {IP: nodeIP, Mask: net.CIDRMask(24, 32)}, + } + gwSubnet, err := GetDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) + Expect(err).ToNot(HaveOccurred()) + Expect(gwSubnet).To(Equal(expectedGwSubnet)) + }) + + It("Fails if node IP is not in host subnets", func() { + _, dpuSubnet, _ := net.ParseCIDR("10.0.0.101/24") + nodeIP := net.ParseIP("10.0.1.11") + _, err := GetDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) + Expect(err).To(HaveOccurred()) + }) + + It("returns node IP with config.Gateway.RouterSubnet subnet", func() { + config.Gateway.RouterSubnet = "10.1.0.0/16" + _, dpuSubnet, _ := net.ParseCIDR("10.0.0.101/24") + nodeIP := net.ParseIP("10.1.0.11") + expectedGwSubnet := []*net.IPNet{ + {IP: nodeIP, Mask: net.CIDRMask(16, 32)}, + } + gwSubnet, err := GetDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) + Expect(err).ToNot(HaveOccurred()) + Expect(gwSubnet).To(Equal(expectedGwSubnet)) + }) + + It("Fails if node IP is not in config.Gateway.RouterSubnet subnet", func() { + config.Gateway.RouterSubnet = "10.1.0.0/16" + _, dpuSubnet, _ := net.ParseCIDR("10.0.0.101/24") + nodeIP := net.ParseIP("10.0.0.11") + _, err := GetDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) + Expect(err).To(HaveOccurred()) + }) + }) +}) From 420d9f1cfd64a828187742f693a44487d6323f1c Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Wed, 2 Jul 2025 14:23:07 +0200 Subject: [PATCH 123/181] [bridgeconfig] make mutex a public field to turn it into internal later Signed-off-by: Nadia Pinaeva --- .../pkg/node/bridgeconfig/bridgeconfig.go | 26 +++++++------- 
go-controller/pkg/node/gateway_shared_intf.go | 26 +++++++------- go-controller/pkg/node/gateway_udn.go | 4 +-- go-controller/pkg/node/openflow_manager.go | 36 +++++++++---------- 4 files changed, 46 insertions(+), 46 deletions(-) diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go index c3f3beae32..257b8a7059 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go @@ -63,7 +63,7 @@ func (netConfig *BridgeUDNConfiguration) SetBridgeNetworkOfPortsInternal() error } type BridgeConfiguration struct { - sync.Mutex + Mutex sync.Mutex NodeName string BridgeName string UplinkName string @@ -89,8 +89,8 @@ func (b *BridgeConfiguration) GetGatewayIface() string { // UpdateInterfaceIPAddresses sets and returns the bridge's current ips func (b *BridgeConfiguration) UpdateInterfaceIPAddresses(node *corev1.Node) ([]*net.IPNet, error) { - b.Lock() - defer b.Unlock() + b.Mutex.Lock() + defer b.Mutex.Unlock() ifAddrs, err := nodeutil.GetNetworkInterfaceIPAddresses(b.GetGatewayIface()) if err != nil { return nil, err @@ -272,8 +272,8 @@ func getRepresentor(intfName string) (string, error) { // GetBridgePortConfigurations returns a slice of Network port configurations along with the // uplinkName and physical port's ofport value func (b *BridgeConfiguration) GetBridgePortConfigurations() ([]*BridgeUDNConfiguration, string, string) { - b.Lock() - defer b.Unlock() + b.Mutex.Lock() + defer b.Mutex.Unlock() var netConfigs []*BridgeUDNConfiguration for _, netConfig := range b.NetConfig { netConfigs = append(netConfigs, netConfig.ShallowCopy()) @@ -287,8 +287,8 @@ func (b *BridgeConfiguration) AddNetworkBridgeConfig( nodeSubnets []*net.IPNet, masqCTMark, pktMark uint, v6MasqIPs, v4MasqIPs *udn.MasqueradeIPs) error { - b.Lock() - defer b.Unlock() + b.Mutex.Lock() + defer b.Mutex.Unlock() netName := nInfo.GetNetworkName() patchPort := 
nInfo.GetNetworkScopedPatchPortName(b.BridgeName, b.NodeName) @@ -316,15 +316,15 @@ func (b *BridgeConfiguration) AddNetworkBridgeConfig( // DelNetworkBridgeConfig deletes the provided netInfo from the bridge configuration cache func (b *BridgeConfiguration) DelNetworkBridgeConfig(nInfo util.NetInfo) { - b.Lock() - defer b.Unlock() + b.Mutex.Lock() + defer b.Mutex.Unlock() delete(b.NetConfig, nInfo.GetNetworkName()) } func (b *BridgeConfiguration) GetNetworkBridgeConfig(networkName string) *BridgeUDNConfiguration { - b.Lock() - defer b.Unlock() + b.Mutex.Lock() + defer b.Mutex.Unlock() return b.NetConfig[networkName] } @@ -334,8 +334,8 @@ func (b *BridgeConfiguration) GetNetworkBridgeConfig(networkName string) *Bridge // NOTE: if the network configuration can't be found or if the network is not patched by OVN // yet this returns nil. func (b *BridgeConfiguration) GetActiveNetworkBridgeConfigCopy(networkName string) *BridgeUDNConfiguration { - b.Lock() - defer b.Unlock() + b.Mutex.Lock() + defer b.Mutex.Unlock() if netConfig, found := b.NetConfig[networkName]; found && netConfig.OfPortPatch != "" { return netConfig.ShallowCopy() diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index 6d467a1285..688f25c297 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -235,9 +235,9 @@ type cidrAndFlags struct { func (npw *nodePortWatcher) updateGatewayIPs(addressManager *addressManager) { // Get Physical IPs of Node, Can be IPV4 IPV6 or both - addressManager.gatewayBridge.Lock() + addressManager.gatewayBridge.Mutex.Lock() gatewayIPv4, gatewayIPv6 := getGatewayFamilyAddrs(addressManager.gatewayBridge.Ips) - addressManager.gatewayBridge.Unlock() + addressManager.gatewayBridge.Mutex.Unlock() npw.gatewayIPLock.Lock() defer npw.gatewayIPLock.Unlock() @@ -2342,8 +2342,8 @@ func hostNetworkNormalActionFlows(netConfig *bridgeconfig.BridgeUDNConfiguration } func 
setBridgeOfPorts(bridge *bridgeconfig.BridgeConfiguration) error { - bridge.Lock() - defer bridge.Unlock() + bridge.Mutex.Lock() + defer bridge.Mutex.Unlock() // Get ofport of patchPort for _, netConfig := range bridge.NetConfig { if err := netConfig.SetBridgeNetworkOfPortsInternal(); err != nil { @@ -2422,37 +2422,37 @@ func newGateway( if exGwBridge != nil { gw.readyFunc = func() (bool, error) { - gwBridge.Lock() + gwBridge.Mutex.Lock() for _, netConfig := range gwBridge.NetConfig { ready, err := gatewayReady(netConfig.PatchPort) if err != nil || !ready { - gwBridge.Unlock() + gwBridge.Mutex.Unlock() return false, err } } - gwBridge.Unlock() - exGwBridge.Lock() + gwBridge.Mutex.Unlock() + exGwBridge.Mutex.Lock() for _, netConfig := range exGwBridge.NetConfig { exGWReady, err := gatewayReady(netConfig.PatchPort) if err != nil || !exGWReady { - exGwBridge.Unlock() + exGwBridge.Mutex.Unlock() return false, err } } - exGwBridge.Unlock() + exGwBridge.Mutex.Unlock() return true, nil } } else { gw.readyFunc = func() (bool, error) { - gwBridge.Lock() + gwBridge.Mutex.Lock() for _, netConfig := range gwBridge.NetConfig { ready, err := gatewayReady(netConfig.PatchPort) if err != nil || !ready { - gwBridge.Unlock() + gwBridge.Mutex.Unlock() return false, err } } - gwBridge.Unlock() + gwBridge.Mutex.Unlock() return true, nil } } diff --git a/go-controller/pkg/node/gateway_udn.go b/go-controller/pkg/node/gateway_udn.go index 04a5493f79..827bfe6421 100644 --- a/go-controller/pkg/node/gateway_udn.go +++ b/go-controller/pkg/node/gateway_udn.go @@ -97,8 +97,8 @@ type UserDefinedNetworkGateway struct { // END UDN UTILs for BridgeConfiguration func setBridgeNetworkOfPorts(bridge *bridgeconfig.BridgeConfiguration, netName string) error { - bridge.Lock() - defer bridge.Unlock() + bridge.Mutex.Lock() + defer bridge.Mutex.Unlock() netConfig, found := bridge.NetConfig[netName] if !found { diff --git a/go-controller/pkg/node/openflow_manager.go 
b/go-controller/pkg/node/openflow_manager.go index 3eaa8a298f..12547978f9 100644 --- a/go-controller/pkg/node/openflow_manager.go +++ b/go-controller/pkg/node/openflow_manager.go @@ -66,20 +66,20 @@ func (c *openflowManager) getActiveNetwork(nInfo util.NetInfo) *bridgeconfig.Bri // END UDN UTILs func (c *openflowManager) getDefaultBridgeName() string { - c.defaultBridge.Lock() - defer c.defaultBridge.Unlock() + c.defaultBridge.Mutex.Lock() + defer c.defaultBridge.Mutex.Unlock() return c.defaultBridge.BridgeName } func (c *openflowManager) getDefaultBridgeMAC() net.HardwareAddr { - c.defaultBridge.Lock() - defer c.defaultBridge.Unlock() + c.defaultBridge.Mutex.Lock() + defer c.defaultBridge.Mutex.Unlock() return c.defaultBridge.MacAddress } func (c *openflowManager) setDefaultBridgeMAC(macAddr net.HardwareAddr) { - c.defaultBridge.Lock() - defer c.defaultBridge.Unlock() + c.defaultBridge.Mutex.Lock() + defer c.defaultBridge.Mutex.Unlock() c.defaultBridge.MacAddress = macAddr } @@ -118,8 +118,8 @@ func (c *openflowManager) requestFlowSync() { func (c *openflowManager) syncFlows() { // protect gwBridge config from being updated by gw.nodeIPManager - c.defaultBridge.Lock() - defer c.defaultBridge.Unlock() + c.defaultBridge.Mutex.Lock() + defer c.defaultBridge.Mutex.Unlock() c.flowMutex.Lock() defer c.flowMutex.Unlock() @@ -135,8 +135,8 @@ func (c *openflowManager) syncFlows() { } if c.externalGatewayBridge != nil { - c.externalGatewayBridge.Lock() - defer c.externalGatewayBridge.Unlock() + c.externalGatewayBridge.Mutex.Lock() + defer c.externalGatewayBridge.Mutex.Unlock() c.exGWFlowMutex.Lock() defer c.exGWFlowMutex.Unlock() @@ -213,14 +213,14 @@ func (c *openflowManager) Run(stopChan <-chan struct{}, doneWg *sync.WaitGroup) func (c *openflowManager) updateBridgePMTUDFlowCache(key string, ipAddrs []string) { // protect defaultBridge config from being updated by gw.nodeIPManager - c.defaultBridge.Lock() - defer c.defaultBridge.Unlock() + c.defaultBridge.Mutex.Lock() + 
defer c.defaultBridge.Mutex.Unlock() dftFlows := pmtudDropFlows(c.defaultBridge, ipAddrs) c.updateFlowCacheEntry(key, dftFlows) if c.externalGatewayBridge != nil { - c.externalGatewayBridge.Lock() - defer c.externalGatewayBridge.Unlock() + c.externalGatewayBridge.Mutex.Lock() + defer c.externalGatewayBridge.Mutex.Unlock() exGWBridgeDftFlows := pmtudDropFlows(c.externalGatewayBridge, ipAddrs) c.updateExBridgeFlowCacheEntry(key, exGWBridgeDftFlows) } @@ -230,8 +230,8 @@ func (c *openflowManager) updateBridgePMTUDFlowCache(key string, ipAddrs []strin // note: this is shared between shared and local gateway modes func (c *openflowManager) updateBridgeFlowCache(hostIPs []net.IP, hostSubnets []*net.IPNet) error { // protect defaultBridge config from being updated by gw.nodeIPManager - c.defaultBridge.Lock() - defer c.defaultBridge.Unlock() + c.defaultBridge.Mutex.Lock() + defer c.defaultBridge.Mutex.Unlock() // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure // that dl_src is included in match criteria! @@ -251,8 +251,8 @@ func (c *openflowManager) updateBridgeFlowCache(hostIPs []net.IP, hostSubnets [] // we consume ex gw bridge flows only if that is enabled if c.externalGatewayBridge != nil { - c.externalGatewayBridge.Lock() - defer c.externalGatewayBridge.Unlock() + c.externalGatewayBridge.Mutex.Lock() + defer c.externalGatewayBridge.Mutex.Unlock() c.updateExBridgeFlowCacheEntry("NORMAL", []string{fmt.Sprintf("table=0,priority=0,actions=%s\n", util.NormalAction)}) exGWBridgeDftFlows, err := commonFlows(hostSubnets, c.externalGatewayBridge) if err != nil { From 3b073327997c6952c165cff7a6ded6663d907e27 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Wed, 2 Jul 2025 14:33:32 +0200 Subject: [PATCH 124/181] [bridgeconfig] only create BridgeConfigurations inside the package. 
Signed-off-by: Nadia Pinaeva --- .../pkg/node/bridgeconfig/bridgeconfig.go | 2 +- .../bridgeconfig/bridgeconfig_testutil.go | 21 ++++++ .../default_node_network_controller_test.go | 66 ++++--------------- go-controller/pkg/node/gateway.go | 4 +- .../pkg/node/gateway_localnet_linux_test.go | 14 ++-- go-controller/pkg/node/gateway_udn_test.go | 5 +- .../pkg/node/node_ip_handler_linux_test.go | 2 +- 7 files changed, 42 insertions(+), 72 deletions(-) create mode 100644 go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go index 257b8a7059..a501c7b641 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go @@ -117,7 +117,7 @@ func (b *BridgeConfiguration) UpdateInterfaceIPAddresses(node *corev1.Node) ([]* return ifAddrs, nil } -func BridgeForInterface(intfName, nodeName, +func NewBridgeConfiguration(intfName, nodeName, physicalNetworkName string, nodeSubnets, gwIPs []*net.IPNet, gwNextHops []net.IP, diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go new file mode 100644 index 0000000000..baad614fda --- /dev/null +++ b/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go @@ -0,0 +1,21 @@ +package bridgeconfig + +import "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + +func TestDefaultBridgeConfig() *BridgeConfiguration { + defaultNetConfig := &BridgeUDNConfiguration{ + OfPortPatch: "patch-breth0_ov", + } + return &BridgeConfiguration{ + NetConfig: map[string]*BridgeUDNConfiguration{ + types.DefaultNetworkName: defaultNetConfig, + }, + } +} + +func TestBridgeConfig(brName string) *BridgeConfiguration { + return &BridgeConfiguration{ + BridgeName: brName, + GwIface: brName, + } +} diff --git a/go-controller/pkg/node/default_node_network_controller_test.go 
b/go-controller/pkg/node/default_node_network_controller_test.go index de35b39e8d..a1413a7dd1 100644 --- a/go-controller/pkg/node/default_node_network_controller_test.go +++ b/go-controller/pkg/node/default_node_network_controller_test.go @@ -810,17 +810,10 @@ var _ = Describe("Node", func() { Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) - defaultNetConfig := &bridgeconfig.BridgeUDNConfiguration{ - OfPortPatch: "patch-breth0_ov", - } nc.Gateway = &gateway{ openflowManager: &openflowManager{ - flowCache: map[string][]string{}, - defaultBridge: &bridgeconfig.BridgeConfiguration{ - NetConfig: map[string]*bridgeconfig.BridgeUDNConfiguration{ - types.DefaultNetworkName: defaultNetConfig, - }, - }, + flowCache: map[string][]string{}, + defaultBridge: bridgeconfig.TestDefaultBridgeConfig(), }, } @@ -922,17 +915,10 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { 169.254.254.61 } Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) - defaultNetConfig := &bridgeconfig.BridgeUDNConfiguration{ - OfPortPatch: "patch-breth0_ov", - } nc.Gateway = &gateway{ openflowManager: &openflowManager{ - flowCache: map[string][]string{}, - defaultBridge: &bridgeconfig.BridgeConfiguration{ - NetConfig: map[string]*bridgeconfig.BridgeUDNConfiguration{ - types.DefaultNetworkName: defaultNetConfig, - }, - }, + flowCache: map[string][]string{}, + defaultBridge: bridgeconfig.TestDefaultBridgeConfig(), }, } @@ -1076,17 +1062,10 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { 169.254.253.61 } Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) - defaultNetConfig := &bridgeconfig.BridgeUDNConfiguration{ - OfPortPatch: "patch-breth0_ov", - } nc.Gateway = &gateway{ openflowManager: &openflowManager{ - flowCache: map[string][]string{}, - defaultBridge: &bridgeconfig.BridgeConfiguration{ - NetConfig: 
map[string]*bridgeconfig.BridgeUDNConfiguration{ - types.DefaultNetworkName: defaultNetConfig, - }, - }, + flowCache: map[string][]string{}, + defaultBridge: bridgeconfig.TestDefaultBridgeConfig(), }, } @@ -1187,17 +1166,10 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2001:db8:1::4 } Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) - defaultNetConfig := &bridgeconfig.BridgeUDNConfiguration{ - OfPortPatch: "patch-breth0_ov", - } nc.Gateway = &gateway{ openflowManager: &openflowManager{ - flowCache: map[string][]string{}, - defaultBridge: &bridgeconfig.BridgeConfiguration{ - NetConfig: map[string]*bridgeconfig.BridgeUDNConfiguration{ - types.DefaultNetworkName: defaultNetConfig, - }, - }, + flowCache: map[string][]string{}, + defaultBridge: bridgeconfig.TestDefaultBridgeConfig(), }, } @@ -1355,17 +1327,10 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2002:db8:1::4 } Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) - defaultNetConfig := &bridgeconfig.BridgeUDNConfiguration{ - OfPortPatch: "patch-breth0_ov", - } nc.Gateway = &gateway{ openflowManager: &openflowManager{ - flowCache: map[string][]string{}, - defaultBridge: &bridgeconfig.BridgeConfiguration{ - NetConfig: map[string]*bridgeconfig.BridgeUDNConfiguration{ - types.DefaultNetworkName: defaultNetConfig, - }, - }, + flowCache: map[string][]string{}, + defaultBridge: bridgeconfig.TestDefaultBridgeConfig(), }, } @@ -1483,17 +1448,10 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2002:db8:1::4 } Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) - defaultNetConfig := &bridgeconfig.BridgeUDNConfiguration{ - OfPortPatch: "patch-breth0_ov", - } nc.Gateway = &gateway{ openflowManager: &openflowManager{ - flowCache: map[string][]string{}, - defaultBridge: &bridgeconfig.BridgeConfiguration{ - NetConfig: 
map[string]*bridgeconfig.BridgeUDNConfiguration{ - types.DefaultNetworkName: defaultNetConfig, - }, - }, + flowCache: map[string][]string{}, + defaultBridge: bridgeconfig.TestDefaultBridgeConfig(), }, } diff --git a/go-controller/pkg/node/gateway.go b/go-controller/pkg/node/gateway.go index a617249c52..bf28fbb058 100644 --- a/go-controller/pkg/node/gateway.go +++ b/go-controller/pkg/node/gateway.go @@ -357,13 +357,13 @@ func setupUDPAggregationUplink(ifname string) error { func gatewayInitInternal(nodeName, gwIntf, egressGatewayIntf string, gwNextHops []net.IP, nodeSubnets, gwIPs []*net.IPNet, advertised bool, nodeAnnotator kube.Annotator) ( *bridgeconfig.BridgeConfiguration, *bridgeconfig.BridgeConfiguration, error) { - gatewayBridge, err := bridgeconfig.BridgeForInterface(gwIntf, nodeName, types.PhysicalNetworkName, nodeSubnets, gwIPs, gwNextHops, advertised) + gatewayBridge, err := bridgeconfig.NewBridgeConfiguration(gwIntf, nodeName, types.PhysicalNetworkName, nodeSubnets, gwIPs, gwNextHops, advertised) if err != nil { return nil, nil, fmt.Errorf("bridge for interface failed for %s: %w", gwIntf, err) } var egressGWBridge *bridgeconfig.BridgeConfiguration if egressGatewayIntf != "" { - egressGWBridge, err = bridgeconfig.BridgeForInterface(egressGatewayIntf, nodeName, types.PhysicalNetworkExGwName, nodeSubnets, nil, nil, false) + egressGWBridge, err = bridgeconfig.NewBridgeConfiguration(egressGatewayIntf, nodeName, types.PhysicalNetworkExGwName, nodeSubnets, nil, nil, false) if err != nil { return nil, nil, fmt.Errorf("bridge for interface failed for %s: %w", egressGatewayIntf, err) } diff --git a/go-controller/pkg/node/gateway_localnet_linux_test.go b/go-controller/pkg/node/gateway_localnet_linux_test.go index 87ef3aa72c..d259bc14e3 100644 --- a/go-controller/pkg/node/gateway_localnet_linux_test.go +++ b/go-controller/pkg/node/gateway_localnet_linux_test.go @@ -57,9 +57,8 @@ func initFakeNodePortWatcher(iptV4, iptV6 util.IPTablesHelper) *nodePortWatcher 
gwMACParsed, _ := net.ParseMAC(gwMAC) - defaultNetConfig := &bridgeconfig.BridgeUDNConfiguration{ - OfPortPatch: "patch-breth0_ov", - } + defaultBridge := bridgeconfig.TestDefaultBridgeConfig() + defaultBridge.MacAddress = gwMACParsed fNPW := nodePortWatcher{ ofportPhys: "eth0", @@ -67,13 +66,8 @@ func initFakeNodePortWatcher(iptV4, iptV6 util.IPTablesHelper) *nodePortWatcher gatewayIPv6: v6localnetGatewayIP, serviceInfo: make(map[k8stypes.NamespacedName]*serviceConfig), ofm: &openflowManager{ - flowCache: map[string][]string{}, - defaultBridge: &bridgeconfig.BridgeConfiguration{ - MacAddress: gwMACParsed, - NetConfig: map[string]*bridgeconfig.BridgeUDNConfiguration{ - types.DefaultNetworkName: defaultNetConfig, - }, - }, + flowCache: map[string][]string{}, + defaultBridge: defaultBridge, }, networkManager: networkmanager.Default().Interface(), } diff --git a/go-controller/pkg/node/gateway_udn_test.go b/go-controller/pkg/node/gateway_udn_test.go index d26cf16910..8f4082d1c5 100644 --- a/go-controller/pkg/node/gateway_udn_test.go +++ b/go-controller/pkg/node/gateway_udn_test.go @@ -343,10 +343,7 @@ func checkUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeconfig.Bridge } func getDummyOpenflowManager() *openflowManager { - gwBridge := &bridgeconfig.BridgeConfiguration{ - GwIface: "breth0", - BridgeName: "breth0", - } + gwBridge := bridgeconfig.TestBridgeConfig("breth0") ofm := &openflowManager{ defaultBridge: gwBridge, } diff --git a/go-controller/pkg/node/node_ip_handler_linux_test.go b/go-controller/pkg/node/node_ip_handler_linux_test.go index 35264a1288..aa819cdb8a 100644 --- a/go-controller/pkg/node/node_ip_handler_linux_test.go +++ b/go-controller/pkg/node/node_ip_handler_linux_test.go @@ -402,7 +402,7 @@ func configureKubeOVNContext(nodeName string, useNetlink bool) *testCtx { mpmock := &nodemocks.ManagementPort{} mpmock.On("GetAddresses").Return([]*net.IPNet{tc.mgmtPortIP4, tc.mgmtPortIP6}) - fakeBridgeConfiguration := 
&bridgeconfig.BridgeConfiguration{BridgeName: "breth0", GwIface: "breth0"} + fakeBridgeConfiguration := bridgeconfig.TestBridgeConfig("breth0") k := &kube.Kube{KClient: tc.fakeClient} tc.ipManager = newAddressManagerInternal(nodeName, k, mpmock, tc.watchFactory, fakeBridgeConfiguration, useNetlink) From a4d421a378217ff0a0b22314d9d6160f9c9d6740 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Wed, 2 Jul 2025 14:37:56 +0200 Subject: [PATCH 125/181] [bridgeconfig] simply move functions around, no change Signed-off-by: Nadia Pinaeva --- .../pkg/node/bridgeconfig/bridgeconfig.go | 84 +++++++++---------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go index a501c7b641..59d16255c7 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go @@ -79,44 +79,6 @@ type BridgeConfiguration struct { NextHops []net.IP } -func (b *BridgeConfiguration) GetGatewayIface() string { - // If GwIface is set, then accelerated GW interface is present and we use it. If else use external bridge instead. - if b.GwIface != "" { - return b.GwIface - } - return b.BridgeName -} - -// UpdateInterfaceIPAddresses sets and returns the bridge's current ips -func (b *BridgeConfiguration) UpdateInterfaceIPAddresses(node *corev1.Node) ([]*net.IPNet, error) { - b.Mutex.Lock() - defer b.Mutex.Unlock() - ifAddrs, err := nodeutil.GetNetworkInterfaceIPAddresses(b.GetGatewayIface()) - if err != nil { - return nil, err - } - - // For DPU, here we need to use the DPU host's IP address which is the tenant cluster's - // host internal IP address instead of the DPU's external bridge IP address. 
- if config.OvnKubeNode.Mode == types.NodeModeDPU { - nodeAddrStr, err := util.GetNodePrimaryIP(node) - if err != nil { - return nil, err - } - nodeAddr := net.ParseIP(nodeAddrStr) - if nodeAddr == nil { - return nil, fmt.Errorf("failed to parse node IP address. %v", nodeAddrStr) - } - ifAddrs, err = nodeutil.GetDPUHostPrimaryIPAddresses(nodeAddr, ifAddrs) - if err != nil { - return nil, err - } - } - - b.Ips = ifAddrs - return ifAddrs, nil -} - func NewBridgeConfiguration(intfName, nodeName, physicalNetworkName string, nodeSubnets, gwIPs []*net.IPNet, @@ -260,13 +222,42 @@ func NewBridgeConfiguration(intfName, nodeName, return &res, nil } -func getRepresentor(intfName string) (string, error) { - deviceID, err := util.GetDeviceIDFromNetdevice(intfName) +func (b *BridgeConfiguration) GetGatewayIface() string { + // If GwIface is set, then accelerated GW interface is present and we use it. If else use external bridge instead. + if b.GwIface != "" { + return b.GwIface + } + return b.BridgeName +} + +// UpdateInterfaceIPAddresses sets and returns the bridge's current ips +func (b *BridgeConfiguration) UpdateInterfaceIPAddresses(node *corev1.Node) ([]*net.IPNet, error) { + b.Mutex.Lock() + defer b.Mutex.Unlock() + ifAddrs, err := nodeutil.GetNetworkInterfaceIPAddresses(b.GetGatewayIface()) if err != nil { - return "", err + return nil, err } - return util.GetFunctionRepresentorName(deviceID) + // For DPU, here we need to use the DPU host's IP address which is the tenant cluster's + // host internal IP address instead of the DPU's external bridge IP address. + if config.OvnKubeNode.Mode == types.NodeModeDPU { + nodeAddrStr, err := util.GetNodePrimaryIP(node) + if err != nil { + return nil, err + } + nodeAddr := net.ParseIP(nodeAddrStr) + if nodeAddr == nil { + return nil, fmt.Errorf("failed to parse node IP address. 
%v", nodeAddrStr) + } + ifAddrs, err = nodeutil.GetDPUHostPrimaryIPAddresses(nodeAddr, ifAddrs) + if err != nil { + return nil, err + } + } + + b.Ips = ifAddrs + return ifAddrs, nil } // GetBridgePortConfigurations returns a slice of Network port configurations along with the @@ -421,3 +412,12 @@ func bridgedGatewayNodeSetup(nodeName, bridgeName, physicalNetworkName string) ( ifaceID := bridgeName + "_" + nodeName return ifaceID, nil } + +func getRepresentor(intfName string) (string, error) { + deviceID, err := util.GetDeviceIDFromNetdevice(intfName) + if err != nil { + return "", err + } + + return util.GetFunctionRepresentorName(deviceID) +} From cf93ef303ca96a6c5a249508c31e13b9729fac38 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Wed, 2 Jul 2025 14:43:41 +0200 Subject: [PATCH 126/181] [bridgeconfig] start moving methods that use internal mutex to the pkg Update gatewayReady function to only return bool as it always returns nil error. Signed-off-by: Nadia Pinaeva --- .../pkg/node/bridgeconfig/bridgeconfig.go | 24 +++++++++++++++ go-controller/pkg/node/gateway.go | 10 ------- go-controller/pkg/node/gateway_shared_intf.go | 30 ++++--------------- 3 files changed, 30 insertions(+), 34 deletions(-) diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go index 59d16255c7..7ef6236c9a 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go @@ -345,6 +345,30 @@ func (b *BridgeConfiguration) PatchedNetConfigs() []*BridgeUDNConfiguration { return result } +// IsGatewayReady checks if patch ports of every netConfig are present. 
+// used by gateway on newGateway readyFunc +func (b *BridgeConfiguration) IsGatewayReady() bool { + b.Mutex.Lock() + defer b.Mutex.Unlock() + for _, netConfig := range b.NetConfig { + ready := gatewayReady(netConfig.PatchPort) + if !ready { + return false + } + } + return true +} + +func gatewayReady(patchPort string) bool { + // Get ofport of patchPort + ofport, _, err := util.GetOVSOfPort("--if-exists", "get", "interface", patchPort, "ofport") + if err != nil || len(ofport) == 0 { + return false + } + klog.Info("Gateway is ready") + return true +} + func getIntfName(gatewayIntf string) (string, error) { // The given (or autodetected) interface is an OVS bridge and this could be // created by us using util.NicToBridge() or it was pre-created by the user. diff --git a/go-controller/pkg/node/gateway.go b/go-controller/pkg/node/gateway.go index bf28fbb058..948da997d5 100644 --- a/go-controller/pkg/node/gateway.go +++ b/go-controller/pkg/node/gateway.go @@ -452,16 +452,6 @@ func gatewayInitInternal(nodeName, gwIntf, egressGatewayIntf string, gwNextHops return gatewayBridge, egressGWBridge, err } -func gatewayReady(patchPort string) (bool, error) { - // Get ofport of patchPort - ofport, _, err := util.GetOVSOfPort("--if-exists", "get", "interface", patchPort, "ofport") - if err != nil || len(ofport) == 0 { - return false, nil - } - klog.Info("Gateway is ready") - return true, nil -} - func (g *gateway) GetGatewayBridgeIface() string { return g.openflowManager.getDefaultBridgeName() } diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index 688f25c297..9a1ae075c2 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -2422,37 +2422,19 @@ func newGateway( if exGwBridge != nil { gw.readyFunc = func() (bool, error) { - gwBridge.Mutex.Lock() - for _, netConfig := range gwBridge.NetConfig { - ready, err := gatewayReady(netConfig.PatchPort) - if err != nil || 
!ready { - gwBridge.Mutex.Unlock() - return false, err - } + if !gwBridge.IsGatewayReady() { + return false, nil } - gwBridge.Mutex.Unlock() - exGwBridge.Mutex.Lock() - for _, netConfig := range exGwBridge.NetConfig { - exGWReady, err := gatewayReady(netConfig.PatchPort) - if err != nil || !exGWReady { - exGwBridge.Mutex.Unlock() - return false, err - } + if !exGwBridge.IsGatewayReady() { + return false, nil } - exGwBridge.Mutex.Unlock() return true, nil } } else { gw.readyFunc = func() (bool, error) { - gwBridge.Mutex.Lock() - for _, netConfig := range gwBridge.NetConfig { - ready, err := gatewayReady(netConfig.PatchPort) - if err != nil || !ready { - gwBridge.Mutex.Unlock() - return false, err - } + if !gwBridge.IsGatewayReady() { + return false, nil } - gwBridge.Mutex.Unlock() return true, nil } } From 836e0f64ffd874ccbdb46df1ec9f51946f824666 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Wed, 2 Jul 2025 14:49:33 +0200 Subject: [PATCH 127/181] [bridgeconfig] move setBridgeOfPorts to the package. 
no changes to the function Signed-off-by: Nadia Pinaeva --- .../pkg/node/bridgeconfig/bridgeconfig.go | 48 +++++++++++++ go-controller/pkg/node/gateway_shared_intf.go | 68 +++---------------- go-controller/pkg/node/types/const.go | 2 + 3 files changed, 59 insertions(+), 59 deletions(-) diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go index 7ef6236c9a..4fb433f4c8 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go @@ -359,6 +359,54 @@ func (b *BridgeConfiguration) IsGatewayReady() bool { return true } +func (b *BridgeConfiguration) SetOfPorts() error { + b.Mutex.Lock() + defer b.Mutex.Unlock() + // Get ofport of patchPort + for _, netConfig := range b.NetConfig { + if err := netConfig.SetBridgeNetworkOfPortsInternal(); err != nil { + return fmt.Errorf("error setting bridge openflow ports for network with patchport %v: err: %v", netConfig.PatchPort, err) + } + } + + if b.UplinkName != "" { + // Get ofport of physical interface + ofportPhys, stderr, err := util.GetOVSOfPort("get", "interface", b.UplinkName, "ofport") + if err != nil { + return fmt.Errorf("failed to get ofport of %s, stderr: %q, error: %v", + b.UplinkName, stderr, err) + } + b.OfPortPhys = ofportPhys + } + + // Get ofport representing the host. That is, host representor port in case of DPUs, ovsLocalPort otherwise. 
+ if config.OvnKubeNode.Mode == types.NodeModeDPU { + var stderr string + hostRep, err := util.GetDPUHostInterface(b.BridgeName) + if err != nil { + return err + } + + b.OfPortHost, stderr, err = util.RunOVSVsctl("get", "interface", hostRep, "ofport") + if err != nil { + return fmt.Errorf("failed to get ofport of host interface %s, stderr: %q, error: %v", + hostRep, stderr, err) + } + } else { + var err error + if b.GwIfaceRep != "" { + b.OfPortHost, _, err = util.RunOVSVsctl("get", "interface", b.GwIfaceRep, "ofport") + if err != nil { + return fmt.Errorf("failed to get ofport of bypass rep %s, error: %v", b.GwIfaceRep, err) + } + } else { + b.OfPortHost = nodetypes.OvsLocalPort + } + } + + return nil +} + func gatewayReady(patchPort string) bool { // Get ofport of patchPort ofport, _, err := util.GetOVSOfPort("--if-exists", "get", "interface", patchPort, "ofport") diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index 9a1ae075c2..d17992eb80 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -50,8 +50,6 @@ const ( // pmtudOpenFlowCookie identifies the flows used to drop ICMP type (3) destination unreachable, // fragmentation-needed (4) pmtudOpenFlowCookie = "0x0304" - // ovsLocalPort is the name of the OVS bridge local port - ovsLocalPort = "LOCAL" // ctMarkHost is the conntrack mark value for host traffic ctMarkHost = "0x2" @@ -391,7 +389,7 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI if err != nil { // in the odd case that getting all ports from the bridge should not work, // simply output to LOCAL (this should work well in the vast majority of cases, anyway) - klog.Warningf("Unable to get port list from bridge. Using ovsLocalPort as output only: error: %v", + klog.Warningf("Unable to get port list from bridge. 
Using OvsLocalPort as output only: error: %v", err) } } @@ -575,7 +573,7 @@ func (npw *nodePortWatcher) generateARPBypassFlow(ofPorts []string, ofPortPatch, // simply output to LOCAL (this should work well in the vast majority of cases, anyway) arpFlow = fmt.Sprintf("cookie=%s, priority=110, in_port=%s, %s, %s=%s, "+ "actions=output:%s", - cookie, npw.ofportPhys, addrResProto, addrResDst, ipAddr, ovsLocalPort) + cookie, npw.ofportPhys, addrResProto, addrResDst, ipAddr, nodetypes.OvsLocalPort) } else { // cover the case where breth0 has more than 3 ports, e.g. if an admin adds a 4th port // and the ExternalIP would be on that port @@ -1552,7 +1550,7 @@ func flowsForDefaultBridge(bridge *bridgeconfig.BridgeConfiguration, extraIPs [] // table0, Geneve packets coming from LOCAL. Skip conntrack and send to external dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp6, udp_dst=%d, "+ - "actions=output:%s", defaultOpenFlowCookie, ovsLocalPort, config.Default.EncapPort, ofPortPhys)) + "actions=output:%s", defaultOpenFlowCookie, nodetypes.OvsLocalPort, config.Default.EncapPort, ofPortPhys)) } physicalIP, err := util.MatchFirstIPNetFamily(true, bridgeIPs) @@ -2157,7 +2155,7 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeconfig.BridgeConfigurat // but holding this until // https://issues.redhat.com/browse/FDP-646 is fixed, for now we // are assuming MEG & BGP are not used together - output = ovsLocalPort + output = nodetypes.OvsLocalPort } for _, clusterEntry := range netConfig.Subnets { cidr := clusterEntry.CIDR @@ -2175,7 +2173,7 @@ func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeconfig.BridgeConfigurat dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=16, table=1, %s, %s_dst=%s, "+ "actions=output:%s", - defaultOpenFlowCookie, ipv, ipv, mgmtIP.IP, ovsLocalPort), + defaultOpenFlowCookie, ipv, ipv, mgmtIP.IP, nodetypes.OvsLocalPort), ) } } @@ -2307,7 +2305,7 @@ func hostNetworkNormalActionFlows(netConfig 
*bridgeconfig.BridgeUDNConfiguration if utilnet.IsIPv6(hostSubnet.IP) != isV6 { continue } - flows = append(flows, formatFlow(ovsLocalPort, hostSubnet.String(), ctMarkHost)) + flows = append(flows, formatFlow(nodetypes.OvsLocalPort, hostSubnet.String(), ctMarkHost)) } if isV6 { @@ -2335,60 +2333,12 @@ func hostNetworkNormalActionFlows(netConfig *bridgeconfig.BridgeUDNConfiguration // Traffic path (a) for ICMP: OVN->localnet for local gw mode // Traffic path (b) for ICMP: host->localnet for both gw modes - flows = append(flows, formatICMPFlow(ovsLocalPort, ctMarkHost, icmpType)) + flows = append(flows, formatICMPFlow(nodetypes.OvsLocalPort, ctMarkHost, icmpType)) } } return flows } -func setBridgeOfPorts(bridge *bridgeconfig.BridgeConfiguration) error { - bridge.Mutex.Lock() - defer bridge.Mutex.Unlock() - // Get ofport of patchPort - for _, netConfig := range bridge.NetConfig { - if err := netConfig.SetBridgeNetworkOfPortsInternal(); err != nil { - return fmt.Errorf("error setting bridge openflow ports for network with patchport %v: err: %v", netConfig.PatchPort, err) - } - } - - if bridge.UplinkName != "" { - // Get ofport of physical interface - ofportPhys, stderr, err := util.GetOVSOfPort("get", "interface", bridge.UplinkName, "ofport") - if err != nil { - return fmt.Errorf("failed to get ofport of %s, stderr: %q, error: %v", - bridge.UplinkName, stderr, err) - } - bridge.OfPortPhys = ofportPhys - } - - // Get ofport representing the host. That is, host representor port in case of DPUs, ovsLocalPort otherwise. 
- if config.OvnKubeNode.Mode == types.NodeModeDPU { - var stderr string - hostRep, err := util.GetDPUHostInterface(bridge.BridgeName) - if err != nil { - return err - } - - bridge.OfPortHost, stderr, err = util.RunOVSVsctl("get", "interface", hostRep, "ofport") - if err != nil { - return fmt.Errorf("failed to get ofport of host interface %s, stderr: %q, error: %v", - hostRep, stderr, err) - } - } else { - var err error - if bridge.GwIfaceRep != "" { - bridge.OfPortHost, _, err = util.RunOVSVsctl("get", "interface", bridge.GwIfaceRep, "ofport") - if err != nil { - return fmt.Errorf("failed to get ofport of bypass rep %s, error: %v", bridge.GwIfaceRep, err) - } - } else { - bridge.OfPortHost = ovsLocalPort - } - } - - return nil -} - func newGateway( nodeName string, subnets []*net.IPNet, @@ -2443,12 +2393,12 @@ func newGateway( // Program cluster.GatewayIntf to let non-pod traffic to go to host // stack klog.Info("Creating Gateway Openflow Manager") - err := setBridgeOfPorts(gwBridge) + err := gwBridge.SetOfPorts() if err != nil { return err } if exGwBridge != nil { - err = setBridgeOfPorts(exGwBridge) + err = exGwBridge.SetOfPorts() if err != nil { return err } diff --git a/go-controller/pkg/node/types/const.go b/go-controller/pkg/node/types/const.go index 64f4f15cf6..b486302dd6 100644 --- a/go-controller/pkg/node/types/const.go +++ b/go-controller/pkg/node/types/const.go @@ -3,4 +3,6 @@ package types const ( // CtMarkOVN is the conntrack mark value for OVN traffic CtMarkOVN = "0x1" + // OvsLocalPort is the name of the OVS bridge local port + OvsLocalPort = "LOCAL" ) From b607e93d4b63e063181d86ba64bd37b7eb0e53b9 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Wed, 2 Jul 2025 14:57:07 +0200 Subject: [PATCH 128/181] [bridgeconfig] add some getters/setters with lock to the pkg. Make SetBridgeNetworkOfPortsInternal actually internal, rename to setOfPatchPort as it only updates patchPort. 
Also rename setBridgeNetworkOfPorts to SetNetworkOfPatchPort for the same reason. Signed-off-by: Nadia Pinaeva --- .../pkg/node/bridgeconfig/bridgeconfig.go | 39 ++++++++++++++++++- go-controller/pkg/node/gateway_shared_intf.go | 4 +- go-controller/pkg/node/gateway_udn.go | 20 +--------- go-controller/pkg/node/openflow_manager.go | 12 ++---- 4 files changed, 43 insertions(+), 32 deletions(-) diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go index 4fb433f4c8..0dd601cc24 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go @@ -52,7 +52,7 @@ func (netConfig *BridgeUDNConfiguration) IsDefaultNetwork() bool { return netConfig.MasqCTMark == nodetypes.CtMarkOVN } -func (netConfig *BridgeUDNConfiguration) SetBridgeNetworkOfPortsInternal() error { +func (netConfig *BridgeUDNConfiguration) setOfPatchPort() error { ofportPatch, stderr, err := util.GetOVSOfPort("get", "Interface", netConfig.PatchPort, "ofport") if err != nil { return fmt.Errorf("failed while waiting on patch port %q to be created by ovn-controller and "+ @@ -364,7 +364,7 @@ func (b *BridgeConfiguration) SetOfPorts() error { defer b.Mutex.Unlock() // Get ofport of patchPort for _, netConfig := range b.NetConfig { - if err := netConfig.SetBridgeNetworkOfPortsInternal(); err != nil { + if err := netConfig.setOfPatchPort(); err != nil { return fmt.Errorf("error setting bridge openflow ports for network with patchport %v: err: %v", netConfig.PatchPort, err) } } @@ -407,6 +407,41 @@ func (b *BridgeConfiguration) SetOfPorts() error { return nil } +func (b *BridgeConfiguration) GetIPs() []*net.IPNet { + b.Mutex.Lock() + defer b.Mutex.Unlock() + return b.Ips +} + +func (b *BridgeConfiguration) GetBridgeName() string { + b.Mutex.Lock() + defer b.Mutex.Unlock() + return b.BridgeName +} + +func (b *BridgeConfiguration) GetMAC() net.HardwareAddr { + b.Mutex.Lock() + defer 
b.Mutex.Unlock() + return b.MacAddress +} + +func (b *BridgeConfiguration) SetMAC(macAddr net.HardwareAddr) { + b.Mutex.Lock() + defer b.Mutex.Unlock() + b.MacAddress = macAddr +} + +func (b *BridgeConfiguration) SetNetworkOfPatchPort(netName string) error { + b.Mutex.Lock() + defer b.Mutex.Unlock() + + netConfig, found := b.NetConfig[netName] + if !found { + return fmt.Errorf("failed to find network %s configuration on bridge %s", netName, b.BridgeName) + } + return netConfig.setOfPatchPort() +} + func gatewayReady(patchPort string) bool { // Get ofport of patchPort ofport, _, err := util.GetOVSOfPort("--if-exists", "get", "interface", patchPort, "ofport") diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index d17992eb80..fb902c589c 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -233,9 +233,7 @@ type cidrAndFlags struct { func (npw *nodePortWatcher) updateGatewayIPs(addressManager *addressManager) { // Get Physical IPs of Node, Can be IPV4 IPV6 or both - addressManager.gatewayBridge.Mutex.Lock() - gatewayIPv4, gatewayIPv6 := getGatewayFamilyAddrs(addressManager.gatewayBridge.Ips) - addressManager.gatewayBridge.Mutex.Unlock() + gatewayIPv4, gatewayIPv6 := getGatewayFamilyAddrs(addressManager.gatewayBridge.GetIPs()) npw.gatewayIPLock.Lock() defer npw.gatewayIPLock.Unlock() diff --git a/go-controller/pkg/node/gateway_udn.go b/go-controller/pkg/node/gateway_udn.go index 827bfe6421..a9d3b92d23 100644 --- a/go-controller/pkg/node/gateway_udn.go +++ b/go-controller/pkg/node/gateway_udn.go @@ -23,7 +23,6 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/generator/udn" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/iprulemanager" 
nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/vrfmanager" @@ -92,21 +91,6 @@ type UserDefinedNetworkGateway struct { gwInterfaceIndex int } -// UTILS Needed for UDN (also leveraged for default netInfo) in BridgeConfiguration - -// END UDN UTILs for BridgeConfiguration - -func setBridgeNetworkOfPorts(bridge *bridgeconfig.BridgeConfiguration, netName string) error { - bridge.Mutex.Lock() - defer bridge.Mutex.Unlock() - - netConfig, found := bridge.NetConfig[netName] - if !found { - return fmt.Errorf("failed to find network %s configuration on bridge %s", netName, bridge.BridgeName) - } - return netConfig.SetBridgeNetworkOfPortsInternal() -} - func NewUserDefinedNetworkGateway(netInfo util.NetInfo, node *corev1.Node, nodeLister listers.NodeLister, kubeInterface kube.Interface, vrfManager *vrfmanager.Controller, ruleManager *iprulemanager.Controller, defaultNetworkGateway Gateway) (*UserDefinedNetworkGateway, error) { @@ -270,12 +254,12 @@ func (udng *UserDefinedNetworkGateway) AddNetwork() error { waiter := newStartupWaiterWithTimeout(waitForPatchPortTimeout) readyFunc := func() (bool, error) { - if err := setBridgeNetworkOfPorts(udng.openflowManager.defaultBridge, udng.GetNetworkName()); err != nil { + if err := udng.openflowManager.defaultBridge.SetNetworkOfPatchPort(udng.GetNetworkName()); err != nil { klog.V(3).Infof("Failed to set network %s's openflow ports for default bridge; error: %v", udng.GetNetworkName(), err) return false, nil } if udng.openflowManager.externalGatewayBridge != nil { - if err := setBridgeNetworkOfPorts(udng.openflowManager.externalGatewayBridge, udng.GetNetworkName()); err != nil { + if err := udng.openflowManager.externalGatewayBridge.SetNetworkOfPatchPort(udng.GetNetworkName()); err != nil { klog.V(3).Infof("Failed to set network %s's openflow ports for secondary bridge; error: %v", udng.GetNetworkName(), err) return false, nil } diff --git 
a/go-controller/pkg/node/openflow_manager.go b/go-controller/pkg/node/openflow_manager.go index 12547978f9..0b96b2186f 100644 --- a/go-controller/pkg/node/openflow_manager.go +++ b/go-controller/pkg/node/openflow_manager.go @@ -66,21 +66,15 @@ func (c *openflowManager) getActiveNetwork(nInfo util.NetInfo) *bridgeconfig.Bri // END UDN UTILs func (c *openflowManager) getDefaultBridgeName() string { - c.defaultBridge.Mutex.Lock() - defer c.defaultBridge.Mutex.Unlock() - return c.defaultBridge.BridgeName + return c.defaultBridge.GetBridgeName() } func (c *openflowManager) getDefaultBridgeMAC() net.HardwareAddr { - c.defaultBridge.Mutex.Lock() - defer c.defaultBridge.Mutex.Unlock() - return c.defaultBridge.MacAddress + return c.defaultBridge.GetMAC() } func (c *openflowManager) setDefaultBridgeMAC(macAddr net.HardwareAddr) { - c.defaultBridge.Mutex.Lock() - defer c.defaultBridge.Mutex.Unlock() - c.defaultBridge.MacAddress = macAddr + c.defaultBridge.SetMAC(macAddr) } func (c *openflowManager) updateFlowCacheEntry(key string, flows []string) { From 28f9c1eccc0e72fadfee4c19db00f0bb02748223 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Wed, 2 Jul 2025 16:09:39 +0200 Subject: [PATCH 129/181] [bridgeconfig] move bridge flows generation functions to the pkg. These functions use bridge lock, will convert them to BridgeConfiguration methods later. Move test functions related to flow generation. 
Signed-off-by: Nadia Pinaeva --- .../bridgeconfig/bridgeconfig_testutil.go | 114 +- .../pkg/node/bridgeconfig/bridgeflows.go | 946 +++++++++++++++++ .../node/default_node_network_controller.go | 3 +- go-controller/pkg/node/egress_service_test.go | 11 +- go-controller/pkg/node/gateway.go | 5 - go-controller/pkg/node/gateway_shared_intf.go | 974 +----------------- go-controller/pkg/node/gateway_udn_test.go | 124 +-- go-controller/pkg/node/openflow_manager.go | 15 +- go-controller/pkg/node/types/const.go | 14 + go-controller/pkg/node/util/util.go | 26 + 10 files changed, 1131 insertions(+), 1101 deletions(-) create mode 100644 go-controller/pkg/node/bridgeconfig/bridgeflows.go diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go index baad614fda..271c555e7e 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go @@ -1,6 +1,19 @@ package bridgeconfig -import "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" +import ( + "fmt" + "net" + "strings" + + net2 "k8s.io/utils/net" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) func TestDefaultBridgeConfig() *BridgeConfiguration { defaultNetConfig := &BridgeUDNConfiguration{ @@ -19,3 +32,102 @@ func TestBridgeConfig(brName string) *BridgeConfiguration { GwIface: brName, } } + +func CheckUDNSvcIsolationOVSFlows(flows []string, netConfig *BridgeUDNConfiguration, netName string, svcCIDR *net.IPNet, expectedNFlows int) { + By(fmt.Sprintf("Checking UDN %s service isolation flows for %s; expected %d flows", + netName, svcCIDR.String(), expectedNFlows)) + + var mgmtMasqIP string + var protoPrefix string + if net2.IsIPv4CIDR(svcCIDR) { + mgmtMasqIP = netConfig.V4MasqIPs.ManagementPort.IP.String() + protoPrefix = "ip" + } else { + mgmtMasqIP = netConfig.V6MasqIPs.ManagementPort.IP.String() + protoPrefix = "ip6" + } + + var nFlows int + for _, flow := range flows { + if strings.Contains(flow, fmt.Sprintf("priority=200, table=2, %s, %s_src=%s, actions=drop", + protoPrefix, protoPrefix, mgmtMasqIP)) { + nFlows++ + } + } + + Expect(nFlows).To(Equal(expectedNFlows)) +} + +func CheckAdvertisedUDNSvcIsolationOVSFlows(flows []string, netConfig *BridgeUDNConfiguration, netName string, svcCIDR *net.IPNet, expectedNFlows int) { + By(fmt.Sprintf("Checking advertised UDN %s service isolation flows for %s; expected %d flows", + netName, svcCIDR.String(), expectedNFlows)) + + var matchingIPFamilySubnet *net.IPNet + var protoPrefix string + var udnAdvertisedSubnets []*net.IPNet + var err error + for _, clusterEntry := range netConfig.Subnets { + udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) + } + if net2.IsIPv4CIDR(svcCIDR) { + matchingIPFamilySubnet, err = util.MatchFirstIPNetFamily(false, udnAdvertisedSubnets) + Expect(err).ToNot(HaveOccurred()) + protoPrefix = "ip" + } else { + matchingIPFamilySubnet, err = util.MatchFirstIPNetFamily(false, udnAdvertisedSubnets) + Expect(err).ToNot(HaveOccurred()) + protoPrefix = "ip6" + } + + var nFlows int + for _, flow := range flows { + if strings.Contains(flow, 
fmt.Sprintf("priority=200, table=2, %s, %s_src=%s, actions=drop", + protoPrefix, protoPrefix, matchingIPFamilySubnet)) { + nFlows++ + } + if strings.Contains(flow, fmt.Sprintf("priority=550, in_port=LOCAL, %s, %s_src=%s, %s_dst=%s, actions=ct(commit,zone=64001,table=2)", + protoPrefix, protoPrefix, matchingIPFamilySubnet, protoPrefix, svcCIDR)) { + nFlows++ + } + } + + Expect(nFlows).To(Equal(expectedNFlows)) +} + +func CheckDefaultSvcIsolationOVSFlows(flows []string, defaultConfig *BridgeUDNConfiguration, ofPortHost, bridgeMAC string, svcCIDR *net.IPNet) { + By(fmt.Sprintf("Checking default service isolation flows for %s", svcCIDR.String())) + + var masqIP string + var masqSubnet string + var protoPrefix string + if net2.IsIPv4CIDR(svcCIDR) { + protoPrefix = "ip" + masqIP = config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String() + masqSubnet = config.Gateway.V4MasqueradeSubnet + } else { + protoPrefix = "ip6" + masqIP = config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String() + masqSubnet = config.Gateway.V6MasqueradeSubnet + } + + var nTable0DefaultFlows int + var nTable0UDNMasqFlows int + var nTable2Flows int + for _, flow := range flows { + if strings.Contains(flow, fmt.Sprintf("priority=500, in_port=%s, %s, %s_dst=%s, actions=ct(commit,zone=%d,nat(src=%s),table=2)", + ofPortHost, protoPrefix, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone, + masqIP)) { + nTable0DefaultFlows++ + } else if strings.Contains(flow, fmt.Sprintf("priority=550, in_port=%s, %s, %s_src=%s, %s_dst=%s, actions=ct(commit,zone=%d,table=2)", + ofPortHost, protoPrefix, protoPrefix, masqSubnet, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone)) { + nTable0UDNMasqFlows++ + } else if strings.Contains(flow, fmt.Sprintf("priority=100, table=2, actions=set_field:%s->eth_dst,output:%s", + bridgeMAC, defaultConfig.OfPortPatch)) { + nTable2Flows++ + } + } + + Expect(nTable0DefaultFlows).To(Equal(1)) + Expect(nTable0UDNMasqFlows).To(Equal(1)) + Expect(nTable2Flows).To(Equal(1)) 
+} diff --git a/go-controller/pkg/node/bridgeconfig/bridgeflows.go b/go-controller/pkg/node/bridgeconfig/bridgeflows.go new file mode 100644 index 0000000000..5a3467ae21 --- /dev/null +++ b/go-controller/pkg/node/bridgeconfig/bridgeflows.go @@ -0,0 +1,946 @@ +package bridgeconfig + +import ( + "fmt" + "net" + + "k8s.io/klog/v2" + utilnet "k8s.io/utils/net" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + nodetypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types" + nodeutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/util" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" +) + +func FlowsForDefaultBridge(bridge *BridgeConfiguration, extraIPs []net.IP) ([]string, error) { + // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure + // that dl_src is included in match criteria! + + ofPortPhys := bridge.OfPortPhys + bridgeMacAddress := bridge.MacAddress.String() + ofPortHost := bridge.OfPortHost + bridgeIPs := bridge.Ips + + var dftFlows []string + // 14 bytes of overhead for ethernet header (does not include VLAN) + maxPktLength := getMaxFrameLength() + + strip_vlan := "" + mod_vlan_id := "" + match_vlan := "" + if config.Gateway.VLANID != 0 { + strip_vlan = "strip_vlan," + match_vlan = fmt.Sprintf("dl_vlan=%d,", config.Gateway.VLANID) + mod_vlan_id = fmt.Sprintf("mod_vlan_vid:%d,", config.Gateway.VLANID) + } + + if config.IPv4Mode { + // table0, Geneve packets coming from external. Skip conntrack and go directly to host + // if dest mac is the shared mac send directly to host. + if ofPortPhys != "" { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=205, in_port=%s, dl_dst=%s, udp, udp_dst=%d, "+ + "actions=output:%s", nodetypes.DefaultOpenFlowCookie, ofPortPhys, bridgeMacAddress, config.Default.EncapPort, + ofPortHost)) + // perform NORMAL action otherwise. 
+ dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp, udp_dst=%d, "+ + "actions=NORMAL", nodetypes.DefaultOpenFlowCookie, ofPortPhys, config.Default.EncapPort)) + + // table0, Geneve packets coming from LOCAL/Host OFPort. Skip conntrack and go directly to external + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp, udp_dst=%d, "+ + "actions=output:%s", nodetypes.DefaultOpenFlowCookie, ofPortHost, config.Default.EncapPort, ofPortPhys)) + } + physicalIP, err := util.MatchFirstIPNetFamily(false, bridgeIPs) + if err != nil { + return nil, fmt.Errorf("unable to determine IPv4 physical IP of host: %v", err) + } + for _, netConfig := range bridge.PatchedNetConfigs() { + // table 0, SVC Hairpin from OVN destined to local host, DNAT and go to table 4 + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s, ip_src=%s,"+ + "actions=ct(commit,zone=%d,nat(dst=%s),table=4)", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String(), physicalIP.IP, + config.Default.HostMasqConntrackZone, physicalIP.IP)) + } + + // table 0, hairpin from OVN destined to local host (but an additional node IP), send to table 4 + for _, ip := range extraIPs { + if ip.To4() == nil { + continue + } + // not needed for the physical IP + if ip.Equal(physicalIP.IP) { + continue + } + + // not needed for special masquerade IP + if ip.Equal(config.Gateway.MasqueradeIPs.V4HostMasqueradeIP) { + continue + } + + for _, netConfig := range bridge.PatchedNetConfigs() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s, ip_src=%s,"+ + "actions=ct(commit,zone=%d,table=4)", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, ip.String(), physicalIP.IP, + config.Default.HostMasqConntrackZone)) + } + } + + // table 0, Reply SVC traffic from Host -> OVN, unSNAT and goto table 5 + dftFlows = append(dftFlows, 
+ fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s,"+ + "actions=ct(zone=%d,nat,table=5)", + nodetypes.DefaultOpenFlowCookie, ofPortHost, config.Gateway.MasqueradeIPs.V4OVNMasqueradeIP.String(), config.Default.OVNMasqConntrackZone)) + } + if config.IPv6Mode { + if ofPortPhys != "" { + // table0, Geneve packets coming from external. Skip conntrack and go directly to host + // if dest mac is the shared mac send directly to host. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=205, in_port=%s, dl_dst=%s, udp6, udp_dst=%d, "+ + "actions=output:%s", nodetypes.DefaultOpenFlowCookie, ofPortPhys, bridgeMacAddress, config.Default.EncapPort, + ofPortHost)) + // perform NORMAL action otherwise. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp6, udp_dst=%d, "+ + "actions=NORMAL", nodetypes.DefaultOpenFlowCookie, ofPortPhys, config.Default.EncapPort)) + + // table0, Geneve packets coming from LOCAL. Skip conntrack and send to external + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp6, udp_dst=%d, "+ + "actions=output:%s", nodetypes.DefaultOpenFlowCookie, nodetypes.OvsLocalPort, config.Default.EncapPort, ofPortPhys)) + } + + physicalIP, err := util.MatchFirstIPNetFamily(true, bridgeIPs) + if err != nil { + return nil, fmt.Errorf("unable to determine IPv6 physical IP of host: %v", err) + } + // table 0, SVC Hairpin from OVN destined to local host, DNAT to host, send to table 4 + for _, netConfig := range bridge.PatchedNetConfigs() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s, ipv6_src=%s,"+ + "actions=ct(commit,zone=%d,nat(dst=%s),table=4)", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String(), physicalIP.IP, + config.Default.HostMasqConntrackZone, physicalIP.IP)) + } + + // table 0, hairpin from OVN destined to local host (but an additional node IP), send to 
table 4 + for _, ip := range extraIPs { + if ip.To4() != nil { + continue + } + // not needed for the physical IP + if ip.Equal(physicalIP.IP) { + continue + } + + // not needed for special masquerade IP + if ip.Equal(config.Gateway.MasqueradeIPs.V6HostMasqueradeIP) { + continue + } + + for _, netConfig := range bridge.PatchedNetConfigs() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s, ipv6_src=%s,"+ + "actions=ct(commit,zone=%d,table=4)", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, ip.String(), physicalIP.IP, + config.Default.HostMasqConntrackZone)) + } + } + + // table 0, Reply SVC traffic from Host -> OVN, unSNAT and goto table 5 + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s,"+ + "actions=ct(zone=%d,nat,table=5)", + nodetypes.DefaultOpenFlowCookie, ofPortHost, config.Gateway.MasqueradeIPs.V6OVNMasqueradeIP.String(), config.Default.OVNMasqConntrackZone)) + } + + var protoPrefix, masqIP, masqSubnet string + + // table 0, packets coming from Host -> Service + for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { + if utilnet.IsIPv4CIDR(svcCIDR) { + protoPrefix = "ip" + masqIP = config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String() + masqSubnet = config.Gateway.V4MasqueradeSubnet + } else { + protoPrefix = "ipv6" + masqIP = config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String() + masqSubnet = config.Gateway.V6MasqueradeSubnet + } + + // table 0, Host (default network) -> OVN towards SVC, SNAT to special IP. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_dst=%s, "+ + "actions=ct(commit,zone=%d,nat(src=%s),table=2)", + nodetypes.DefaultOpenFlowCookie, ofPortHost, protoPrefix, protoPrefix, + svcCIDR, config.Default.HostMasqConntrackZone, masqIP)) + + if util.IsNetworkSegmentationSupportEnabled() { + // table 0, Host (UDNs) -> OVN towards SVC, SNAT to special IP. 
+ // For packets originating from UDN, commit without NATing, those + // have already been SNATed to the masq IP of the UDN. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=550, in_port=%s, %s, %s_src=%s, %s_dst=%s, "+ + "actions=ct(commit,zone=%d,table=2)", + nodetypes.DefaultOpenFlowCookie, ofPortHost, protoPrefix, protoPrefix, + masqSubnet, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone)) + if util.IsRouteAdvertisementsEnabled() { + // If the UDN is advertised then instead of matching on the masqSubnet + // we match on the UDNPodSubnet itself and we also don't SNAT to 169.254.0.2 + // sample flow: cookie=0xdeff105, duration=1472.742s, table=0, n_packets=9, n_bytes=666, priority=550 + // ip,in_port=LOCAL,nw_src=103.103.0.0/16,nw_dst=10.96.0.0/16 actions=ct(commit,table=2,zone=64001) + for _, netConfig := range bridge.PatchedNetConfigs() { + if netConfig.IsDefaultNetwork() { + continue + } + if netConfig.Advertised.Load() { + var udnAdvertisedSubnets []*net.IPNet + for _, clusterEntry := range netConfig.Subnets { + udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) + } + // Filter subnets based on the clusterIP service family + // NOTE: We don't support more than 1 subnet CIDR of same family type; we only pick the first one + matchingIPFamilySubnet, err := util.MatchFirstIPNetFamily(utilnet.IsIPv6CIDR(svcCIDR), udnAdvertisedSubnets) + if err != nil { + klog.Infof("Unable to determine UDN subnet for the provided family isIPV6: %t, %v", utilnet.IsIPv6CIDR(svcCIDR), err) + continue + } + + // Use the filtered subnet for the flow compute instead of the masqueradeIP + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=550, in_port=%s, %s, %s_src=%s, %s_dst=%s, "+ + "actions=ct(commit,zone=%d,table=2)", + nodetypes.DefaultOpenFlowCookie, ofPortHost, protoPrefix, protoPrefix, + matchingIPFamilySubnet.String(), protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone)) + } + } + } + } + + masqDst := masqIP + 
if util.IsNetworkSegmentationSupportEnabled() { + // In UDN match on the whole masquerade subnet to handle replies from UDN enabled services + masqDst = masqSubnet + } + for _, netConfig := range bridge.PatchedNetConfigs() { + // table 0, Reply hairpin traffic to host, coming from OVN, unSNAT + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_src=%s, %s_dst=%s,"+ + "actions=ct(zone=%d,nat,table=3)", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, protoPrefix, protoPrefix, svcCIDR, + protoPrefix, masqDst, config.Default.HostMasqConntrackZone)) + // table 0, Reply traffic coming from OVN to outside, drop it if the DNAT wasn't done either + // at the GR load balancer or switch load balancer. It means the correct port wasn't provided. + // nodeCIDR->serviceCIDR traffic flow is internal and it shouldn't be carried to outside the cluster + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=115, in_port=%s, %s, %s_dst=%s,"+ + "actions=drop", nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, protoPrefix, protoPrefix, svcCIDR)) + } + } + + // table 0, add IP fragment reassembly flows, only needed in SGW mode with + // physical interface attached to bridge + if config.Gateway.Mode == config.GatewayModeShared && ofPortPhys != "" { + reassemblyFlows := generateIPFragmentReassemblyFlow(ofPortPhys) + dftFlows = append(dftFlows, reassemblyFlows...) 
+ } + if ofPortPhys != "" { + for _, netConfig := range bridge.PatchedNetConfigs() { + var actions string + if config.Gateway.Mode != config.GatewayModeLocal || config.Gateway.DisablePacketMTUCheck { + actions = fmt.Sprintf("output:%s", netConfig.OfPortPatch) + } else { + // packets larger than known acceptable MTU need to go to kernel for + // potential fragmentation + // introduced specifically for replies to egress traffic not routed + // through the host + actions = fmt.Sprintf("check_pkt_larger(%d)->reg0[0],resubmit(,11)", maxPktLength) + } + + if config.IPv4Mode { + // table 1, established and related connections in zone 64000 with ct_mark CtMarkOVN go to OVN + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, ip, ct_state=+trk+est, ct_mark=%s, "+ + "actions=%s", nodetypes.DefaultOpenFlowCookie, netConfig.MasqCTMark, actions)) + + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, ip, ct_state=+trk+rel, ct_mark=%s, "+ + "actions=%s", nodetypes.DefaultOpenFlowCookie, netConfig.MasqCTMark, actions)) + + } + + if config.IPv6Mode { + // table 1, established and related connections in zone 64000 with ct_mark CtMarkOVN go to OVN + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, ipv6, ct_state=+trk+est, ct_mark=%s, "+ + "actions=%s", nodetypes.DefaultOpenFlowCookie, netConfig.MasqCTMark, actions)) + + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, ipv6, ct_state=+trk+rel, ct_mark=%s, "+ + "actions=%s", nodetypes.DefaultOpenFlowCookie, netConfig.MasqCTMark, actions)) + } + } + if config.IPv4Mode { + // table 1, established and related connections in zone 64000 with ct_mark CtMarkHost go to host + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip, ct_state=+trk+est, ct_mark=%s, "+ + "actions=%soutput:%s", + nodetypes.DefaultOpenFlowCookie, match_vlan, nodetypes.CtMarkHost, strip_vlan, ofPortHost)) + + dftFlows = 
append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip, ct_state=+trk+rel, ct_mark=%s, "+ + "actions=%soutput:%s", + nodetypes.DefaultOpenFlowCookie, match_vlan, nodetypes.CtMarkHost, strip_vlan, ofPortHost)) + + } + if config.IPv6Mode { + // table 1, established and related connections in zone 64000 with ct_mark CtMarkHost go to host + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip6, ct_state=+trk+est, ct_mark=%s, "+ + "actions=%soutput:%s", + nodetypes.DefaultOpenFlowCookie, match_vlan, nodetypes.CtMarkHost, strip_vlan, ofPortHost)) + + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip6, ct_state=+trk+rel, ct_mark=%s, "+ + "actions=%soutput:%s", + nodetypes.DefaultOpenFlowCookie, match_vlan, nodetypes.CtMarkHost, strip_vlan, ofPortHost)) + + } + + // table 1, we check to see if this dest mac is the shared mac, if so send to host + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=10, table=1, %s dl_dst=%s, actions=%soutput:%s", + nodetypes.DefaultOpenFlowCookie, match_vlan, bridgeMacAddress, strip_vlan, ofPortHost)) + } + + defaultNetConfig := bridge.NetConfig[types.DefaultNetworkName] + + // table 2, dispatch from Host -> OVN + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=2, "+ + "actions=set_field:%s->eth_dst,%soutput:%s", nodetypes.DefaultOpenFlowCookie, + bridgeMacAddress, mod_vlan_id, defaultNetConfig.OfPortPatch)) + + // table 2, priority 200, dispatch from UDN -> Host -> OVN. These packets have + // already been SNATed to the UDN's masq IP or have been marked with the UDN's packet mark. 
+ if config.IPv4Mode { + for _, netConfig := range bridge.PatchedNetConfigs() { + if netConfig.IsDefaultNetwork() { + continue + } + srcIPOrSubnet := netConfig.V4MasqIPs.ManagementPort.IP.String() + if util.IsRouteAdvertisementsEnabled() && netConfig.Advertised.Load() { + var udnAdvertisedSubnets []*net.IPNet + for _, clusterEntry := range netConfig.Subnets { + udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) + } + // Filter subnets based on the clusterIP service family + // NOTE: We don't support more than 1 subnet CIDR of same family type; we only pick the first one + matchingIPFamilySubnet, err := util.MatchFirstIPNetFamily(false, udnAdvertisedSubnets) + if err != nil { + klog.Infof("Unable to determine IPV4 UDN subnet for the provided family isIPV6: %v", err) + continue + } + + // Use the filtered subnets for the flow compute instead of the masqueradeIP + srcIPOrSubnet = matchingIPFamilySubnet.String() + } + // Drop traffic coming from the masquerade IP or the UDN subnet(for advertised UDNs) to ensure that + // isolation between networks is enforced. This handles the case where a pod on the UDN subnet is sending traffic to + // a service in another UDN. 
+ dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=200, table=2, ip, ip_src=%s, "+ + "actions=drop", + nodetypes.DefaultOpenFlowCookie, srcIPOrSubnet)) + + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=250, table=2, ip, pkt_mark=%s, "+ + "actions=set_field:%s->eth_dst,output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.PktMark, + bridgeMacAddress, netConfig.OfPortPatch)) + } + } + + if config.IPv6Mode { + for _, netConfig := range bridge.PatchedNetConfigs() { + if netConfig.IsDefaultNetwork() { + continue + } + srcIPOrSubnet := netConfig.V6MasqIPs.ManagementPort.IP.String() + if util.IsRouteAdvertisementsEnabled() && netConfig.Advertised.Load() { + var udnAdvertisedSubnets []*net.IPNet + for _, clusterEntry := range netConfig.Subnets { + udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) + } + // Filter subnets based on the clusterIP service family + // NOTE: We don't support more than 1 subnet CIDR of same family type; we only pick the first one + matchingIPFamilySubnet, err := util.MatchFirstIPNetFamily(true, udnAdvertisedSubnets) + if err != nil { + klog.Infof("Unable to determine IPV6 UDN subnet for the provided family isIPV6: %v", err) + continue + } + + // Use the filtered subnets for the flow compute instead of the masqueradeIP + srcIPOrSubnet = matchingIPFamilySubnet.String() + } + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=200, table=2, ip6, ipv6_src=%s, "+ + "actions=drop", + nodetypes.DefaultOpenFlowCookie, srcIPOrSubnet)) + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=250, table=2, ip6, pkt_mark=%s, "+ + "actions=set_field:%s->eth_dst,output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.PktMark, + bridgeMacAddress, netConfig.OfPortPatch)) + } + } + + // table 3, dispatch from OVN -> Host + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, table=3, %s "+ + "actions=move:NXM_OF_ETH_DST[]->NXM_OF_ETH_SRC[],set_field:%s->eth_dst,%soutput:%s", + 
nodetypes.DefaultOpenFlowCookie, match_vlan, bridgeMacAddress, strip_vlan, ofPortHost))
+
+	// table 4, hairpinned pkts that need to go from OVN -> Host
+	// We need to SNAT and masquerade OVN GR IP, send to table 3 for dispatch to Host
+	if config.IPv4Mode {
+		dftFlows = append(dftFlows,
+			fmt.Sprintf("cookie=%s, table=4,ip,"+
+				"actions=ct(commit,zone=%d,nat(src=%s),table=3)",
+				nodetypes.DefaultOpenFlowCookie, config.Default.OVNMasqConntrackZone, config.Gateway.MasqueradeIPs.V4OVNMasqueradeIP.String()))
+	}
+	if config.IPv6Mode {
+		dftFlows = append(dftFlows,
+			fmt.Sprintf("cookie=%s, table=4,ipv6, "+
+				"actions=ct(commit,zone=%d,nat(src=%s),table=3)",
+				nodetypes.DefaultOpenFlowCookie, config.Default.OVNMasqConntrackZone, config.Gateway.MasqueradeIPs.V6OVNMasqueradeIP.String()))
+	}
+	// table 5, Host Reply traffic to hairpinned svc, need to unDNAT, send to table 2
+	if config.IPv4Mode {
+		dftFlows = append(dftFlows,
+			fmt.Sprintf("cookie=%s, table=5, ip, "+
+				"actions=ct(commit,zone=%d,nat,table=2)",
+				nodetypes.DefaultOpenFlowCookie, config.Default.HostMasqConntrackZone))
+	}
+	if config.IPv6Mode {
+		dftFlows = append(dftFlows,
+			fmt.Sprintf("cookie=%s, table=5, ipv6, "+
+				"actions=ct(commit,zone=%d,nat,table=2)",
+				nodetypes.DefaultOpenFlowCookie, config.Default.HostMasqConntrackZone))
+	}
+	return dftFlows, nil
+}
+
+// getMaxFrameLength returns the maximum frame size (ignoring VLAN header) that a gateway can handle
+func getMaxFrameLength() int {
+	return config.Default.MTU + 14
+}
+
+// generateIPFragmentReassemblyFlow adds flows in table 0 that send packets to a
+// specific conntrack zone for reassembly with the same priority as node port
+// flows that match on L4 fields. After reassembly packets are reinjected to
+// table 0 again. This requires a conntrack implementation that reassembles
+// fragments. This requirement is met for the kernel datapath with the netfilter
+// module loaded. This requirement is not met for the userspace datapath.
+func generateIPFragmentReassemblyFlow(ofPortPhys string) []string { + flows := make([]string, 0, 2) + if config.IPv4Mode { + flows = append(flows, + fmt.Sprintf("cookie=%s, priority=110, table=0, in_port=%s, ip, nw_frag=yes, actions=ct(table=0,zone=%d)", + nodetypes.DefaultOpenFlowCookie, + ofPortPhys, + config.Default.ReassemblyConntrackZone, + ), + ) + } + if config.IPv6Mode { + flows = append(flows, + fmt.Sprintf("cookie=%s, priority=110, table=0, in_port=%s, ipv6, nw_frag=yes, actions=ct(table=0,zone=%d)", + nodetypes.DefaultOpenFlowCookie, + ofPortPhys, + config.Default.ReassemblyConntrackZone, + ), + ) + } + + return flows +} + +func CommonFlows(hostSubnets []*net.IPNet, bridge *BridgeConfiguration) ([]string, error) { + // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure + // that dl_src is included in match criteria! + ofPortPhys := bridge.OfPortPhys + bridgeMacAddress := bridge.MacAddress.String() + ofPortHost := bridge.OfPortHost + bridgeIPs := bridge.Ips + + var dftFlows []string + + strip_vlan := "" + match_vlan := "" + mod_vlan_id := "" + if config.Gateway.VLANID != 0 { + strip_vlan = "strip_vlan," + match_vlan = fmt.Sprintf("dl_vlan=%d,", config.Gateway.VLANID) + mod_vlan_id = fmt.Sprintf("mod_vlan_vid:%d,", config.Gateway.VLANID) + } + + if ofPortPhys != "" { + // table 0, we check to see if this dest mac is the shared mac, if so flood to all ports + actions := "" + for _, netConfig := range bridge.PatchedNetConfigs() { + actions += "output:" + netConfig.OfPortPatch + "," + } + + actions += strip_vlan + "output:" + ofPortHost + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=10, table=0, %s dl_dst=%s, actions=%s", + nodetypes.DefaultOpenFlowCookie, match_vlan, bridgeMacAddress, actions)) + } + + // table 0, check packets coming from OVN have the correct mac address. 
Low priority flows that are a catch all + // for non-IP packets that would normally be forwarded with NORMAL action (table 0, priority 0 flow). + for _, netConfig := range bridge.PatchedNetConfigs() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=10, table=0, in_port=%s, dl_src=%s, actions=output:NORMAL", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress)) + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=9, table=0, in_port=%s, actions=drop", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch)) + } + + if config.IPv4Mode { + physicalIP, err := util.MatchFirstIPNetFamily(false, bridgeIPs) + if err != nil { + return nil, fmt.Errorf("unable to determine IPv4 physical IP of host: %v", err) + } + if ofPortPhys != "" { + for _, netConfig := range bridge.PatchedNetConfigs() { + // table0, packets coming from egressIP pods that have mark 1008 on them + // will be SNAT-ed a final time into nodeIP to maintain consistency in traffic even if the GR + // SNATs these into egressIP prior to reaching external bridge. + // egressService pods will also undergo this SNAT to nodeIP since these features are tied + // together at the OVN policy level on the distributed router. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ip, pkt_mark=%s "+ + "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)),output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, nodetypes.OvnKubeNodeSNATMark, + config.Default.ConntrackZone, physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) + + // table 0, packets coming from egressIP pods only from user defined networks. If an egressIP is assigned to + // this node, then all networks get a flow even if no pods on that network were selected for by this egressIP. 
+ if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && + config.Gateway.Mode != config.GatewayModeDisabled && bridge.EipMarkIPs != nil { + if netConfig.MasqCTMark != nodetypes.CtMarkOVN { + for mark, eip := range bridge.EipMarkIPs.GetIPv4() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ip, pkt_mark=%d, "+ + "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, mark, + config.Default.ConntrackZone, eip, netConfig.MasqCTMark, ofPortPhys)) + } + } + } + + // table 0, packets coming from pods headed externally. Commit connections with ct_mark CtMarkOVN + // so that reverse direction goes back to the pods. + if netConfig.IsDefaultNetwork() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ip, "+ + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, config.Default.ConntrackZone, + netConfig.MasqCTMark, ofPortPhys)) + + // Allow (a) OVN->host traffic on the same node + // (b) host->host traffic on the same node + if config.Gateway.Mode == config.GatewayModeShared || config.Gateway.Mode == config.GatewayModeLocal { + dftFlows = append(dftFlows, hostNetworkNormalActionFlows(netConfig, bridgeMacAddress, hostSubnets, false)...) 
+ } + } else { + // for UDN we additionally SNAT the packet from masquerade IP -> node IP + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ip, ip_src=%s, "+ + "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, netConfig.V4MasqIPs.GatewayRouter.IP, config.Default.ConntrackZone, + physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) + } + } + + // table 0, packets coming from host Commit connections with ct_mark CtMarkHost + // so that reverse direction goes back to the host. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, ip, "+ + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), %soutput:%s", + nodetypes.DefaultOpenFlowCookie, ofPortHost, config.Default.ConntrackZone, nodetypes.CtMarkHost, mod_vlan_id, ofPortPhys)) + } + if config.Gateway.Mode == config.GatewayModeLocal { + for _, netConfig := range bridge.PatchedNetConfigs() { + // table 0, any packet coming from OVN send to host in LGW mode, host will take care of sending it outside if needed. + // exceptions are traffic for egressIP and egressGW features and ICMP related traffic which will hit the priority 100 flow instead of this. 
+ dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=175, in_port=%s, tcp, nw_src=%s, "+ + "actions=ct(table=4,zone=%d)", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=175, in_port=%s, udp, nw_src=%s, "+ + "actions=ct(table=4,zone=%d)", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=175, in_port=%s, sctp, nw_src=%s, "+ + "actions=ct(table=4,zone=%d)", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + // We send BFD traffic coming from OVN to outside directly using a higher priority flow + if ofPortPhys != "" { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=650, table=0, in_port=%s, dl_src=%s, udp, tp_dst=3784, actions=output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, ofPortPhys)) + } + } + } + + if ofPortPhys != "" { + // table 0, packets coming from external or other localnet ports. Send it through conntrack and + // resubmit to table 1 to know the state and mark of the connection. + // Note, there are higher priority rules that take care of traffic coming from LOCAL and OVN ports. 
+ dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=50, ip, actions=ct(zone=%d, nat, table=1)", + nodetypes.DefaultOpenFlowCookie, config.Default.ConntrackZone)) + } + } + + if config.IPv6Mode { + physicalIP, err := util.MatchFirstIPNetFamily(true, bridgeIPs) + if err != nil { + return nil, fmt.Errorf("unable to determine IPv6 physical IP of host: %v", err) + } + if ofPortPhys != "" { + for _, netConfig := range bridge.PatchedNetConfigs() { + // table0, packets coming from egressIP pods that have mark 1008 on them + // will be SNAT-ed a final time into nodeIP to maintain consistency in traffic even if the GR + // SNATs these into egressIP prior to reaching external bridge. + // egressService pods will also undergo this SNAT to nodeIP since these features are tied + // together at the OVN policy level on the distributed router. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ipv6, pkt_mark=%s "+ + "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)),output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, nodetypes.OvnKubeNodeSNATMark, + config.Default.ConntrackZone, physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) + + // table 0, packets coming from egressIP pods only from user defined networks. If an egressIP is assigned to + // this node, then all networks get a flow even if no pods on that network were selected for by this egressIP. 
+ if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && + config.Gateway.Mode != config.GatewayModeDisabled && bridge.EipMarkIPs != nil { + if netConfig.MasqCTMark != nodetypes.CtMarkOVN { + for mark, eip := range bridge.EipMarkIPs.GetIPv6() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ipv6, pkt_mark=%d, "+ + "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, mark, + config.Default.ConntrackZone, eip, netConfig.MasqCTMark, ofPortPhys)) + } + } + } + + // table 0, packets coming from pods headed externally. Commit connections with ct_mark CtMarkOVN + // so that reverse direction goes back to the pods. + if netConfig.IsDefaultNetwork() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ipv6, "+ + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, config.Default.ConntrackZone, netConfig.MasqCTMark, ofPortPhys)) + + // Allow (a) OVN->host traffic on the same node + // (b) host->host traffic on the same node + if config.Gateway.Mode == config.GatewayModeShared || config.Gateway.Mode == config.GatewayModeLocal { + dftFlows = append(dftFlows, hostNetworkNormalActionFlows(netConfig, bridgeMacAddress, hostSubnets, true)...) 
+ } + } else { + // for UDN we additionally SNAT the packet from masquerade IP -> node IP + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ipv6, ipv6_src=%s, "+ + "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, netConfig.V6MasqIPs.GatewayRouter.IP, config.Default.ConntrackZone, + physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) + } + } + + // table 0, packets coming from host. Commit connections with ct_mark CtMarkHost + // so that reverse direction goes back to the host. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, ipv6, "+ + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), %soutput:%s", + nodetypes.DefaultOpenFlowCookie, ofPortHost, config.Default.ConntrackZone, nodetypes.CtMarkHost, mod_vlan_id, ofPortPhys)) + + } + if config.Gateway.Mode == config.GatewayModeLocal { + for _, netConfig := range bridge.PatchedNetConfigs() { + // table 0, any packet coming from OVN send to host in LGW mode, host will take care of sending it outside if needed. + // exceptions are traffic for egressIP and egressGW features and ICMP related traffic which will hit the priority 100 flow instead of this. 
+ dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=175, in_port=%s, tcp6, ipv6_src=%s, "+ + "actions=ct(table=4,zone=%d)", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=175, in_port=%s, udp6, ipv6_src=%s, "+ + "actions=ct(table=4,zone=%d)", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=175, in_port=%s, sctp6, ipv6_src=%s, "+ + "actions=ct(table=4,zone=%d)", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + if ofPortPhys != "" { + // We send BFD traffic coming from OVN to outside directly using a higher priority flow + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=650, table=0, in_port=%s, dl_src=%s, udp6, tp_dst=3784, actions=output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, ofPortPhys)) + } + } + } + if ofPortPhys != "" { + // table 0, packets coming from external. Send it through conntrack and + // resubmit to table 1 to know the state and mark of the connection. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=50, in_port=%s, ipv6, "+ + "actions=ct(zone=%d, nat, table=1)", nodetypes.DefaultOpenFlowCookie, ofPortPhys, config.Default.ConntrackZone)) + } + } + // Egress IP is often configured on a node different from the one hosting the affected pod. + // Due to the fact that ovn-controllers on different nodes apply the changes independently, + // there is a chance that the pod traffic will reach the egress node before it configures the SNAT flows. 
+ // Drop pod traffic that is not SNATed, excluding local pods(required for ICNIv2) + defaultNetConfig := bridge.NetConfig[types.DefaultNetworkName] + if config.OVNKubernetesFeature.EnableEgressIP { + for _, clusterEntry := range config.Default.ClusterSubnets { + cidr := clusterEntry.CIDR + ipv := getIPv(cidr) + // table 0, drop packets coming from pods headed externally that were not SNATed. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=104, in_port=%s, %s, %s_src=%s, actions=drop", + nodetypes.DefaultOpenFlowCookie, defaultNetConfig.OfPortPatch, ipv, ipv, cidr)) + } + for _, subnet := range defaultNetConfig.NodeSubnets { + ipv := getIPv(subnet) + if ofPortPhys != "" { + // table 0, commit connections from local pods. + // ICNIv2 requires that local pod traffic can leave the node without SNAT. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=109, in_port=%s, dl_src=%s, %s, %s_src=%s"+ + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", + nodetypes.DefaultOpenFlowCookie, defaultNetConfig.OfPortPatch, bridgeMacAddress, ipv, ipv, subnet, + config.Default.ConntrackZone, nodetypes.CtMarkOVN, ofPortPhys)) + } + } + } + + if ofPortPhys != "" { + for _, netConfig := range bridge.PatchedNetConfigs() { + isNetworkAdvertised := netConfig.Advertised.Load() + // disableSNATMultipleGWs only applies to default network + disableSNATMultipleGWs := netConfig.IsDefaultNetwork() && config.Gateway.DisableSNATMultipleGWs + if !disableSNATMultipleGWs && !isNetworkAdvertised { + continue + } + output := netConfig.OfPortPatch + if isNetworkAdvertised && config.Gateway.Mode == config.GatewayModeLocal { + // except if advertised through BGP, go to kernel + // TODO: MEG enabled pods should still go through the patch port + // but holding this until + // https://issues.redhat.com/browse/FDP-646 is fixed, for now we + // are assuming MEG & BGP are not used together + output = nodetypes.OvsLocalPort + } + for _, clusterEntry := range 
netConfig.Subnets { + cidr := clusterEntry.CIDR + ipv := getIPv(cidr) + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=15, table=1, %s, %s_dst=%s, "+ + "actions=output:%s", + nodetypes.DefaultOpenFlowCookie, ipv, ipv, cidr, output)) + } + if output == netConfig.OfPortPatch { + // except node management traffic + for _, subnet := range netConfig.NodeSubnets { + mgmtIP := util.GetNodeManagementIfAddr(subnet) + ipv := getIPv(mgmtIP) + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=16, table=1, %s, %s_dst=%s, "+ + "actions=output:%s", + nodetypes.DefaultOpenFlowCookie, ipv, ipv, mgmtIP.IP, nodetypes.OvsLocalPort), + ) + } + } + } + + // table 1, we check to see if this dest mac is the shared mac, if so send to host + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=10, table=1, %s dl_dst=%s, actions=%soutput:%s", + nodetypes.DefaultOpenFlowCookie, match_vlan, bridgeMacAddress, strip_vlan, ofPortHost)) + + if config.IPv6Mode { + // REMOVEME(trozet) when https://bugzilla.kernel.org/show_bug.cgi?id=11797 is resolved + // must flood icmpv6 Route Advertisement and Neighbor Advertisement traffic as it fails to create a CT entry + for _, icmpType := range []int{types.RouteAdvertisementICMPType, types.NeighborAdvertisementICMPType} { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=14, table=1,icmp6,icmpv6_type=%d actions=FLOOD", + nodetypes.DefaultOpenFlowCookie, icmpType)) + } + if ofPortPhys != "" { + // We send BFD traffic both on the host and in ovn + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=13, table=1, in_port=%s, udp6, tp_dst=3784, actions=output:%s,output:%s", + nodetypes.DefaultOpenFlowCookie, ofPortPhys, defaultNetConfig.OfPortPatch, ofPortHost)) + } + } + + if config.IPv4Mode { + if ofPortPhys != "" { + // We send BFD traffic both on the host and in ovn + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=13, table=1, in_port=%s, udp, tp_dst=3784, 
actions=output:%s,output:%s", + nodetypes.DefaultOpenFlowCookie, ofPortPhys, defaultNetConfig.OfPortPatch, ofPortHost)) + } + } + + // packets larger than known acceptable MTU need to go to kernel for + // potential fragmentation + // introduced specifically for replies to egress traffic not routed + // through the host + if config.Gateway.Mode == config.GatewayModeLocal && !config.Gateway.DisablePacketMTUCheck { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=10, table=11, reg0=0x1, "+ + "actions=output:%s", nodetypes.DefaultOpenFlowCookie, ofPortHost)) + + // Send UDN destined traffic to right patch port + for _, netConfig := range bridge.PatchedNetConfigs() { + if netConfig.MasqCTMark != nodetypes.CtMarkOVN { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=5, table=11, ct_mark=%s, "+ + "actions=output:%s", nodetypes.DefaultOpenFlowCookie, netConfig.MasqCTMark, netConfig.OfPortPatch)) + } + } + + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=1, table=11, "+ + "actions=output:%s", nodetypes.DefaultOpenFlowCookie, defaultNetConfig.OfPortPatch)) + } + + // table 1, all other connections do normal processing + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=0, table=1, actions=output:NORMAL", nodetypes.DefaultOpenFlowCookie)) + } + + return dftFlows, nil +} + +func PmtudDropFlows(bridge *BridgeConfiguration, ipAddrs []string) []string { + var flows []string + if config.Gateway.Mode != config.GatewayModeShared { + return nil + } + for _, addr := range ipAddrs { + for _, netConfig := range bridge.PatchedNetConfigs() { + flows = append(flows, + nodeutil.GenerateICMPFragmentationFlow(addr, nodetypes.OutputPortDrop, netConfig.OfPortPatch, nodetypes.PmtudOpenFlowCookie, 700)) + } + } + + return flows +} + +func getIPv(ipnet *net.IPNet) string { + prefix := "ip" + if utilnet.IsIPv6CIDR(ipnet) { + prefix = "ipv6" + } + return prefix +} + +// hostNetworkNormalActionFlows returns the flows that allow IP{v4,v6} 
traffic: +// a. from pods in the OVN network to pods in a localnet network, on the same node +// b. from pods on the host to pods in a localnet network, on the same node +// when the localnet is mapped to breth0. +// The expected srcMAC is the MAC address of breth0 and the expected hostSubnets is the host subnets found on the node +// primary interface. +func hostNetworkNormalActionFlows(netConfig *BridgeUDNConfiguration, srcMAC string, hostSubnets []*net.IPNet, isV6 bool) []string { + var flows []string + var ipFamily, ipFamilyDest string + + if isV6 { + ipFamily = "ipv6" + ipFamilyDest = "ipv6_dst" + } else { + ipFamily = "ip" + ipFamilyDest = "nw_dst" + } + + formatFlow := func(inPort, destIP, ctMark string) string { + // Matching IP traffic will be handled by the bridge instead of being output directly + // to the NIC by the existing flow at prio=100. + flowTemplate := "cookie=%s, priority=102, in_port=%s, dl_src=%s, %s, %s=%s, " + + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:NORMAL" + return fmt.Sprintf(flowTemplate, + nodetypes.DefaultOpenFlowCookie, + inPort, + srcMAC, + ipFamily, + ipFamilyDest, + destIP, + config.Default.ConntrackZone, + ctMark) + } + + // Traffic path (a): OVN->localnet for shared gw mode + if config.Gateway.Mode == config.GatewayModeShared { + for _, hostSubnet := range hostSubnets { + if utilnet.IsIPv6(hostSubnet.IP) != isV6 { + continue + } + flows = append(flows, formatFlow(netConfig.OfPortPatch, hostSubnet.String(), netConfig.MasqCTMark)) + } + } + + // Traffic path (a): OVN->localnet for local gw mode + // Traffic path (b): host->localnet for both gw modes + for _, hostSubnet := range hostSubnets { + if utilnet.IsIPv6(hostSubnet.IP) != isV6 { + continue + } + flows = append(flows, formatFlow(nodetypes.OvsLocalPort, hostSubnet.String(), nodetypes.CtMarkHost)) + } + + if isV6 { + // IPv6 neighbor discovery uses ICMPv6 messages sent to a special destination (ff02::1:ff00:0/104) + // that is unrelated to the host 
subnets matched in the prio=102 flow above. + // Allow neighbor discovery by matching against ICMP type and ingress port. + formatICMPFlow := func(inPort, ctMark string, icmpType int) string { + icmpFlowTemplate := "cookie=%s, priority=102, in_port=%s, dl_src=%s, icmp6, icmpv6_type=%d, " + + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:NORMAL" + return fmt.Sprintf(icmpFlowTemplate, + nodetypes.DefaultOpenFlowCookie, + inPort, + srcMAC, + icmpType, + config.Default.ConntrackZone, + ctMark) + } + + for _, icmpType := range []int{types.NeighborSolicitationICMPType, types.NeighborAdvertisementICMPType} { + // Traffic path (a) for ICMP: OVN-> localnet for shared gw mode + if config.Gateway.Mode == config.GatewayModeShared { + flows = append(flows, + formatICMPFlow(netConfig.OfPortPatch, netConfig.MasqCTMark, icmpType)) + } + + // Traffic path (a) for ICMP: OVN->localnet for local gw mode + // Traffic path (b) for ICMP: host->localnet for both gw modes + flows = append(flows, formatICMPFlow(nodetypes.OvsLocalPort, nodetypes.CtMarkHost, icmpType)) + } + } + return flows +} diff --git a/go-controller/pkg/node/default_node_network_controller.go b/go-controller/pkg/node/default_node_network_controller.go index 51dc1571e1..a2bdf34e50 100644 --- a/go-controller/pkg/node/default_node_network_controller.go +++ b/go-controller/pkg/node/default_node_network_controller.go @@ -45,6 +45,7 @@ import ( nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/ovspinning" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" + nodetypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/controller/apbroute" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/healthcheck" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/retry" @@ -1320,7 +1321,7 @@ func (nc *DefaultNodeNetworkController) 
Start(ctx context.Context) error { if config.OVNKubernetesFeature.EnableEgressService { wf := nc.watchFactory.(*factory.WatchFactory) - c, err := egressservice.NewController(nc.stopChan, ovnKubeNodeSNATMark, nc.name, + c, err := egressservice.NewController(nc.stopChan, nodetypes.OvnKubeNodeSNATMark, nc.name, wf.EgressServiceInformer(), wf.ServiceInformer(), wf.EndpointSliceInformer()) if err != nil { return err diff --git a/go-controller/pkg/node/egress_service_test.go b/go-controller/pkg/node/egress_service_test.go index bb4e57f5ca..ca44ac311d 100644 --- a/go-controller/pkg/node/egress_service_test.go +++ b/go-controller/pkg/node/egress_service_test.go @@ -19,6 +19,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/controllers/egressservice" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" + nodetypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" util "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/mocks" @@ -299,7 +300,7 @@ var _ = Describe("Egress Service Operations", func() { c, err := egressservice.NewController( stopChan, - ovnKubeNodeSNATMark, + nodetypes.OvnKubeNodeSNATMark, "node", wf.EgressServiceInformer(), wf.ServiceInformer(), @@ -405,7 +406,7 @@ add element inet ovn-kubernetes egress-service-snat-v4 { 10.128.0.4 comment "nam c, err := egressservice.NewController( stopChan, - ovnKubeNodeSNATMark, + nodetypes.OvnKubeNodeSNATMark, "node", wf.EgressServiceInformer(), wf.ServiceInformer(), @@ -610,7 +611,7 @@ add element inet ovn-kubernetes egress-service-snat-v4 { 10.128.0.3 comment "nam c, err := egressservice.NewController( stopChan, - ovnKubeNodeSNATMark, + nodetypes.OvnKubeNodeSNATMark, "node", wf.EgressServiceInformer(), wf.ServiceInformer(), @@ -805,7 +806,7 @@ add element inet 
ovn-kubernetes egress-service-snat-v4 { 10.128.0.11 comment "na c, err := egressservice.NewController( stopChan, - ovnKubeNodeSNATMark, + nodetypes.OvnKubeNodeSNATMark, "node", wf.EgressServiceInformer(), wf.ServiceInformer(), @@ -964,7 +965,7 @@ add element inet ovn-kubernetes egress-service-snat-v4 { 10.128.0.11 comment "na c, err := egressservice.NewController( stopChan, - ovnKubeNodeSNATMark, + nodetypes.OvnKubeNodeSNATMark, "node", wf.EgressServiceInformer(), wf.ServiceInformer(), diff --git a/go-controller/pkg/node/gateway.go b/go-controller/pkg/node/gateway.go index 948da997d5..a476783537 100644 --- a/go-controller/pkg/node/gateway.go +++ b/go-controller/pkg/node/gateway.go @@ -460,11 +460,6 @@ func (g *gateway) GetGatewayIface() string { return g.openflowManager.defaultBridge.GetGatewayIface() } -// getMaxFrameLength returns the maximum frame size (ignoring VLAN header) that a gateway can handle -func getMaxFrameLength() int { - return config.Default.MTU + 14 -} - // SetDefaultGatewayBridgeMAC updates the mac address for the OFM used to render flows with func (g *gateway) SetDefaultGatewayBridgeMAC(macAddr net.HardwareAddr) { g.openflowManager.setDefaultBridgeMAC(macAddr) diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index fb902c589c..8dfe97a3f1 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -34,28 +34,17 @@ import ( nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" nodetypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types" + nodeutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" utilerrors "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/errors" ) const ( - // 
defaultOpenFlowCookie identifies default open flow rules added to the host OVS bridge. - // The hex number 0xdeff105, aka defflos, is meant to sound like default flows. - defaultOpenFlowCookie = "0xdeff105" // etpSvcOpenFlowCookie identifies constant open flow rules added to the host OVS // bridge to move packets between host and external for etp=local traffic. // The hex number 0xe745ecf105, represents etp(e74)-service(5ec)-flows which makes it easier for debugging. etpSvcOpenFlowCookie = "0xe745ecf105" - // pmtudOpenFlowCookie identifies the flows used to drop ICMP type (3) destination unreachable, - // fragmentation-needed (4) - pmtudOpenFlowCookie = "0x0304" - - // ctMarkHost is the conntrack mark value for host traffic - ctMarkHost = "0x2" - // ovnKubeNodeSNATMark is used to mark packets that need to be SNAT-ed to nodeIP for - // traffic originating from egressIP and egressService controlled pods towards other nodes in the cluster. - ovnKubeNodeSNATMark = "0x3f0" // nftablesUDNServicePreroutingChain is a base chain registered into the prerouting hook, // and it contains one rule that jumps to nftablesUDNServiceMarkChain. @@ -92,10 +81,6 @@ const ( // to the appropriate network. 
nftablesUDNMarkExternalIPsV4Map = "udn-mark-external-ips-v4" nftablesUDNMarkExternalIPsV6Map = "udn-mark-external-ips-v6" - - // outputPortDrop is used to signify that there is no output port for an openflow action and the - // rendered action should result in a drop - outputPortDrop = "output-port-drop" ) // configureUDNServicesNFTables configures the nftables chains, rules, and verdict maps @@ -426,7 +411,7 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI // This flow is used for UDNs and advertised UDNs to be able to reach kapi and dns services alone on default network flows := []string{fmt.Sprintf("cookie=%s, priority=300, table=2, %s, %s_dst=%s, "+ "actions=set_field:%s->eth_dst,output:%s", - defaultOpenFlowCookie, ipPrefix, ipPrefix, service.Spec.ClusterIP, + nodetypes.DefaultOpenFlowCookie, ipPrefix, ipPrefix, service.Spec.ClusterIP, npw.ofm.getDefaultBridgeMAC().String(), defaultNetConfig.OfPortPatch)} if util.IsRouteAdvertisementsEnabled() { // if the network is advertised, then for the reply from kapi and dns services to go back @@ -440,7 +425,7 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI // sample flow for non-advertised UDNs: cookie=0xdeff105, duration=684.087s, table=0, n_packets=0, n_bytes=0, // idle_age=684, priority=500,ip,in_port=2,nw_src=10.96.0.0/16,nw_dst=169.254.0.0/17 actions=ct(table=3,zone=64001,nat) flows = append(flows, fmt.Sprintf("cookie=%s, priority=490, in_port=%s, ip, ip_src=%s,actions=ct(zone=%d,nat,table=3)", - defaultOpenFlowCookie, defaultNetConfig.OfPortPatch, service.Spec.ClusterIP, config.Default.HostMasqConntrackZone)) + nodetypes.DefaultOpenFlowCookie, defaultNetConfig.OfPortPatch, service.Spec.ClusterIP, config.Default.HostMasqConntrackZone)) } npw.ofm.updateFlowCacheEntry(key, flows) } @@ -535,7 +520,7 @@ func (npw *nodePortWatcher) createLbAndExternalSvcFlows(service *corev1.Service, etpSvcOpenFlowCookie, npw.ofportPhys)) } else if 
config.Gateway.Mode == config.GatewayModeShared { // add the ICMP Fragmentation flow for shared gateway mode. - icmpFlow := generateICMPFragmentationFlow(externalIPOrLBIngressIP, netConfig.OfPortPatch, npw.ofportPhys, cookie, 110) + icmpFlow := nodeutil.GenerateICMPFragmentationFlow(externalIPOrLBIngressIP, netConfig.OfPortPatch, npw.ofportPhys, cookie, 110) externalIPFlows = append(externalIPFlows, icmpFlow) // case2 (see function description for details) externalIPFlows = append(externalIPFlows, @@ -601,31 +586,6 @@ func (npw *nodePortWatcher) generateARPBypassFlow(ofPorts []string, ofPortPatch, return arpFlow } -func generateICMPFragmentationFlow(ipAddr, outputPort, inPort, cookie string, priority int) string { - // we send any ICMP destination unreachable, fragmentation needed to the OVN pipeline too so that - // path MTU discovery continues to work. - icmpMatch := "icmp" - icmpType := 3 - icmpCode := 4 - nwDst := "nw_dst" - if utilnet.IsIPv6String(ipAddr) { - icmpMatch = "icmp6" - icmpType = 2 - icmpCode = 0 - nwDst = "ipv6_dst" - } - - action := fmt.Sprintf("output:%s", outputPort) - if outputPort == outputPortDrop { - action = "drop" - } - - icmpFragmentationFlow := fmt.Sprintf("cookie=%s, priority=%d, in_port=%s, %s, %s=%s, icmp_type=%d, "+ - "icmp_code=%d, actions=%s", - cookie, priority, inPort, icmpMatch, nwDst, ipAddr, icmpType, icmpCode, action) - return icmpFragmentationFlow -} - // getAndDeleteServiceInfo returns the serviceConfig for a service and if it exists and then deletes the entry func (npw *nodePortWatcher) getAndDeleteServiceInfo(index ktypes.NamespacedName) (out *serviceConfig, exists bool) { npw.serviceInfoLock.Lock() @@ -1449,894 +1409,6 @@ func (npwipt *nodePortWatcherIptables) SyncServices(services []interface{}) erro return utilerrors.Join(errors...) 
} -func flowsForDefaultBridge(bridge *bridgeconfig.BridgeConfiguration, extraIPs []net.IP) ([]string, error) { - // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure - // that dl_src is included in match criteria! - - ofPortPhys := bridge.OfPortPhys - bridgeMacAddress := bridge.MacAddress.String() - ofPortHost := bridge.OfPortHost - bridgeIPs := bridge.Ips - - var dftFlows []string - // 14 bytes of overhead for ethernet header (does not include VLAN) - maxPktLength := getMaxFrameLength() - - strip_vlan := "" - mod_vlan_id := "" - match_vlan := "" - if config.Gateway.VLANID != 0 { - strip_vlan = "strip_vlan," - match_vlan = fmt.Sprintf("dl_vlan=%d,", config.Gateway.VLANID) - mod_vlan_id = fmt.Sprintf("mod_vlan_vid:%d,", config.Gateway.VLANID) - } - - if config.IPv4Mode { - // table0, Geneve packets coming from external. Skip conntrack and go directly to host - // if dest mac is the shared mac send directly to host. - if ofPortPhys != "" { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=205, in_port=%s, dl_dst=%s, udp, udp_dst=%d, "+ - "actions=output:%s", defaultOpenFlowCookie, ofPortPhys, bridgeMacAddress, config.Default.EncapPort, - ofPortHost)) - // perform NORMAL action otherwise. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp, udp_dst=%d, "+ - "actions=NORMAL", defaultOpenFlowCookie, ofPortPhys, config.Default.EncapPort)) - - // table0, Geneve packets coming from LOCAL/Host OFPort. 
Skip conntrack and go directly to external - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp, udp_dst=%d, "+ - "actions=output:%s", defaultOpenFlowCookie, ofPortHost, config.Default.EncapPort, ofPortPhys)) - } - physicalIP, err := util.MatchFirstIPNetFamily(false, bridgeIPs) - if err != nil { - return nil, fmt.Errorf("unable to determine IPv4 physical IP of host: %v", err) - } - for _, netConfig := range bridge.PatchedNetConfigs() { - // table 0, SVC Hairpin from OVN destined to local host, DNAT and go to table 4 - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s, ip_src=%s,"+ - "actions=ct(commit,zone=%d,nat(dst=%s),table=4)", - defaultOpenFlowCookie, netConfig.OfPortPatch, config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String(), physicalIP.IP, - config.Default.HostMasqConntrackZone, physicalIP.IP)) - } - - // table 0, hairpin from OVN destined to local host (but an additional node IP), send to table 4 - for _, ip := range extraIPs { - if ip.To4() == nil { - continue - } - // not needed for the physical IP - if ip.Equal(physicalIP.IP) { - continue - } - - // not needed for special masquerade IP - if ip.Equal(config.Gateway.MasqueradeIPs.V4HostMasqueradeIP) { - continue - } - - for _, netConfig := range bridge.PatchedNetConfigs() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s, ip_src=%s,"+ - "actions=ct(commit,zone=%d,table=4)", - defaultOpenFlowCookie, netConfig.OfPortPatch, ip.String(), physicalIP.IP, - config.Default.HostMasqConntrackZone)) - } - } - - // table 0, Reply SVC traffic from Host -> OVN, unSNAT and goto table 5 - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s,"+ - "actions=ct(zone=%d,nat,table=5)", - defaultOpenFlowCookie, ofPortHost, config.Gateway.MasqueradeIPs.V4OVNMasqueradeIP.String(), config.Default.OVNMasqConntrackZone)) - } - if config.IPv6Mode { - if ofPortPhys != 
"" { - // table0, Geneve packets coming from external. Skip conntrack and go directly to host - // if dest mac is the shared mac send directly to host. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=205, in_port=%s, dl_dst=%s, udp6, udp_dst=%d, "+ - "actions=output:%s", defaultOpenFlowCookie, ofPortPhys, bridgeMacAddress, config.Default.EncapPort, - ofPortHost)) - // perform NORMAL action otherwise. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp6, udp_dst=%d, "+ - "actions=NORMAL", defaultOpenFlowCookie, ofPortPhys, config.Default.EncapPort)) - - // table0, Geneve packets coming from LOCAL. Skip conntrack and send to external - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp6, udp_dst=%d, "+ - "actions=output:%s", defaultOpenFlowCookie, nodetypes.OvsLocalPort, config.Default.EncapPort, ofPortPhys)) - } - - physicalIP, err := util.MatchFirstIPNetFamily(true, bridgeIPs) - if err != nil { - return nil, fmt.Errorf("unable to determine IPv6 physical IP of host: %v", err) - } - // table 0, SVC Hairpin from OVN destined to local host, DNAT to host, send to table 4 - for _, netConfig := range bridge.PatchedNetConfigs() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s, ipv6_src=%s,"+ - "actions=ct(commit,zone=%d,nat(dst=%s),table=4)", - defaultOpenFlowCookie, netConfig.OfPortPatch, config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String(), physicalIP.IP, - config.Default.HostMasqConntrackZone, physicalIP.IP)) - } - - // table 0, hairpin from OVN destined to local host (but an additional node IP), send to table 4 - for _, ip := range extraIPs { - if ip.To4() != nil { - continue - } - // not needed for the physical IP - if ip.Equal(physicalIP.IP) { - continue - } - - // not needed for special masquerade IP - if ip.Equal(config.Gateway.MasqueradeIPs.V6HostMasqueradeIP) { - continue - } - - for _, netConfig := range 
bridge.PatchedNetConfigs() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s, ipv6_src=%s,"+ - "actions=ct(commit,zone=%d,table=4)", - defaultOpenFlowCookie, netConfig.OfPortPatch, ip.String(), physicalIP.IP, - config.Default.HostMasqConntrackZone)) - } - } - - // table 0, Reply SVC traffic from Host -> OVN, unSNAT and goto table 5 - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s,"+ - "actions=ct(zone=%d,nat,table=5)", - defaultOpenFlowCookie, ofPortHost, config.Gateway.MasqueradeIPs.V6OVNMasqueradeIP.String(), config.Default.OVNMasqConntrackZone)) - } - - var protoPrefix, masqIP, masqSubnet string - - // table 0, packets coming from Host -> Service - for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { - if utilnet.IsIPv4CIDR(svcCIDR) { - protoPrefix = "ip" - masqIP = config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String() - masqSubnet = config.Gateway.V4MasqueradeSubnet - } else { - protoPrefix = "ipv6" - masqIP = config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String() - masqSubnet = config.Gateway.V6MasqueradeSubnet - } - - // table 0, Host (default network) -> OVN towards SVC, SNAT to special IP. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_dst=%s, "+ - "actions=ct(commit,zone=%d,nat(src=%s),table=2)", - defaultOpenFlowCookie, ofPortHost, protoPrefix, protoPrefix, - svcCIDR, config.Default.HostMasqConntrackZone, masqIP)) - - if util.IsNetworkSegmentationSupportEnabled() { - // table 0, Host (UDNs) -> OVN towards SVC, SNAT to special IP. - // For packets originating from UDN, commit without NATing, those - // have already been SNATed to the masq IP of the UDN. 
- dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=550, in_port=%s, %s, %s_src=%s, %s_dst=%s, "+ - "actions=ct(commit,zone=%d,table=2)", - defaultOpenFlowCookie, ofPortHost, protoPrefix, protoPrefix, - masqSubnet, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone)) - if util.IsRouteAdvertisementsEnabled() { - // If the UDN is advertised then instead of matching on the masqSubnet - // we match on the UDNPodSubnet itself and we also don't SNAT to 169.254.0.2 - // sample flow: cookie=0xdeff105, duration=1472.742s, table=0, n_packets=9, n_bytes=666, priority=550 - // ip,in_port=LOCAL,nw_src=103.103.0.0/16,nw_dst=10.96.0.0/16 actions=ct(commit,table=2,zone=64001) - for _, netConfig := range bridge.PatchedNetConfigs() { - if netConfig.IsDefaultNetwork() { - continue - } - if netConfig.Advertised.Load() { - var udnAdvertisedSubnets []*net.IPNet - for _, clusterEntry := range netConfig.Subnets { - udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) - } - // Filter subnets based on the clusterIP service family - // NOTE: We don't support more than 1 subnet CIDR of same family type; we only pick the first one - matchingIPFamilySubnet, err := util.MatchFirstIPNetFamily(utilnet.IsIPv6CIDR(svcCIDR), udnAdvertisedSubnets) - if err != nil { - klog.Infof("Unable to determine UDN subnet for the provided family isIPV6: %t, %v", utilnet.IsIPv6CIDR(svcCIDR), err) - continue - } - - // Use the filtered subnet for the flow compute instead of the masqueradeIP - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=550, in_port=%s, %s, %s_src=%s, %s_dst=%s, "+ - "actions=ct(commit,zone=%d,table=2)", - defaultOpenFlowCookie, ofPortHost, protoPrefix, protoPrefix, - matchingIPFamilySubnet.String(), protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone)) - } - } - } - } - - masqDst := masqIP - if util.IsNetworkSegmentationSupportEnabled() { - // In UDN match on the whole masquerade subnet to handle replies from UDN enabled services - 
masqDst = masqSubnet - } - for _, netConfig := range bridge.PatchedNetConfigs() { - // table 0, Reply hairpin traffic to host, coming from OVN, unSNAT - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_src=%s, %s_dst=%s,"+ - "actions=ct(zone=%d,nat,table=3)", - defaultOpenFlowCookie, netConfig.OfPortPatch, protoPrefix, protoPrefix, svcCIDR, - protoPrefix, masqDst, config.Default.HostMasqConntrackZone)) - // table 0, Reply traffic coming from OVN to outside, drop it if the DNAT wasn't done either - // at the GR load balancer or switch load balancer. It means the correct port wasn't provided. - // nodeCIDR->serviceCIDR traffic flow is internal and it shouldn't be carried to outside the cluster - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=115, in_port=%s, %s, %s_dst=%s,"+ - "actions=drop", defaultOpenFlowCookie, netConfig.OfPortPatch, protoPrefix, protoPrefix, svcCIDR)) - } - } - - // table 0, add IP fragment reassembly flows, only needed in SGW mode with - // physical interface attached to bridge - if config.Gateway.Mode == config.GatewayModeShared && ofPortPhys != "" { - reassemblyFlows := generateIPFragmentReassemblyFlow(ofPortPhys) - dftFlows = append(dftFlows, reassemblyFlows...) 
- } - if ofPortPhys != "" { - for _, netConfig := range bridge.PatchedNetConfigs() { - var actions string - if config.Gateway.Mode != config.GatewayModeLocal || config.Gateway.DisablePacketMTUCheck { - actions = fmt.Sprintf("output:%s", netConfig.OfPortPatch) - } else { - // packets larger than known acceptable MTU need to go to kernel for - // potential fragmentation - // introduced specifically for replies to egress traffic not routed - // through the host - actions = fmt.Sprintf("check_pkt_larger(%d)->reg0[0],resubmit(,11)", maxPktLength) - } - - if config.IPv4Mode { - // table 1, established and related connections in zone 64000 with ct_mark CtMarkOVN go to OVN - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, ip, ct_state=+trk+est, ct_mark=%s, "+ - "actions=%s", defaultOpenFlowCookie, netConfig.MasqCTMark, actions)) - - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, ip, ct_state=+trk+rel, ct_mark=%s, "+ - "actions=%s", defaultOpenFlowCookie, netConfig.MasqCTMark, actions)) - - } - - if config.IPv6Mode { - // table 1, established and related connections in zone 64000 with ct_mark CtMarkOVN go to OVN - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, ipv6, ct_state=+trk+est, ct_mark=%s, "+ - "actions=%s", defaultOpenFlowCookie, netConfig.MasqCTMark, actions)) - - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, ipv6, ct_state=+trk+rel, ct_mark=%s, "+ - "actions=%s", defaultOpenFlowCookie, netConfig.MasqCTMark, actions)) - } - } - if config.IPv4Mode { - // table 1, established and related connections in zone 64000 with ct_mark ctMarkHost go to host - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip, ct_state=+trk+est, ct_mark=%s, "+ - "actions=%soutput:%s", - defaultOpenFlowCookie, match_vlan, ctMarkHost, strip_vlan, ofPortHost)) - - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip, 
ct_state=+trk+rel, ct_mark=%s, "+ - "actions=%soutput:%s", - defaultOpenFlowCookie, match_vlan, ctMarkHost, strip_vlan, ofPortHost)) - - } - if config.IPv6Mode { - // table 1, established and related connections in zone 64000 with ct_mark ctMarkHost go to host - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip6, ct_state=+trk+est, ct_mark=%s, "+ - "actions=%soutput:%s", - defaultOpenFlowCookie, match_vlan, ctMarkHost, strip_vlan, ofPortHost)) - - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip6, ct_state=+trk+rel, ct_mark=%s, "+ - "actions=%soutput:%s", - defaultOpenFlowCookie, match_vlan, ctMarkHost, strip_vlan, ofPortHost)) - - } - - // table 1, we check to see if this dest mac is the shared mac, if so send to host - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=10, table=1, %s dl_dst=%s, actions=%soutput:%s", - defaultOpenFlowCookie, match_vlan, bridgeMacAddress, strip_vlan, ofPortHost)) - } - - defaultNetConfig := bridge.NetConfig[types.DefaultNetworkName] - - // table 2, dispatch from Host -> OVN - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=2, "+ - "actions=set_field:%s->eth_dst,%soutput:%s", defaultOpenFlowCookie, - bridgeMacAddress, mod_vlan_id, defaultNetConfig.OfPortPatch)) - - // table 2, priority 200, dispatch from UDN -> Host -> OVN. These packets have - // already been SNATed to the UDN's masq IP or have been marked with the UDN's packet mark. 
- if config.IPv4Mode { - for _, netConfig := range bridge.PatchedNetConfigs() { - if netConfig.IsDefaultNetwork() { - continue - } - srcIPOrSubnet := netConfig.V4MasqIPs.ManagementPort.IP.String() - if util.IsRouteAdvertisementsEnabled() && netConfig.Advertised.Load() { - var udnAdvertisedSubnets []*net.IPNet - for _, clusterEntry := range netConfig.Subnets { - udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) - } - // Filter subnets based on the clusterIP service family - // NOTE: We don't support more than 1 subnet CIDR of same family type; we only pick the first one - matchingIPFamilySubnet, err := util.MatchFirstIPNetFamily(false, udnAdvertisedSubnets) - if err != nil { - klog.Infof("Unable to determine IPV4 UDN subnet for the provided family isIPV6: %v", err) - continue - } - - // Use the filtered subnets for the flow compute instead of the masqueradeIP - srcIPOrSubnet = matchingIPFamilySubnet.String() - } - - // Drop traffic coming from the masquerade IP or the UDN subnet(for advertised UDNs) to ensure that - // isolation between networks is enforced. This handles the case where a pod on the UDN subnet is sending traffic to - // a service in another UDN. 
- dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, table=2, ip, ip_src=%s, "+ - "actions=drop", - defaultOpenFlowCookie, srcIPOrSubnet)) - - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=250, table=2, ip, pkt_mark=%s, "+ - "actions=set_field:%s->eth_dst,output:%s", - defaultOpenFlowCookie, netConfig.PktMark, - bridgeMacAddress, netConfig.OfPortPatch)) - } - } - - if config.IPv6Mode { - for _, netConfig := range bridge.PatchedNetConfigs() { - if netConfig.IsDefaultNetwork() { - continue - } - srcIPOrSubnet := netConfig.V6MasqIPs.ManagementPort.IP.String() - if util.IsRouteAdvertisementsEnabled() && netConfig.Advertised.Load() { - var udnAdvertisedSubnets []*net.IPNet - for _, clusterEntry := range netConfig.Subnets { - udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) - } - // Filter subnets based on the clusterIP service family - // NOTE: We don't support more than 1 subnet CIDR of same family type; we only pick the first one - matchingIPFamilySubnet, err := util.MatchFirstIPNetFamily(true, udnAdvertisedSubnets) - if err != nil { - klog.Infof("Unable to determine IPV6 UDN subnet for the provided family isIPV6: %v", err) - continue - } - - // Use the filtered subnets for the flow compute instead of the masqueradeIP - srcIPOrSubnet = matchingIPFamilySubnet.String() - } - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, table=2, ip6, ipv6_src=%s, "+ - "actions=drop", - defaultOpenFlowCookie, srcIPOrSubnet)) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=250, table=2, ip6, pkt_mark=%s, "+ - "actions=set_field:%s->eth_dst,output:%s", - defaultOpenFlowCookie, netConfig.PktMark, - bridgeMacAddress, netConfig.OfPortPatch)) - } - } - - // table 3, dispatch from OVN -> Host - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, table=3, %s "+ - "actions=move:NXM_OF_ETH_DST[]->NXM_OF_ETH_SRC[],set_field:%s->eth_dst,%soutput:%s", - defaultOpenFlowCookie, match_vlan, bridgeMacAddress, 
strip_vlan, ofPortHost)) - - // table 4, hairpinned pkts that need to go from OVN -> Host - // We need to SNAT and masquerade OVN GR IP, send to table 3 for dispatch to Host - if config.IPv4Mode { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, table=4,ip,"+ - "actions=ct(commit,zone=%d,nat(src=%s),table=3)", - defaultOpenFlowCookie, config.Default.OVNMasqConntrackZone, config.Gateway.MasqueradeIPs.V4OVNMasqueradeIP.String())) - } - if config.IPv6Mode { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, table=4,ipv6, "+ - "actions=ct(commit,zone=%d,nat(src=%s),table=3)", - defaultOpenFlowCookie, config.Default.OVNMasqConntrackZone, config.Gateway.MasqueradeIPs.V6OVNMasqueradeIP.String())) - } - // table 5, Host Reply traffic to hairpinned svc, need to unDNAT, send to table 2 - if config.IPv4Mode { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, table=5, ip, "+ - "actions=ct(commit,zone=%d,nat,table=2)", - defaultOpenFlowCookie, config.Default.HostMasqConntrackZone)) - } - if config.IPv6Mode { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, table=5, ipv6, "+ - "actions=ct(commit,zone=%d,nat,table=2)", - defaultOpenFlowCookie, config.Default.HostMasqConntrackZone)) - } - return dftFlows, nil -} - -func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeconfig.BridgeConfiguration) ([]string, error) { - // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure - // that dl_src is included in match criteria! 
- ofPortPhys := bridge.OfPortPhys - bridgeMacAddress := bridge.MacAddress.String() - ofPortHost := bridge.OfPortHost - bridgeIPs := bridge.Ips - - var dftFlows []string - - strip_vlan := "" - match_vlan := "" - mod_vlan_id := "" - if config.Gateway.VLANID != 0 { - strip_vlan = "strip_vlan," - match_vlan = fmt.Sprintf("dl_vlan=%d,", config.Gateway.VLANID) - mod_vlan_id = fmt.Sprintf("mod_vlan_vid:%d,", config.Gateway.VLANID) - } - - if ofPortPhys != "" { - // table 0, we check to see if this dest mac is the shared mac, if so flood to all ports - actions := "" - for _, netConfig := range bridge.PatchedNetConfigs() { - actions += "output:" + netConfig.OfPortPatch + "," - } - actions += strip_vlan + "NORMAL" - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=10, table=0, %s dl_dst=%s, actions=%s", - defaultOpenFlowCookie, match_vlan, bridgeMacAddress, actions)) - } - - // table 0, check packets coming from OVN have the correct mac address. Low priority flows that are a catch all - // for non-IP packets that would normally be forwarded with NORMAL action (table 0, priority 0 flow). 
- for _, netConfig := range bridge.PatchedNetConfigs() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=10, table=0, in_port=%s, dl_src=%s, actions=output:NORMAL", - defaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress)) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=9, table=0, in_port=%s, actions=drop", - defaultOpenFlowCookie, netConfig.OfPortPatch)) - } - - if config.IPv4Mode { - physicalIP, err := util.MatchFirstIPNetFamily(false, bridgeIPs) - if err != nil { - return nil, fmt.Errorf("unable to determine IPv4 physical IP of host: %v", err) - } - if ofPortPhys != "" { - for _, netConfig := range bridge.PatchedNetConfigs() { - // table0, packets coming from egressIP pods that have mark 1008 on them - // will be SNAT-ed a final time into nodeIP to maintain consistency in traffic even if the GR - // SNATs these into egressIP prior to reaching external bridge. - // egressService pods will also undergo this SNAT to nodeIP since these features are tied - // together at the OVN policy level on the distributed router. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ip, pkt_mark=%s "+ - "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)),output:%s", - defaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, ovnKubeNodeSNATMark, - config.Default.ConntrackZone, physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) - - // table 0, packets coming from egressIP pods only from user defined networks. If an egressIP is assigned to - // this node, then all networks get a flow even if no pods on that network were selected for by this egressIP. 
- if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && - config.Gateway.Mode != config.GatewayModeDisabled && bridge.EipMarkIPs != nil { - if netConfig.MasqCTMark != nodetypes.CtMarkOVN { - for mark, eip := range bridge.EipMarkIPs.GetIPv4() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ip, pkt_mark=%d, "+ - "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", - defaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, mark, - config.Default.ConntrackZone, eip, netConfig.MasqCTMark, ofPortPhys)) - } - } - } - - // table 0, packets coming from pods headed externally. Commit connections with ct_mark CtMarkOVN - // so that reverse direction goes back to the pods. - if netConfig.IsDefaultNetwork() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ip, "+ - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", - defaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, config.Default.ConntrackZone, - netConfig.MasqCTMark, ofPortPhys)) - - // Allow (a) OVN->host traffic on the same node - // (b) host->host traffic on the same node - if config.Gateway.Mode == config.GatewayModeShared || config.Gateway.Mode == config.GatewayModeLocal { - dftFlows = append(dftFlows, hostNetworkNormalActionFlows(netConfig, bridgeMacAddress, hostSubnets, false)...) 
- } - } else { - // for UDN we additionally SNAT the packet from masquerade IP -> node IP - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ip, ip_src=%s, "+ - "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", - defaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, netConfig.V4MasqIPs.GatewayRouter.IP, config.Default.ConntrackZone, - physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) - } - } - - // table 0, packets coming from host Commit connections with ct_mark ctMarkHost - // so that reverse direction goes back to the host. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, ip, "+ - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), %soutput:%s", - defaultOpenFlowCookie, ofPortHost, config.Default.ConntrackZone, ctMarkHost, mod_vlan_id, ofPortPhys)) - } - if config.Gateway.Mode == config.GatewayModeLocal { - for _, netConfig := range bridge.PatchedNetConfigs() { - // table 0, any packet coming from OVN send to host in LGW mode, host will take care of sending it outside if needed. - // exceptions are traffic for egressIP and egressGW features and ICMP related traffic which will hit the priority 100 flow instead of this. 
- dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=175, in_port=%s, tcp, nw_src=%s, "+ - "actions=ct(table=4,zone=%d)", - defaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=175, in_port=%s, udp, nw_src=%s, "+ - "actions=ct(table=4,zone=%d)", - defaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=175, in_port=%s, sctp, nw_src=%s, "+ - "actions=ct(table=4,zone=%d)", - defaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) - // We send BFD traffic coming from OVN to outside directly using a higher priority flow - if ofPortPhys != "" { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=650, table=0, in_port=%s, dl_src=%s, udp, tp_dst=3784, actions=output:%s", - defaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, ofPortPhys)) - } - } - } - - if ofPortPhys != "" { - // table 0, packets coming from external or other localnet ports. Send it through conntrack and - // resubmit to table 1 to know the state and mark of the connection. - // Note, there are higher priority rules that take care of traffic coming from LOCAL and OVN ports. 
- dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=50, ip, actions=ct(zone=%d, nat, table=1)", - defaultOpenFlowCookie, config.Default.ConntrackZone)) - } - } - - if config.IPv6Mode { - physicalIP, err := util.MatchFirstIPNetFamily(true, bridgeIPs) - if err != nil { - return nil, fmt.Errorf("unable to determine IPv6 physical IP of host: %v", err) - } - if ofPortPhys != "" { - for _, netConfig := range bridge.PatchedNetConfigs() { - // table0, packets coming from egressIP pods that have mark 1008 on them - // will be DNAT-ed a final time into nodeIP to maintain consistency in traffic even if the GR - // DNATs these into egressIP prior to reaching external bridge. - // egressService pods will also undergo this SNAT to nodeIP since these features are tied - // together at the OVN policy level on the distributed router. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ipv6, pkt_mark=%s "+ - "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)),output:%s", - defaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, ovnKubeNodeSNATMark, - config.Default.ConntrackZone, physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) - - // table 0, packets coming from egressIP pods only from user defined networks. If an egressIP is assigned to - // this node, then all networks get a flow even if no pods on that network were selected for by this egressIP. 
- if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && - config.Gateway.Mode != config.GatewayModeDisabled && bridge.EipMarkIPs != nil { - if netConfig.MasqCTMark != nodetypes.CtMarkOVN { - for mark, eip := range bridge.EipMarkIPs.GetIPv6() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ipv6, pkt_mark=%d, "+ - "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", - defaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, mark, - config.Default.ConntrackZone, eip, netConfig.MasqCTMark, ofPortPhys)) - } - } - } - - // table 0, packets coming from pods headed externally. Commit connections with ct_mark CtMarkOVN - // so that reverse direction goes back to the pods. - if netConfig.IsDefaultNetwork() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ipv6, "+ - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", - defaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, config.Default.ConntrackZone, netConfig.MasqCTMark, ofPortPhys)) - - // Allow (a) OVN->host traffic on the same node - // (b) host->host traffic on the same node - if config.Gateway.Mode == config.GatewayModeShared || config.Gateway.Mode == config.GatewayModeLocal { - dftFlows = append(dftFlows, hostNetworkNormalActionFlows(netConfig, bridgeMacAddress, hostSubnets, true)...) - } - } else { - // for UDN we additionally SNAT the packet from masquerade IP -> node IP - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ipv6, ipv6_src=%s, "+ - "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", - defaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, netConfig.V6MasqIPs.GatewayRouter.IP, config.Default.ConntrackZone, - physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) - } - } - - // table 0, packets coming from host. 
Commit connections with ct_mark ctMarkHost - // so that reverse direction goes back to the host. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, ipv6, "+ - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), %soutput:%s", - defaultOpenFlowCookie, ofPortHost, config.Default.ConntrackZone, ctMarkHost, mod_vlan_id, ofPortPhys)) - - } - if config.Gateway.Mode == config.GatewayModeLocal { - for _, netConfig := range bridge.PatchedNetConfigs() { - // table 0, any packet coming from OVN send to host in LGW mode, host will take care of sending it outside if needed. - // exceptions are traffic for egressIP and egressGW features and ICMP related traffic which will hit the priority 100 flow instead of this. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=175, in_port=%s, tcp6, ipv6_src=%s, "+ - "actions=ct(table=4,zone=%d)", - defaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=175, in_port=%s, udp6, ipv6_src=%s, "+ - "actions=ct(table=4,zone=%d)", - defaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=175, in_port=%s, sctp6, ipv6_src=%s, "+ - "actions=ct(table=4,zone=%d)", - defaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) - if ofPortPhys != "" { - // We send BFD traffic coming from OVN to outside directly using a higher priority flow - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=650, table=0, in_port=%s, dl_src=%s, udp6, tp_dst=3784, actions=output:%s", - defaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, ofPortPhys)) - } - } - } - if ofPortPhys != "" { - // table 0, packets coming from external. Send it through conntrack and - // resubmit to table 1 to know the state and mark of the connection. 
- dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=50, in_port=%s, ipv6, "+ - "actions=ct(zone=%d, nat, table=1)", defaultOpenFlowCookie, ofPortPhys, config.Default.ConntrackZone)) - } - } - // Egress IP is often configured on a node different from the one hosting the affected pod. - // Due to the fact that ovn-controllers on different nodes apply the changes independently, - // there is a chance that the pod traffic will reach the egress node before it configures the SNAT flows. - // Drop pod traffic that is not SNATed, excluding local pods(required for ICNIv2) - defaultNetConfig := bridge.NetConfig[types.DefaultNetworkName] - if config.OVNKubernetesFeature.EnableEgressIP { - for _, clusterEntry := range config.Default.ClusterSubnets { - cidr := clusterEntry.CIDR - ipv := getIPv(cidr) - // table 0, drop packets coming from pods headed externally that were not SNATed. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=104, in_port=%s, %s, %s_src=%s, actions=drop", - defaultOpenFlowCookie, defaultNetConfig.OfPortPatch, ipv, ipv, cidr)) - } - for _, subnet := range defaultNetConfig.NodeSubnets { - ipv := getIPv(subnet) - if ofPortPhys != "" { - // table 0, commit connections from local pods. - // ICNIv2 requires that local pod traffic can leave the node without SNAT. 
- dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=109, in_port=%s, dl_src=%s, %s, %s_src=%s"+ - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", - defaultOpenFlowCookie, defaultNetConfig.OfPortPatch, bridgeMacAddress, ipv, ipv, subnet, - config.Default.ConntrackZone, nodetypes.CtMarkOVN, ofPortPhys)) - } - } - } - - if ofPortPhys != "" { - for _, netConfig := range bridge.PatchedNetConfigs() { - isNetworkAdvertised := netConfig.Advertised.Load() - // disableSNATMultipleGWs only applies to default network - disableSNATMultipleGWs := netConfig.IsDefaultNetwork() && config.Gateway.DisableSNATMultipleGWs - if !disableSNATMultipleGWs && !isNetworkAdvertised { - continue - } - output := netConfig.OfPortPatch - if isNetworkAdvertised && config.Gateway.Mode == config.GatewayModeLocal { - // except if advertised through BGP, go to kernel - // TODO: MEG enabled pods should still go through the patch port - // but holding this until - // https://issues.redhat.com/browse/FDP-646 is fixed, for now we - // are assuming MEG & BGP are not used together - output = nodetypes.OvsLocalPort - } - for _, clusterEntry := range netConfig.Subnets { - cidr := clusterEntry.CIDR - ipv := getIPv(cidr) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=15, table=1, %s, %s_dst=%s, "+ - "actions=output:%s", - defaultOpenFlowCookie, ipv, ipv, cidr, output)) - } - if output == netConfig.OfPortPatch { - // except node management traffic - for _, subnet := range netConfig.NodeSubnets { - mgmtIP := util.GetNodeManagementIfAddr(subnet) - ipv := getIPv(mgmtIP) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=16, table=1, %s, %s_dst=%s, "+ - "actions=output:%s", - defaultOpenFlowCookie, ipv, ipv, mgmtIP.IP, nodetypes.OvsLocalPort), - ) - } - } - } - - // table 1, we check to see if this dest mac is the shared mac, if so send to host - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=10, table=1, %s dl_dst=%s, 
actions=%soutput:%s", - defaultOpenFlowCookie, match_vlan, bridgeMacAddress, strip_vlan, ofPortHost)) - - if config.IPv6Mode { - // REMOVEME(trozet) when https://bugzilla.kernel.org/show_bug.cgi?id=11797 is resolved - // must flood icmpv6 Route Advertisement and Neighbor Advertisement traffic as it fails to create a CT entry - for _, icmpType := range []int{types.RouteAdvertisementICMPType, types.NeighborAdvertisementICMPType} { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=14, table=1,icmp6,icmpv6_type=%d actions=FLOOD", - defaultOpenFlowCookie, icmpType)) - } - if ofPortPhys != "" { - // We send BFD traffic both on the host and in ovn - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=13, table=1, in_port=%s, udp6, tp_dst=3784, actions=output:%s,output:%s", - defaultOpenFlowCookie, ofPortPhys, defaultNetConfig.OfPortPatch, ofPortHost)) - } - } - - if config.IPv4Mode { - if ofPortPhys != "" { - // We send BFD traffic both on the host and in ovn - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=13, table=1, in_port=%s, udp, tp_dst=3784, actions=output:%s,output:%s", - defaultOpenFlowCookie, ofPortPhys, defaultNetConfig.OfPortPatch, ofPortHost)) - } - } - - // packets larger than known acceptable MTU need to go to kernel for - // potential fragmentation - // introduced specifically for replies to egress traffic not routed - // through the host - if config.Gateway.Mode == config.GatewayModeLocal && !config.Gateway.DisablePacketMTUCheck { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=10, table=11, reg0=0x1, "+ - "actions=output:%s", defaultOpenFlowCookie, ofPortHost)) - - // Send UDN destined traffic to right patch port - for _, netConfig := range bridge.PatchedNetConfigs() { - if netConfig.MasqCTMark != nodetypes.CtMarkOVN { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=5, table=11, ct_mark=%s, "+ - "actions=output:%s", defaultOpenFlowCookie, netConfig.MasqCTMark, 
netConfig.OfPortPatch)) - } - } - - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=1, table=11, "+ - "actions=output:%s", defaultOpenFlowCookie, defaultNetConfig.OfPortPatch)) - } - - // table 1, all other connections do normal processing - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=0, table=1, actions=output:NORMAL", defaultOpenFlowCookie)) - } - - return dftFlows, nil -} - -func pmtudDropFlows(bridge *bridgeconfig.BridgeConfiguration, ipAddrs []string) []string { - var flows []string - if config.Gateway.Mode != config.GatewayModeShared { - return nil - } - for _, addr := range ipAddrs { - for _, netConfig := range bridge.PatchedNetConfigs() { - flows = append(flows, - generateICMPFragmentationFlow(addr, outputPortDrop, netConfig.OfPortPatch, pmtudOpenFlowCookie, 700)) - } - } - - return flows -} - -// hostNetworkNormalActionFlows returns the flows that allow IP{v4,v6} traffic: -// a. from pods in the OVN network to pods in a localnet network, on the same node -// b. from pods on the host to pods in a localnet network, on the same node -// when the localnet is mapped to breth0. -// The expected srcMAC is the MAC address of breth0 and the expected hostSubnets is the host subnets found on the node -// primary interface. -func hostNetworkNormalActionFlows(netConfig *bridgeconfig.BridgeUDNConfiguration, srcMAC string, hostSubnets []*net.IPNet, isV6 bool) []string { - var flows []string - var ipFamily, ipFamilyDest string - - if isV6 { - ipFamily = "ipv6" - ipFamilyDest = "ipv6_dst" - } else { - ipFamily = "ip" - ipFamilyDest = "nw_dst" - } - - formatFlow := func(inPort, destIP, ctMark string) string { - // Matching IP traffic will be handled by the bridge instead of being output directly - // to the NIC by the existing flow at prio=100. 
- flowTemplate := "cookie=%s, priority=102, in_port=%s, dl_src=%s, %s, %s=%s, " + - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:NORMAL" - return fmt.Sprintf(flowTemplate, - defaultOpenFlowCookie, - inPort, - srcMAC, - ipFamily, - ipFamilyDest, - destIP, - config.Default.ConntrackZone, - ctMark) - } - - // Traffic path (a): OVN->localnet for shared gw mode - if config.Gateway.Mode == config.GatewayModeShared { - for _, hostSubnet := range hostSubnets { - if utilnet.IsIPv6(hostSubnet.IP) != isV6 { - continue - } - flows = append(flows, formatFlow(netConfig.OfPortPatch, hostSubnet.String(), netConfig.MasqCTMark)) - } - } - - // Traffic path (a): OVN->localnet for local gw mode - // Traffic path (b): host->localnet for both gw modes - for _, hostSubnet := range hostSubnets { - if utilnet.IsIPv6(hostSubnet.IP) != isV6 { - continue - } - flows = append(flows, formatFlow(nodetypes.OvsLocalPort, hostSubnet.String(), ctMarkHost)) - } - - if isV6 { - // IPv6 neighbor discovery uses ICMPv6 messages sent to a special destination (ff02::1:ff00:0/104) - // that is unrelated to the host subnets matched in the prio=102 flow above. - // Allow neighbor discovery by matching against ICMP type and ingress port. 
- formatICMPFlow := func(inPort, ctMark string, icmpType int) string { - icmpFlowTemplate := "cookie=%s, priority=102, in_port=%s, dl_src=%s, icmp6, icmpv6_type=%d, " + - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:NORMAL" - return fmt.Sprintf(icmpFlowTemplate, - defaultOpenFlowCookie, - inPort, - srcMAC, - icmpType, - config.Default.ConntrackZone, - ctMark) - } - - for _, icmpType := range []int{types.NeighborSolicitationICMPType, types.NeighborAdvertisementICMPType} { - // Traffic path (a) for ICMP: OVN-> localnet for shared gw mode - if config.Gateway.Mode == config.GatewayModeShared { - flows = append(flows, - formatICMPFlow(netConfig.OfPortPatch, netConfig.MasqCTMark, icmpType)) - } - - // Traffic path (a) for ICMP: OVN->localnet for local gw mode - // Traffic path (b) for ICMP: host->localnet for both gw modes - flows = append(flows, formatICMPFlow(nodetypes.OvsLocalPort, ctMarkHost, icmpType)) - } - } - return flows -} - func newGateway( nodeName string, subnets []*net.IPNet, @@ -2811,36 +1883,6 @@ func updateMasqueradeAnnotation(nodeName string, kube kube.Interface) error { return nil } -// generateIPFragmentReassemblyFlow adds flows in table 0 that send packets to a -// specific conntrack zone for reassembly with the same priority as node port -// flows that match on L4 fields. After reassembly packets are reinjected to -// table 0 again. This requires a conntrack immplementation that reassembles -// fragments. This reqreuiment is met for the kernel datapath with the netfilter -// module loaded. This reqreuiment is not met for the userspace datapath. 
-func generateIPFragmentReassemblyFlow(ofPortPhys string) []string { - flows := make([]string, 0, 2) - if config.IPv4Mode { - flows = append(flows, - fmt.Sprintf("cookie=%s, priority=110, table=0, in_port=%s, ip, nw_frag=yes, actions=ct(table=0,zone=%d)", - defaultOpenFlowCookie, - ofPortPhys, - config.Default.ReassemblyConntrackZone, - ), - ) - } - if config.IPv6Mode { - flows = append(flows, - fmt.Sprintf("cookie=%s, priority=110, table=0, in_port=%s, ipv6, nw_frag=yes, actions=ct(table=0,zone=%d)", - defaultOpenFlowCookie, - ofPortPhys, - config.Default.ReassemblyConntrackZone, - ), - ) - } - - return flows -} - // deleteStaleMasqueradeResources removes stale Linux resources when config.Gateway.V4MasqueradeSubnet // or config.Gateway.V6MasqueradeSubnet gets changed at day 2. func deleteStaleMasqueradeResources(bridgeName, nodeName string, wf factory.NodeWatchFactory) error { @@ -2974,14 +2016,6 @@ func deleteMasqueradeResources(link netlink.Link, staleMasqueradeIPs *config.Mas return utilerrors.Join(aggregatedErrors...) } -func getIPv(ipnet *net.IPNet) string { - prefix := "ip" - if utilnet.IsIPv6CIDR(ipnet) { - prefix = "ipv6" - } - return prefix -} - // configureAdvertisedUDNIsolationNFTables configures nftables to drop traffic generated locally towards advertised UDN subnets. // It sets up a nftables chain named nftablesUDNBGPOutputChain in the output hook with filter priority which drops // traffic originating from the local node destined to nftablesAdvertisedUDNsSet. 
diff --git a/go-controller/pkg/node/gateway_udn_test.go b/go-controller/pkg/node/gateway_udn_test.go index 8f4082d1c5..5622a226d7 100644 --- a/go-controller/pkg/node/gateway_udn_test.go +++ b/go-controller/pkg/node/gateway_udn_test.go @@ -21,7 +21,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" - utilnet "k8s.io/utils/net" "k8s.io/utils/ptr" "sigs.k8s.io/knftables" @@ -243,105 +242,6 @@ func openflowManagerCheckPorts(ofMgr *openflowManager) { Expect(checkPorts(netConfigs, uplink, ofPortPhys)).To(Succeed()) } -func checkDefaultSvcIsolationOVSFlows(flows []string, defaultConfig *bridgeconfig.BridgeUDNConfiguration, ofPortHost, bridgeMAC string, svcCIDR *net.IPNet) { - By(fmt.Sprintf("Checking default service isolation flows for %s", svcCIDR.String())) - - var masqIP string - var masqSubnet string - var protoPrefix string - if utilnet.IsIPv4CIDR(svcCIDR) { - protoPrefix = "ip" - masqIP = config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String() - masqSubnet = config.Gateway.V4MasqueradeSubnet - } else { - protoPrefix = "ip6" - masqIP = config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String() - masqSubnet = config.Gateway.V6MasqueradeSubnet - } - - var nTable0DefaultFlows int - var nTable0UDNMasqFlows int - var nTable2Flows int - for _, flow := range flows { - if strings.Contains(flow, fmt.Sprintf("priority=500, in_port=%s, %s, %s_dst=%s, actions=ct(commit,zone=%d,nat(src=%s),table=2)", - ofPortHost, protoPrefix, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone, - masqIP)) { - nTable0DefaultFlows++ - } else if strings.Contains(flow, fmt.Sprintf("priority=550, in_port=%s, %s, %s_src=%s, %s_dst=%s, actions=ct(commit,zone=%d,table=2)", - ofPortHost, protoPrefix, protoPrefix, masqSubnet, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone)) { - nTable0UDNMasqFlows++ - } else if strings.Contains(flow, fmt.Sprintf("priority=100, table=2, actions=set_field:%s->eth_dst,output:%s", - 
bridgeMAC, defaultConfig.OfPortPatch)) { - nTable2Flows++ - } - } - - Expect(nTable0DefaultFlows).To(Equal(1)) - Expect(nTable0UDNMasqFlows).To(Equal(1)) - Expect(nTable2Flows).To(Equal(1)) -} - -func checkAdvertisedUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeconfig.BridgeUDNConfiguration, netName string, svcCIDR *net.IPNet, expectedNFlows int) { - By(fmt.Sprintf("Checking advertsised UDN %s service isolation flows for %s; expected %d flows", - netName, svcCIDR.String(), expectedNFlows)) - - var matchingIPFamilySubnet *net.IPNet - var protoPrefix string - var udnAdvertisedSubnets []*net.IPNet - var err error - for _, clusterEntry := range netConfig.Subnets { - udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) - } - if utilnet.IsIPv4CIDR(svcCIDR) { - matchingIPFamilySubnet, err = util.MatchFirstIPNetFamily(false, udnAdvertisedSubnets) - Expect(err).ToNot(HaveOccurred()) - protoPrefix = "ip" - } else { - matchingIPFamilySubnet, err = util.MatchFirstIPNetFamily(false, udnAdvertisedSubnets) - Expect(err).ToNot(HaveOccurred()) - protoPrefix = "ip6" - } - - var nFlows int - for _, flow := range flows { - if strings.Contains(flow, fmt.Sprintf("priority=200, table=2, %s, %s_src=%s, actions=drop", - protoPrefix, protoPrefix, matchingIPFamilySubnet)) { - nFlows++ - } - if strings.Contains(flow, fmt.Sprintf("priority=550, in_port=LOCAL, %s, %s_src=%s, %s_dst=%s, actions=ct(commit,zone=64001,table=2)", - protoPrefix, protoPrefix, matchingIPFamilySubnet, protoPrefix, svcCIDR)) { - nFlows++ - } - } - - Expect(nFlows).To(Equal(expectedNFlows)) -} - -func checkUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeconfig.BridgeUDNConfiguration, netName string, svcCIDR *net.IPNet, expectedNFlows int) { - By(fmt.Sprintf("Checking UDN %s service isolation flows for %s; expected %d flows", - netName, svcCIDR.String(), expectedNFlows)) - - var mgmtMasqIP string - var protoPrefix string - if utilnet.IsIPv4CIDR(svcCIDR) { - mgmtMasqIP = 
netConfig.V4MasqIPs.ManagementPort.IP.String() - protoPrefix = "ip" - } else { - mgmtMasqIP = netConfig.V6MasqIPs.ManagementPort.IP.String() - protoPrefix = "ip6" - } - - var nFlows int - for _, flow := range flows { - if strings.Contains(flow, fmt.Sprintf("priority=200, table=2, %s, %s_src=%s, actions=drop", - protoPrefix, protoPrefix, mgmtMasqIP)) { - nFlows++ - } - } - - Expect(nFlows).To(Equal(expectedNFlows)) -} - func getDummyOpenflowManager() *openflowManager { gwBridge := bridgeconfig.TestBridgeConfig("breth0") ofm := &openflowManager{ @@ -792,10 +692,10 @@ var _ = Describe("UserDefinedNetworkGateway", func() { for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { // Check flows for default network service CIDR. - checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) + bridgeconfig.CheckDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect exactly one flow per UDN for table 2 for service isolation. - checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 1) + bridgeconfig.CheckUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 1) } // The second call to checkPorts() will return no ofPort for the UDN - simulating a deletion that already was @@ -822,10 +722,10 @@ var _ = Describe("UserDefinedNetworkGateway", func() { for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { // Check flows for default network service CIDR. - checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) + bridgeconfig.CheckDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect no more flows per UDN for table 2 for service isolation. 
- checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 0) + bridgeconfig.CheckUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 0) } return nil }) @@ -1023,10 +923,10 @@ var _ = Describe("UserDefinedNetworkGateway", func() { for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { // Check flows for default network service CIDR. - checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) + bridgeconfig.CheckDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect exactly one flow per UDN for tables 0 and 2 for service isolation. - checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 1) + bridgeconfig.CheckUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 1) } // The second call to checkPorts() will return no ofPort for the UDN - simulating a deletion that already was @@ -1053,10 +953,10 @@ var _ = Describe("UserDefinedNetworkGateway", func() { for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { // Check flows for default network service CIDR. - checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) + bridgeconfig.CheckDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect no more flows per UDN for tables 0 and 2 for service isolation. - checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 0) + bridgeconfig.CheckUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 0) } return nil }) @@ -1264,10 +1164,10 @@ var _ = Describe("UserDefinedNetworkGateway", func() { for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { // Check flows for default network service CIDR. 
- checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) + bridgeconfig.CheckDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect exactly one flow per advertised UDN for table 2 and table 0 for service isolation. - checkAdvertisedUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 2) + bridgeconfig.CheckAdvertisedUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 2) } // The second call to checkPorts() will return no ofPort for the UDN - simulating a deletion that already was @@ -1294,10 +1194,10 @@ var _ = Describe("UserDefinedNetworkGateway", func() { for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { // Check flows for default network service CIDR. - checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) + bridgeconfig.CheckDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect no more flows per UDN for table 2 and table0 for service isolation. 
- checkAdvertisedUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 0) + bridgeconfig.CheckAdvertisedUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 0) } return nil }) diff --git a/go-controller/pkg/node/openflow_manager.go b/go-controller/pkg/node/openflow_manager.go index 0b96b2186f..b8d8d8406e 100644 --- a/go-controller/pkg/node/openflow_manager.go +++ b/go-controller/pkg/node/openflow_manager.go @@ -14,6 +14,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/generator/udn" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" + nodetypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -210,12 +211,12 @@ func (c *openflowManager) updateBridgePMTUDFlowCache(key string, ipAddrs []strin c.defaultBridge.Mutex.Lock() defer c.defaultBridge.Mutex.Unlock() - dftFlows := pmtudDropFlows(c.defaultBridge, ipAddrs) + dftFlows := bridgeconfig.PmtudDropFlows(c.defaultBridge, ipAddrs) c.updateFlowCacheEntry(key, dftFlows) if c.externalGatewayBridge != nil { c.externalGatewayBridge.Mutex.Lock() defer c.externalGatewayBridge.Mutex.Unlock() - exGWBridgeDftFlows := pmtudDropFlows(c.externalGatewayBridge, ipAddrs) + exGWBridgeDftFlows := bridgeconfig.PmtudDropFlows(c.externalGatewayBridge, ipAddrs) c.updateExBridgeFlowCacheEntry(key, exGWBridgeDftFlows) } } @@ -230,11 +231,11 @@ func (c *openflowManager) updateBridgeFlowCache(hostIPs []net.IP, hostSubnets [] // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure // that dl_src is included in match criteria! 
- dftFlows, err := flowsForDefaultBridge(c.defaultBridge, hostIPs) + dftFlows, err := bridgeconfig.FlowsForDefaultBridge(c.defaultBridge, hostIPs) if err != nil { return err } - dftCommonFlows, err := commonFlows(hostSubnets, c.defaultBridge) + dftCommonFlows, err := bridgeconfig.CommonFlows(hostSubnets, c.defaultBridge) if err != nil { return err } @@ -248,7 +249,7 @@ func (c *openflowManager) updateBridgeFlowCache(hostIPs []net.IP, hostSubnets [] c.externalGatewayBridge.Mutex.Lock() defer c.externalGatewayBridge.Mutex.Unlock() c.updateExBridgeFlowCacheEntry("NORMAL", []string{fmt.Sprintf("table=0,priority=0,actions=%s\n", util.NormalAction)}) - exGWBridgeDftFlows, err := commonFlows(hostSubnets, c.externalGatewayBridge) + exGWBridgeDftFlows, err := bridgeconfig.CommonFlows(hostSubnets, c.externalGatewayBridge) if err != nil { return err } @@ -357,10 +358,10 @@ func bootstrapOVSFlows(nodeName string) error { // for non-IP packets that would normally be forwarded with NORMAL action (table 0, priority 0 flow). 
dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=10, table=0, in_port=%s, dl_src=%s, actions=output:NORMAL", - defaultOpenFlowCookie, ofportPatch, bridgeMACAddress)) + nodetypes.DefaultOpenFlowCookie, ofportPatch, bridgeMACAddress)) dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=9, table=0, in_port=%s, actions=drop", - defaultOpenFlowCookie, ofportPatch)) + nodetypes.DefaultOpenFlowCookie, ofportPatch)) dftFlows = append(dftFlows, "priority=0, table=0, actions=output:NORMAL") _, stderr, err = util.ReplaceOFFlows(bridge, dftFlows) diff --git a/go-controller/pkg/node/types/const.go b/go-controller/pkg/node/types/const.go index b486302dd6..bdf9c388bf 100644 --- a/go-controller/pkg/node/types/const.go +++ b/go-controller/pkg/node/types/const.go @@ -5,4 +5,18 @@ const ( CtMarkOVN = "0x1" // OvsLocalPort is the name of the OVS bridge local port OvsLocalPort = "LOCAL" + // DefaultOpenFlowCookie identifies default open flow rules added to the host OVS bridge. + // The hex number 0xdeff105, aka defflos, is meant to sound like default flows. + DefaultOpenFlowCookie = "0xdeff105" + // OutputPortDrop is used to signify that there is no output port for an openflow action and the + // rendered action should result in a drop + OutputPortDrop = "output-port-drop" + // OvnKubeNodeSNATMark is used to mark packets that need to be SNAT-ed to nodeIP for + // traffic originating from egressIP and egressService controlled pods towards other nodes in the cluster. 
+ OvnKubeNodeSNATMark = "0x3f0" + // PmtudOpenFlowCookie identifies the flows used to drop ICMP type (3) destination unreachable, + // fragmentation-needed (4) + PmtudOpenFlowCookie = "0x0304" + // CtMarkHost is the conntrack mark value for host traffic + CtMarkHost = "0x2" ) diff --git a/go-controller/pkg/node/util/util.go b/go-controller/pkg/node/util/util.go index 9ad21a9a8e..e04be61b39 100644 --- a/go-controller/pkg/node/util/util.go +++ b/go-controller/pkg/node/util/util.go @@ -7,6 +7,7 @@ import ( net2 "k8s.io/utils/net" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + nodetypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types" pkgutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -90,3 +91,28 @@ func GetDPUHostPrimaryIPAddresses(k8sNodeIP net.IP, ifAddrs []*net.IPNet) ([]*ne } return gwIps, nil } + +func GenerateICMPFragmentationFlow(ipAddr, outputPort, inPort, cookie string, priority int) string { + // we send any ICMP destination unreachable, fragmentation needed to the OVN pipeline too so that + // path MTU discovery continues to work. + icmpMatch := "icmp" + icmpType := 3 + icmpCode := 4 + nwDst := "nw_dst" + if net2.IsIPv6String(ipAddr) { + icmpMatch = "icmp6" + icmpType = 2 + icmpCode = 0 + nwDst = "ipv6_dst" + } + + action := fmt.Sprintf("output:%s", outputPort) + if outputPort == nodetypes.OutputPortDrop { + action = "drop" + } + + icmpFragmentationFlow := fmt.Sprintf("cookie=%s, priority=%d, in_port=%s, %s, %s=%s, icmp_type=%d, "+ + "icmp_code=%d, actions=%s", + cookie, priority, inPort, icmpMatch, nwDst, ipAddr, icmpType, icmpCode, action) + return icmpFragmentationFlow +} From 5a5e3b6d952ba922057e501965dd57a8739da4b4 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Wed, 2 Jul 2025 16:30:26 +0200 Subject: [PATCH 130/181] [bridgeconfig] move flow generation locking into methods. 
The locking logic is slightly changed, because now bridge is only locked during flow generation and not for the whole openflow_manager update duration. Also only one bridge is now locked at a time. Signed-off-by: Nadia Pinaeva --- .../pkg/node/bridgeconfig/bridgeflows.go | 94 ++++++++++++------- go-controller/pkg/node/openflow_manager.go | 28 ++---- 2 files changed, 65 insertions(+), 57 deletions(-) diff --git a/go-controller/pkg/node/bridgeconfig/bridgeflows.go b/go-controller/pkg/node/bridgeconfig/bridgeflows.go index 5a3467ae21..b642ffda70 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeflows.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeflows.go @@ -14,14 +14,35 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) -func FlowsForDefaultBridge(bridge *BridgeConfiguration, extraIPs []net.IP) ([]string, error) { +func (b *BridgeConfiguration) DefaultBridgeFlows(hostSubnets []*net.IPNet, extraIPs []net.IP) ([]string, error) { + b.Mutex.Lock() + defer b.Mutex.Unlock() + dftFlows, err := b.flowsForDefaultBridge(extraIPs) + if err != nil { + return nil, err + } + dftCommonFlows, err := b.commonFlows(hostSubnets) + if err != nil { + return nil, err + } + return append(dftFlows, dftCommonFlows...), nil +} + +func (b *BridgeConfiguration) ExternalBridgeFlows(hostSubnets []*net.IPNet) ([]string, error) { + b.Mutex.Lock() + defer b.Mutex.Unlock() + return b.commonFlows(hostSubnets) +} + +// must be called with bridge.mutex held +func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string, error) { // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure // that dl_src is included in match criteria! 
- ofPortPhys := bridge.OfPortPhys - bridgeMacAddress := bridge.MacAddress.String() - ofPortHost := bridge.OfPortHost - bridgeIPs := bridge.Ips + ofPortPhys := b.OfPortPhys + bridgeMacAddress := b.MacAddress.String() + ofPortHost := b.OfPortHost + bridgeIPs := b.Ips var dftFlows []string // 14 bytes of overhead for ethernet header (does not include VLAN) @@ -58,7 +79,7 @@ func FlowsForDefaultBridge(bridge *BridgeConfiguration, extraIPs []net.IP) ([]st if err != nil { return nil, fmt.Errorf("unable to determine IPv4 physical IP of host: %v", err) } - for _, netConfig := range bridge.PatchedNetConfigs() { + for _, netConfig := range b.PatchedNetConfigs() { // table 0, SVC Hairpin from OVN destined to local host, DNAT and go to table 4 dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s, ip_src=%s,"+ @@ -82,7 +103,7 @@ func FlowsForDefaultBridge(bridge *BridgeConfiguration, extraIPs []net.IP) ([]st continue } - for _, netConfig := range bridge.PatchedNetConfigs() { + for _, netConfig := range b.PatchedNetConfigs() { dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s, ip_src=%s,"+ "actions=ct(commit,zone=%d,table=4)", @@ -121,7 +142,7 @@ func FlowsForDefaultBridge(bridge *BridgeConfiguration, extraIPs []net.IP) ([]st return nil, fmt.Errorf("unable to determine IPv6 physical IP of host: %v", err) } // table 0, SVC Hairpin from OVN destined to local host, DNAT to host, send to table 4 - for _, netConfig := range bridge.PatchedNetConfigs() { + for _, netConfig := range b.PatchedNetConfigs() { dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s, ipv6_src=%s,"+ "actions=ct(commit,zone=%d,nat(dst=%s),table=4)", @@ -144,7 +165,7 @@ func FlowsForDefaultBridge(bridge *BridgeConfiguration, extraIPs []net.IP) ([]st continue } - for _, netConfig := range bridge.PatchedNetConfigs() { + for _, netConfig := range b.PatchedNetConfigs() { dftFlows = 
append(dftFlows, fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s, ipv6_src=%s,"+ "actions=ct(commit,zone=%d,table=4)", @@ -195,7 +216,7 @@ func FlowsForDefaultBridge(bridge *BridgeConfiguration, extraIPs []net.IP) ([]st // we match on the UDNPodSubnet itself and we also don't SNAT to 169.254.0.2 // sample flow: cookie=0xdeff105, duration=1472.742s, table=0, n_packets=9, n_bytes=666, priority=550 // ip,in_port=LOCAL,nw_src=103.103.0.0/16,nw_dst=10.96.0.0/16 actions=ct(commit,table=2,zone=64001) - for _, netConfig := range bridge.PatchedNetConfigs() { + for _, netConfig := range b.PatchedNetConfigs() { if netConfig.IsDefaultNetwork() { continue } @@ -228,7 +249,7 @@ func FlowsForDefaultBridge(bridge *BridgeConfiguration, extraIPs []net.IP) ([]st // In UDN match on the whole masquerade subnet to handle replies from UDN enabled services masqDst = masqSubnet } - for _, netConfig := range bridge.PatchedNetConfigs() { + for _, netConfig := range b.PatchedNetConfigs() { // table 0, Reply hairpin traffic to host, coming from OVN, unSNAT dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_src=%s, %s_dst=%s,"+ @@ -251,7 +272,7 @@ func FlowsForDefaultBridge(bridge *BridgeConfiguration, extraIPs []net.IP) ([]st dftFlows = append(dftFlows, reassemblyFlows...) 
} if ofPortPhys != "" { - for _, netConfig := range bridge.PatchedNetConfigs() { + for _, netConfig := range b.PatchedNetConfigs() { var actions string if config.Gateway.Mode != config.GatewayModeLocal || config.Gateway.DisablePacketMTUCheck { actions = fmt.Sprintf("output:%s", netConfig.OfPortPatch) @@ -319,7 +340,7 @@ func FlowsForDefaultBridge(bridge *BridgeConfiguration, extraIPs []net.IP) ([]st nodetypes.DefaultOpenFlowCookie, match_vlan, bridgeMacAddress, strip_vlan, ofPortHost)) } - defaultNetConfig := bridge.NetConfig[types.DefaultNetworkName] + defaultNetConfig := b.NetConfig[types.DefaultNetworkName] // table 2, dispatch from Host -> OVN dftFlows = append(dftFlows, @@ -330,7 +351,7 @@ func FlowsForDefaultBridge(bridge *BridgeConfiguration, extraIPs []net.IP) ([]st // table 2, priority 200, dispatch from UDN -> Host -> OVN. These packets have // already been SNATed to the UDN's masq IP or have been marked with the UDN's packet mark. if config.IPv4Mode { - for _, netConfig := range bridge.PatchedNetConfigs() { + for _, netConfig := range b.PatchedNetConfigs() { if netConfig.IsDefaultNetwork() { continue } @@ -368,7 +389,7 @@ func FlowsForDefaultBridge(bridge *BridgeConfiguration, extraIPs []net.IP) ([]st } if config.IPv6Mode { - for _, netConfig := range bridge.PatchedNetConfigs() { + for _, netConfig := range b.PatchedNetConfigs() { if netConfig.IsDefaultNetwork() { continue } @@ -472,13 +493,14 @@ func generateIPFragmentReassemblyFlow(ofPortPhys string) []string { return flows } -func CommonFlows(hostSubnets []*net.IPNet, bridge *BridgeConfiguration) ([]string, error) { +// must be called with bridge.mutex held +func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, error) { // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure // that dl_src is included in match criteria! 
- ofPortPhys := bridge.OfPortPhys - bridgeMacAddress := bridge.MacAddress.String() - ofPortHost := bridge.OfPortHost - bridgeIPs := bridge.Ips + ofPortPhys := b.OfPortPhys + bridgeMacAddress := b.MacAddress.String() + ofPortHost := b.OfPortHost + bridgeIPs := b.Ips var dftFlows []string @@ -494,7 +516,7 @@ func CommonFlows(hostSubnets []*net.IPNet, bridge *BridgeConfiguration) ([]strin if ofPortPhys != "" { // table 0, we check to see if this dest mac is the shared mac, if so flood to all ports actions := "" - for _, netConfig := range bridge.PatchedNetConfigs() { + for _, netConfig := range b.PatchedNetConfigs() { actions += "output:" + netConfig.OfPortPatch + "," } @@ -506,7 +528,7 @@ func CommonFlows(hostSubnets []*net.IPNet, bridge *BridgeConfiguration) ([]strin // table 0, check packets coming from OVN have the correct mac address. Low priority flows that are a catch all // for non-IP packets that would normally be forwarded with NORMAL action (table 0, priority 0 flow). - for _, netConfig := range bridge.PatchedNetConfigs() { + for _, netConfig := range b.PatchedNetConfigs() { dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=10, table=0, in_port=%s, dl_src=%s, actions=output:NORMAL", nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress)) @@ -521,7 +543,7 @@ func CommonFlows(hostSubnets []*net.IPNet, bridge *BridgeConfiguration) ([]strin return nil, fmt.Errorf("unable to determine IPv4 physical IP of host: %v", err) } if ofPortPhys != "" { - for _, netConfig := range bridge.PatchedNetConfigs() { + for _, netConfig := range b.PatchedNetConfigs() { // table0, packets coming from egressIP pods that have mark 1008 on them // will be SNAT-ed a final time into nodeIP to maintain consistency in traffic even if the GR // SNATs these into egressIP prior to reaching external bridge. 
@@ -536,9 +558,9 @@ func CommonFlows(hostSubnets []*net.IPNet, bridge *BridgeConfiguration) ([]strin // table 0, packets coming from egressIP pods only from user defined networks. If an egressIP is assigned to // this node, then all networks get a flow even if no pods on that network were selected for by this egressIP. if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && - config.Gateway.Mode != config.GatewayModeDisabled && bridge.EipMarkIPs != nil { + config.Gateway.Mode != config.GatewayModeDisabled && b.EipMarkIPs != nil { if netConfig.MasqCTMark != nodetypes.CtMarkOVN { - for mark, eip := range bridge.EipMarkIPs.GetIPv4() { + for mark, eip := range b.EipMarkIPs.GetIPv4() { dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ip, pkt_mark=%d, "+ "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", @@ -580,7 +602,7 @@ func CommonFlows(hostSubnets []*net.IPNet, bridge *BridgeConfiguration) ([]strin nodetypes.DefaultOpenFlowCookie, ofPortHost, config.Default.ConntrackZone, nodetypes.CtMarkHost, mod_vlan_id, ofPortPhys)) } if config.Gateway.Mode == config.GatewayModeLocal { - for _, netConfig := range bridge.PatchedNetConfigs() { + for _, netConfig := range b.PatchedNetConfigs() { // table 0, any packet coming from OVN send to host in LGW mode, host will take care of sending it outside if needed. // exceptions are traffic for egressIP and egressGW features and ICMP related traffic which will hit the priority 100 flow instead of this. 
dftFlows = append(dftFlows, @@ -620,7 +642,7 @@ func CommonFlows(hostSubnets []*net.IPNet, bridge *BridgeConfiguration) ([]strin return nil, fmt.Errorf("unable to determine IPv6 physical IP of host: %v", err) } if ofPortPhys != "" { - for _, netConfig := range bridge.PatchedNetConfigs() { + for _, netConfig := range b.PatchedNetConfigs() { // table0, packets coming from egressIP pods that have mark 1008 on them // will be DNAT-ed a final time into nodeIP to maintain consistency in traffic even if the GR // DNATs these into egressIP prior to reaching external bridge. @@ -635,9 +657,9 @@ func CommonFlows(hostSubnets []*net.IPNet, bridge *BridgeConfiguration) ([]strin // table 0, packets coming from egressIP pods only from user defined networks. If an egressIP is assigned to // this node, then all networks get a flow even if no pods on that network were selected for by this egressIP. if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && - config.Gateway.Mode != config.GatewayModeDisabled && bridge.EipMarkIPs != nil { + config.Gateway.Mode != config.GatewayModeDisabled && b.EipMarkIPs != nil { if netConfig.MasqCTMark != nodetypes.CtMarkOVN { - for mark, eip := range bridge.EipMarkIPs.GetIPv6() { + for mark, eip := range b.EipMarkIPs.GetIPv6() { dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ipv6, pkt_mark=%d, "+ "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", @@ -679,7 +701,7 @@ func CommonFlows(hostSubnets []*net.IPNet, bridge *BridgeConfiguration) ([]strin } if config.Gateway.Mode == config.GatewayModeLocal { - for _, netConfig := range bridge.PatchedNetConfigs() { + for _, netConfig := range b.PatchedNetConfigs() { // table 0, any packet coming from OVN send to host in LGW mode, host will take care of sending it outside if needed. 
// exceptions are traffic for egressIP and egressGW features and ICMP related traffic which will hit the priority 100 flow instead of this. dftFlows = append(dftFlows, @@ -714,7 +736,7 @@ func CommonFlows(hostSubnets []*net.IPNet, bridge *BridgeConfiguration) ([]strin // Due to the fact that ovn-controllers on different nodes apply the changes independently, // there is a chance that the pod traffic will reach the egress node before it configures the SNAT flows. // Drop pod traffic that is not SNATed, excluding local pods(required for ICNIv2) - defaultNetConfig := bridge.NetConfig[types.DefaultNetworkName] + defaultNetConfig := b.NetConfig[types.DefaultNetworkName] if config.OVNKubernetesFeature.EnableEgressIP { for _, clusterEntry := range config.Default.ClusterSubnets { cidr := clusterEntry.CIDR @@ -739,7 +761,7 @@ func CommonFlows(hostSubnets []*net.IPNet, bridge *BridgeConfiguration) ([]strin } if ofPortPhys != "" { - for _, netConfig := range bridge.PatchedNetConfigs() { + for _, netConfig := range b.PatchedNetConfigs() { isNetworkAdvertised := netConfig.Advertised.Load() // disableSNATMultipleGWs only applies to default network disableSNATMultipleGWs := netConfig.IsDefaultNetwork() && config.Gateway.DisableSNATMultipleGWs @@ -817,7 +839,7 @@ func CommonFlows(hostSubnets []*net.IPNet, bridge *BridgeConfiguration) ([]strin "actions=output:%s", nodetypes.DefaultOpenFlowCookie, ofPortHost)) // Send UDN destined traffic to right patch port - for _, netConfig := range bridge.PatchedNetConfigs() { + for _, netConfig := range b.PatchedNetConfigs() { if netConfig.MasqCTMark != nodetypes.CtMarkOVN { dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=5, table=11, ct_mark=%s, "+ @@ -838,13 +860,15 @@ func CommonFlows(hostSubnets []*net.IPNet, bridge *BridgeConfiguration) ([]strin return dftFlows, nil } -func PmtudDropFlows(bridge *BridgeConfiguration, ipAddrs []string) []string { +func (b *BridgeConfiguration) PMTUDDropFlows(ipAddrs []string) []string { + 
b.Mutex.Lock() + defer b.Mutex.Unlock() var flows []string if config.Gateway.Mode != config.GatewayModeShared { return nil } for _, addr := range ipAddrs { - for _, netConfig := range bridge.PatchedNetConfigs() { + for _, netConfig := range b.PatchedNetConfigs() { flows = append(flows, nodeutil.GenerateICMPFragmentationFlow(addr, nodetypes.OutputPortDrop, netConfig.OfPortPatch, nodetypes.PmtudOpenFlowCookie, 700)) } diff --git a/go-controller/pkg/node/openflow_manager.go b/go-controller/pkg/node/openflow_manager.go index b8d8d8406e..f7e1bccfe5 100644 --- a/go-controller/pkg/node/openflow_manager.go +++ b/go-controller/pkg/node/openflow_manager.go @@ -207,16 +207,10 @@ func (c *openflowManager) Run(stopChan <-chan struct{}, doneWg *sync.WaitGroup) } func (c *openflowManager) updateBridgePMTUDFlowCache(key string, ipAddrs []string) { - // protect defaultBridge config from being updated by gw.nodeIPManager - c.defaultBridge.Mutex.Lock() - defer c.defaultBridge.Mutex.Unlock() - - dftFlows := bridgeconfig.PmtudDropFlows(c.defaultBridge, ipAddrs) + dftFlows := c.defaultBridge.PMTUDDropFlows(ipAddrs) c.updateFlowCacheEntry(key, dftFlows) if c.externalGatewayBridge != nil { - c.externalGatewayBridge.Mutex.Lock() - defer c.externalGatewayBridge.Mutex.Unlock() - exGWBridgeDftFlows := bridgeconfig.PmtudDropFlows(c.externalGatewayBridge, ipAddrs) + exGWBridgeDftFlows := c.externalGatewayBridge.PMTUDDropFlows(ipAddrs) c.updateExBridgeFlowCacheEntry(key, exGWBridgeDftFlows) } } @@ -224,35 +218,25 @@ func (c *openflowManager) updateBridgePMTUDFlowCache(key string, ipAddrs []strin // updateBridgeFlowCache generates the "static" per-bridge flows // note: this is shared between shared and local gateway modes func (c *openflowManager) updateBridgeFlowCache(hostIPs []net.IP, hostSubnets []*net.IPNet) error { - // protect defaultBridge config from being updated by gw.nodeIPManager - c.defaultBridge.Mutex.Lock() - defer c.defaultBridge.Mutex.Unlock() - // CAUTION: when adding new flows 
where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure // that dl_src is included in match criteria! - dftFlows, err := bridgeconfig.FlowsForDefaultBridge(c.defaultBridge, hostIPs) + dftFlows, err := c.defaultBridge.DefaultBridgeFlows(hostSubnets, hostIPs) if err != nil { return err } - dftCommonFlows, err := bridgeconfig.CommonFlows(hostSubnets, c.defaultBridge) - if err != nil { - return err - } - dftFlows = append(dftFlows, dftCommonFlows...) c.updateFlowCacheEntry("NORMAL", []string{fmt.Sprintf("table=0,priority=0,actions=%s\n", util.NormalAction)}) c.updateFlowCacheEntry("DEFAULT", dftFlows) // we consume ex gw bridge flows only if that is enabled if c.externalGatewayBridge != nil { - c.externalGatewayBridge.Mutex.Lock() - defer c.externalGatewayBridge.Mutex.Unlock() - c.updateExBridgeFlowCacheEntry("NORMAL", []string{fmt.Sprintf("table=0,priority=0,actions=%s\n", util.NormalAction)}) - exGWBridgeDftFlows, err := bridgeconfig.CommonFlows(hostSubnets, c.externalGatewayBridge) + exGWBridgeDftFlows, err := c.externalGatewayBridge.ExternalBridgeFlows(hostSubnets) if err != nil { return err } + + c.updateExBridgeFlowCacheEntry("NORMAL", []string{fmt.Sprintf("table=0,priority=0,actions=%s\n", util.NormalAction)}) c.updateExBridgeFlowCacheEntry("DEFAULT", exGWBridgeDftFlows) } return nil From 4ad1727c088ee8bf7d7f8455d5dae26c55380e29 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Wed, 2 Jul 2025 17:11:52 +0200 Subject: [PATCH 131/181] [bridgeconfig] make most members internal, ensure correct locking. Split internal member into read-only and read-writable, make sure to use mutex in the second case. Rename some methods to remove unneeded "bridge" part of the name. Move GetGatewayIface logic to the bridgeconfig creation. 
Signed-off-by: Nadia Pinaeva --- .../pkg/node/bridgeconfig/bridgeconfig.go | 213 +++++++++++------- .../bridgeconfig/bridgeconfig_testutil.go | 12 +- .../pkg/node/bridgeconfig/bridgeflows.go | 28 +-- go-controller/pkg/node/gateway.go | 26 +-- .../pkg/node/gateway_localnet_linux_test.go | 2 +- go-controller/pkg/node/gateway_shared_intf.go | 10 +- go-controller/pkg/node/gateway_udn.go | 4 +- go-controller/pkg/node/gateway_udn_test.go | 56 ++--- .../pkg/node/node_ip_handler_linux.go | 3 +- go-controller/pkg/node/openflow_manager.go | 16 +- 10 files changed, 211 insertions(+), 159 deletions(-) diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go index 0dd601cc24..979474eab5 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go @@ -63,20 +63,25 @@ func (netConfig *BridgeUDNConfiguration) setOfPatchPort() error { } type BridgeConfiguration struct { - Mutex sync.Mutex - NodeName string - BridgeName string - UplinkName string - GwIface string - GwIfaceRep string - Ips []*net.IPNet - InterfaceID string - MacAddress net.HardwareAddr - OfPortPhys string - OfPortHost string - NetConfig map[string]*BridgeUDNConfiguration - EipMarkIPs *egressip.MarkIPsCache - NextHops []net.IP + Mutex sync.Mutex + + // variables that are only set on creation and never changed + // don't require mutex lock to read + nodeName string + bridgeName string + uplinkName string + gwIface string + gwIfaceRep string + interfaceID string + + // variables that can be updated (read/write access should be done with mutex held) + ofPortHost string + ips []*net.IPNet + macAddress net.HardwareAddr + ofPortPhys string + netConfig map[string]*BridgeUDNConfiguration + eipMarkIPs *egressip.MarkIPsCache + nextHops []net.IP } func NewBridgeConfiguration(intfName, nodeName, @@ -95,16 +100,16 @@ func NewBridgeConfiguration(intfName, nodeName, NodeSubnets: nodeSubnets, } res := 
BridgeConfiguration{ - NodeName: nodeName, - NetConfig: map[string]*BridgeUDNConfiguration{ + nodeName: nodeName, + netConfig: map[string]*BridgeUDNConfiguration{ types.DefaultNetworkName: defaultNetConfig, }, - EipMarkIPs: egressip.NewMarkIPsCache(), + eipMarkIPs: egressip.NewMarkIPsCache(), } if len(gwNextHops) > 0 { - res.NextHops = gwNextHops + res.nextHops = gwNextHops } - res.NetConfig[types.DefaultNetworkName].Advertised.Store(advertised) + res.netConfig[types.DefaultNetworkName].Advertised.Store(advertised) if config.Gateway.GatewayAcceleratedInterface != "" { // Try to get representor for the specified gateway device. @@ -138,20 +143,20 @@ func NewBridgeConfiguration(intfName, nodeName, if err != nil { return nil, fmt.Errorf("failed to find nic name for bridge %s: %w", bridgeName, err) } - res.BridgeName = bridgeName - res.UplinkName = uplinkName - res.GwIfaceRep = intfRep - res.GwIface = gwIntf - res.MacAddress = link.Attrs().HardwareAddr + res.bridgeName = bridgeName + res.uplinkName = uplinkName + res.gwIfaceRep = intfRep + res.gwIface = gwIntf + res.macAddress = link.Attrs().HardwareAddr } else if bridgeName, _, err := util.RunOVSVsctl("port-to-br", intfName); err == nil { // This is an OVS bridge's internal port uplinkName, err := util.GetNicName(bridgeName) if err != nil { return nil, fmt.Errorf("failed to find nic name for bridge %s: %w", bridgeName, err) } - res.BridgeName = bridgeName - res.GwIface = bridgeName - res.UplinkName = uplinkName + res.bridgeName = bridgeName + res.gwIface = bridgeName + res.uplinkName = uplinkName gwIntf = bridgeName } else if _, _, err := util.RunOVSVsctl("br-exists", intfName); err != nil { // This is not a OVS bridge. 
We need to create a OVS bridge @@ -160,9 +165,9 @@ func NewBridgeConfiguration(intfName, nodeName, if err != nil { return nil, fmt.Errorf("nicToBridge failed for %s: %w", intfName, err) } - res.BridgeName = bridgeName - res.GwIface = bridgeName - res.UplinkName = intfName + res.bridgeName = bridgeName + res.gwIface = bridgeName + res.uplinkName = intfName gwIntf = bridgeName } else { // gateway interface is an OVS bridge @@ -174,60 +179,62 @@ func NewBridgeConfiguration(intfName, nodeName, return nil, fmt.Errorf("failed to find intfName for %s: %w", intfName, err) } } else { - res.UplinkName = uplinkName + res.uplinkName = uplinkName } - res.BridgeName = intfName - res.GwIface = intfName + res.bridgeName = intfName + res.gwIface = intfName } // Now, we get IP addresses for the bridge if len(gwIPs) > 0 { // use gwIPs if provided - res.Ips = gwIPs + res.ips = gwIPs } else { // get IP addresses from OVS bridge. If IP does not exist, // error out. - res.Ips, err = nodeutil.GetNetworkInterfaceIPAddresses(gwIntf) + res.ips, err = nodeutil.GetNetworkInterfaceIPAddresses(gwIntf) if err != nil { return nil, fmt.Errorf("failed to get interface details for %s: %w", gwIntf, err) } } if !isGWAcclInterface { // We do not have an accelerated device for Gateway interface - res.MacAddress, err = util.GetOVSPortMACAddress(gwIntf) + res.macAddress, err = util.GetOVSPortMACAddress(gwIntf) if err != nil { return nil, fmt.Errorf("failed to get MAC address for ovs port %s: %w", gwIntf, err) } } - res.InterfaceID, err = bridgedGatewayNodeSetup(nodeName, res.BridgeName, physicalNetworkName) + res.interfaceID, err = bridgedGatewayNodeSetup(nodeName, res.bridgeName, physicalNetworkName) if err != nil { return nil, fmt.Errorf("failed to set up shared interface gateway: %v", err) } // the name of the patch port created by ovn-controller is of the form // patch--to-br-int - defaultNetConfig.PatchPort = (&util.DefaultNetInfo{}).GetNetworkScopedPatchPortName(res.BridgeName, nodeName) + 
defaultNetConfig.PatchPort = (&util.DefaultNetInfo{}).GetNetworkScopedPatchPortName(res.bridgeName, nodeName) // for DPU we use the host MAC address for the Gateway configuration if config.OvnKubeNode.Mode == types.NodeModeDPU { - hostRep, err := util.GetDPUHostInterface(res.BridgeName) + hostRep, err := util.GetDPUHostInterface(res.bridgeName) if err != nil { return nil, err } - res.MacAddress, err = util.GetSriovnetOps().GetRepresentorPeerMacAddress(hostRep) + res.macAddress, err = util.GetSriovnetOps().GetRepresentorPeerMacAddress(hostRep) if err != nil { return nil, err } } + + // If gwIface is set, then accelerated GW interface is present and we use it. Else use external bridge instead. + if res.gwIface == "" { + res.gwIface = res.bridgeName + } + return &res, nil } func (b *BridgeConfiguration) GetGatewayIface() string { - // If GwIface is set, then accelerated GW interface is present and we use it. If else use external bridge instead. - if b.GwIface != "" { - return b.GwIface - } - return b.BridgeName + return b.gwIface } // UpdateInterfaceIPAddresses sets and returns the bridge's current ips @@ -256,24 +263,24 @@ func (b *BridgeConfiguration) UpdateInterfaceIPAddresses(node *corev1.Node) ([]* } } - b.Ips = ifAddrs + b.ips = ifAddrs return ifAddrs, nil } -// GetBridgePortConfigurations returns a slice of Network port configurations along with the +// GetPortConfigurations returns a slice of Network port configurations along with the // uplinkName and physical port's ofport value -func (b *BridgeConfiguration) GetBridgePortConfigurations() ([]*BridgeUDNConfiguration, string, string) { +func (b *BridgeConfiguration) GetPortConfigurations() ([]*BridgeUDNConfiguration, string, string) { b.Mutex.Lock() defer b.Mutex.Unlock() var netConfigs []*BridgeUDNConfiguration - for _, netConfig := range b.NetConfig { + for _, netConfig := range b.netConfig { netConfigs = append(netConfigs, netConfig.ShallowCopy()) } - return netConfigs, b.UplinkName, b.OfPortPhys + return 
netConfigs, b.uplinkName, b.ofPortPhys } -// AddNetworkBridgeConfig adds the patchport and ctMark value for the provided netInfo into the bridge configuration cache -func (b *BridgeConfiguration) AddNetworkBridgeConfig( +// AddNetworkConfig adds the patchport and ctMark value for the provided netInfo into the bridge configuration cache +func (b *BridgeConfiguration) AddNetworkConfig( nInfo util.NetInfo, nodeSubnets []*net.IPNet, masqCTMark, pktMark uint, @@ -282,9 +289,9 @@ func (b *BridgeConfiguration) AddNetworkBridgeConfig( defer b.Mutex.Unlock() netName := nInfo.GetNetworkName() - patchPort := nInfo.GetNetworkScopedPatchPortName(b.BridgeName, b.NodeName) + patchPort := nInfo.GetNetworkScopedPatchPortName(b.bridgeName, b.nodeName) - _, found := b.NetConfig[netName] + _, found := b.netConfig[netName] if !found { netConfig := &BridgeUDNConfiguration{ PatchPort: patchPort, @@ -295,9 +302,9 @@ func (b *BridgeConfiguration) AddNetworkBridgeConfig( Subnets: nInfo.Subnets(), NodeSubnets: nodeSubnets, } - netConfig.Advertised.Store(util.IsPodNetworkAdvertisedAtNode(nInfo, b.NodeName)) + netConfig.Advertised.Store(util.IsPodNetworkAdvertisedAtNode(nInfo, b.nodeName)) - b.NetConfig[netName] = netConfig + b.netConfig[netName] = netConfig } else { klog.Warningf("Trying to update bridge config for network %s which already"+ "exists in cache...networks are not mutable...ignoring update", nInfo.GetNetworkName()) @@ -305,18 +312,18 @@ func (b *BridgeConfiguration) AddNetworkBridgeConfig( return nil } -// DelNetworkBridgeConfig deletes the provided netInfo from the bridge configuration cache -func (b *BridgeConfiguration) DelNetworkBridgeConfig(nInfo util.NetInfo) { +// DelNetworkConfig deletes the provided netInfo from the bridge configuration cache +func (b *BridgeConfiguration) DelNetworkConfig(nInfo util.NetInfo) { b.Mutex.Lock() defer b.Mutex.Unlock() - delete(b.NetConfig, nInfo.GetNetworkName()) + delete(b.netConfig, nInfo.GetNetworkName()) } -func (b *BridgeConfiguration) 
GetNetworkBridgeConfig(networkName string) *BridgeUDNConfiguration { +func (b *BridgeConfiguration) GetNetworkConfig(networkName string) *BridgeUDNConfiguration { b.Mutex.Lock() defer b.Mutex.Unlock() - return b.NetConfig[networkName] + return b.netConfig[networkName] } // GetActiveNetworkBridgeConfigCopy returns a shallow copy of the network configuration corresponding to the @@ -328,15 +335,17 @@ func (b *BridgeConfiguration) GetActiveNetworkBridgeConfigCopy(networkName strin b.Mutex.Lock() defer b.Mutex.Unlock() - if netConfig, found := b.NetConfig[networkName]; found && netConfig.OfPortPatch != "" { + if netConfig, found := b.netConfig[networkName]; found && netConfig.OfPortPatch != "" { return netConfig.ShallowCopy() } return nil } func (b *BridgeConfiguration) PatchedNetConfigs() []*BridgeUDNConfiguration { - result := make([]*BridgeUDNConfiguration, 0, len(b.NetConfig)) - for _, netConfig := range b.NetConfig { + b.Mutex.Lock() + defer b.Mutex.Unlock() + result := make([]*BridgeUDNConfiguration, 0, len(b.netConfig)) + for _, netConfig := range b.netConfig { if netConfig.OfPortPatch == "" { continue } @@ -350,7 +359,7 @@ func (b *BridgeConfiguration) PatchedNetConfigs() []*BridgeUDNConfiguration { func (b *BridgeConfiguration) IsGatewayReady() bool { b.Mutex.Lock() defer b.Mutex.Unlock() - for _, netConfig := range b.NetConfig { + for _, netConfig := range b.netConfig { ready := gatewayReady(netConfig.PatchPort) if !ready { return false @@ -363,44 +372,44 @@ func (b *BridgeConfiguration) SetOfPorts() error { b.Mutex.Lock() defer b.Mutex.Unlock() // Get ofport of patchPort - for _, netConfig := range b.NetConfig { + for _, netConfig := range b.netConfig { if err := netConfig.setOfPatchPort(); err != nil { return fmt.Errorf("error setting bridge openflow ports for network with patchport %v: err: %v", netConfig.PatchPort, err) } } - if b.UplinkName != "" { + if b.uplinkName != "" { // Get ofport of physical interface - ofportPhys, stderr, err := 
util.GetOVSOfPort("get", "interface", b.UplinkName, "ofport") + ofportPhys, stderr, err := util.GetOVSOfPort("get", "interface", b.uplinkName, "ofport") if err != nil { return fmt.Errorf("failed to get ofport of %s, stderr: %q, error: %v", - b.UplinkName, stderr, err) + b.uplinkName, stderr, err) } - b.OfPortPhys = ofportPhys + b.ofPortPhys = ofportPhys } // Get ofport representing the host. That is, host representor port in case of DPUs, ovsLocalPort otherwise. if config.OvnKubeNode.Mode == types.NodeModeDPU { var stderr string - hostRep, err := util.GetDPUHostInterface(b.BridgeName) + hostRep, err := util.GetDPUHostInterface(b.bridgeName) if err != nil { return err } - b.OfPortHost, stderr, err = util.RunOVSVsctl("get", "interface", hostRep, "ofport") + b.ofPortHost, stderr, err = util.RunOVSVsctl("get", "interface", hostRep, "ofport") if err != nil { return fmt.Errorf("failed to get ofport of host interface %s, stderr: %q, error: %v", hostRep, stderr, err) } } else { var err error - if b.GwIfaceRep != "" { - b.OfPortHost, _, err = util.RunOVSVsctl("get", "interface", b.GwIfaceRep, "ofport") + if b.gwIfaceRep != "" { + b.ofPortHost, _, err = util.RunOVSVsctl("get", "interface", b.gwIfaceRep, "ofport") if err != nil { - return fmt.Errorf("failed to get ofport of bypass rep %s, error: %v", b.GwIfaceRep, err) + return fmt.Errorf("failed to get ofport of bypass rep %s, error: %v", b.gwIfaceRep, err) } } else { - b.OfPortHost = nodetypes.OvsLocalPort + b.ofPortHost = nodetypes.OvsLocalPort } } @@ -410,38 +419,74 @@ func (b *BridgeConfiguration) SetOfPorts() error { func (b *BridgeConfiguration) GetIPs() []*net.IPNet { b.Mutex.Lock() defer b.Mutex.Unlock() - return b.Ips + return b.ips } func (b *BridgeConfiguration) GetBridgeName() string { - b.Mutex.Lock() - defer b.Mutex.Unlock() - return b.BridgeName + return b.bridgeName +} + +func (b *BridgeConfiguration) GetUplinkName() string { + return b.uplinkName } func (b *BridgeConfiguration) GetMAC() net.HardwareAddr { 
b.Mutex.Lock() defer b.Mutex.Unlock() - return b.MacAddress + return b.macAddress } func (b *BridgeConfiguration) SetMAC(macAddr net.HardwareAddr) { b.Mutex.Lock() defer b.Mutex.Unlock() - b.MacAddress = macAddr + b.macAddress = macAddr } func (b *BridgeConfiguration) SetNetworkOfPatchPort(netName string) error { b.Mutex.Lock() defer b.Mutex.Unlock() - netConfig, found := b.NetConfig[netName] + netConfig, found := b.netConfig[netName] if !found { - return fmt.Errorf("failed to find network %s configuration on bridge %s", netName, b.BridgeName) + return fmt.Errorf("failed to find network %s configuration on bridge %s", netName, b.bridgeName) } return netConfig.setOfPatchPort() } +func (b *BridgeConfiguration) GetInterfaceID() string { + return b.interfaceID +} + +func (b *BridgeConfiguration) GetOfPortHost() string { + b.Mutex.Lock() + defer b.Mutex.Unlock() + return b.ofPortHost +} + +func (b *BridgeConfiguration) GetEIPMarkIPs() *egressip.MarkIPsCache { + b.Mutex.Lock() + defer b.Mutex.Unlock() + return b.eipMarkIPs +} + +func (b *BridgeConfiguration) SetEIPMarkIPs(eipMarkIPs *egressip.MarkIPsCache) { + b.Mutex.Lock() + defer b.Mutex.Unlock() + b.eipMarkIPs = eipMarkIPs +} + +func (b *BridgeConfiguration) GetNextHops() []net.IP { + b.Mutex.Lock() + defer b.Mutex.Unlock() + return b.nextHops +} + +func (b *BridgeConfiguration) SetNextHops(nextHops []net.IP) { + b.Mutex.Lock() + defer b.Mutex.Unlock() + b.nextHops = nextHops +} + func gatewayReady(patchPort string) bool { // Get ofport of patchPort ofport, _, err := util.GetOVSOfPort("--if-exists", "get", "interface", patchPort, "ofport") diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go index 271c555e7e..d5e3e9d5cd 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go @@ -20,7 +20,7 @@ func TestDefaultBridgeConfig() *BridgeConfiguration { 
OfPortPatch: "patch-breth0_ov", } return &BridgeConfiguration{ - NetConfig: map[string]*BridgeUDNConfiguration{ + netConfig: map[string]*BridgeUDNConfiguration{ types.DefaultNetworkName: defaultNetConfig, }, } @@ -28,11 +28,17 @@ func TestDefaultBridgeConfig() *BridgeConfiguration { func TestBridgeConfig(brName string) *BridgeConfiguration { return &BridgeConfiguration{ - BridgeName: brName, - GwIface: brName, + bridgeName: brName, + gwIface: brName, } } +func (b *BridgeConfiguration) GetNetConfigLen() int { + b.Mutex.Lock() + defer b.Mutex.Unlock() + return len(b.netConfig) +} + func CheckUDNSvcIsolationOVSFlows(flows []string, netConfig *BridgeUDNConfiguration, netName string, svcCIDR *net.IPNet, expectedNFlows int) { By(fmt.Sprintf("Checking UDN %s service isolation flows for %s; expected %d flows", netName, svcCIDR.String(), expectedNFlows)) diff --git a/go-controller/pkg/node/bridgeconfig/bridgeflows.go b/go-controller/pkg/node/bridgeconfig/bridgeflows.go index b642ffda70..236d7b111a 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeflows.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeflows.go @@ -39,10 +39,10 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure // that dl_src is included in match criteria! 
- ofPortPhys := b.OfPortPhys - bridgeMacAddress := b.MacAddress.String() - ofPortHost := b.OfPortHost - bridgeIPs := b.Ips + ofPortPhys := b.ofPortPhys + bridgeMacAddress := b.macAddress.String() + ofPortHost := b.ofPortHost + bridgeIPs := b.ips var dftFlows []string // 14 bytes of overhead for ethernet header (does not include VLAN) @@ -340,7 +340,7 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string nodetypes.DefaultOpenFlowCookie, match_vlan, bridgeMacAddress, strip_vlan, ofPortHost)) } - defaultNetConfig := b.NetConfig[types.DefaultNetworkName] + defaultNetConfig := b.netConfig[types.DefaultNetworkName] // table 2, dispatch from Host -> OVN dftFlows = append(dftFlows, @@ -497,10 +497,10 @@ func generateIPFragmentReassemblyFlow(ofPortPhys string) []string { func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, error) { // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure // that dl_src is included in match criteria! - ofPortPhys := b.OfPortPhys - bridgeMacAddress := b.MacAddress.String() - ofPortHost := b.OfPortHost - bridgeIPs := b.Ips + ofPortPhys := b.ofPortPhys + bridgeMacAddress := b.macAddress.String() + ofPortHost := b.ofPortHost + bridgeIPs := b.ips var dftFlows []string @@ -558,9 +558,9 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e // table 0, packets coming from egressIP pods only from user defined networks. If an egressIP is assigned to // this node, then all networks get a flow even if no pods on that network were selected for by this egressIP. 
if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && - config.Gateway.Mode != config.GatewayModeDisabled && b.EipMarkIPs != nil { + config.Gateway.Mode != config.GatewayModeDisabled && b.eipMarkIPs != nil { if netConfig.MasqCTMark != nodetypes.CtMarkOVN { - for mark, eip := range b.EipMarkIPs.GetIPv4() { + for mark, eip := range b.eipMarkIPs.GetIPv4() { dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ip, pkt_mark=%d, "+ "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", @@ -657,9 +657,9 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e // table 0, packets coming from egressIP pods only from user defined networks. If an egressIP is assigned to // this node, then all networks get a flow even if no pods on that network were selected for by this egressIP. if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && - config.Gateway.Mode != config.GatewayModeDisabled && b.EipMarkIPs != nil { + config.Gateway.Mode != config.GatewayModeDisabled && b.eipMarkIPs != nil { if netConfig.MasqCTMark != nodetypes.CtMarkOVN { - for mark, eip := range b.EipMarkIPs.GetIPv6() { + for mark, eip := range b.eipMarkIPs.GetIPv6() { dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ipv6, pkt_mark=%d, "+ "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", @@ -736,7 +736,7 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e // Due to the fact that ovn-controllers on different nodes apply the changes independently, // there is a chance that the pod traffic will reach the egress node before it configures the SNAT flows. 
// Drop pod traffic that is not SNATed, excluding local pods(required for ICNIv2) - defaultNetConfig := b.NetConfig[types.DefaultNetworkName] + defaultNetConfig := b.netConfig[types.DefaultNetworkName] if config.OVNKubernetesFeature.EnableEgressIP { for _, clusterEntry := range config.Default.ClusterSubnets { cidr := clusterEntry.CIDR diff --git a/go-controller/pkg/node/gateway.go b/go-controller/pkg/node/gateway.go index a476783537..4fc0004d58 100644 --- a/go-controller/pkg/node/gateway.go +++ b/go-controller/pkg/node/gateway.go @@ -382,7 +382,7 @@ func gatewayInitInternal(nodeName, gwIntf, egressGatewayIntf string, gwNextHops "IP fragmentation or large TCP/UDP payloads may not be forwarded correctly.") enableGatewayMTU = false } else { - chkPktLengthSupported, err := util.DetectCheckPktLengthSupport(gatewayBridge.BridgeName) + chkPktLengthSupported, err := util.DetectCheckPktLengthSupport(gatewayBridge.GetBridgeName()) if err != nil { return nil, nil, err } @@ -416,9 +416,9 @@ func gatewayInitInternal(nodeName, gwIntf, egressGatewayIntf string, gwNextHops } if config.Default.EnableUDPAggregation { - err = setupUDPAggregationUplink(gatewayBridge.UplinkName) + err = setupUDPAggregationUplink(gatewayBridge.GetUplinkName()) if err == nil && egressGWBridge != nil { - err = setupUDPAggregationUplink(egressGWBridge.UplinkName) + err = setupUDPAggregationUplink(egressGWBridge.GetUplinkName()) } if err != nil { klog.Warningf("Could not enable UDP packet aggregation on uplink interface (aggregation will be disabled): %v", err) @@ -427,25 +427,25 @@ func gatewayInitInternal(nodeName, gwIntf, egressGatewayIntf string, gwNextHops } // Set static FDB entry for LOCAL port - if err := util.SetStaticFDBEntry(gatewayBridge.bridgeName, gatewayBridge.bridgeName, gatewayBridge.macAddress); err != nil { + if err := util.SetStaticFDBEntry(gatewayBridge.GetBridgeName(), gatewayBridge.GetBridgeName(), gatewayBridge.GetMAC()); err != nil { return nil, nil, err } l3GwConfig := 
util.L3GatewayConfig{ Mode: config.Gateway.Mode, ChassisID: chassisID, - BridgeID: gatewayBridge.BridgeName, - InterfaceID: gatewayBridge.InterfaceID, - MACAddress: gatewayBridge.MacAddress, - IPAddresses: gatewayBridge.Ips, + BridgeID: gatewayBridge.GetBridgeName(), + InterfaceID: gatewayBridge.GetInterfaceID(), + MACAddress: gatewayBridge.GetMAC(), + IPAddresses: gatewayBridge.GetIPs(), NextHops: gwNextHops, NodePortEnable: config.Gateway.NodeportEnable, VLANID: &config.Gateway.VLANID, } if egressGWBridge != nil { - l3GwConfig.EgressGWInterfaceID = egressGWBridge.InterfaceID - l3GwConfig.EgressGWMACAddress = egressGWBridge.MacAddress - l3GwConfig.EgressGWIPAddresses = egressGWBridge.Ips + l3GwConfig.EgressGWInterfaceID = egressGWBridge.GetInterfaceID() + l3GwConfig.EgressGWMACAddress = egressGWBridge.GetMAC() + l3GwConfig.EgressGWIPAddresses = egressGWBridge.GetIPs() } err = util.SetL3GatewayConfig(nodeAnnotator, &l3GwConfig) @@ -467,11 +467,11 @@ func (g *gateway) SetDefaultGatewayBridgeMAC(macAddr net.HardwareAddr) { } func (g *gateway) SetDefaultPodNetworkAdvertised(isPodNetworkAdvertised bool) { - g.openflowManager.defaultBridge.NetConfig[types.DefaultNetworkName].Advertised.Store(isPodNetworkAdvertised) + g.openflowManager.defaultBridge.GetNetworkConfig(types.DefaultNetworkName).Advertised.Store(isPodNetworkAdvertised) } func (g *gateway) GetDefaultPodNetworkAdvertised() bool { - return g.openflowManager.defaultBridge.NetConfig[types.DefaultNetworkName].Advertised.Load() + return g.openflowManager.defaultBridge.GetNetworkConfig(types.DefaultNetworkName).Advertised.Load() } // Reconcile handles triggering updates to different components of a gateway, like OFM, Services diff --git a/go-controller/pkg/node/gateway_localnet_linux_test.go b/go-controller/pkg/node/gateway_localnet_linux_test.go index d259bc14e3..e1ff21cd49 100644 --- a/go-controller/pkg/node/gateway_localnet_linux_test.go +++ b/go-controller/pkg/node/gateway_localnet_linux_test.go @@ -58,7 +58,7 
@@ func initFakeNodePortWatcher(iptV4, iptV6 util.IPTablesHelper) *nodePortWatcher gwMACParsed, _ := net.ParseMAC(gwMAC) defaultBridge := bridgeconfig.TestDefaultBridgeConfig() - defaultBridge.MacAddress = gwMACParsed + defaultBridge.SetMAC(gwMACParsed) fNPW := nodePortWatcher{ ofportPhys: "eth0", diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index 8dfe97a3f1..967828dcd0 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -1474,8 +1474,8 @@ func newGateway( } } if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && config.Gateway.Mode != config.GatewayModeDisabled { - gw.bridgeEIPAddrManager = egressip.NewBridgeEIPAddrManager(nodeName, gwBridge.BridgeName, linkManager, kube, watchFactory.EgressIPInformer(), watchFactory.NodeCoreInformer()) - gwBridge.EipMarkIPs = gw.bridgeEIPAddrManager.GetCache() + gw.bridgeEIPAddrManager = egressip.NewBridgeEIPAddrManager(nodeName, gwBridge.GetBridgeName(), linkManager, kube, watchFactory.EgressIPInformer(), watchFactory.NodeCoreInformer()) + gwBridge.SetEIPMarkIPs(gw.bridgeEIPAddrManager.GetCache()) } gw.nodeIPManager = newAddressManager(nodeName, kube, mgmtPort, watchFactory, gwBridge) @@ -1559,10 +1559,10 @@ func newNodePortWatcher( // Get ofport of physical interface ofportPhys, stderr, err := util.GetOVSOfPort("--if-exists", "get", - "interface", gwBridge.UplinkName, "ofport") + "interface", gwBridge.GetUplinkName(), "ofport") if err != nil { return nil, fmt.Errorf("failed to get ofport of %s, stderr: %q, error: %v", - gwBridge.UplinkName, stderr, err) + gwBridge.GetUplinkName(), stderr, err) } // In the shared gateway mode, the NodePort service is handled by the OpenFlow flows configured @@ -1615,7 +1615,7 @@ func newNodePortWatcher( } // Get Physical IPs of Node, Can be IPV4 IPV6 or both - gatewayIPv4, gatewayIPv6 := getGatewayFamilyAddrs(gwBridge.Ips) + 
gatewayIPv4, gatewayIPv6 := getGatewayFamilyAddrs(gwBridge.GetIPs()) npw := &nodePortWatcher{ dpuMode: dpuMode, diff --git a/go-controller/pkg/node/gateway_udn.go b/go-controller/pkg/node/gateway_udn.go index a9d3b92d23..65dde1282f 100644 --- a/go-controller/pkg/node/gateway_udn.go +++ b/go-controller/pkg/node/gateway_udn.go @@ -599,7 +599,7 @@ func (udng *UserDefinedNetworkGateway) getDefaultRoute(isNetworkAdvertised bool) var retVal []netlink.Route var defaultAnyCIDR *net.IPNet - for _, nextHop := range udng.gateway.openflowManager.defaultBridge.NextHops { + for _, nextHop := range udng.gateway.openflowManager.defaultBridge.GetNextHops() { isV6 := utilnet.IsIPv6(nextHop) _, defaultAnyCIDR, _ = net.ParseCIDR("0.0.0.0/0") if isV6 { @@ -791,7 +791,7 @@ func (udng *UserDefinedNetworkGateway) doReconcile() error { // update bridge configuration isNetworkAdvertised := util.IsPodNetworkAdvertisedAtNode(udng.NetInfo, udng.node.Name) - netConfig := udng.openflowManager.defaultBridge.GetNetworkBridgeConfig(udng.GetNetworkName()) + netConfig := udng.openflowManager.defaultBridge.GetNetworkConfig(udng.GetNetworkName()) if netConfig == nil { return fmt.Errorf("missing bridge configuration for network %s", udng.GetNetworkName()) } diff --git a/go-controller/pkg/node/gateway_udn_test.go b/go-controller/pkg/node/gateway_udn_test.go index 5622a226d7..862c4b5a7a 100644 --- a/go-controller/pkg/node/gateway_udn_test.go +++ b/go-controller/pkg/node/gateway_udn_test.go @@ -667,16 +667,16 @@ var _ = Describe("UserDefinedNetworkGateway", func() { } } Expect(udnFlows).To(Equal(0)) - Expect(udnGateway.openflowManager.defaultBridge.NetConfig).To(HaveLen(1)) // only default network + Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(1)) // only default network Expect(udnGateway.AddNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(64)) // 18 UDN Flows are added by default - 
Expect(udnGateway.openflowManager.defaultBridge.NetConfig).To(HaveLen(2)) // default network + UDN network - defaultUdnConfig := udnGateway.openflowManager.defaultBridge.NetConfig["default"] - bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.NetConfig["bluenet"] - bridgeMAC := udnGateway.openflowManager.defaultBridge.MacAddress.String() - ofPortHost := udnGateway.openflowManager.defaultBridge.OfPortHost + Expect(flowMap["DEFAULT"]).To(HaveLen(64)) // 18 UDN Flows are added by default + Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(2)) // default network + UDN network + defaultUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("default") + bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("bluenet") + bridgeMAC := udnGateway.openflowManager.defaultBridge.GetMAC().String() + ofPortHost := udnGateway.openflowManager.defaultBridge.GetOfPortHost() for _, flows := range flowMap { for _, flow := range flows { if strings.Contains(flow, fmt.Sprintf("0x%x", udnGateway.masqCTMark)) { @@ -707,8 +707,8 @@ var _ = Describe("UserDefinedNetworkGateway", func() { kubeMock.On("UpdateNodeStatus", cnode).Return(nil) // check if network key gets deleted from annotation Expect(udnGateway.DelNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present - Expect(udnGateway.openflowManager.defaultBridge.NetConfig).To(HaveLen(1)) // default network only + Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present + Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(1)) // default network only udnFlows = 0 for _, flows := range flowMap { for _, flow := range flows { @@ -898,16 +898,16 @@ var _ = Describe("UserDefinedNetworkGateway", func() { } } Expect(udnFlows).To(Equal(0)) - Expect(udnGateway.openflowManager.defaultBridge.NetConfig).To(HaveLen(1)) // 
only default network + Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(1)) // only default network Expect(udnGateway.AddNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(64)) // 18 UDN Flows are added by default - Expect(udnGateway.openflowManager.defaultBridge.NetConfig).To(HaveLen(2)) // default network + UDN network - defaultUdnConfig := udnGateway.openflowManager.defaultBridge.NetConfig["default"] - bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.NetConfig["bluenet"] - bridgeMAC := udnGateway.openflowManager.defaultBridge.MacAddress.String() - ofPortHost := udnGateway.openflowManager.defaultBridge.OfPortHost + Expect(flowMap["DEFAULT"]).To(HaveLen(64)) // 18 UDN Flows are added by default + Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(2)) // default network + UDN network + defaultUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("default") + bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("bluenet") + bridgeMAC := udnGateway.openflowManager.defaultBridge.GetMAC().String() + ofPortHost := udnGateway.openflowManager.defaultBridge.GetOfPortHost() for _, flows := range flowMap { for _, flow := range flows { if strings.Contains(flow, fmt.Sprintf("0x%x", udnGateway.masqCTMark)) { @@ -938,8 +938,8 @@ var _ = Describe("UserDefinedNetworkGateway", func() { kubeMock.On("UpdateNodeStatus", cnode).Return(nil) // check if network key gets deleted from annotation Expect(udnGateway.DelNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present - Expect(udnGateway.openflowManager.defaultBridge.NetConfig).To(HaveLen(1)) // default network only + Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present + 
Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(1)) // default network only udnFlows = 0 for _, flows := range flowMap { for _, flow := range flows { @@ -1139,16 +1139,16 @@ var _ = Describe("UserDefinedNetworkGateway", func() { } } Expect(udnFlows).To(Equal(0)) - Expect(udnGateway.openflowManager.defaultBridge.NetConfig).To(HaveLen(1)) // only default network + Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(1)) // only default network Expect(udnGateway.AddNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(69)) // 18 UDN Flows and 5 advertisedUDN flows are added by default - Expect(udnGateway.openflowManager.defaultBridge.NetConfig).To(HaveLen(2)) // default network + UDN network - defaultUdnConfig := udnGateway.openflowManager.defaultBridge.NetConfig["default"] - bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.NetConfig["bluenet"] - bridgeMAC := udnGateway.openflowManager.defaultBridge.MacAddress.String() - ofPortHost := udnGateway.openflowManager.defaultBridge.OfPortHost + Expect(flowMap["DEFAULT"]).To(HaveLen(69)) // 18 UDN Flows and 5 advertisedUDN flows are added by default + Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(2)) // default network + UDN network + defaultUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("default") + bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("bluenet") + bridgeMAC := udnGateway.openflowManager.defaultBridge.GetMAC().String() + ofPortHost := udnGateway.openflowManager.defaultBridge.GetOfPortHost() for _, flows := range flowMap { for _, flow := range flows { if strings.Contains(flow, fmt.Sprintf("0x%x", udnGateway.masqCTMark)) { @@ -1179,8 +1179,8 @@ var _ = Describe("UserDefinedNetworkGateway", func() { kubeMock.On("UpdateNodeStatus", cnode).Return(nil) // check if network key gets deleted from annotation 
Expect(udnGateway.DelNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present - Expect(udnGateway.openflowManager.defaultBridge.NetConfig).To(HaveLen(1)) // default network only + Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present + Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(1)) // default network only udnFlows = 0 for _, flows := range flowMap { for _, flow := range flows { @@ -1380,7 +1380,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { err = testNS.Do(func(ns.NetNS) error { defer GinkgoRecover() ofm := getDummyOpenflowManager() - ofm.defaultBridge.NextHops = ovntest.MustParseIPs(config.Gateway.NextHop) + ofm.defaultBridge.SetNextHops(ovntest.MustParseIPs(config.Gateway.NextHop)) udnGateway, err := NewUserDefinedNetworkGateway(mutableNetInfo, node, nil, nil, vrf, nil, &gateway{openflowManager: ofm}) Expect(err).NotTo(HaveOccurred()) mplink, err := netlink.LinkByName(mgtPort) diff --git a/go-controller/pkg/node/node_ip_handler_linux.go b/go-controller/pkg/node/node_ip_handler_linux.go index a6945531e4..770ec5924e 100644 --- a/go-controller/pkg/node/node_ip_handler_linux.go +++ b/go-controller/pkg/node/node_ip_handler_linux.go @@ -438,7 +438,8 @@ func (c *addressManager) isValidNodeIP(addr net.IP, linkIndex int) bool { if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && config.Gateway.Mode != config.GatewayModeDisabled { // Two methods to lookup EIPs assigned to the gateway bridge. Fast path from a shared cache or slow path from node annotations. 
// At startup, gateway bridge cache gets sync - if c.gatewayBridge.EipMarkIPs != nil && c.gatewayBridge.EipMarkIPs.HasSyncdOnce() && c.gatewayBridge.EipMarkIPs.IsIPPresent(addr) { + eipMarkIPs := c.gatewayBridge.GetEIPMarkIPs() + if eipMarkIPs != nil && eipMarkIPs.HasSyncdOnce() && eipMarkIPs.IsIPPresent(addr) { return false } else { if eipAddresses, err := c.getPrimaryHostEgressIPs(); err != nil { diff --git a/go-controller/pkg/node/openflow_manager.go b/go-controller/pkg/node/openflow_manager.go index f7e1bccfe5..70fd383c70 100644 --- a/go-controller/pkg/node/openflow_manager.go +++ b/go-controller/pkg/node/openflow_manager.go @@ -34,19 +34,19 @@ type openflowManager struct { // UTILs Needed for UDN (also leveraged for default netInfo) in openflowmanager func (c *openflowManager) getDefaultBridgePortConfigurations() ([]*bridgeconfig.BridgeUDNConfiguration, string, string) { - return c.defaultBridge.GetBridgePortConfigurations() + return c.defaultBridge.GetPortConfigurations() } func (c *openflowManager) getExGwBridgePortConfigurations() ([]*bridgeconfig.BridgeUDNConfiguration, string, string) { - return c.externalGatewayBridge.GetBridgePortConfigurations() + return c.externalGatewayBridge.GetPortConfigurations() } func (c *openflowManager) addNetwork(nInfo util.NetInfo, nodeSubnets []*net.IPNet, masqCTMark, pktMark uint, v6MasqIPs, v4MasqIPs *udn.MasqueradeIPs) error { - if err := c.defaultBridge.AddNetworkBridgeConfig(nInfo, nodeSubnets, masqCTMark, pktMark, v6MasqIPs, v4MasqIPs); err != nil { + if err := c.defaultBridge.AddNetworkConfig(nInfo, nodeSubnets, masqCTMark, pktMark, v6MasqIPs, v4MasqIPs); err != nil { return err } if c.externalGatewayBridge != nil { - if err := c.externalGatewayBridge.AddNetworkBridgeConfig(nInfo, nodeSubnets, masqCTMark, pktMark, v6MasqIPs, v4MasqIPs); err != nil { + if err := c.externalGatewayBridge.AddNetworkConfig(nInfo, nodeSubnets, masqCTMark, pktMark, v6MasqIPs, v4MasqIPs); err != nil { return err } } @@ -54,9 +54,9 @@ func (c 
*openflowManager) addNetwork(nInfo util.NetInfo, nodeSubnets []*net.IPNe } func (c *openflowManager) delNetwork(nInfo util.NetInfo) { - c.defaultBridge.DelNetworkBridgeConfig(nInfo) + c.defaultBridge.DelNetworkConfig(nInfo) if c.externalGatewayBridge != nil { - c.externalGatewayBridge.DelNetworkBridgeConfig(nInfo) + c.externalGatewayBridge.DelNetworkConfig(nInfo) } } @@ -124,7 +124,7 @@ func (c *openflowManager) syncFlows() { flows = append(flows, entry...) } - _, stderr, err := util.ReplaceOFFlows(c.defaultBridge.BridgeName, flows) + _, stderr, err := util.ReplaceOFFlows(c.defaultBridge.GetBridgeName(), flows) if err != nil { klog.Errorf("Failed to add flows, error: %v, stderr, %s, flows: %s", err, stderr, c.flowCache) } @@ -141,7 +141,7 @@ func (c *openflowManager) syncFlows() { flows = append(flows, entry...) } - _, stderr, err := util.ReplaceOFFlows(c.externalGatewayBridge.BridgeName, flows) + _, stderr, err := util.ReplaceOFFlows(c.externalGatewayBridge.GetBridgeName(), flows) if err != nil { klog.Errorf("Failed to add flows, error: %v, stderr, %s, flows: %s", err, stderr, c.exGWFlowCache) } From fa6076bcc115b11321e80c71cef1fa107a3542f7 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Wed, 2 Jul 2025 17:19:55 +0200 Subject: [PATCH 132/181] [bridgeconfig] move nextHops to the gateway where it is used. 
Signed-off-by: Nadia Pinaeva --- .../pkg/node/bridgeconfig/bridgeconfig.go | 17 ----------------- go-controller/pkg/node/gateway.go | 6 ++++-- go-controller/pkg/node/gateway_shared_intf.go | 4 +++- go-controller/pkg/node/gateway_udn.go | 2 +- go-controller/pkg/node/gateway_udn_test.go | 4 ++-- 5 files changed, 10 insertions(+), 23 deletions(-) diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go index 979474eab5..c68b7df478 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go @@ -81,13 +81,11 @@ type BridgeConfiguration struct { ofPortPhys string netConfig map[string]*BridgeUDNConfiguration eipMarkIPs *egressip.MarkIPsCache - nextHops []net.IP } func NewBridgeConfiguration(intfName, nodeName, physicalNetworkName string, nodeSubnets, gwIPs []*net.IPNet, - gwNextHops []net.IP, advertised bool) (*BridgeConfiguration, error) { var intfRep string var err error @@ -106,9 +104,6 @@ func NewBridgeConfiguration(intfName, nodeName, }, eipMarkIPs: egressip.NewMarkIPsCache(), } - if len(gwNextHops) > 0 { - res.nextHops = gwNextHops - } res.netConfig[types.DefaultNetworkName].Advertised.Store(advertised) if config.Gateway.GatewayAcceleratedInterface != "" { @@ -475,18 +470,6 @@ func (b *BridgeConfiguration) SetEIPMarkIPs(eipMarkIPs *egressip.MarkIPsCache) { b.eipMarkIPs = eipMarkIPs } -func (b *BridgeConfiguration) GetNextHops() []net.IP { - b.Mutex.Lock() - defer b.Mutex.Unlock() - return b.nextHops -} - -func (b *BridgeConfiguration) SetNextHops(nextHops []net.IP) { - b.Mutex.Lock() - defer b.Mutex.Unlock() - b.nextHops = nextHops -} - func gatewayReady(patchPort string) bool { // Get ofport of patchPort ofport, _, err := util.GetOVSOfPort("--if-exists", "get", "interface", patchPort, "ofport") diff --git a/go-controller/pkg/node/gateway.go b/go-controller/pkg/node/gateway.go index 4fc0004d58..9b43fc95a5 100644 --- 
a/go-controller/pkg/node/gateway.go +++ b/go-controller/pkg/node/gateway.go @@ -60,6 +60,8 @@ type gateway struct { watchFactory *factory.WatchFactory // used for retry stopChan <-chan struct{} wg *sync.WaitGroup + + nextHops []net.IP } func (g *gateway) AddService(svc *corev1.Service) error { @@ -357,13 +359,13 @@ func setupUDPAggregationUplink(ifname string) error { func gatewayInitInternal(nodeName, gwIntf, egressGatewayIntf string, gwNextHops []net.IP, nodeSubnets, gwIPs []*net.IPNet, advertised bool, nodeAnnotator kube.Annotator) ( *bridgeconfig.BridgeConfiguration, *bridgeconfig.BridgeConfiguration, error) { - gatewayBridge, err := bridgeconfig.NewBridgeConfiguration(gwIntf, nodeName, types.PhysicalNetworkName, nodeSubnets, gwIPs, gwNextHops, advertised) + gatewayBridge, err := bridgeconfig.NewBridgeConfiguration(gwIntf, nodeName, types.PhysicalNetworkName, nodeSubnets, gwIPs, advertised) if err != nil { return nil, nil, fmt.Errorf("bridge for interface failed for %s: %w", gwIntf, err) } var egressGWBridge *bridgeconfig.BridgeConfiguration if egressGatewayIntf != "" { - egressGWBridge, err = bridgeconfig.NewBridgeConfiguration(egressGatewayIntf, nodeName, types.PhysicalNetworkExGwName, nodeSubnets, nil, nil, false) + egressGWBridge, err = bridgeconfig.NewBridgeConfiguration(egressGatewayIntf, nodeName, types.PhysicalNetworkExGwName, nodeSubnets, nil, false) if err != nil { return nil, nil, fmt.Errorf("bridge for interface failed for %s: %w", egressGatewayIntf, err) } diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index 967828dcd0..278a3cbd44 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -1425,7 +1425,9 @@ func newGateway( gatewayMode config.GatewayMode, ) (*gateway, error) { klog.Info("Creating new gateway") - gw := &gateway{} + gw := &gateway{ + nextHops: gwNextHops, + } if gatewayMode == config.GatewayModeLocal { if err := 
initLocalGateway(subnets, mgmtPort); err != nil { diff --git a/go-controller/pkg/node/gateway_udn.go b/go-controller/pkg/node/gateway_udn.go index 65dde1282f..f10326d1ed 100644 --- a/go-controller/pkg/node/gateway_udn.go +++ b/go-controller/pkg/node/gateway_udn.go @@ -599,7 +599,7 @@ func (udng *UserDefinedNetworkGateway) getDefaultRoute(isNetworkAdvertised bool) var retVal []netlink.Route var defaultAnyCIDR *net.IPNet - for _, nextHop := range udng.gateway.openflowManager.defaultBridge.GetNextHops() { + for _, nextHop := range udng.gateway.nextHops { isV6 := utilnet.IsIPv6(nextHop) _, defaultAnyCIDR, _ = net.ParseCIDR("0.0.0.0/0") if isV6 { diff --git a/go-controller/pkg/node/gateway_udn_test.go b/go-controller/pkg/node/gateway_udn_test.go index 862c4b5a7a..34848faf7e 100644 --- a/go-controller/pkg/node/gateway_udn_test.go +++ b/go-controller/pkg/node/gateway_udn_test.go @@ -1380,8 +1380,8 @@ var _ = Describe("UserDefinedNetworkGateway", func() { err = testNS.Do(func(ns.NetNS) error { defer GinkgoRecover() ofm := getDummyOpenflowManager() - ofm.defaultBridge.SetNextHops(ovntest.MustParseIPs(config.Gateway.NextHop)) - udnGateway, err := NewUserDefinedNetworkGateway(mutableNetInfo, node, nil, nil, vrf, nil, &gateway{openflowManager: ofm}) + udnGateway, err := NewUserDefinedNetworkGateway(mutableNetInfo, node, nil, nil, vrf, nil, + &gateway{openflowManager: ofm, nextHops: ovntest.MustParseIPs(config.Gateway.NextHop)}) Expect(err).NotTo(HaveOccurred()) mplink, err := netlink.LinkByName(mgtPort) Expect(err).NotTo(HaveOccurred()) From a0c90f267df26a6b7d8eecbb18c67bd23ab2ccd7 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Wed, 2 Jul 2025 17:25:31 +0200 Subject: [PATCH 133/181] [bridgeconfig] make mutex internal. syncFlows only directly uses already protected GetBridgeName() method for bridgeConfig, and flow updates should be protected by the flowMutex. So hopefully I am not breaking anything... 
Signed-off-by: Nadia Pinaeva --- .../pkg/node/bridgeconfig/bridgeconfig.go | 66 +++++++++---------- .../bridgeconfig/bridgeconfig_testutil.go | 4 +- .../pkg/node/bridgeconfig/bridgeflows.go | 12 ++-- go-controller/pkg/node/openflow_manager.go | 7 -- 4 files changed, 41 insertions(+), 48 deletions(-) diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go index c68b7df478..351a44c981 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go @@ -63,7 +63,7 @@ func (netConfig *BridgeUDNConfiguration) setOfPatchPort() error { } type BridgeConfiguration struct { - Mutex sync.Mutex + mutex sync.Mutex // variables that are only set on creation and never changed // don't require mutex lock to read @@ -234,8 +234,8 @@ func (b *BridgeConfiguration) GetGatewayIface() string { // UpdateInterfaceIPAddresses sets and returns the bridge's current ips func (b *BridgeConfiguration) UpdateInterfaceIPAddresses(node *corev1.Node) ([]*net.IPNet, error) { - b.Mutex.Lock() - defer b.Mutex.Unlock() + b.mutex.Lock() + defer b.mutex.Unlock() ifAddrs, err := nodeutil.GetNetworkInterfaceIPAddresses(b.GetGatewayIface()) if err != nil { return nil, err @@ -265,8 +265,8 @@ func (b *BridgeConfiguration) UpdateInterfaceIPAddresses(node *corev1.Node) ([]* // GetPortConfigurations returns a slice of Network port configurations along with the // uplinkName and physical port's ofport value func (b *BridgeConfiguration) GetPortConfigurations() ([]*BridgeUDNConfiguration, string, string) { - b.Mutex.Lock() - defer b.Mutex.Unlock() + b.mutex.Lock() + defer b.mutex.Unlock() var netConfigs []*BridgeUDNConfiguration for _, netConfig := range b.netConfig { netConfigs = append(netConfigs, netConfig.ShallowCopy()) @@ -280,8 +280,8 @@ func (b *BridgeConfiguration) AddNetworkConfig( nodeSubnets []*net.IPNet, masqCTMark, pktMark uint, v6MasqIPs, v4MasqIPs *udn.MasqueradeIPs) error { - 
b.Mutex.Lock() - defer b.Mutex.Unlock() + b.mutex.Lock() + defer b.mutex.Unlock() netName := nInfo.GetNetworkName() patchPort := nInfo.GetNetworkScopedPatchPortName(b.bridgeName, b.nodeName) @@ -309,15 +309,15 @@ func (b *BridgeConfiguration) AddNetworkConfig( // DelNetworkConfig deletes the provided netInfo from the bridge configuration cache func (b *BridgeConfiguration) DelNetworkConfig(nInfo util.NetInfo) { - b.Mutex.Lock() - defer b.Mutex.Unlock() + b.mutex.Lock() + defer b.mutex.Unlock() delete(b.netConfig, nInfo.GetNetworkName()) } func (b *BridgeConfiguration) GetNetworkConfig(networkName string) *BridgeUDNConfiguration { - b.Mutex.Lock() - defer b.Mutex.Unlock() + b.mutex.Lock() + defer b.mutex.Unlock() return b.netConfig[networkName] } @@ -327,8 +327,8 @@ func (b *BridgeConfiguration) GetNetworkConfig(networkName string) *BridgeUDNCon // NOTE: if the network configuration can't be found or if the network is not patched by OVN // yet this returns nil. func (b *BridgeConfiguration) GetActiveNetworkBridgeConfigCopy(networkName string) *BridgeUDNConfiguration { - b.Mutex.Lock() - defer b.Mutex.Unlock() + b.mutex.Lock() + defer b.mutex.Unlock() if netConfig, found := b.netConfig[networkName]; found && netConfig.OfPortPatch != "" { return netConfig.ShallowCopy() @@ -337,8 +337,8 @@ func (b *BridgeConfiguration) GetActiveNetworkBridgeConfigCopy(networkName strin } func (b *BridgeConfiguration) PatchedNetConfigs() []*BridgeUDNConfiguration { - b.Mutex.Lock() - defer b.Mutex.Unlock() + b.mutex.Lock() + defer b.mutex.Unlock() result := make([]*BridgeUDNConfiguration, 0, len(b.netConfig)) for _, netConfig := range b.netConfig { if netConfig.OfPortPatch == "" { @@ -352,8 +352,8 @@ func (b *BridgeConfiguration) PatchedNetConfigs() []*BridgeUDNConfiguration { // IsGatewayReady checks if patch ports of every netConfig are present. 
// used by gateway on newGateway readyFunc func (b *BridgeConfiguration) IsGatewayReady() bool { - b.Mutex.Lock() - defer b.Mutex.Unlock() + b.mutex.Lock() + defer b.mutex.Unlock() for _, netConfig := range b.netConfig { ready := gatewayReady(netConfig.PatchPort) if !ready { @@ -364,8 +364,8 @@ func (b *BridgeConfiguration) IsGatewayReady() bool { } func (b *BridgeConfiguration) SetOfPorts() error { - b.Mutex.Lock() - defer b.Mutex.Unlock() + b.mutex.Lock() + defer b.mutex.Unlock() // Get ofport of patchPort for _, netConfig := range b.netConfig { if err := netConfig.setOfPatchPort(); err != nil { @@ -412,8 +412,8 @@ func (b *BridgeConfiguration) SetOfPorts() error { } func (b *BridgeConfiguration) GetIPs() []*net.IPNet { - b.Mutex.Lock() - defer b.Mutex.Unlock() + b.mutex.Lock() + defer b.mutex.Unlock() return b.ips } @@ -426,20 +426,20 @@ func (b *BridgeConfiguration) GetUplinkName() string { } func (b *BridgeConfiguration) GetMAC() net.HardwareAddr { - b.Mutex.Lock() - defer b.Mutex.Unlock() + b.mutex.Lock() + defer b.mutex.Unlock() return b.macAddress } func (b *BridgeConfiguration) SetMAC(macAddr net.HardwareAddr) { - b.Mutex.Lock() - defer b.Mutex.Unlock() + b.mutex.Lock() + defer b.mutex.Unlock() b.macAddress = macAddr } func (b *BridgeConfiguration) SetNetworkOfPatchPort(netName string) error { - b.Mutex.Lock() - defer b.Mutex.Unlock() + b.mutex.Lock() + defer b.mutex.Unlock() netConfig, found := b.netConfig[netName] if !found { @@ -453,20 +453,20 @@ func (b *BridgeConfiguration) GetInterfaceID() string { } func (b *BridgeConfiguration) GetOfPortHost() string { - b.Mutex.Lock() - defer b.Mutex.Unlock() + b.mutex.Lock() + defer b.mutex.Unlock() return b.ofPortHost } func (b *BridgeConfiguration) GetEIPMarkIPs() *egressip.MarkIPsCache { - b.Mutex.Lock() - defer b.Mutex.Unlock() + b.mutex.Lock() + defer b.mutex.Unlock() return b.eipMarkIPs } func (b *BridgeConfiguration) SetEIPMarkIPs(eipMarkIPs *egressip.MarkIPsCache) { - b.Mutex.Lock() - defer 
b.Mutex.Unlock() + b.mutex.Lock() + defer b.mutex.Unlock() b.eipMarkIPs = eipMarkIPs } diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go index d5e3e9d5cd..d01c73861e 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go @@ -34,8 +34,8 @@ func TestBridgeConfig(brName string) *BridgeConfiguration { } func (b *BridgeConfiguration) GetNetConfigLen() int { - b.Mutex.Lock() - defer b.Mutex.Unlock() + b.mutex.Lock() + defer b.mutex.Unlock() return len(b.netConfig) } diff --git a/go-controller/pkg/node/bridgeconfig/bridgeflows.go b/go-controller/pkg/node/bridgeconfig/bridgeflows.go index 236d7b111a..84a7b4ea9c 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeflows.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeflows.go @@ -15,8 +15,8 @@ import ( ) func (b *BridgeConfiguration) DefaultBridgeFlows(hostSubnets []*net.IPNet, extraIPs []net.IP) ([]string, error) { - b.Mutex.Lock() - defer b.Mutex.Unlock() + b.mutex.Lock() + defer b.mutex.Unlock() dftFlows, err := b.flowsForDefaultBridge(extraIPs) if err != nil { return nil, err @@ -29,8 +29,8 @@ func (b *BridgeConfiguration) DefaultBridgeFlows(hostSubnets []*net.IPNet, extra } func (b *BridgeConfiguration) ExternalBridgeFlows(hostSubnets []*net.IPNet) ([]string, error) { - b.Mutex.Lock() - defer b.Mutex.Unlock() + b.mutex.Lock() + defer b.mutex.Unlock() return b.commonFlows(hostSubnets) } @@ -861,8 +861,8 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e } func (b *BridgeConfiguration) PMTUDDropFlows(ipAddrs []string) []string { - b.Mutex.Lock() - defer b.Mutex.Unlock() + b.mutex.Lock() + defer b.mutex.Unlock() var flows []string if config.Gateway.Mode != config.GatewayModeShared { return nil diff --git a/go-controller/pkg/node/openflow_manager.go b/go-controller/pkg/node/openflow_manager.go index 
70fd383c70..de3a721519 100644 --- a/go-controller/pkg/node/openflow_manager.go +++ b/go-controller/pkg/node/openflow_manager.go @@ -112,10 +112,6 @@ func (c *openflowManager) requestFlowSync() { } func (c *openflowManager) syncFlows() { - // protect gwBridge config from being updated by gw.nodeIPManager - c.defaultBridge.Mutex.Lock() - defer c.defaultBridge.Mutex.Unlock() - c.flowMutex.Lock() defer c.flowMutex.Unlock() @@ -130,9 +126,6 @@ func (c *openflowManager) syncFlows() { } if c.externalGatewayBridge != nil { - c.externalGatewayBridge.Mutex.Lock() - defer c.externalGatewayBridge.Mutex.Unlock() - c.exGWFlowMutex.Lock() defer c.exGWFlowMutex.Unlock() From fd5e7915436a832b7cd18b313a5917532038b62f Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Wed, 2 Jul 2025 17:36:58 +0200 Subject: [PATCH 134/181] [node/gateway] nodePortWatcher should use its own bridgeConfiguration. It used to require addressManager to updateGatewayIPs only to get bridgeConfig from it. We can just give nodePortWatcher its own reference to the bridgeConfig. 
Signed-off-by: Nadia Pinaeva --- .../pkg/node/gateway_localnet_linux_test.go | 1 + go-controller/pkg/node/gateway_shared_intf.go | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/go-controller/pkg/node/gateway_localnet_linux_test.go b/go-controller/pkg/node/gateway_localnet_linux_test.go index e1ff21cd49..49e4d1ee13 100644 --- a/go-controller/pkg/node/gateway_localnet_linux_test.go +++ b/go-controller/pkg/node/gateway_localnet_linux_test.go @@ -70,6 +70,7 @@ func initFakeNodePortWatcher(iptV4, iptV6 util.IPTablesHelper) *nodePortWatcher defaultBridge: defaultBridge, }, networkManager: networkmanager.Default().Interface(), + gwBridge: bridgeconfig.TestBridgeConfig(""), } return &fNPW } diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index 278a3cbd44..d60783144c 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -190,7 +190,7 @@ type nodePortWatcher struct { gatewayIPv6 string gatewayIPLock sync.Mutex ofportPhys string - gwBridge string + gwBridge *bridgeconfig.BridgeConfiguration // Map of service name to programmed iptables/OF rules serviceInfo map[ktypes.NamespacedName]*serviceConfig serviceInfoLock sync.Mutex @@ -216,9 +216,9 @@ type cidrAndFlags struct { validLifetime int } -func (npw *nodePortWatcher) updateGatewayIPs(addressManager *addressManager) { +func (npw *nodePortWatcher) updateGatewayIPs() { // Get Physical IPs of Node, Can be IPV4 IPV6 or both - gatewayIPv4, gatewayIPv6 := getGatewayFamilyAddrs(addressManager.gatewayBridge.GetIPs()) + gatewayIPv4, gatewayIPv6 := getGatewayFamilyAddrs(npw.gwBridge.GetIPs()) npw.gatewayIPLock.Lock() defer npw.gatewayIPLock.Unlock() @@ -368,7 +368,7 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI var ofPorts []string // don't get the ports unless we need to as it is a costly operation if (len(extParsedIPs) > 0 || len(ingParsedIPs) > 0) 
&& add { - ofPorts, err = util.GetOpenFlowPorts(npw.gwBridge, false) + ofPorts, err = util.GetOpenFlowPorts(npw.gwBridge.GetGatewayIface(), false) if err != nil { // in the odd case that getting all ports from the bridge should not work, // simply output to LOCAL (this should work well in the vast majority of cases, anyway) @@ -1517,7 +1517,7 @@ func newGateway( } if gw.nodePortWatcher != nil { npw, _ := gw.nodePortWatcher.(*nodePortWatcher) - npw.updateGatewayIPs(gw.nodeIPManager) + npw.updateGatewayIPs() } // Services create OpenFlow flows as well, need to update them all if gw.servicesRetryFramework != nil { @@ -1624,7 +1624,7 @@ func newNodePortWatcher( gatewayIPv4: gatewayIPv4, gatewayIPv6: gatewayIPv6, ofportPhys: ofportPhys, - gwBridge: gwBridge.GetGatewayIface(), + gwBridge: gwBridge, serviceInfo: make(map[ktypes.NamespacedName]*serviceConfig), nodeIPManager: nodeIPManager, ofm: ofm, From f531e3d338e2d73272bda29570089ed15ebb0b36 Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Wed, 2 Jul 2025 17:43:20 +0200 Subject: [PATCH 135/181] [node/gateway] make PatchedNetConfigs internal, remove locking Signed-off-by: Nadia Pinaeva --- .../pkg/node/bridgeconfig/bridgeconfig.go | 5 ++- .../pkg/node/bridgeconfig/bridgeflows.go | 36 +++++++++---------- 2 files changed, 20 insertions(+), 21 deletions(-) diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go index 351a44c981..92455b9be6 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go @@ -336,9 +336,8 @@ func (b *BridgeConfiguration) GetActiveNetworkBridgeConfigCopy(networkName strin return nil } -func (b *BridgeConfiguration) PatchedNetConfigs() []*BridgeUDNConfiguration { - b.mutex.Lock() - defer b.mutex.Unlock() +// must be called with mutex held +func (b *BridgeConfiguration) patchedNetConfigs() []*BridgeUDNConfiguration { result := make([]*BridgeUDNConfiguration, 0, 
len(b.netConfig)) for _, netConfig := range b.netConfig { if netConfig.OfPortPatch == "" { diff --git a/go-controller/pkg/node/bridgeconfig/bridgeflows.go b/go-controller/pkg/node/bridgeconfig/bridgeflows.go index 84a7b4ea9c..d03b88c8de 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeflows.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeflows.go @@ -79,7 +79,7 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string if err != nil { return nil, fmt.Errorf("unable to determine IPv4 physical IP of host: %v", err) } - for _, netConfig := range b.PatchedNetConfigs() { + for _, netConfig := range b.patchedNetConfigs() { // table 0, SVC Hairpin from OVN destined to local host, DNAT and go to table 4 dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s, ip_src=%s,"+ @@ -103,7 +103,7 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string continue } - for _, netConfig := range b.PatchedNetConfigs() { + for _, netConfig := range b.patchedNetConfigs() { dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s, ip_src=%s,"+ "actions=ct(commit,zone=%d,table=4)", @@ -142,7 +142,7 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string return nil, fmt.Errorf("unable to determine IPv6 physical IP of host: %v", err) } // table 0, SVC Hairpin from OVN destined to local host, DNAT to host, send to table 4 - for _, netConfig := range b.PatchedNetConfigs() { + for _, netConfig := range b.patchedNetConfigs() { dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s, ipv6_src=%s,"+ "actions=ct(commit,zone=%d,nat(dst=%s),table=4)", @@ -165,7 +165,7 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string continue } - for _, netConfig := range b.PatchedNetConfigs() { + for _, netConfig := range b.patchedNetConfigs() { dftFlows = append(dftFlows, 
fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s, ipv6_src=%s,"+ "actions=ct(commit,zone=%d,table=4)", @@ -216,7 +216,7 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string // we match on the UDNPodSubnet itself and we also don't SNAT to 169.254.0.2 // sample flow: cookie=0xdeff105, duration=1472.742s, table=0, n_packets=9, n_bytes=666, priority=550 // ip,in_port=LOCAL,nw_src=103.103.0.0/16,nw_dst=10.96.0.0/16 actions=ct(commit,table=2,zone=64001) - for _, netConfig := range b.PatchedNetConfigs() { + for _, netConfig := range b.patchedNetConfigs() { if netConfig.IsDefaultNetwork() { continue } @@ -249,7 +249,7 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string // In UDN match on the whole masquerade subnet to handle replies from UDN enabled services masqDst = masqSubnet } - for _, netConfig := range b.PatchedNetConfigs() { + for _, netConfig := range b.patchedNetConfigs() { // table 0, Reply hairpin traffic to host, coming from OVN, unSNAT dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_src=%s, %s_dst=%s,"+ @@ -272,7 +272,7 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string dftFlows = append(dftFlows, reassemblyFlows...) } if ofPortPhys != "" { - for _, netConfig := range b.PatchedNetConfigs() { + for _, netConfig := range b.patchedNetConfigs() { var actions string if config.Gateway.Mode != config.GatewayModeLocal || config.Gateway.DisablePacketMTUCheck { actions = fmt.Sprintf("output:%s", netConfig.OfPortPatch) @@ -351,7 +351,7 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string // table 2, priority 200, dispatch from UDN -> Host -> OVN. These packets have // already been SNATed to the UDN's masq IP or have been marked with the UDN's packet mark. 
if config.IPv4Mode { - for _, netConfig := range b.PatchedNetConfigs() { + for _, netConfig := range b.patchedNetConfigs() { if netConfig.IsDefaultNetwork() { continue } @@ -389,7 +389,7 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string } if config.IPv6Mode { - for _, netConfig := range b.PatchedNetConfigs() { + for _, netConfig := range b.patchedNetConfigs() { if netConfig.IsDefaultNetwork() { continue } @@ -516,7 +516,7 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e if ofPortPhys != "" { // table 0, we check to see if this dest mac is the shared mac, if so flood to all ports actions := "" - for _, netConfig := range b.PatchedNetConfigs() { + for _, netConfig := range b.patchedNetConfigs() { actions += "output:" + netConfig.OfPortPatch + "," } @@ -528,7 +528,7 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e // table 0, check packets coming from OVN have the correct mac address. Low priority flows that are a catch all // for non-IP packets that would normally be forwarded with NORMAL action (table 0, priority 0 flow). - for _, netConfig := range b.PatchedNetConfigs() { + for _, netConfig := range b.patchedNetConfigs() { dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=10, table=0, in_port=%s, dl_src=%s, actions=output:NORMAL", nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress)) @@ -543,7 +543,7 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e return nil, fmt.Errorf("unable to determine IPv4 physical IP of host: %v", err) } if ofPortPhys != "" { - for _, netConfig := range b.PatchedNetConfigs() { + for _, netConfig := range b.patchedNetConfigs() { // table0, packets coming from egressIP pods that have mark 1008 on them // will be SNAT-ed a final time into nodeIP to maintain consistency in traffic even if the GR // SNATs these into egressIP prior to reaching external bridge. 
@@ -602,7 +602,7 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e nodetypes.DefaultOpenFlowCookie, ofPortHost, config.Default.ConntrackZone, nodetypes.CtMarkHost, mod_vlan_id, ofPortPhys)) } if config.Gateway.Mode == config.GatewayModeLocal { - for _, netConfig := range b.PatchedNetConfigs() { + for _, netConfig := range b.patchedNetConfigs() { // table 0, any packet coming from OVN send to host in LGW mode, host will take care of sending it outside if needed. // exceptions are traffic for egressIP and egressGW features and ICMP related traffic which will hit the priority 100 flow instead of this. dftFlows = append(dftFlows, @@ -642,7 +642,7 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e return nil, fmt.Errorf("unable to determine IPv6 physical IP of host: %v", err) } if ofPortPhys != "" { - for _, netConfig := range b.PatchedNetConfigs() { + for _, netConfig := range b.patchedNetConfigs() { // table0, packets coming from egressIP pods that have mark 1008 on them // will be DNAT-ed a final time into nodeIP to maintain consistency in traffic even if the GR // DNATs these into egressIP prior to reaching external bridge. @@ -701,7 +701,7 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e } if config.Gateway.Mode == config.GatewayModeLocal { - for _, netConfig := range b.PatchedNetConfigs() { + for _, netConfig := range b.patchedNetConfigs() { // table 0, any packet coming from OVN send to host in LGW mode, host will take care of sending it outside if needed. // exceptions are traffic for egressIP and egressGW features and ICMP related traffic which will hit the priority 100 flow instead of this. 
dftFlows = append(dftFlows, @@ -761,7 +761,7 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e } if ofPortPhys != "" { - for _, netConfig := range b.PatchedNetConfigs() { + for _, netConfig := range b.patchedNetConfigs() { isNetworkAdvertised := netConfig.Advertised.Load() // disableSNATMultipleGWs only applies to default network disableSNATMultipleGWs := netConfig.IsDefaultNetwork() && config.Gateway.DisableSNATMultipleGWs @@ -839,7 +839,7 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e "actions=output:%s", nodetypes.DefaultOpenFlowCookie, ofPortHost)) // Send UDN destined traffic to right patch port - for _, netConfig := range b.PatchedNetConfigs() { + for _, netConfig := range b.patchedNetConfigs() { if netConfig.MasqCTMark != nodetypes.CtMarkOVN { dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=5, table=11, ct_mark=%s, "+ @@ -868,7 +868,7 @@ func (b *BridgeConfiguration) PMTUDDropFlows(ipAddrs []string) []string { return nil } for _, addr := range ipAddrs { - for _, netConfig := range b.PatchedNetConfigs() { + for _, netConfig := range b.patchedNetConfigs() { flows = append(flows, nodeutil.GenerateICMPFragmentationFlow(addr, nodetypes.OutputPortDrop, netConfig.OfPortPatch, nodetypes.PmtudOpenFlowCookie, 700)) } From 33e20b8361a2939d774bdb4bae9930a61fcb914f Mon Sep 17 00:00:00 2001 From: Nadia Pinaeva Date: Wed, 2 Jul 2025 17:57:13 +0200 Subject: [PATCH 136/181] [bridgeconfig] AI suggested fixes. store Advertised values to the copy and not to the original object. isIPv6 should be true in ipv6 case. 
Signed-off-by: Nadia Pinaeva --- go-controller/pkg/node/bridgeconfig/bridgeconfig.go | 2 +- go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go index 92455b9be6..4cad9037ad 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go @@ -44,7 +44,7 @@ func (netConfig *BridgeUDNConfiguration) ShallowCopy() *BridgeUDNConfiguration { Subnets: netConfig.Subnets, NodeSubnets: netConfig.NodeSubnets, } - netConfig.Advertised.Store(netConfig.Advertised.Load()) + copy.Advertised.Store(netConfig.Advertised.Load()) return copy } diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go index d01c73861e..8395baf06d 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go @@ -80,7 +80,7 @@ func CheckAdvertisedUDNSvcIsolationOVSFlows(flows []string, netConfig *BridgeUDN Expect(err).ToNot(HaveOccurred()) protoPrefix = "ip" } else { - matchingIPFamilySubnet, err = util.MatchFirstIPNetFamily(false, udnAdvertisedSubnets) + matchingIPFamilySubnet, err = util.MatchFirstIPNetFamily(true, udnAdvertisedSubnets) Expect(err).ToNot(HaveOccurred()) protoPrefix = "ip6" } From 290eb0385d3107e6f9ff2525a74e0b9f24c94966 Mon Sep 17 00:00:00 2001 From: Dan Winship Date: Mon, 28 Apr 2025 13:09:45 -0400 Subject: [PATCH 137/181] Add metrics for UDN Signed-off-by: Dan Winship --- .../userdefinednetwork/controller.go | 23 ++++++++++ go-controller/pkg/metrics/cluster_manager.go | 44 +++++++++++++++++++ 2 files changed, 67 insertions(+) diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/controller.go b/go-controller/pkg/clustermanager/userdefinednetwork/controller.go index 
e8c1d74a03..67292bd2ed 100644 --- a/go-controller/pkg/clustermanager/userdefinednetwork/controller.go +++ b/go-controller/pkg/clustermanager/userdefinednetwork/controller.go @@ -37,6 +37,7 @@ import ( userdefinednetworkscheme "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/scheme" userdefinednetworkinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/userdefinednetwork/v1" userdefinednetworklister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/listers/userdefinednetwork/v1" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -389,6 +390,14 @@ func (c *Controller) syncUserDefinedNetwork(udn *userdefinednetworkv1.UserDefine return nil, nil } + var role, topology string + if udn.Spec.Layer2 != nil { + role = string(udn.Spec.Layer2.Role) + } else if udn.Spec.Layer3 != nil { + role = string(udn.Spec.Layer3.Role) + } + topology = string(udn.Spec.Topology) + if !udn.DeletionTimestamp.IsZero() { // udn is being deleted if controllerutil.ContainsFinalizer(udn, template.FinalizerUserDefinedNetwork) { if err := c.deleteNAD(udn, udn.Namespace); err != nil { @@ -401,6 +410,7 @@ func (c *Controller) syncUserDefinedNetwork(udn *userdefinednetworkv1.UserDefine return nil, fmt.Errorf("failed to remove finalizer to UserDefinedNetwork: %w", err) } klog.Infof("Finalizer removed from UserDefinedNetworks [%s/%s]", udn.Namespace, udn.Name) + metrics.DecrementUDNCount(role, topology) } return nil, nil @@ -412,6 +422,7 @@ func (c *Controller) syncUserDefinedNetwork(udn *userdefinednetworkv1.UserDefine return nil, fmt.Errorf("failed to add finalizer to UserDefinedNetwork: %w", err) } klog.Infof("Added Finalizer to UserDefinedNetwork [%s/%s]", udn.Namespace, udn.Name) + metrics.IncrementUDNCount(role, topology) } return c.updateNAD(udn, udn.Namespace) @@ 
-539,6 +550,16 @@ func (c *Controller) syncClusterUDN(cudn *userdefinednetworkv1.ClusterUserDefine cudnName := cudn.Name affectedNamespaces := c.namespaceTracker[cudnName] + var role, topology string + if cudn.Spec.Network.Layer2 != nil { + role = string(cudn.Spec.Network.Layer2.Role) + } else if cudn.Spec.Network.Layer3 != nil { + role = string(cudn.Spec.Network.Layer3.Role) + } else if cudn.Spec.Network.Localnet != nil { + role = string(cudn.Spec.Network.Localnet.Role) + } + topology = string(cudn.Spec.Network.Topology) + if !cudn.DeletionTimestamp.IsZero() { if controllerutil.ContainsFinalizer(cudn, template.FinalizerUserDefinedNetwork) { var errs []error @@ -564,6 +585,7 @@ func (c *Controller) syncClusterUDN(cudn *userdefinednetworkv1.ClusterUserDefine } klog.Infof("Finalizer removed from ClusterUserDefinedNetwork %q", cudn.Name) delete(c.namespaceTracker, cudnName) + metrics.DecrementCUDNCount(role, topology) } return nil, nil @@ -581,6 +603,7 @@ func (c *Controller) syncClusterUDN(cudn *userdefinednetworkv1.ClusterUserDefine return nil, fmt.Errorf("failed to add finalizer to ClusterUserDefinedNetwork %q: %w", cudnName, err) } klog.Infof("Added Finalizer to ClusterUserDefinedNetwork %q", cudnName) + metrics.IncrementCUDNCount(role, topology) } selectedNamespaces, err := c.getSelectedNamespaces(cudn.Spec.NamespaceSelector) diff --git a/go-controller/pkg/metrics/cluster_manager.go b/go-controller/pkg/metrics/cluster_manager.go index f97a338b89..711d4dc026 100644 --- a/go-controller/pkg/metrics/cluster_manager.go +++ b/go-controller/pkg/metrics/cluster_manager.go @@ -91,6 +91,28 @@ var metricEgressIPRebalanceCount = prometheus.NewCounter(prometheus.CounterOpts{ /** EgressIP metrics recorded from cluster-manager ends**/ +var metricUDNCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, + Name: "user_defined_networks", + Help: "The total number of 
UserDefinedNetworks in the cluster"}, + []string{ + "role", + "topology", + }, +) + +var metricCUDNCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, + Name: "cluster_user_defined_networks", + Help: "The total number of ClusterUserDefinedNetworks in the cluster"}, + []string{ + "role", + "topology", + }, +) + // RegisterClusterManagerBase registers ovnkube cluster manager base metrics with the Prometheus registry. // This function should only be called once. func RegisterClusterManagerBase() { @@ -130,6 +152,8 @@ func RegisterClusterManagerFunctional() { prometheus.MustRegister(metricEgressIPRebalanceCount) prometheus.MustRegister(metricEgressIPCount) } + prometheus.MustRegister(metricUDNCount) + prometheus.MustRegister(metricCUDNCount) if err := prometheus.Register(MetricResourceRetryFailuresCount); err != nil { if _, ok := err.(prometheus.AlreadyRegisteredError); !ok { panic(err) @@ -165,3 +189,23 @@ func RecordEgressIPRebalance(count int) { func RecordEgressIPCount(count float64) { metricEgressIPCount.Set(count) } + +// IncrementUDNCount increments the number of UserDefinedNetworks of the given type +func IncrementUDNCount(role, topology string) { + metricUDNCount.WithLabelValues(role, topology).Inc() +} + +// DecrementUDNCount decrements the number of UserDefinedNetworks of the given type +func DecrementUDNCount(role, topology string) { + metricUDNCount.WithLabelValues(role, topology).Dec() +} + +// IncrementCUDNCount increments the number of ClusterUserDefinedNetworks of the given type +func IncrementCUDNCount(role, topology string) { + metricCUDNCount.WithLabelValues(role, topology).Inc() +} + +// DecrementCUDNCount decrements the number of ClusterUserDefinedNetworks of the given type +func DecrementCUDNCount(role, topology string) { + metricCUDNCount.WithLabelValues(role, topology).Dec() +} From 527c19fcff607cfd96ce36236fbd1441cb198f16 Mon Sep 17 00:00:00 
2001 From: Alin Serdean Date: Fri, 18 Jul 2025 13:02:13 +0200 Subject: [PATCH 138/181] Add support for --disable-requestedchassis flag in ovnkube controller This commit adds conditional logic to pass the --disable-requestedchassis flag to the ovnkube controller when the ovn_disable_requestedchassis environment variable is set to "true". The flag is added to the ovnkube-controller-with-node function in dist/images/ovnkube.sh, following the same pattern as other similar configuration flags like --enable-stateless-netpol. This flag is extremely useful when ovnkube is running in DPU mode since its chassis name will differ from the hostname of the DPU host. Signed-off-by: Alin Serdean --- dist/images/ovnkube.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/dist/images/ovnkube.sh b/dist/images/ovnkube.sh index 3931d4e180..e016ce4a47 100755 --- a/dist/images/ovnkube.sh +++ b/dist/images/ovnkube.sh @@ -2097,6 +2097,12 @@ ovnkube-controller-with-node() { ovn_stateless_netpol_enable_flag="--enable-stateless-netpol" fi + ovn_disable_requestedchassis_flag= + if [[ ${ovn_disable_requestedchassis} == "true" ]]; then + ovn_disable_requestedchassis_flag="--disable-requestedchassis" + fi + echo "ovn_disable_requestedchassis_flag=${ovn_disable_requestedchassis_flag}" + echo "=============== ovnkube-controller-with-node --init-ovnkube-controller-with-node==========" /usr/bin/ovnkube --init-ovnkube-controller ${K8S_NODE} --init-node ${K8S_NODE} \ ${anp_enabled_flag} \ @@ -2150,6 +2156,7 @@ ovnkube-controller-with-node() { ${ssl_opts} \ ${network_qos_enabled_flag} \ ${ovn_enable_dnsnameresolver_flag} \ + ${ovn_disable_requestedchassis_flag} \ --cluster-subnets ${net_cidr} --k8s-service-cidr=${svc_cidr} \ --export-ovs-metrics \ --gateway-mode=${ovn_gateway_mode} ${ovn_gateway_opts} \ From 293f6dda5712dcc8abe8302d5656dfcf1fb9aa40 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Thu, 17 Jul 2025 19:33:31 -0400 Subject: [PATCH 139/181] ci: run tests only if files other than 
docs are changed Note: merge_group doesn't support path filters hence it will still run the jobs for queue patches. Signed-off-by: Ihar Hrachyshka --- .github/workflows/test.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index e2a0067ee6..d9d5d40eec 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -4,6 +4,10 @@ on: merge_group: pull_request: branches: [ master ] + # Only run jobs if at least one non-doc file is changed + paths-ignore: + - '**/*.md' + - 'mkdocs.yml' schedule: - cron: '0 */12 * * *' workflow_dispatch: From ec378a7bbde21b2fc42d830caaa6cfe16bf3e7ee Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 18 Jul 2025 17:56:44 +0000 Subject: [PATCH 140/181] Bump golang.org/x/oauth2 Bumps the go_modules group with 1 update in the /test/conformance directory: [golang.org/x/oauth2](https://github.com/golang/oauth2). Updates `golang.org/x/oauth2` from 0.12.0 to 0.27.0 - [Commits](https://github.com/golang/oauth2/compare/v0.12.0...v0.27.0) --- updated-dependencies: - dependency-name: golang.org/x/oauth2 dependency-version: 0.27.0 dependency-type: indirect dependency-group: go_modules ... 
Signed-off-by: dependabot[bot] --- test/conformance/go.mod | 3 +-- test/conformance/go.sum | 9 ++------- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/test/conformance/go.mod b/test/conformance/go.mod index b3763a3068..de64ed280e 100644 --- a/test/conformance/go.mod +++ b/test/conformance/go.mod @@ -39,12 +39,11 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/testify v1.8.4 // indirect golang.org/x/net v0.38.0 // indirect - golang.org/x/oauth2 v0.12.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect golang.org/x/sys v0.31.0 // indirect golang.org/x/term v0.30.0 // indirect golang.org/x/text v0.23.0 // indirect golang.org/x/time v0.3.0 // indirect - google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/test/conformance/go.sum b/test/conformance/go.sum index 1e5b55a8e9..175ec601cc 100644 --- a/test/conformance/go.sum +++ b/test/conformance/go.sum @@ -23,7 +23,6 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -106,14 +105,13 @@ golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2F golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= -golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= -golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -125,7 +123,6 @@ golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.23.0/go.mod 
h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= @@ -141,8 +138,6 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= From 3d32558bd903bcc0703bbc40c4cd3b98064f0b67 Mon Sep 17 00:00:00 2001 From: arkadeepsen Date: Thu, 12 Jun 2025 11:04:09 +0530 Subject: [PATCH 141/181] Remove routes of ex gw pods in terminating or not ready state Signed-off-by: arkadeepsen --- .../apbroute/external_controller.go | 9 +- .../apbroute/external_controller_pod.go | 10 + go-controller/pkg/ovn/egressgw.go | 8 + go-controller/pkg/ovn/ovn.go | 17 +- .../k8s.io/kubernetes/pkg/api/v1/pod/util.go | 418 ++++++++++++++++++ go-controller/vendor/modules.txt | 1 + 6 files changed, 459 insertions(+), 4 deletions(-) create mode 100644 go-controller/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go diff --git a/go-controller/pkg/ovn/controller/apbroute/external_controller.go b/go-controller/pkg/ovn/controller/apbroute/external_controller.go index cd034d67b7..73f6208e96 100644 --- a/go-controller/pkg/ovn/controller/apbroute/external_controller.go +++ b/go-controller/pkg/ovn/controller/apbroute/external_controller.go @@ -22,6 +22,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" 
"k8s.io/klog/v2" + v1pod "k8s.io/kubernetes/pkg/api/v1/pod" adminpolicybasedrouteapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1" adminpolicybasedrouteinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/adminpolicybasedroute/v1" @@ -565,10 +566,14 @@ func (m *externalPolicyManager) onPodUpdate(oldObj, newObj interface{}) { utilruntime.HandleError(errors.New("invalid Pod provided to onPodUpdate()")) return } - // if labels AND assigned Pod IPs AND the multus network status annotations are the same, skip processing changes to the pod. + // if labels AND assigned Pod IPs AND the multus network status annotations AND + // pod PodReady condition AND deletion timestamp (PodTerminating) are + // the same, skip processing changes to the pod. if reflect.DeepEqual(o.Labels, n.Labels) && reflect.DeepEqual(o.Status.PodIPs, n.Status.PodIPs) && - reflect.DeepEqual(o.Annotations[nettypes.NetworkStatusAnnot], n.Annotations[nettypes.NetworkStatusAnnot]) { + reflect.DeepEqual(o.Annotations[nettypes.NetworkStatusAnnot], n.Annotations[nettypes.NetworkStatusAnnot]) && + reflect.DeepEqual(v1pod.GetPodReadyCondition(o.Status), v1pod.GetPodReadyCondition(n.Status)) && + reflect.DeepEqual(o.DeletionTimestamp, n.DeletionTimestamp) { return } m.podQueue.Add(n) diff --git a/go-controller/pkg/ovn/controller/apbroute/external_controller_pod.go b/go-controller/pkg/ovn/controller/apbroute/external_controller_pod.go index 9c49c474ba..2b2915f521 100644 --- a/go-controller/pkg/ovn/controller/apbroute/external_controller_pod.go +++ b/go-controller/pkg/ovn/controller/apbroute/external_controller_pod.go @@ -11,7 +11,10 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" + v1pod "k8s.io/kubernetes/pkg/api/v1/pod" utilnet "k8s.io/utils/net" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) func (m *externalPolicyManager) syncPod(pod 
*corev1.Pod, routeQueue workqueue.TypedRateLimitingInterface[string]) error { @@ -28,6 +31,13 @@ func (m *externalPolicyManager) syncPod(pod *corev1.Pod, routeQueue workqueue.Ty } func getExGwPodIPs(gatewayPod *corev1.Pod, networkName string) (sets.Set[string], error) { + // If an external gateway pod is in terminating or not ready state then don't return the + // IPs for the external gateway pod + if util.PodTerminating(gatewayPod) || !v1pod.IsPodReadyConditionTrue(gatewayPod.Status) { + klog.Warningf("External gateway pod cannot serve traffic; it's in terminating or not ready state: %s/%s", gatewayPod.Namespace, gatewayPod.Name) + return nil, nil + } + if networkName != "" { return getMultusIPsFromNetworkName(gatewayPod, networkName) } diff --git a/go-controller/pkg/ovn/egressgw.go b/go-controller/pkg/ovn/egressgw.go index 2b8e939585..b607a3b253 100644 --- a/go-controller/pkg/ovn/egressgw.go +++ b/go-controller/pkg/ovn/egressgw.go @@ -15,6 +15,7 @@ import ( ktypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" + v1pod "k8s.io/kubernetes/pkg/api/v1/pod" utilnet "k8s.io/utils/net" libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" @@ -49,6 +50,13 @@ func (oc *DefaultNetworkController) addPodExternalGW(pod *corev1.Pod) error { klog.Infof("External gateway pod: %s, detected for namespace(s) %s", pod.Name, podRoutingNamespaceAnno) + // If an external gateway pod is in terminating or not ready state then don't add the + // routes for the external gateway pod + if util.PodTerminating(pod) || !v1pod.IsPodReadyConditionTrue(pod.Status) { + klog.Warningf("External gateway pod cannot serve traffic; it's in terminating or not ready state: %s/%s", pod.Namespace, pod.Name) + return nil + } + foundGws, err := getExGwPodIPs(pod) if err != nil { klog.Errorf("Error getting exgw IPs for pod: %s, error: %v", pod.Name, err) diff --git a/go-controller/pkg/ovn/ovn.go b/go-controller/pkg/ovn/ovn.go index 293e23f4aa..07b7b6a83b 100644 --- 
a/go-controller/pkg/ovn/ovn.go +++ b/go-controller/pkg/ovn/ovn.go @@ -16,6 +16,7 @@ import ( listers "k8s.io/client-go/listers/core/v1" ref "k8s.io/client-go/tools/reference" "k8s.io/klog/v2" + v1pod "k8s.io/kubernetes/pkg/api/v1/pod" libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" @@ -117,6 +118,10 @@ func networkStatusAnnotationsChanged(oldPod, newPod *corev1.Pod) bool { return oldPod.Annotations[nettypes.NetworkStatusAnnot] != newPod.Annotations[nettypes.NetworkStatusAnnot] } +func podBecameReady(oldPod, newPod *corev1.Pod) bool { + return !v1pod.IsPodReadyConditionTrue(oldPod.Status) && v1pod.IsPodReadyConditionTrue(newPod.Status) +} + // ensurePod tries to set up a pod. It returns nil on success and error on failure; failure // indicates the pod set up should be retried later. func (oc *DefaultNetworkController) ensurePod(oldPod, pod *corev1.Pod, addPort bool) error { @@ -131,6 +136,14 @@ func (oc *DefaultNetworkController) ensurePod(oldPod, pod *corev1.Pod, addPort b return oc.ensureRemotePodIP(oldPod, pod, addPort) } + // If an external gateway pod is in terminating or not ready state then remove the + // routes for the external gateway pod + if util.PodTerminating(pod) || !v1pod.IsPodReadyConditionTrue(pod.Status) { + if err := oc.deletePodExternalGW(pod); err != nil { + return fmt.Errorf("ensurePod failed %s/%s: %w", pod.Namespace, pod.Name, err) + } + } + if oc.isPodScheduledinLocalZone(pod) { klog.V(5).Infof("Ensuring zone local for Pod %s/%s in node %s", pod.Namespace, pod.Name, pod.Spec.NodeName) return oc.ensureLocalZonePod(oldPod, pod, addPort) @@ -170,7 +183,7 @@ func (oc *DefaultNetworkController) ensureLocalZonePod(oldPod, pod *corev1.Pod, } } else { // either pod is host-networked or its an update for a normal pod (addPort=false case) - if oldPod == nil || exGatewayAnnotationsChanged(oldPod, pod) || networkStatusAnnotationsChanged(oldPod, pod) { + if oldPod == nil || exGatewayAnnotationsChanged(oldPod, pod) || 
networkStatusAnnotationsChanged(oldPod, pod) || podBecameReady(oldPod, pod) { if err := oc.addPodExternalGW(pod); err != nil { return fmt.Errorf("addPodExternalGW failed for %s/%s: %w", pod.Namespace, pod.Name, err) } @@ -237,7 +250,7 @@ func (oc *DefaultNetworkController) ensureRemoteZonePod(oldPod, pod *corev1.Pod, } // either pod is host-networked or its an update for a normal pod (addPort=false case) - if oldPod == nil || exGatewayAnnotationsChanged(oldPod, pod) || networkStatusAnnotationsChanged(oldPod, pod) { + if oldPod == nil || exGatewayAnnotationsChanged(oldPod, pod) || networkStatusAnnotationsChanged(oldPod, pod) || podBecameReady(oldPod, pod) { // check if this remote pod is serving as an external GW. If so add the routes in the namespace // associated with this remote pod if err := oc.addPodExternalGW(pod); err != nil { diff --git a/go-controller/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go b/go-controller/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go new file mode 100644 index 0000000000..c2fe519714 --- /dev/null +++ b/go-controller/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go @@ -0,0 +1,418 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pod + +import ( + "fmt" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// FindPort locates the container port for the given pod and portName. If the +// targetPort is a number, use that. 
If the targetPort is a string, look that +// string up in all named ports in all containers in the target pod. If no +// match is found, fail. +func FindPort(pod *v1.Pod, svcPort *v1.ServicePort) (int, error) { + portName := svcPort.TargetPort + switch portName.Type { + case intstr.String: + name := portName.StrVal + for _, container := range pod.Spec.Containers { + for _, port := range container.Ports { + if port.Name == name && port.Protocol == svcPort.Protocol { + return int(port.ContainerPort), nil + } + } + } + // also support sidecar container (initContainer with restartPolicy=Always) + for _, container := range pod.Spec.InitContainers { + if container.RestartPolicy == nil || *container.RestartPolicy != v1.ContainerRestartPolicyAlways { + continue + } + for _, port := range container.Ports { + if port.Name == name && port.Protocol == svcPort.Protocol { + return int(port.ContainerPort), nil + } + } + } + case intstr.Int: + return portName.IntValue(), nil + } + + return 0, fmt.Errorf("no suitable port for manifest: %s", pod.UID) +} + +// ContainerType signifies container type +type ContainerType int + +const ( + // Containers is for normal containers + Containers ContainerType = 1 << iota + // InitContainers is for init containers + InitContainers + // EphemeralContainers is for ephemeral containers + EphemeralContainers +) + +// AllContainers specifies that all containers be visited +const AllContainers ContainerType = InitContainers | Containers | EphemeralContainers + +// AllFeatureEnabledContainers returns a ContainerType mask which includes all container +// types except for the ones guarded by feature gate. +func AllFeatureEnabledContainers() ContainerType { + return AllContainers +} + +// ContainerVisitor is called with each container spec, and returns true +// if visiting should continue. 
+type ContainerVisitor func(container *v1.Container, containerType ContainerType) (shouldContinue bool) + +// Visitor is called with each object name, and returns true if visiting should continue +type Visitor func(name string) (shouldContinue bool) + +func skipEmptyNames(visitor Visitor) Visitor { + return func(name string) bool { + if len(name) == 0 { + // continue visiting + return true + } + // delegate to visitor + return visitor(name) + } +} + +// VisitContainers invokes the visitor function with a pointer to every container +// spec in the given pod spec with type set in mask. If visitor returns false, +// visiting is short-circuited. VisitContainers returns true if visiting completes, +// false if visiting was short-circuited. +func VisitContainers(podSpec *v1.PodSpec, mask ContainerType, visitor ContainerVisitor) bool { + if mask&InitContainers != 0 { + for i := range podSpec.InitContainers { + if !visitor(&podSpec.InitContainers[i], InitContainers) { + return false + } + } + } + if mask&Containers != 0 { + for i := range podSpec.Containers { + if !visitor(&podSpec.Containers[i], Containers) { + return false + } + } + } + if mask&EphemeralContainers != 0 { + for i := range podSpec.EphemeralContainers { + if !visitor((*v1.Container)(&podSpec.EphemeralContainers[i].EphemeralContainerCommon), EphemeralContainers) { + return false + } + } + } + return true +} + +// VisitPodSecretNames invokes the visitor function with the name of every secret +// referenced by the pod spec. If visitor returns false, visiting is short-circuited. +// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited. +// Returns true if visiting completed, false if visiting was short-circuited. 
+func VisitPodSecretNames(pod *v1.Pod, visitor Visitor) bool { + visitor = skipEmptyNames(visitor) + for _, reference := range pod.Spec.ImagePullSecrets { + if !visitor(reference.Name) { + return false + } + } + VisitContainers(&pod.Spec, AllContainers, func(c *v1.Container, containerType ContainerType) bool { + return visitContainerSecretNames(c, visitor) + }) + var source *v1.VolumeSource + + for i := range pod.Spec.Volumes { + source = &pod.Spec.Volumes[i].VolumeSource + switch { + case source.AzureFile != nil: + if len(source.AzureFile.SecretName) > 0 && !visitor(source.AzureFile.SecretName) { + return false + } + case source.CephFS != nil: + if source.CephFS.SecretRef != nil && !visitor(source.CephFS.SecretRef.Name) { + return false + } + case source.Cinder != nil: + if source.Cinder.SecretRef != nil && !visitor(source.Cinder.SecretRef.Name) { + return false + } + case source.FlexVolume != nil: + if source.FlexVolume.SecretRef != nil && !visitor(source.FlexVolume.SecretRef.Name) { + return false + } + case source.Projected != nil: + for j := range source.Projected.Sources { + if source.Projected.Sources[j].Secret != nil { + if !visitor(source.Projected.Sources[j].Secret.Name) { + return false + } + } + } + case source.RBD != nil: + if source.RBD.SecretRef != nil && !visitor(source.RBD.SecretRef.Name) { + return false + } + case source.Secret != nil: + if !visitor(source.Secret.SecretName) { + return false + } + case source.ScaleIO != nil: + if source.ScaleIO.SecretRef != nil && !visitor(source.ScaleIO.SecretRef.Name) { + return false + } + case source.ISCSI != nil: + if source.ISCSI.SecretRef != nil && !visitor(source.ISCSI.SecretRef.Name) { + return false + } + case source.StorageOS != nil: + if source.StorageOS.SecretRef != nil && !visitor(source.StorageOS.SecretRef.Name) { + return false + } + case source.CSI != nil: + if source.CSI.NodePublishSecretRef != nil && !visitor(source.CSI.NodePublishSecretRef.Name) { + return false + } + } + } + return true +} + 
+// visitContainerSecretNames returns true unless the visitor returned false when invoked with a secret reference +func visitContainerSecretNames(container *v1.Container, visitor Visitor) bool { + for _, env := range container.EnvFrom { + if env.SecretRef != nil { + if !visitor(env.SecretRef.Name) { + return false + } + } + } + for _, envVar := range container.Env { + if envVar.ValueFrom != nil && envVar.ValueFrom.SecretKeyRef != nil { + if !visitor(envVar.ValueFrom.SecretKeyRef.Name) { + return false + } + } + } + return true +} + +// VisitPodConfigmapNames invokes the visitor function with the name of every configmap +// referenced by the pod spec. If visitor returns false, visiting is short-circuited. +// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited. +// Returns true if visiting completed, false if visiting was short-circuited. +func VisitPodConfigmapNames(pod *v1.Pod, visitor Visitor) bool { + visitor = skipEmptyNames(visitor) + VisitContainers(&pod.Spec, AllContainers, func(c *v1.Container, containerType ContainerType) bool { + return visitContainerConfigmapNames(c, visitor) + }) + var source *v1.VolumeSource + for i := range pod.Spec.Volumes { + source = &pod.Spec.Volumes[i].VolumeSource + switch { + case source.Projected != nil: + for j := range source.Projected.Sources { + if source.Projected.Sources[j].ConfigMap != nil { + if !visitor(source.Projected.Sources[j].ConfigMap.Name) { + return false + } + } + } + case source.ConfigMap != nil: + if !visitor(source.ConfigMap.Name) { + return false + } + } + } + return true +} + +// visitContainerConfigmapNames returns true unless the visitor returned false when invoked with a configmap reference +func visitContainerConfigmapNames(container *v1.Container, visitor Visitor) bool { + for _, env := range container.EnvFrom { + if env.ConfigMapRef != nil { + if !visitor(env.ConfigMapRef.Name) { + return false + } + } + } + for _, envVar := range container.Env { + if envVar.ValueFrom != nil && 
envVar.ValueFrom.ConfigMapKeyRef != nil { + if !visitor(envVar.ValueFrom.ConfigMapKeyRef.Name) { + return false + } + } + } + return true +} + +// GetContainerStatus extracts the status of container "name" from "statuses". +// It returns true if "name" exists, else returns false. +func GetContainerStatus(statuses []v1.ContainerStatus, name string) (v1.ContainerStatus, bool) { + for i := range statuses { + if statuses[i].Name == name { + return statuses[i], true + } + } + return v1.ContainerStatus{}, false +} + +// GetExistingContainerStatus extracts the status of container "name" from "statuses", +// It also returns if "name" exists. +func GetExistingContainerStatus(statuses []v1.ContainerStatus, name string) v1.ContainerStatus { + status, _ := GetContainerStatus(statuses, name) + return status +} + +// GetIndexOfContainerStatus gets the index of status of container "name" from "statuses", +// It returns (index, true) if "name" exists, else returns (0, false). +func GetIndexOfContainerStatus(statuses []v1.ContainerStatus, name string) (int, bool) { + for i := range statuses { + if statuses[i].Name == name { + return i, true + } + } + return 0, false +} + +// IsPodAvailable returns true if a pod is available; false otherwise. +// Precondition for an available pod is that it must be ready. On top +// of that, there are two cases when a pod can be considered available: +// 1. minReadySeconds == 0, or +// 2. LastTransitionTime (is set) + minReadySeconds < current time +func IsPodAvailable(pod *v1.Pod, minReadySeconds int32, now metav1.Time) bool { + if !IsPodReady(pod) { + return false + } + + c := GetPodReadyCondition(pod.Status) + minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second + if minReadySeconds == 0 || (!c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time)) { + return true + } + return false +} + +// IsPodReady returns true if a pod is ready; false otherwise. 
+func IsPodReady(pod *v1.Pod) bool { + return IsPodReadyConditionTrue(pod.Status) +} + +// IsPodTerminal returns true if a pod is terminal, all containers are stopped and cannot ever regress. +func IsPodTerminal(pod *v1.Pod) bool { + return IsPodPhaseTerminal(pod.Status.Phase) +} + +// IsPodPhaseTerminal returns true if the pod's phase is terminal. +func IsPodPhaseTerminal(phase v1.PodPhase) bool { + return phase == v1.PodFailed || phase == v1.PodSucceeded +} + +// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise. +func IsPodReadyConditionTrue(status v1.PodStatus) bool { + condition := GetPodReadyCondition(status) + return condition != nil && condition.Status == v1.ConditionTrue +} + +// IsContainersReadyConditionTrue returns true if a pod is ready; false otherwise. +func IsContainersReadyConditionTrue(status v1.PodStatus) bool { + condition := GetContainersReadyCondition(status) + return condition != nil && condition.Status == v1.ConditionTrue +} + +// GetPodReadyCondition extracts the pod ready condition from the given status and returns that. +// Returns nil if the condition is not present. +func GetPodReadyCondition(status v1.PodStatus) *v1.PodCondition { + _, condition := GetPodCondition(&status, v1.PodReady) + return condition +} + +// GetContainersReadyCondition extracts the containers ready condition from the given status and returns that. +// Returns nil if the condition is not present. +func GetContainersReadyCondition(status v1.PodStatus) *v1.PodCondition { + _, condition := GetPodCondition(&status, v1.ContainersReady) + return condition +} + +// GetPodCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. 
+func GetPodCondition(status *v1.PodStatus, conditionType v1.PodConditionType) (int, *v1.PodCondition) { + if status == nil { + return -1, nil + } + return GetPodConditionFromList(status.Conditions, conditionType) +} + +// GetPodConditionFromList extracts the provided condition from the given list of condition and +// returns the index of the condition and the condition. Returns -1 and nil if the condition is not present. +func GetPodConditionFromList(conditions []v1.PodCondition, conditionType v1.PodConditionType) (int, *v1.PodCondition) { + if conditions == nil { + return -1, nil + } + for i := range conditions { + if conditions[i].Type == conditionType { + return i, &conditions[i] + } + } + return -1, nil +} + +// UpdatePodCondition updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the +// status has changed. +// Returns true if pod condition has changed or has been added. +func UpdatePodCondition(status *v1.PodStatus, condition *v1.PodCondition) bool { + condition.LastTransitionTime = metav1.Now() + // Try to find this pod condition. + conditionIndex, oldCondition := GetPodCondition(status, condition.Type) + + if oldCondition == nil { + // We are adding new pod condition. + status.Conditions = append(status.Conditions, *condition) + return true + } + // We are updating an existing condition, so we need to check if it has changed. + if condition.Status == oldCondition.Status { + condition.LastTransitionTime = oldCondition.LastTransitionTime + } + + isEqual := condition.Status == oldCondition.Status && + condition.Reason == oldCondition.Reason && + condition.Message == oldCondition.Message && + condition.LastProbeTime.Equal(&oldCondition.LastProbeTime) && + condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime) + + status.Conditions[conditionIndex] = *condition + // Return true if one of the fields have changed. 
+ return !isEqual +} + +// IsRestartableInitContainer returns true if the container has ContainerRestartPolicyAlways. +// This function is not checking if the container passed to it is indeed an init container. +// It is just checking if the container restart policy has been set to always. +func IsRestartableInitContainer(initContainer *v1.Container) bool { + if initContainer == nil || initContainer.RestartPolicy == nil { + return false + } + return *initContainer.RestartPolicy == v1.ContainerRestartPolicyAlways +} diff --git a/go-controller/vendor/modules.txt b/go-controller/vendor/modules.txt index 5732a53975..53c3f3b497 100644 --- a/go-controller/vendor/modules.txt +++ b/go-controller/vendor/modules.txt @@ -1176,6 +1176,7 @@ k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/validation/spec # k8s.io/kubernetes v1.32.6 ## explicit; go 1.23.0 +k8s.io/kubernetes/pkg/api/v1/pod k8s.io/kubernetes/pkg/apis/core k8s.io/kubernetes/pkg/probe k8s.io/kubernetes/pkg/probe/http From d942a7d81433a4f1dc1d12e59de1feede60254f3 Mon Sep 17 00:00:00 2001 From: arkadeepsen Date: Thu, 12 Jun 2025 18:07:52 +0530 Subject: [PATCH 142/181] Add unit tests for ex gw pods in terminating or not ready state Signed-off-by: arkadeepsen --- .../external_controller_namespace_test.go | 22 +- .../apbroute/external_controller_pod_test.go | 187 ++++++ .../external_controller_policy_test.go | 12 +- go-controller/pkg/ovn/egressgw_test.go | 586 ++++++++++++++++++ go-controller/pkg/ovn/pods_test.go | 6 + 5 files changed, 809 insertions(+), 4 deletions(-) diff --git a/go-controller/pkg/ovn/controller/apbroute/external_controller_namespace_test.go b/go-controller/pkg/ovn/controller/apbroute/external_controller_namespace_test.go index 57ab01d93b..6f521bf2bb 100644 --- a/go-controller/pkg/ovn/controller/apbroute/external_controller_namespace_test.go +++ b/go-controller/pkg/ovn/controller/apbroute/external_controller_namespace_test.go @@ -201,14 +201,32 @@ var _ = Describe("OVN External Gateway 
namespace", func() { "k8s.ovn.org/routing-network": "", nettypes.NetworkStatusAnnot: fmt.Sprintf(network_status, annotatedPodIP)}, }, - Status: corev1.PodStatus{PodIPs: []corev1.PodIP{{IP: annotatedPodIP}}, Phase: corev1.PodRunning}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{{IP: annotatedPodIP}}, + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, } podGW = &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "pod", Namespace: namespaceGW.Name, Labels: map[string]string{"name": "pod"}, Annotations: map[string]string{nettypes.NetworkStatusAnnot: fmt.Sprintf(network_status, dynamicHopHostNetPodIP)}}, - Status: corev1.PodStatus{PodIPs: []corev1.PodIP{{IP: dynamicHopHostNetPodIP}}, Phase: corev1.PodRunning}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{{IP: dynamicHopHostNetPodIP}}, + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, } namespaceTargetWithPod, namespaceTarget2WithPod, namespaceTarget2WithoutPod, namespaceGWWithPod *namespaceWithPods ) diff --git a/go-controller/pkg/ovn/controller/apbroute/external_controller_pod_test.go b/go-controller/pkg/ovn/controller/apbroute/external_controller_pod_test.go index 509940c730..7cbbcd7430 100644 --- a/go-controller/pkg/ovn/controller/apbroute/external_controller_pod_test.go +++ b/go-controller/pkg/ovn/controller/apbroute/external_controller_pod_test.go @@ -448,6 +448,163 @@ var _ = Describe("OVN External Gateway pod", func() { }) }) + + var _ = Context("When pod goes into terminating or not ready state", func() { + + DescribeTable("reconciles a pod gateway in terminating or not ready state that matches two policies", func( + terminating bool, + ) { + initController([]runtime.Object{namespaceGW, namespaceTarget, namespaceTarget2, targetPod1, targetPod2, pod1}, + []runtime.Object{dynamicPolicy, dynamicPolicyDiffTargetNS}) + + 
expectedPolicy1, expectedRefs1 := expectedPolicyStateAndRefs( + []*namespaceWithPods{namespaceTarget2WithPod}, + nil, + []*namespaceWithPods{namespaceGWWithPod}, false) + + expectedPolicy2, expectedRefs2 := expectedPolicyStateAndRefs( + []*namespaceWithPods{namespaceTargetWithPod}, + nil, + []*namespaceWithPods{namespaceGWWithPod}, false) + + eventuallyExpectNumberOfPolicies(2) + eventuallyExpectConfig(dynamicPolicy.Name, expectedPolicy1, expectedRefs1) + eventuallyExpectConfig(dynamicPolicyDiffTargetNS.Name, expectedPolicy2, expectedRefs2) + + if terminating { + By("Setting deletion timestamp for the ex gw pod") + setPodDeletionTimestamp(pod1, &metav1.Time{Time: time.Now().Add(1000 * time.Second)}, fakeClient) + } else { + By("Updating the ex gw pod status to mark it as not ready") + setPodConditionReady(pod1, corev1.ConditionFalse, fakeClient) + } + + expectedPolicy1, expectedRefs1 = expectedPolicyStateAndRefs( + []*namespaceWithPods{namespaceTarget2WithPod}, + nil, + []*namespaceWithPods{namespaceGWWithoutPod}, false) + + expectedPolicy2, expectedRefs2 = expectedPolicyStateAndRefs( + []*namespaceWithPods{namespaceTargetWithPod}, + nil, + []*namespaceWithPods{namespaceGWWithoutPod}, false) + + eventuallyExpectNumberOfPolicies(2) + eventuallyExpectConfig(dynamicPolicy.Name, expectedPolicy1, expectedRefs1) + eventuallyExpectConfig(dynamicPolicyDiffTargetNS.Name, expectedPolicy2, expectedRefs2) + }, + Entry("Gateway pod in terminating state", true), + Entry("Gateway pod in not ready state", false), + ) + + DescribeTable("reconciles a pod gateway in terminating or not ready state that does not match any policy", func( + terminating bool, + ) { + noMatchPolicy := newPolicy( + "noMatchPolicy", + &metav1.LabelSelector{MatchLabels: targetNamespace1Match}, + nil, + &metav1.LabelSelector{MatchLabels: gatewayNamespaceMatch}, + &metav1.LabelSelector{MatchLabels: map[string]string{"key": "nomatch"}}, + false, + ) + initController([]runtime.Object{namespaceGW, namespaceTarget, 
pod1}, []runtime.Object{noMatchPolicy}) + + expectedPolicy, expectedRefs := expectedPolicyStateAndRefs( + []*namespaceWithPods{namespaceTargetWithoutPod}, + nil, + []*namespaceWithPods{namespaceGWWithoutPod}, false) + + eventuallyExpectNumberOfPolicies(1) + eventuallyExpectConfig(noMatchPolicy.Name, expectedPolicy, expectedRefs) + + if terminating { + By("Setting deletion timestamp for the ex gw pod") + setPodDeletionTimestamp(pod1, &metav1.Time{Time: time.Now().Add(1000 * time.Second)}, fakeClient) + } else { + By("Updating the ex gw pod status to mark it as not ready") + setPodConditionReady(pod1, corev1.ConditionFalse, fakeClient) + } + // make sure pod event is handled + time.Sleep(100 * time.Millisecond) + + eventuallyExpectNumberOfPolicies(1) + eventuallyExpectConfig(noMatchPolicy.Name, expectedPolicy, expectedRefs) + }, + Entry("Gateway pod in terminating state", true), + Entry("Gateway pod in not ready state", false), + ) + + DescribeTable("reconciles a pod gateway in terminating or not ready state that is one of two pods that matches two policies", func( + terminating bool, + ) { + initController([]runtime.Object{namespaceGW, namespaceTarget, namespaceTarget2, targetPod1, targetPod2, pod1, pod2}, + []runtime.Object{dynamicPolicy, dynamicPolicyDiffTargetNS}) + namespaceGWWith2Pods := newNamespaceWithPods(namespaceGW.Name, pod1, pod2) + expectedPolicy1, expectedRefs1 := expectedPolicyStateAndRefs( + []*namespaceWithPods{namespaceTarget2WithPod}, + nil, + []*namespaceWithPods{namespaceGWWith2Pods}, false) + + expectedPolicy2, expectedRefs2 := expectedPolicyStateAndRefs( + []*namespaceWithPods{namespaceTargetWithPod}, + nil, + []*namespaceWithPods{namespaceGWWith2Pods}, false) + + eventuallyExpectNumberOfPolicies(2) + eventuallyExpectConfig(dynamicPolicy.Name, expectedPolicy1, expectedRefs1) + eventuallyExpectConfig(dynamicPolicyDiffTargetNS.Name, expectedPolicy2, expectedRefs2) + + if terminating { + By("Setting deletion timestamp for the ex gw pod") + 
setPodDeletionTimestamp(pod1, &metav1.Time{Time: time.Now().Add(1000 * time.Second)}, fakeClient) + } else { + By("Updating the ex gw pod status to mark it as not ready") + setPodConditionReady(pod1, corev1.ConditionFalse, fakeClient) + } + + namespaceGWWith1Pod := newNamespaceWithPods(namespaceGW.Name, pod2) + + expectedPolicy1, expectedRefs1 = expectedPolicyStateAndRefs( + []*namespaceWithPods{namespaceTarget2WithPod}, + nil, + []*namespaceWithPods{namespaceGWWith1Pod}, false) + + expectedPolicy2, expectedRefs2 = expectedPolicyStateAndRefs( + []*namespaceWithPods{namespaceTargetWithPod}, + nil, + []*namespaceWithPods{namespaceGWWith1Pod}, false) + + eventuallyExpectNumberOfPolicies(2) + eventuallyExpectConfig(dynamicPolicy.Name, expectedPolicy1, expectedRefs1) + eventuallyExpectConfig(dynamicPolicyDiffTargetNS.Name, expectedPolicy2, expectedRefs2) + + if terminating { + By("Removing deletion timestamp for the ex gw pod") + setPodDeletionTimestamp(pod1, nil, fakeClient) + } else { + By("Updating the ex gw pod status to mark it as ready") + setPodConditionReady(pod1, corev1.ConditionTrue, fakeClient) + } + + expectedPolicy1, expectedRefs1 = expectedPolicyStateAndRefs( + []*namespaceWithPods{namespaceTarget2WithPod}, + nil, + []*namespaceWithPods{namespaceGWWith2Pods}, false) + + expectedPolicy2, expectedRefs2 = expectedPolicyStateAndRefs( + []*namespaceWithPods{namespaceTargetWithPod}, + nil, + []*namespaceWithPods{namespaceGWWith2Pods}, false) + + eventuallyExpectNumberOfPolicies(2) + eventuallyExpectConfig(dynamicPolicy.Name, expectedPolicy1, expectedRefs1) + eventuallyExpectConfig(dynamicPolicyDiffTargetNS.Name, expectedPolicy2, expectedRefs2) + }, + Entry("Gateway pod in terminating state", true), + Entry("Gateway pod in not ready state", false), + ) + }) }) func deletePod(pod *corev1.Pod, fakeClient *fake.Clientset) { @@ -478,6 +635,36 @@ func updatePodStatus(pod *corev1.Pod, podStatus corev1.PodStatus) { Expect(err).NotTo(HaveOccurred()) } +func 
setPodDeletionTimestamp(pod *corev1.Pod, deletionTimestamp *metav1.Time, fakeClient *fake.Clientset) { + p, err := fakeClient.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + incrementResourceVersion(p) + p.DeletionTimestamp = deletionTimestamp + _, err = fakeClient.CoreV1().Pods(pod.Namespace).Update(context.Background(), p, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) +} + +func setPodConditionReady(pod *corev1.Pod, condStatus corev1.ConditionStatus, fakeClient *fake.Clientset) { + p, err := fakeClient.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + incrementResourceVersion(p) + if p.Status.Conditions != nil { + for i := range p.Status.Conditions { + if p.Status.Conditions[i].Type == corev1.PodReady { + p.Status.Conditions[i].Status = condStatus + } + } + } else { + notReadyCondition := corev1.PodCondition{ + Type: corev1.PodReady, + Status: corev1.ConditionFalse, + } + p.Status.Conditions = []corev1.PodCondition{notReadyCondition} + } + _, err = fakeClient.CoreV1().Pods(pod.Namespace).Update(context.Background(), p, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) +} + func incrementResourceVersion(obj metav1.Object) { var rs int64 if obj.GetResourceVersion() != "" { diff --git a/go-controller/pkg/ovn/controller/apbroute/external_controller_policy_test.go b/go-controller/pkg/ovn/controller/apbroute/external_controller_policy_test.go index 2605fad7bc..266312ce2c 100644 --- a/go-controller/pkg/ovn/controller/apbroute/external_controller_policy_test.go +++ b/go-controller/pkg/ovn/controller/apbroute/external_controller_policy_test.go @@ -40,8 +40,16 @@ func newPodWithPhaseAndIP(podName, namespace string, phase corev1.PodPhase, podI p := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: namespace, Labels: labels}, - Spec: corev1.PodSpec{NodeName: "node"}, - Status: 
corev1.PodStatus{Phase: phase}, + Spec: corev1.PodSpec{NodeName: "node"}, + Status: corev1.PodStatus{ + Phase: phase, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, } if len(podIP) > 0 { p.Annotations = map[string]string{nettypes.NetworkStatusAnnot: fmt.Sprintf(network_status, podIP)} diff --git a/go-controller/pkg/ovn/egressgw_test.go b/go-controller/pkg/ovn/egressgw_test.go index 9696d4192b..420f2f26e1 100644 --- a/go-controller/pkg/ovn/egressgw_test.go +++ b/go-controller/pkg/ovn/egressgw_test.go @@ -6,6 +6,7 @@ import ( "fmt" "net" "sync" + "time" nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" "github.com/onsi/ginkgo/v2" @@ -1818,6 +1819,591 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, ), ) + ginkgo.DescribeTable("reconciles a host networked pod in terminating or not ready state acting as a exgw for another namespace for existing pod", + func(bfd bool, + terminating bool, + beforeUpdateNB []libovsdbtest.TestData, + afterUpdateNB []libovsdbtest.TestData, + expectedNamespaceAnnotation string, + apbExternalRouteCRList *adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList) { + app.Action = func(*cli.Context) error { + + namespaceT := *newNamespace(namespaceName) + namespaceX := *newNamespace(namespace2Name) + t := newTPod( + "node1", + "10.128.1.0/24", + "10.128.1.2", + "10.128.1.1", + "myPod", + "10.128.1.3", + "0a:58:0a:80:01:03", + namespaceT.Name, + ) + gwPod := *newPod(namespaceX.Name, gwPodName, "node2", "9.0.0.1") + gwPod.Annotations = map[string]string{"k8s.ovn.org/routing-namespaces": namespaceT.Name} + if bfd { + gwPod.Annotations["k8s.ovn.org/bfd-enabled"] = "" + } + gwPod.Spec.HostNetwork = true + fakeOvn.startWithDBSetup( + libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: 
"node2", + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + }, + }, + }, + &corev1.NamespaceList{ + Items: []corev1.Namespace{ + namespaceT, namespaceX, + }, + }, + &corev1.NodeList{ + Items: []corev1.Node{ + *newNode("node1", "192.168.126.202/24"), + *newNode("node2", "192.168.126.50/24"), + }, + }, + &corev1.PodList{ + Items: []corev1.Pod{ + *newPod(t.namespace, t.podName, t.nodeName, t.podIP), + }, + }, + apbExternalRouteCRList, + ) + t.populateLogicalSwitchCache(fakeOvn) + err := fakeOvn.controller.lsManager.AddOrUpdateSwitch("node2", []*net.IPNet{ovntest.MustParseIPNet("10.128.2.0/24")}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + injectNode(fakeOvn) + err = fakeOvn.controller.WatchNamespaces() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchPods() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + fakeOvn.RunAPBExternalPolicyController() + + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(namespaceX.Name).Create(context.TODO(), &gwPod, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(beforeUpdateNB)) + gomega.Eventually(func() string { + return getNamespaceAnnotations(fakeOvn.fakeClient.KubeClient, namespaceT.Name)[util.ExternalGatewayPodIPsAnnotation] + }).Should(gomega.Equal("9.0.0.1")) + + if terminating { + ginkgo.By("Setting deletion timestamp for the ex gw pod") + gwPod.DeletionTimestamp = &metav1.Time{Time: time.Now().Add(1000 * time.Second)} + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(namespaceX.Name).Update(context.TODO(), &gwPod, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + ginkgo.By("Updating the ex gw pod status to mark it as not ready") + notReadyCondition := corev1.PodCondition{ + Type: corev1.PodReady, + Status: corev1.ConditionFalse, + } + gwPod.Status.Conditions = []corev1.PodCondition{notReadyCondition} + _, err = 
fakeOvn.fakeClient.KubeClient.CoreV1().Pods(namespaceX.Name).UpdateStatus(context.TODO(), &gwPod, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(afterUpdateNB)) + gomega.Eventually(func() string { + return getNamespaceAnnotations(fakeOvn.fakeClient.KubeClient, namespaceT.Name)[util.ExternalGatewayPodIPsAnnotation] + }).Should(gomega.Equal(expectedNamespaceAnnotation)) + for _, apbRoutePolicy := range apbExternalRouteCRList.Items { + checkAPBRouteStatus(fakeOvn, apbRoutePolicy.Name, false) + } + return nil + } + + err := app.Run([]string{app.Name}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }, + ginkgo.Entry("No BFD with ex gw pod in terminating state", false, true, + []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: map[string]string{ + "iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.LogicalRouterStaticRoute{ + UUID: "static-route-1-UUID", + IPPrefix: "10.128.1.3/32", + Nexthop: "9.0.0.1", + Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, + OutputPort: &logicalRouterPort, + Options: map[string]string{ + "ecmp_symmetric_reply": "true", + }, + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{"static-route-1-UUID"}, + }, + }, + []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: 
map[string]string{ + "iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{}, + }, + }, + "", + &adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList{}, + ), + ginkgo.Entry("No BFD with ex gw pod in not ready state", false, false, + []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: map[string]string{ + "iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.LogicalRouterStaticRoute{ + UUID: "static-route-1-UUID", + IPPrefix: "10.128.1.3/32", + Nexthop: "9.0.0.1", + Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, + OutputPort: &logicalRouterPort, + Options: map[string]string{ + "ecmp_symmetric_reply": "true", + }, + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{"static-route-1-UUID"}, + }, + }, + []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: map[string]string{ + "iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + 
&nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{}, + }, + }, + "", + &adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList{}, + ), + ginkgo.Entry("BFD Enabled with ex gw pod in terminating state", true, true, []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: map[string]string{ + "iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.BFD{ + UUID: bfd1NamedUUID, + DstIP: "9.0.0.1", + LogicalPort: "rtoe-GR_node1", + }, + &nbdb.LogicalRouterStaticRoute{ + UUID: "static-route-1-UUID", + IPPrefix: "10.128.1.3/32", + Nexthop: "9.0.0.1", + BFD: &bfd1NamedUUID, + Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, + OutputPort: &logicalRouterPort, + Options: map[string]string{ + "ecmp_symmetric_reply": "true", + }, + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{"static-route-1-UUID"}, + }, + }, + []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: map[string]string{ + "iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", 
+ StaticRoutes: []string{}, + }, + }, + "", + &adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList{}, + ), + ginkgo.Entry("BFD Enabled with ex gw pod in not ready state", true, false, []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: map[string]string{ + "iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.BFD{ + UUID: bfd1NamedUUID, + DstIP: "9.0.0.1", + LogicalPort: "rtoe-GR_node1", + }, + &nbdb.LogicalRouterStaticRoute{ + UUID: "static-route-1-UUID", + IPPrefix: "10.128.1.3/32", + Nexthop: "9.0.0.1", + BFD: &bfd1NamedUUID, + Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, + OutputPort: &logicalRouterPort, + Options: map[string]string{ + "ecmp_symmetric_reply": "true", + }, + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{"static-route-1-UUID"}, + }, + }, + []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: map[string]string{ + "iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{}, + }, + }, + "", + &adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList{}, + ), + 
ginkgo.Entry("No BFD with ex gw pod in terminating state and with overlapping APB External Route CR and annotation", false, true, + []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: map[string]string{ + "iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.LogicalRouterStaticRoute{ + UUID: "static-route-1-UUID", + IPPrefix: "10.128.1.3/32", + Nexthop: "9.0.0.1", + Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, + OutputPort: &logicalRouterPort, + Options: map[string]string{ + "ecmp_symmetric_reply": "true", + }, + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{"static-route-1-UUID"}, + }, + }, + []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: map[string]string{ + "iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{}, + }, + }, + "", + &adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList{ + Items: []adminpolicybasedrouteapi.AdminPolicyBasedExternalRoute{ + newPolicy("policy", + &metav1.LabelSelector{MatchLabels: map[string]string{"name": namespaceName}}, + nil, + false, + 
&metav1.LabelSelector{MatchLabels: map[string]string{"name": namespace2Name}}, + &metav1.LabelSelector{MatchLabels: map[string]string{"name": gwPodName}}, + false, + ""), + }, + }, + ), + ginkgo.Entry("No BFD with ex gw pod in not ready state and with overlapping APB External Route CR and annotation", false, false, + []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: map[string]string{ + "iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.LogicalRouterStaticRoute{ + UUID: "static-route-1-UUID", + IPPrefix: "10.128.1.3/32", + Nexthop: "9.0.0.1", + Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, + OutputPort: &logicalRouterPort, + Options: map[string]string{ + "ecmp_symmetric_reply": "true", + }, + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{"static-route-1-UUID"}, + }, + }, + []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: map[string]string{ + "iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{}, + }, + }, + "", + &adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList{ + Items: 
[]adminpolicybasedrouteapi.AdminPolicyBasedExternalRoute{ + newPolicy("policy", + &metav1.LabelSelector{MatchLabels: map[string]string{"name": namespaceName}}, + nil, + false, + &metav1.LabelSelector{MatchLabels: map[string]string{"name": namespace2Name}}, + &metav1.LabelSelector{MatchLabels: map[string]string{"name": gwPodName}}, + false, + ""), + }, + }, + ), + ) }) ginkgo.Context("on using bfd", func() { ginkgo.It("should enable bfd only on the namespace gw when set", func() { diff --git a/go-controller/pkg/ovn/pods_test.go b/go-controller/pkg/ovn/pods_test.go index cf1caae6e7..590d34bf3a 100644 --- a/go-controller/pkg/ovn/pods_test.go +++ b/go-controller/pkg/ovn/pods_test.go @@ -124,6 +124,12 @@ func newPod(namespace, name, node, podIP string) *corev1.Pod { Phase: corev1.PodRunning, PodIP: podIP, PodIPs: podIPs, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, }, } } From d565fd869e156bd4f299a688794a272647cb0dfe Mon Sep 17 00:00:00 2001 From: arkadeepsen Date: Thu, 12 Jun 2025 23:20:39 +0530 Subject: [PATCH 143/181] Add e2e tests for ex gw pods in terminating or not ready state Signed-off-by: arkadeepsen --- test/e2e/external_gateways.go | 213 +++++++++++++++++++++++++++++----- 1 file changed, 186 insertions(+), 27 deletions(-) diff --git a/test/e2e/external_gateways.go b/test/e2e/external_gateways.go index 4a119ae96b..bf3742ea68 100644 --- a/test/e2e/external_gateways.go +++ b/test/e2e/external_gateways.go @@ -42,6 +42,16 @@ const ( anyLink = "any" ) +// GatewayRemovalType defines ways to remove pod as external gateway +type GatewayRemovalType string + +const ( + GatewayUpdate GatewayRemovalType = "GatewayUpdate" + GatewayDelete GatewayRemovalType = "GatewayDelete" + GatewayDeletionTimestamp GatewayRemovalType = "GatewayDeletionTimestamp" + GatewayNotReady GatewayRemovalType = "GatewayNotReady" +) + func getOverrideNetwork() (string, string, string) { // When the env variable is specified, we use a 
different docker network for // containers acting as external gateways. @@ -875,10 +885,15 @@ var _ = ginkgo.Describe("External Gateway", feature.ExternalGateway, func() { ginkgo.Entry("IPV6 udp", &addressesv6, "udp"), ginkgo.Entry("IPV6 tcp", &addressesv6, "tcp")) - ginkgo.DescribeTable("ExternalGWPod annotation: Should validate conntrack entry deletion for TCP/UDP traffic via multiple external gateways a.k.a ECMP routes", func(addresses *gatewayTestIPs, protocol string, deletePod bool) { + ginkgo.DescribeTable("ExternalGWPod annotation: Should validate conntrack entry deletion for TCP/UDP traffic via multiple external gateways a.k.a ECMP routes", func(addresses *gatewayTestIPs, protocol string, removalType GatewayRemovalType) { if addresses.srcPodIP == "" || addresses.nodeIP == "" { skipper.Skipf("Skipping as pod ip / node ip are not set pod ip %s node ip %s", addresses.srcPodIP, addresses.nodeIP) } + + if removalType == GatewayNotReady { + recreatePodWithReadinessProbe(f, gatewayPodName2, nodes.Items[1].Name, servingNamespace, sleepCommand, nil) + } + ginkgo.By("Annotate the external gw pods to manage the src app pod namespace") for i, gwPod := range []string{gatewayPodName1, gatewayPodName2} { networkIPs := fmt.Sprintf("\"%s\"", addresses.gatewayIPs[i]) @@ -925,15 +940,9 @@ var _ = ginkgo.Describe("External Gateway", feature.ExternalGateway, func() { totalPodConnEntries := pokeConntrackEntries(nodeName, addresses.srcPodIP, protocol, nil) gomega.Expect(totalPodConnEntries).To(gomega.Equal(6)) // total conntrack entries for this pod/protocol - if deletePod { - ginkgo.By(fmt.Sprintf("Delete second external gateway pod %s from ns %s", gatewayPodName2, servingNamespace)) - err = f.ClientSet.CoreV1().Pods(servingNamespace).Delete(context.TODO(), gatewayPodName2, metav1.DeleteOptions{}) - framework.ExpectNoError(err, "Delete the gateway pod failed: %v", err) - // give some time to handle pod delete event - time.Sleep(5 * time.Second) - } else { - ginkgo.By("Remove 
second external gateway pod's routing-namespace annotation") - annotatePodForGateway(gatewayPodName2, servingNamespace, "", addresses.gatewayIPs[1], false) + cleanUpFn := handleGatewayPodRemoval(f, removalType, gatewayPodName2, servingNamespace, addresses.gatewayIPs[1], true) + if cleanUpFn != nil { + defer cleanUpFn() } // ensure the conntrack deletion tracker annotation is updated @@ -973,12 +982,20 @@ var _ = ginkgo.Describe("External Gateway", feature.ExternalGateway, func() { gomega.Expect(podConnEntriesWithMACLabelsSet).To(gomega.Equal(0)) // we don't have any remaining gateways left gomega.Expect(totalPodConnEntries).To(gomega.Equal(4)) // 6-2 }, - ginkgo.Entry("IPV4 udp", &addressesv4, "udp", false), - ginkgo.Entry("IPV4 tcp", &addressesv4, "tcp", false), - ginkgo.Entry("IPV6 udp", &addressesv6, "udp", false), - ginkgo.Entry("IPV6 tcp", &addressesv6, "tcp", false), - ginkgo.Entry("IPV4 udp + pod delete", &addressesv4, "udp", true), - ginkgo.Entry("IPV6 tcp + pod delete", &addressesv6, "tcp", true), + ginkgo.Entry("IPV4 udp + pod annotation update", &addressesv4, "udp", GatewayUpdate), + ginkgo.Entry("IPV4 tcp + pod annotation update", &addressesv4, "tcp", GatewayUpdate), + ginkgo.Entry("IPV6 udp + pod annotation update", &addressesv6, "udp", GatewayUpdate), + ginkgo.Entry("IPV6 tcp + pod annotation update", &addressesv6, "tcp", GatewayUpdate), + ginkgo.Entry("IPV4 udp + pod delete", &addressesv4, "udp", GatewayDelete), + ginkgo.Entry("IPV6 tcp + pod delete", &addressesv6, "tcp", GatewayDelete), + ginkgo.Entry("IPV4 udp + pod deletion timestamp", &addressesv4, "udp", GatewayDeletionTimestamp), + ginkgo.Entry("IPV4 tcp + pod deletion timestamp", &addressesv4, "tcp", GatewayDeletionTimestamp), + ginkgo.Entry("IPV6 udp + pod deletion timestamp", &addressesv6, "udp", GatewayDeletionTimestamp), + ginkgo.Entry("IPV6 tcp + pod deletion timestamp", &addressesv6, "tcp", GatewayDeletionTimestamp), + ginkgo.Entry("IPV4 udp + pod not ready", &addressesv4, "udp", 
GatewayNotReady), + ginkgo.Entry("IPV4 tcp + pod not ready", &addressesv4, "tcp", GatewayNotReady), + ginkgo.Entry("IPV6 udp + pod not ready", &addressesv6, "udp", GatewayNotReady), + ginkgo.Entry("IPV6 tcp + pod not ready", &addressesv6, "tcp", GatewayNotReady), ) }) @@ -1983,11 +2000,15 @@ var _ = ginkgo.Describe("External Gateway", feature.ExternalGateway, func() { ginkgo.Entry("IPV6 udp", &addressesv6, "udp"), ginkgo.Entry("IPV6 tcp", &addressesv6, "tcp")) - ginkgo.DescribeTable("Dynamic Hop: Should validate conntrack entry deletion for TCP/UDP traffic via multiple external gateways a.k.a ECMP routes", func(addresses *gatewayTestIPs, protocol string) { + ginkgo.DescribeTable("Dynamic Hop: Should validate conntrack entry deletion for TCP/UDP traffic via multiple external gateways a.k.a ECMP routes", func(addresses *gatewayTestIPs, protocol string, removalType GatewayRemovalType) { if addresses.srcPodIP == "" || addresses.nodeIP == "" { skipper.Skipf("Skipping as pod ip / node ip are not set pod ip %s node ip %s", addresses.srcPodIP, addresses.nodeIP) } + if removalType == GatewayNotReady { + recreatePodWithReadinessProbe(f, gatewayPodName2, nodes.Items[1].Name, servingNamespace, sleepCommand, map[string]string{"name": gatewayPodName2, "gatewayPod": "true"}) + } + for i, gwPod := range []string{gatewayPodName1, gatewayPodName2} { annotateMultusNetworkStatusInPodGateway(gwPod, servingNamespace, []string{addresses.gatewayIPs[i], addresses.gatewayIPs[i]}) } @@ -2026,10 +2047,10 @@ var _ = ginkgo.Describe("External Gateway", feature.ExternalGateway, func() { }, time.Minute, 5).Should(gomega.Equal(podConnEntriesWithMACLabelsSet)) gomega.Expect(pokeConntrackEntries(nodeName, addresses.srcPodIP, protocol, nil)).To(gomega.Equal(totalPodConnEntries)) // total conntrack entries for this pod/protocol - ginkgo.By("Remove second external gateway pod's routing-namespace annotation") - p := getGatewayPod(f, servingNamespace, gatewayPodName2) - p.Labels = 
map[string]string{"name": gatewayPodName2} - updatePod(f, p) + cleanUpFn := handleGatewayPodRemoval(f, removalType, gatewayPodName2, servingNamespace, addresses.gatewayIPs[1], false) + if cleanUpFn != nil { + defer cleanUpFn() + } ginkgo.By("Check if conntrack entries for ECMP routes are removed for the deleted external gateway if traffic is UDP") @@ -2044,7 +2065,7 @@ var _ = ginkgo.Describe("External Gateway", feature.ExternalGateway, func() { gomega.Expect(pokeConntrackEntries(nodeName, addresses.srcPodIP, protocol, nil)).To(gomega.Equal(totalPodConnEntries)) ginkgo.By("Remove first external gateway pod's routing-namespace annotation") - p = getGatewayPod(f, servingNamespace, gatewayPodName1) + p := getGatewayPod(f, servingNamespace, gatewayPodName1) p.Labels = map[string]string{"name": gatewayPodName1} updatePod(f, p) @@ -2060,11 +2081,19 @@ var _ = ginkgo.Describe("External Gateway", feature.ExternalGateway, func() { gomega.Expect(pokeConntrackEntries(nodeName, addresses.srcPodIP, protocol, nil)).To(gomega.Equal(totalPodConnEntries)) checkAPBExternalRouteStatus(defaultPolicyName) }, - ginkgo.Entry("IPV4 udp", &addressesv4, "udp"), - ginkgo.Entry("IPV4 tcp", &addressesv4, "tcp"), - ginkgo.Entry("IPV6 udp", &addressesv6, "udp"), - ginkgo.Entry("IPV6 tcp", &addressesv6, "tcp")) - + ginkgo.Entry("IPV4 udp + pod annotation update", &addressesv4, "udp", GatewayUpdate), + ginkgo.Entry("IPV4 tcp + pod annotation update", &addressesv4, "tcp", GatewayUpdate), + ginkgo.Entry("IPV6 udp + pod annotation update", &addressesv6, "udp", GatewayUpdate), + ginkgo.Entry("IPV6 tcp + pod annotation update", &addressesv6, "tcp", GatewayUpdate), + ginkgo.Entry("IPV4 udp + pod deletion timestamp", &addressesv4, "udp", GatewayDeletionTimestamp), + ginkgo.Entry("IPV4 tcp + pod deletion timestamp", &addressesv4, "tcp", GatewayDeletionTimestamp), + ginkgo.Entry("IPV6 udp + pod deletion timestamp", &addressesv6, "udp", GatewayDeletionTimestamp), + ginkgo.Entry("IPV6 tcp + pod deletion 
timestamp", &addressesv6, "tcp", GatewayDeletionTimestamp), + ginkgo.Entry("IPV4 udp + pod not ready", &addressesv4, "udp", GatewayNotReady), + ginkgo.Entry("IPV4 tcp + pod not ready", &addressesv4, "tcp", GatewayNotReady), + ginkgo.Entry("IPV6 udp + pod not ready", &addressesv6, "udp", GatewayNotReady), + ginkgo.Entry("IPV6 tcp + pod not ready", &addressesv6, "tcp", GatewayNotReady), + ) }) // BFD Tests are dual of external gateway. The only difference is that they enable BFD on ovn and @@ -3595,3 +3624,133 @@ func resetGatewayAnnotations(f *framework.Framework) { annotation}...) } } + +func setupPodWithReadinessProbe(f *framework.Framework, podName, nodeSelector, namespace string, command []string, labels map[string]string) (*corev1.Pod, error) { + // Handle bash -c commands specially to preserve argument structure + if len(command) >= 3 && command[0] == "bash" && command[1] == "-c" { + // Extract the script part and wrap it to preserve logic + script := strings.Join(command[2:], " ") + command = []string{"bash", "-c", "touch /tmp/ready && (" + script + ")"} + } else { + // For non-bash commands, preserve their structure + var quotedArgs []string + for _, arg := range command { + // Escape single quotes and wrap in single quotes + escaped := strings.ReplaceAll(arg, "'", "'\"'\"'") + quotedArgs = append(quotedArgs, "'"+escaped+"'") + } + command = []string{"bash", "-c", "touch /tmp/ready && " + strings.Join(quotedArgs, " ")} + } + return createPod(f, podName, nodeSelector, namespace, command, labels, func(p *corev1.Pod) { + p.Spec.Containers[0].ReadinessProbe = &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: []string{"cat", "/tmp/ready"}, + }, + }, + InitialDelaySeconds: 5, + PeriodSeconds: 5, + FailureThreshold: 1, + } + }) +} + +func recreatePodWithReadinessProbe(f *framework.Framework, podName, nodeSelector, namespace string, command []string, labels map[string]string) { + ginkgo.By(fmt.Sprintf("Delete second external 
gateway pod %s from ns %s", podName, namespace)) + err := deletePodWithWaitByName(context.TODO(), f.ClientSet, podName, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Delete second external gateway pod %s from ns %s, failed: %v", podName, namespace, err)) + + ginkgo.By(fmt.Sprintf("Create second external gateway pod %s from ns %s with readiness probe", podName, namespace)) + _, err = setupPodWithReadinessProbe(f, podName, nodeSelector, namespace, command, labels) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Create second external gateway pod %s from ns %s with readiness probe, failed: %v", podName, namespace, err)) + gomega.Eventually(func() bool { + var p *corev1.Pod + p, err = f.ClientSet.CoreV1().Pods(namespace).Get(context.Background(), podName, metav1.GetOptions{}) + if err != nil { + return false + } + for _, condition := range p.Status.Conditions { + if condition.Type == corev1.PodReady { + return condition.Status == corev1.ConditionTrue + } + } + return false + }).Should(gomega.Equal(true), fmt.Sprintf("Readiness probe for second external gateway pod %s from ns %s, failed: %v", podName, namespace, err)) +} + +func handleGatewayPodRemoval(f *framework.Framework, removalType GatewayRemovalType, gatewayPodName, servingNamespace, gatewayIP string, isAnnotated bool) func() { + var err error + switch removalType { + case GatewayDelete: + ginkgo.By(fmt.Sprintf("Delete second external gateway pod %s from ns %s", gatewayPodName, servingNamespace)) + err := deletePodWithWaitByName(context.TODO(), f.ClientSet, gatewayPodName, servingNamespace) + framework.ExpectNoError(err, "Delete the gateway pod failed: %v", err) + return nil + case GatewayUpdate: + if isAnnotated { + ginkgo.By("Remove second external gateway pod's routing-namespace annotation") + annotatePodForGateway(gatewayPodName, servingNamespace, "", gatewayIP, false) + return nil + } + + ginkgo.By("Updating external gateway pod labels") + p := getGatewayPod(f, 
servingNamespace, gatewayPodName) + p.Labels = map[string]string{"name": gatewayPodName} + updatePod(f, p) + return nil + case GatewayDeletionTimestamp: + ginkgo.By("Setting finalizer then deleting external gateway pod with grace period to set deletion timestamp") + p := getGatewayPod(f, servingNamespace, gatewayPodName) + p.Finalizers = append(p.Finalizers, "k8s.ovn.org/external-gw-pod-finalizer") + updatePod(f, p) + gomega.Eventually(func() bool { + p, err = f.ClientSet.CoreV1().Pods(servingNamespace).Get(context.Background(), gatewayPodName, metav1.GetOptions{}) + if err != nil { + return false + } + return strings.Contains(strings.Join(p.GetFinalizers(), ","), "k8s.ovn.org/external-gw-pod-finalizer") + }).Should(gomega.Equal(true), fmt.Sprintf("Update second external gateway pod %s from ns %s with finalizer, failed: %v", gatewayPodName, servingNamespace, err)) + + p = getGatewayPod(f, servingNamespace, gatewayPodName) + err = e2epod.DeletePodWithGracePeriod(context.Background(), f.ClientSet, p, 1000) + framework.ExpectNoError(err, fmt.Sprintf("unable to delete pod with grace period: %s, err: %v", p.Name, err)) + + gomega.Eventually(func() bool { + p, err = f.ClientSet.CoreV1().Pods(servingNamespace).Get(context.Background(), gatewayPodName, metav1.GetOptions{}) + if err != nil { + return false + } + return p.DeletionTimestamp != nil + }).Should(gomega.BeTrue(), fmt.Sprintf("Gateway pod %s in ns %s should have deletion timestamp, failed: %v", gatewayPodName, servingNamespace, err)) + + // return a function to remove the finalizer + return func() { + p = getGatewayPod(f, servingNamespace, gatewayPodName) + p.Finalizers = []string{} + updatePod(f, p) + } + case GatewayNotReady: + ginkgo.By("Remove /tmp/ready in external gateway pod so that readiness probe fails") + _, err = e2ekubectl.RunKubectl(servingNamespace, "exec", gatewayPodName, "--", "rm", "/tmp/ready") + framework.ExpectNoError(err, fmt.Sprintf("unable to remove /tmp/ready in pod: %s, err: %v", 
gatewayPodName, err)) + gomega.Eventually(func() bool { + var p *corev1.Pod + p, err = f.ClientSet.CoreV1().Pods(servingNamespace).Get(context.Background(), gatewayPodName, metav1.GetOptions{}) + if err != nil { + return false + } + podReadyStatus := corev1.ConditionTrue + for _, condition := range p.Status.Conditions { + if condition.Type == corev1.PodReady { + podReadyStatus = condition.Status + break + } + } + return podReadyStatus == corev1.ConditionFalse + }).WithTimeout(5*time.Minute).Should(gomega.Equal(true), fmt.Sprintf("Mark second external gateway pod %s from ns %s not ready, failed: %v", gatewayPodName, servingNamespace, err)) + return nil + default: + framework.Failf("unexpected GatewayRemovalType passed: %s", removalType) + return nil + } +} From fa12bb26eac95b0730e77ff311f39900fc1669f8 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Mon, 21 Jul 2025 12:09:46 -0400 Subject: [PATCH 144/181] Bump fedora from 41 -> 42 Brings in a new OVS that includes fix: https://github.com/openvswitch/ovs/commit/a119828ea608c38611f6ee60e55a7376ca471d6f Signed-off-by: Tim Rozet --- dist/images/Dockerfile.fedora | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/dist/images/Dockerfile.fedora b/dist/images/Dockerfile.fedora index 49b8da6872..4ca51e888f 100644 --- a/dist/images/Dockerfile.fedora +++ b/dist/images/Dockerfile.fedora @@ -14,7 +14,7 @@ ARG OVN_FROM=koji ############################################# # Stage to get OVN and OVS RPMs from source # ############################################# -FROM quay.io/fedora/fedora:41 AS ovnbuilder +FROM quay.io/fedora/fedora:42 AS ovnbuilder USER root @@ -78,8 +78,8 @@ RUN git log -n 1 ######################################## # Stage to download OVN RPMs from koji # ######################################## -FROM quay.io/fedora/fedora:41 AS kojidownloader -ARG ovnver=ovn-24.09.2-71.fc41 +FROM quay.io/fedora/fedora:42 AS kojidownloader +ARG ovnver=ovn-24.09.2-71.fc42 USER root @@ -99,14 +99,14 @@ RUN 
if [ "$TARGETPLATFORM" = "linux/amd64" ] || [ -z "$TARGETPLATFORM"] ; then k ###################################### # Stage to copy OVN RPMs from source # ###################################### -FROM quay.io/fedora/fedora:41 AS source +FROM quay.io/fedora/fedora:42 AS source COPY --from=ovnbuilder /root/ovn/rpm/rpmbuild/RPMS/x86_64/*.rpm / COPY --from=ovnbuilder /root/ovs/rpm/rpmbuild/RPMS/x86_64/*.rpm / #################################### # Stage to copy OVN RPMs from koji # #################################### -FROM quay.io/fedora/fedora:41 AS koji +FROM quay.io/fedora/fedora:42 AS koji COPY --from=kojidownloader /*.rpm / From b4eabd9c09245297a7cb3e89083449bc3a14cdbc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Jul 2025 19:04:23 +0000 Subject: [PATCH 145/181] Bump the go_modules group across 2 directories with 1 update Bumps the go_modules group with 1 update in the /go-controller directory: [golang.org/x/oauth2](https://github.com/golang/oauth2). Bumps the go_modules group with 1 update in the /test/e2e directory: [golang.org/x/oauth2](https://github.com/golang/oauth2). Updates `golang.org/x/oauth2` from 0.23.0 to 0.27.0 - [Commits](https://github.com/golang/oauth2/compare/v0.23.0...v0.27.0) Updates `golang.org/x/oauth2` from 0.23.0 to 0.27.0 - [Commits](https://github.com/golang/oauth2/compare/v0.23.0...v0.27.0) --- updated-dependencies: - dependency-name: golang.org/x/oauth2 dependency-version: 0.27.0 dependency-type: indirect dependency-group: go_modules - dependency-name: golang.org/x/oauth2 dependency-version: 0.27.0 dependency-type: indirect dependency-group: go_modules ... 
Signed-off-by: dependabot[bot] --- go-controller/go.mod | 2 +- go-controller/go.sum | 4 ++-- .../vendor/golang.org/x/oauth2/README.md | 15 +++++---------- .../vendor/golang.org/x/oauth2/oauth2.go | 2 +- go-controller/vendor/golang.org/x/oauth2/pkce.go | 4 ++-- go-controller/vendor/modules.txt | 4 ++-- test/e2e/go.mod | 2 +- test/e2e/go.sum | 4 ++-- 8 files changed, 16 insertions(+), 21 deletions(-) diff --git a/go-controller/go.mod b/go-controller/go.mod index f40f5001e2..4b12ddd9b5 100644 --- a/go-controller/go.mod +++ b/go-controller/go.mod @@ -125,7 +125,7 @@ require ( github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect go.opencensus.io v0.24.0 // indirect golang.org/x/crypto v0.36.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect golang.org/x/term v0.30.0 // indirect golang.org/x/text v0.23.0 // indirect golang.org/x/tools v0.26.0 // indirect diff --git a/go-controller/go.sum b/go-controller/go.sum index 50d5e1270d..436b9bad43 100644 --- a/go-controller/go.sum +++ b/go-controller/go.sum @@ -945,8 +945,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/go-controller/vendor/golang.org/x/oauth2/README.md b/go-controller/vendor/golang.org/x/oauth2/README.md index 781770c204..48dbb9d84c 100644 --- a/go-controller/vendor/golang.org/x/oauth2/README.md +++ b/go-controller/vendor/golang.org/x/oauth2/README.md @@ -5,15 +5,6 @@ oauth2 package contains a client implementation for OAuth 2.0 spec. -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -Or you can manually git clone the repository to -`$(go env GOPATH)/src/golang.org/x/oauth2`. - See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) @@ -33,7 +24,11 @@ The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. In particular: +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/oauth2. + +Note: * Excluding trivial changes, all contributions should be connected to an existing issue. * API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. diff --git a/go-controller/vendor/golang.org/x/oauth2/oauth2.go b/go-controller/vendor/golang.org/x/oauth2/oauth2.go index 09f6a49b80..74f052aa9f 100644 --- a/go-controller/vendor/golang.org/x/oauth2/oauth2.go +++ b/go-controller/vendor/golang.org/x/oauth2/oauth2.go @@ -56,7 +56,7 @@ type Config struct { // the OAuth flow, after the resource owner's URLs. RedirectURL string - // Scope specifies optional requested permissions. + // Scopes specifies optional requested permissions. 
Scopes []string // authStyleCache caches which auth style to use when Endpoint.AuthStyle is diff --git a/go-controller/vendor/golang.org/x/oauth2/pkce.go b/go-controller/vendor/golang.org/x/oauth2/pkce.go index 50593b6dfe..6a95da975c 100644 --- a/go-controller/vendor/golang.org/x/oauth2/pkce.go +++ b/go-controller/vendor/golang.org/x/oauth2/pkce.go @@ -21,7 +21,7 @@ const ( // // A fresh verifier should be generated for each authorization. // S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL -// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange +// (or Config.DeviceAuth) and VerifierOption(verifier) to Config.Exchange // (or Config.DeviceAccessToken). func GenerateVerifier() string { // "RECOMMENDED that the output of a suitable random number generator be @@ -51,7 +51,7 @@ func S256ChallengeFromVerifier(verifier string) string { } // S256ChallengeOption derives a PKCE code challenge derived from verifier with -// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess +// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAuth // only. 
func S256ChallengeOption(verifier string) AuthCodeOption { return challengeOption{ diff --git a/go-controller/vendor/modules.txt b/go-controller/vendor/modules.txt index 5732a53975..14bf2d0651 100644 --- a/go-controller/vendor/modules.txt +++ b/go-controller/vendor/modules.txt @@ -504,8 +504,8 @@ golang.org/x/net/ipv6 golang.org/x/net/proxy golang.org/x/net/trace golang.org/x/net/websocket -# golang.org/x/oauth2 v0.23.0 -## explicit; go 1.18 +# golang.org/x/oauth2 v0.27.0 +## explicit; go 1.23.0 golang.org/x/oauth2 golang.org/x/oauth2/internal # golang.org/x/sync v0.12.0 diff --git a/test/e2e/go.mod b/test/e2e/go.mod index 95ac4ff6ae..d87e790619 100644 --- a/test/e2e/go.mod +++ b/test/e2e/go.mod @@ -148,7 +148,7 @@ require ( golang.org/x/crypto v0.36.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/net v0.38.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect golang.org/x/sys v0.31.0 // indirect golang.org/x/term v0.30.0 // indirect golang.org/x/text v0.23.0 // indirect diff --git a/test/e2e/go.sum b/test/e2e/go.sum index 6838af0973..d8a6c5c80c 100644 --- a/test/e2e/go.sum +++ b/test/e2e/go.sum @@ -676,8 +676,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync 
v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= From dc437b6327b770d887e339c1d500e95e490d98d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Caama=C3=B1o=20Ruiz?= Date: Thu, 3 Jul 2025 10:09:55 +0000 Subject: [PATCH 146/181] RouteAdvertisements: appropriately update status even if no updates MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The status of a RouteAdvertisements might change due to external conditions to the controller. In such cases where the status was bad and the underlying reason for it gets corrected without needing the controller to make further changes, the status would not be updated to reflect it. Signed-off-by: Jaime Caamaño Ruiz --- .../routeadvertisements/controller.go | 14 ++++- .../routeadvertisements/controller_test.go | 63 +++++++++++++++---- 2 files changed, 63 insertions(+), 14 deletions(-) diff --git a/go-controller/pkg/clustermanager/routeadvertisements/controller.go b/go-controller/pkg/clustermanager/routeadvertisements/controller.go index 11f7eb79ab..18fb3dbaae 100644 --- a/go-controller/pkg/clustermanager/routeadvertisements/controller.go +++ b/go-controller/pkg/clustermanager/routeadvertisements/controller.go @@ -951,10 +951,18 @@ func (c *Controller) updateRAStatus(ra *ratypes.RouteAdvertisements, hadUpdates return nil } + var updateStatus bool condition := meta.FindStatusCondition(ra.Status.Conditions, "Accepted") - updateStatus := hadUpdates || condition == nil || condition.ObservedGeneration != ra.Generation - updateStatus = updateStatus || err != nil - + switch { + case condition == nil: + fallthrough + case condition.ObservedGeneration != ra.Generation: + fallthrough + case (err == nil) != (condition.Status == 
metav1.ConditionTrue): + fallthrough + case hadUpdates: + updateStatus = true + } if !updateStatus { return nil } diff --git a/go-controller/pkg/clustermanager/routeadvertisements/controller_test.go b/go-controller/pkg/clustermanager/routeadvertisements/controller_test.go index 03e9391888..305418425c 100644 --- a/go-controller/pkg/clustermanager/routeadvertisements/controller_test.go +++ b/go-controller/pkg/clustermanager/routeadvertisements/controller_test.go @@ -47,6 +47,7 @@ type testRA struct { SelectsDefault bool AdvertisePods bool AdvertiseEgressIPs bool + Status *metav1.ConditionStatus } func (tra testRA) RouteAdvertisements() *ratypes.RouteAdvertisements { @@ -92,6 +93,9 @@ func (tra testRA) RouteAdvertisements() *ratypes.RouteAdvertisements { MatchLabels: tra.FRRConfigurationSelector, } } + if tra.Status != nil { + ra.Status.Conditions = []metav1.Condition{{Type: "Accepted", Status: *tra.Status}} + } return ra } @@ -776,6 +780,38 @@ func TestController_reconcile(t *testing.T) { }, expectNADAnnotations: map[string]map[string]string{"default": {types.OvnRouteAdvertisementsKey: "[\"ra\"]"}, "red": {types.OvnRouteAdvertisementsKey: "[\"ra\"]"}}, }, + { + name: "reconciles RouteAdvertisements status even when no other updates are required", + ra: &testRA{Name: "ra", AdvertisePods: true, AdvertiseEgressIPs: true, SelectsDefault: true, Status: ptr.To(metav1.ConditionFalse)}, + frrConfigs: []*testFRRConfig{ + { + Name: "frrConfig", + Namespace: frrNamespace, + Routers: []*testRouter{ + {ASN: 1, Prefixes: []string{"1.1.1.0/24"}, Neighbors: []*testNeighbor{ + {ASN: 1, Address: "1.0.0.100"}, + }}, + }, + }, + { + Labels: map[string]string{types.OvnRouteAdvertisementsKey: "ra"}, + Annotations: map[string]string{types.OvnRouteAdvertisementsKey: "ra/frrConfig/node"}, + NodeSelector: map[string]string{"kubernetes.io/hostname": "node"}, + Routers: []*testRouter{ + {ASN: 1, Prefixes: []string{"1.0.1.1/32", "1.1.0.0/24"}, Neighbors: []*testNeighbor{ + {ASN: 1, Address: 
"1.0.0.100", Advertise: []string{"1.0.1.1/32", "1.1.0.0/24"}, Receive: []string{"1.1.0.0/16/24"}}, + }}, + }, + }, + }, + nads: []*testNAD{ + {Name: "default", Namespace: "ovn-kubernetes", Network: "default", Annotations: map[string]string{types.OvnRouteAdvertisementsKey: "[\"ra\"]"}}, + }, + nodes: []*testNode{{Name: "node", SubnetsAnnotation: "{\"default\":\"1.1.0.0/24\"}"}}, + eips: []*testEIP{{Name: "eip", EIPs: map[string]string{"node": "1.0.1.1"}}}, + reconcile: "ra", + expectAcceptedStatus: metav1.ConditionTrue, + }, { name: "fails to reconcile a secondary network", ra: &testRA{Name: "ra", AdvertisePods: true, NetworkSelector: map[string]string{"selected": "true"}}, @@ -1005,11 +1041,6 @@ func TestController_reconcile(t *testing.T) { c := NewController(nm.Interface(), wf, fakeClientset) - // prime the default network NAD - if defaultNAD == nil { - defaultNAD, err = c.getOrCreateDefaultNetworkNAD() - g.Expect(err).ToNot(gomega.HaveOccurred()) - } // prime the default network NAD namespace namespace := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -1018,11 +1049,15 @@ func TestController_reconcile(t *testing.T) { } _, err = fakeClientset.KubeClient.CoreV1().Namespaces().Create(context.Background(), namespace, metav1.CreateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - - // update it with the annotation that network manager would set - defaultNAD.Annotations = map[string]string{types.OvnNetworkNameAnnotation: types.DefaultNetworkName} - _, err = fakeClientset.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(defaultNAD.Namespace).Update(context.Background(), defaultNAD, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + // prime the default network NAD + if defaultNAD == nil { + defaultNAD, err = c.getOrCreateDefaultNetworkNAD() + g.Expect(err).ToNot(gomega.HaveOccurred()) + // update it with the annotation that network manager would set + defaultNAD.Annotations = 
map[string]string{types.OvnNetworkNameAnnotation: types.DefaultNetworkName} + _, err = fakeClientset.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(defaultNAD.Namespace).Update(context.Background(), defaultNAD, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + } err = wf.Start() g.Expect(err).ToNot(gomega.HaveOccurred()) @@ -1039,7 +1074,13 @@ func TestController_reconcile(t *testing.T) { ) err = nm.Start() - g.Expect(err).ToNot(gomega.HaveOccurred()) + // some test cases start with a bad RA status, avoid asserting + // initial sync in this case as it will fail + if tt.ra == nil || tt.ra.Status == nil || *tt.ra.Status == metav1.ConditionTrue { + g.Expect(err).ToNot(gomega.HaveOccurred()) + } else { + g.Expect(err).To(gomega.HaveOccurred()) + } // we just need the inital sync nm.Stop() From 90e56b92e08238d979a9bc45b9bbd6cfede55dab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Caama=C3=B1o=20Ruiz?= Date: Thu, 19 Jun 2025 13:57:09 +0000 Subject: [PATCH 147/181] e2e: rename testdata package to testscenario MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jaime Caamaño Ruiz --- .../network_segmentation_api_validations.go | 24 +++++++++---------- .../cudn/invalid-scenarios-localnet-mtu.go | 4 ++-- .../invalid-scenarios-localnet-phynetname.go | 4 ++-- .../cudn/invalid-scenarios-localnet-role.go | 4 ++-- .../invalid-scenarios-localnet-subnets.go | 4 ++-- .../cudn/invalid-scenarios-localnet-vlan.go | 4 ++-- .../invalid-scenarios-mismatch-topo-conf.go | 4 ++-- .../cudn/valid-scenarios-localnet.go | 4 ++-- .../{testdata => testscenario}/scenario.go | 2 +- 9 files changed, 27 insertions(+), 27 deletions(-) rename test/e2e/{testdata => testscenario}/cudn/invalid-scenarios-localnet-mtu.go (94%) rename test/e2e/{testdata => testscenario}/cudn/invalid-scenarios-localnet-phynetname.go (97%) rename test/e2e/{testdata => testscenario}/cudn/invalid-scenarios-localnet-role.go (87%) 
rename test/e2e/{testdata => testscenario}/cudn/invalid-scenarios-localnet-subnets.go (98%) rename test/e2e/{testdata => testscenario}/cudn/invalid-scenarios-localnet-vlan.go (95%) rename test/e2e/{testdata => testscenario}/cudn/invalid-scenarios-mismatch-topo-conf.go (95%) rename test/e2e/{testdata => testscenario}/cudn/valid-scenarios-localnet.go (93%) rename test/e2e/{testdata => testscenario}/scenario.go (90%) diff --git a/test/e2e/network_segmentation_api_validations.go b/test/e2e/network_segmentation_api_validations.go index 0608485b3d..b3b29191fb 100644 --- a/test/e2e/network_segmentation_api_validations.go +++ b/test/e2e/network_segmentation_api_validations.go @@ -6,13 +6,13 @@ import ( e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" - "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" - testdatacudn "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata/cudn" + "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" + testscenariocudn "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario/cudn" ) var _ = Describe("Network Segmentation: API validations", func() { DescribeTable("api-server should reject invalid CRs", - func(scenarios []testdata.ValidateCRScenario) { + func(scenarios []testscenario.ValidateCRScenario) { DeferCleanup(func() { cleanupValidateCRsTest(scenarios) }) @@ -23,16 +23,16 @@ var _ = Describe("Network Segmentation: API validations", func() { Expect(stderr).To(ContainSubstring(s.ExpectedErr)) } }, - Entry("ClusterUserDefinedNetwork, mismatch topology and config", testdatacudn.MismatchTopologyConfig), - Entry("ClusterUserDefinedNetwork, localnet, invalid role", testdatacudn.LocalnetInvalidRole), - Entry("ClusterUserDefinedNetwork, localnet, invalid physicalNetworkName", testdatacudn.LocalnetInvalidPhyNetName), - Entry("ClusterUserDefinedNetwork, localnet, invalid subnets", testdatacudn.LocalnetInvalidSubnets), - Entry("ClusterUserDefinedNetwork, localnet, invalid mtu", testdatacudn.LocalnetInvalidMTU), - 
Entry("ClusterUserDefinedNetwork, localnet, invalid vlan", testdatacudn.LocalnetInvalidVLAN), + Entry("ClusterUserDefinedNetwork, mismatch topology and config", testscenariocudn.MismatchTopologyConfig), + Entry("ClusterUserDefinedNetwork, localnet, invalid role", testscenariocudn.LocalnetInvalidRole), + Entry("ClusterUserDefinedNetwork, localnet, invalid physicalNetworkName", testscenariocudn.LocalnetInvalidPhyNetName), + Entry("ClusterUserDefinedNetwork, localnet, invalid subnets", testscenariocudn.LocalnetInvalidSubnets), + Entry("ClusterUserDefinedNetwork, localnet, invalid mtu", testscenariocudn.LocalnetInvalidMTU), + Entry("ClusterUserDefinedNetwork, localnet, invalid vlan", testscenariocudn.LocalnetInvalidVLAN), ) DescribeTable("api-server should accept valid CRs", - func(scenarios []testdata.ValidateCRScenario) { + func(scenarios []testscenario.ValidateCRScenario) { DeferCleanup(func() { cleanupValidateCRsTest(scenarios) }) @@ -42,7 +42,7 @@ var _ = Describe("Network Segmentation: API validations", func() { Expect(err).NotTo(HaveOccurred(), "should create valid CR successfully") } }, - Entry("ClusterUserDefinedNetwork, localnet", testdatacudn.LocalnetValid), + Entry("ClusterUserDefinedNetwork, localnet", testscenariocudn.LocalnetValid), ) }) @@ -52,7 +52,7 @@ func runKubectlInputWithFullOutput(namespace string, data string, args ...string return e2ekubectl.NewKubectlCommand(namespace, args...).WithStdinData(data).ExecWithFullOutput() } -func cleanupValidateCRsTest(scenarios []testdata.ValidateCRScenario) { +func cleanupValidateCRsTest(scenarios []testscenario.ValidateCRScenario) { for _, s := range scenarios { e2ekubectl.RunKubectlInput("", s.Manifest, "delete", "-f", "-") } diff --git a/test/e2e/testdata/cudn/invalid-scenarios-localnet-mtu.go b/test/e2e/testscenario/cudn/invalid-scenarios-localnet-mtu.go similarity index 94% rename from test/e2e/testdata/cudn/invalid-scenarios-localnet-mtu.go rename to 
test/e2e/testscenario/cudn/invalid-scenarios-localnet-mtu.go index e1ce9e8c70..d7e3590ffd 100644 --- a/test/e2e/testdata/cudn/invalid-scenarios-localnet-mtu.go +++ b/test/e2e/testscenario/cudn/invalid-scenarios-localnet-mtu.go @@ -1,8 +1,8 @@ package cudn -import "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" +import "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" -var LocalnetInvalidMTU = []testdata.ValidateCRScenario{ +var LocalnetInvalidMTU = []testscenario.ValidateCRScenario{ { Description: "invalid MTU - higher than 65536", ExpectedErr: `spec.network.localnet.mtu in body should be less than or equal to 65536`, diff --git a/test/e2e/testdata/cudn/invalid-scenarios-localnet-phynetname.go b/test/e2e/testscenario/cudn/invalid-scenarios-localnet-phynetname.go similarity index 97% rename from test/e2e/testdata/cudn/invalid-scenarios-localnet-phynetname.go rename to test/e2e/testscenario/cudn/invalid-scenarios-localnet-phynetname.go index 83c6664804..171678c9ca 100644 --- a/test/e2e/testdata/cudn/invalid-scenarios-localnet-phynetname.go +++ b/test/e2e/testscenario/cudn/invalid-scenarios-localnet-phynetname.go @@ -1,8 +1,8 @@ package cudn -import "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" +import "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" -var LocalnetInvalidPhyNetName = []testdata.ValidateCRScenario{ +var LocalnetInvalidPhyNetName = []testscenario.ValidateCRScenario{ { Description: "unset PhysicalNetworkName", ExpectedErr: `spec.network.localnet.physicalNetworkName: Required value`, diff --git a/test/e2e/testdata/cudn/invalid-scenarios-localnet-role.go b/test/e2e/testscenario/cudn/invalid-scenarios-localnet-role.go similarity index 87% rename from test/e2e/testdata/cudn/invalid-scenarios-localnet-role.go rename to test/e2e/testscenario/cudn/invalid-scenarios-localnet-role.go index fad452da04..443f78970a 100644 --- a/test/e2e/testdata/cudn/invalid-scenarios-localnet-role.go +++ 
b/test/e2e/testscenario/cudn/invalid-scenarios-localnet-role.go @@ -1,8 +1,8 @@ package cudn -import "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" +import "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" -var LocalnetInvalidRole = []testdata.ValidateCRScenario{ +var LocalnetInvalidRole = []testscenario.ValidateCRScenario{ { Description: "role unset", ExpectedErr: `spec.network.localnet.role: Required value`, diff --git a/test/e2e/testdata/cudn/invalid-scenarios-localnet-subnets.go b/test/e2e/testscenario/cudn/invalid-scenarios-localnet-subnets.go similarity index 98% rename from test/e2e/testdata/cudn/invalid-scenarios-localnet-subnets.go rename to test/e2e/testscenario/cudn/invalid-scenarios-localnet-subnets.go index d62a216d48..bd854acdb2 100644 --- a/test/e2e/testdata/cudn/invalid-scenarios-localnet-subnets.go +++ b/test/e2e/testscenario/cudn/invalid-scenarios-localnet-subnets.go @@ -1,8 +1,8 @@ package cudn -import "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" +import "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" -var LocalnetInvalidSubnets = []testdata.ValidateCRScenario{ +var LocalnetInvalidSubnets = []testscenario.ValidateCRScenario{ { Description: "unset subnets, and ipam.mode is unset", ExpectedErr: `Subnets is required with ipam.mode is Enabled or unset, and forbidden otherwise`, diff --git a/test/e2e/testdata/cudn/invalid-scenarios-localnet-vlan.go b/test/e2e/testscenario/cudn/invalid-scenarios-localnet-vlan.go similarity index 95% rename from test/e2e/testdata/cudn/invalid-scenarios-localnet-vlan.go rename to test/e2e/testscenario/cudn/invalid-scenarios-localnet-vlan.go index daa393acdb..8ab71ca8dc 100644 --- a/test/e2e/testdata/cudn/invalid-scenarios-localnet-vlan.go +++ b/test/e2e/testscenario/cudn/invalid-scenarios-localnet-vlan.go @@ -1,8 +1,8 @@ package cudn -import "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" +import "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" -var LocalnetInvalidVLAN = 
[]testdata.ValidateCRScenario{ +var LocalnetInvalidVLAN = []testscenario.ValidateCRScenario{ { Description: "invalid VLAN - invalid mode", ExpectedErr: `spec.network.localnet.vlan.mode: Unsupported value: "Disabled": supported values: "Access`, diff --git a/test/e2e/testdata/cudn/invalid-scenarios-mismatch-topo-conf.go b/test/e2e/testscenario/cudn/invalid-scenarios-mismatch-topo-conf.go similarity index 95% rename from test/e2e/testdata/cudn/invalid-scenarios-mismatch-topo-conf.go rename to test/e2e/testscenario/cudn/invalid-scenarios-mismatch-topo-conf.go index 80551a94cd..ddad69d54e 100644 --- a/test/e2e/testdata/cudn/invalid-scenarios-mismatch-topo-conf.go +++ b/test/e2e/testscenario/cudn/invalid-scenarios-mismatch-topo-conf.go @@ -1,8 +1,8 @@ package cudn -import "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" +import "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" -var MismatchTopologyConfig = []testdata.ValidateCRScenario{ +var MismatchTopologyConfig = []testscenario.ValidateCRScenario{ { Description: "topology is localnet but topology config is layer2", ExpectedErr: `spec.localnet is required when topology is Localnet and forbidden otherwise`, diff --git a/test/e2e/testdata/cudn/valid-scenarios-localnet.go b/test/e2e/testscenario/cudn/valid-scenarios-localnet.go similarity index 93% rename from test/e2e/testdata/cudn/valid-scenarios-localnet.go rename to test/e2e/testscenario/cudn/valid-scenarios-localnet.go index a5b188bbfd..d2c7b24d78 100644 --- a/test/e2e/testdata/cudn/valid-scenarios-localnet.go +++ b/test/e2e/testscenario/cudn/valid-scenarios-localnet.go @@ -1,8 +1,8 @@ package cudn -import "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" +import "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" -var LocalnetValid = []testdata.ValidateCRScenario{ +var LocalnetValid = []testscenario.ValidateCRScenario{ { Description: "should create localnet topology successfully - minimal", Manifest: ` diff --git 
a/test/e2e/testdata/scenario.go b/test/e2e/testscenario/scenario.go similarity index 90% rename from test/e2e/testdata/scenario.go rename to test/e2e/testscenario/scenario.go index db96d3b50b..4ee247fd98 100644 --- a/test/e2e/testdata/scenario.go +++ b/test/e2e/testscenario/scenario.go @@ -1,4 +1,4 @@ -package testdata +package testscenario // ValidateCRScenario represent test scenario where a manifest is applied and failed with the expected error type ValidateCRScenario struct { From 3dea4f522d30e5849d060f9d5e0cdc2ff90e34c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Caama=C3=B1o=20Ruiz?= Date: Thu, 19 Jun 2025 17:27:23 +0000 Subject: [PATCH 148/181] e2e: add RuntimeArgs to container infra provider API MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jaime Caamaño Ruiz --- test/e2e/e2e.go | 10 +++++----- test/e2e/egress_firewall.go | 4 ++-- test/e2e/egress_services.go | 4 ++-- test/e2e/egressip.go | 10 +++++----- test/e2e/external_gateways.go | 14 +++++++------- test/e2e/infraprovider/api/api.go | 19 ++++++++++--------- test/e2e/infraprovider/providers/kind/kind.go | 5 +++-- test/e2e/kubevirt.go | 2 +- test/e2e/multihoming.go | 4 ++-- test/e2e/network_segmentation.go | 2 +- test/e2e/node_ip_mac_migration.go | 2 +- test/e2e/pod.go | 2 +- test/e2e/service.go | 6 +++--- 13 files changed, 43 insertions(+), 41 deletions(-) diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index be1b46bf75..e5bbde7d42 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -712,7 +712,7 @@ var _ = ginkgo.Describe("e2e control plane", func() { } secondaryExternalContainerPort := infraprovider.Get().GetExternalContainerPort() secondaryExternalContainerSpec := infraapi.ExternalContainer{Name: "e2e-ovn-k", Image: images.AgnHost(), - Network: secondaryProviderNetwork, Args: getAgnHostHTTPPortBindCMDArgs(secondaryExternalContainerPort), ExtPort: secondaryExternalContainerPort} + Network: secondaryProviderNetwork, CmdArgs: 
getAgnHostHTTPPortBindCMDArgs(secondaryExternalContainerPort), ExtPort: secondaryExternalContainerPort} ginkgo.By("creating container on secondary provider network") secondaryExternalContainer, err = providerCtx.CreateExternalContainer(secondaryExternalContainerSpec) framework.ExpectNoError(err, "failed to create external container") @@ -1275,7 +1275,7 @@ var _ = ginkgo.Describe("e2e ingress traffic validation", func() { framework.ExpectNoError(err, "failed to get primary network") externalContainerPort := infraprovider.Get().GetExternalContainerPort() externalContainer = infraapi.ExternalContainer{Name: "e2e-ingress", Image: images.AgnHost(), Network: primaryProviderNetwork, - Args: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} + CmdArgs: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "failed to create external service", externalContainer.String()) }) @@ -1672,7 +1672,7 @@ var _ = ginkgo.Describe("e2e ingress traffic validation", func() { framework.ExpectNoError(err, "failed to get primary network") externalContainerPort := infraprovider.Get().GetExternalContainerPort() externalContainer = infraapi.ExternalContainer{Name: "e2e-ingress-add-more", Image: images.AgnHost(), Network: primaryProviderNetwork, - Args: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} + CmdArgs: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "external container %s must be created successfully", externalContainer.Name) @@ -1834,7 +1834,7 @@ var _ = ginkgo.Describe("e2e ingress to host-networked pods traffic validation", framework.ExpectNoError(err, "failed to get primary network") externalContainerPort := 
infraprovider.Get().GetExternalContainerPort() externalContainer = infraapi.ExternalContainer{Name: clientContainerName, Image: images.AgnHost(), Network: primaryProviderNetwork, - Args: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} + CmdArgs: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "external container %s must be created successfully", externalContainer.Name) }) @@ -1943,7 +1943,7 @@ var _ = ginkgo.Describe("e2e br-int flow monitoring export validation", func() { primaryProviderNetwork, err := infraprovider.Get().PrimaryNetwork() framework.ExpectNoError(err, "failed to get primary network") collectorExternalContainer := infraapi.ExternalContainer{Name: getContainerName(collectorPort), Image: "cloudflare/goflow", - Network: primaryProviderNetwork, Args: []string{"-kafka=false"}, ExtPort: collectorPort} + Network: primaryProviderNetwork, CmdArgs: []string{"-kafka=false"}, ExtPort: collectorPort} collectorExternalContainer, err = providerCtx.CreateExternalContainer(collectorExternalContainer) if err != nil { framework.Failf("failed to start flow collector container %s: %v", getContainerName(collectorPort), err) diff --git a/test/e2e/egress_firewall.go b/test/e2e/egress_firewall.go index 32974beb1c..abbc26b524 100644 --- a/test/e2e/egress_firewall.go +++ b/test/e2e/egress_firewall.go @@ -197,7 +197,7 @@ var _ = ginkgo.Describe("e2e egress firewall policy validation", feature.EgressF Name: externalContainerName1, Image: images.AgnHost(), Network: primaryProviderNetwork, - Args: []string{"netexec", fmt.Sprintf("--http-port=%d", externalContainer1Port)}, + CmdArgs: []string{"netexec", fmt.Sprintf("--http-port=%d", externalContainer1Port)}, ExtPort: externalContainer1Port, } externalContainer1, err = providerCtx.CreateExternalContainer(externalContainer1Spec) @@ -210,7 +210,7 @@ 
var _ = ginkgo.Describe("e2e egress firewall policy validation", feature.EgressF Name: externalContainerName2, Image: images.AgnHost(), Network: primaryProviderNetwork, - Args: []string{"netexec", fmt.Sprintf("--http-port=%d", externalContainer2Port)}, + CmdArgs: []string{"netexec", fmt.Sprintf("--http-port=%d", externalContainer2Port)}, ExtPort: externalContainer2Port, } externalContainer2, err = providerCtx.CreateExternalContainer(externalContainer2Spec) diff --git a/test/e2e/egress_services.go b/test/e2e/egress_services.go index 2afcb2edc8..ee2fec30f4 100644 --- a/test/e2e/egress_services.go +++ b/test/e2e/egress_services.go @@ -85,7 +85,7 @@ var _ = ginkgo.Describe("EgressService", feature.EgressService, func() { framework.ExpectNoError(err, "failed to get primary provider network") externalContainer = infraapi.ExternalContainer{Name: externalContainerName, Image: images.AgnHost(), Network: primaryProviderNetwork, ExtPort: 8080, - Args: getAgnHostHTTPPortBindCMDArgs(8080)} + CmdArgs: getAgnHostHTTPPortBindCMDArgs(8080)} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "failed to create external container") }) @@ -1239,7 +1239,7 @@ metadata: ginkgo.By(fmt.Sprintf("Creating container %s", net.containerName)) // Setting the --hostname here is important since later we poke the container's /hostname endpoint extContainerSecondaryNet := infraapi.ExternalContainer{Name: net.containerName, Image: images.AgnHost(), Network: network, - Args: []string{"netexec", "--http-port=8080"}, ExtPort: 8080} + CmdArgs: []string{"netexec", "--http-port=8080"}, ExtPort: 8080} extContainerSecondaryNet, err = providerCtx.CreateExternalContainer(extContainerSecondaryNet) ginkgo.By(fmt.Sprintf("Adding a listener for the shared IPv4 %s on %s", sharedIPv4, net.containerName)) out, err := infraprovider.Get().ExecExternalContainerCommand(extContainerSecondaryNet, []string{"ip", "address", "add", sharedIPv4 + "/32", "dev", "lo"}) diff 
--git a/test/e2e/egressip.go b/test/e2e/egressip.go index d9d281aa7b..1bdc03adec 100644 --- a/test/e2e/egressip.go +++ b/test/e2e/egressip.go @@ -219,7 +219,7 @@ func isSupportedAgnhostForEIP(externalContainer infraapi.ExternalContainer) bool if externalContainer.Image != images.AgnHost() { return false } - if !util.SliceHasStringItem(externalContainer.Args, "netexec") { + if !util.SliceHasStringItem(externalContainer.CmdArgs, "netexec") { return false } return true @@ -754,13 +754,13 @@ var _ = ginkgo.DescribeTableSubtree("e2e egress IP validation", feature.EgressIP // attach containers to the primary network primaryTargetExternalContainerPort := infraprovider.Get().GetExternalContainerPort() primaryTargetExternalContainerSpec := infraapi.ExternalContainer{Name: targetNodeName, Image: images.AgnHost(), - Network: primaryProviderNetwork, Args: getAgnHostHTTPPortBindCMDArgs(primaryTargetExternalContainerPort), ExtPort: primaryTargetExternalContainerPort} + Network: primaryProviderNetwork, CmdArgs: getAgnHostHTTPPortBindCMDArgs(primaryTargetExternalContainerPort), ExtPort: primaryTargetExternalContainerPort} primaryTargetExternalContainer, err = providerCtx.CreateExternalContainer(primaryTargetExternalContainerSpec) framework.ExpectNoError(err, "failed to create external target container on primary network", primaryTargetExternalContainerSpec.String()) primaryDeniedExternalContainerPort := infraprovider.Get().GetExternalContainerPort() primaryDeniedExternalContainerSpec := infraapi.ExternalContainer{Name: deniedTargetNodeName, Image: images.AgnHost(), - Network: primaryProviderNetwork, Args: getAgnHostHTTPPortBindCMDArgs(primaryDeniedExternalContainerPort), ExtPort: primaryDeniedExternalContainerPort} + Network: primaryProviderNetwork, CmdArgs: getAgnHostHTTPPortBindCMDArgs(primaryDeniedExternalContainerPort), ExtPort: primaryDeniedExternalContainerPort} primaryDeniedExternalContainer, err = providerCtx.CreateExternalContainer(primaryDeniedExternalContainerSpec) 
framework.ExpectNoError(err, "failed to create external denied container on primary network", primaryDeniedExternalContainer.String()) @@ -791,7 +791,7 @@ var _ = ginkgo.DescribeTableSubtree("e2e egress IP validation", feature.EgressIP Name: targetSecondaryNodeName, Image: images.AgnHost(), Network: secondaryProviderNetwork, - Args: getAgnHostHTTPPortBindCMDArgs(secondaryTargetExternalContainerPort), + CmdArgs: getAgnHostHTTPPortBindCMDArgs(secondaryTargetExternalContainerPort), ExtPort: secondaryTargetExternalContainerPort, } secondaryTargetExternalContainer, err = providerCtx.CreateExternalContainer(secondaryTargetExternalContainerSpec) @@ -2125,7 +2125,7 @@ spec: providerPrimaryNetwork, err := infraprovider.Get().PrimaryNetwork() framework.ExpectNoError(err, "failed to get providers primary network") externalContainerPrimary := infraapi.ExternalContainer{Name: "external-container-for-egressip-mtu-test", Image: images.AgnHost(), - Network: providerPrimaryNetwork, Args: []string{"pause"}, ExtPort: externalContainerPrimaryPort} + Network: providerPrimaryNetwork, CmdArgs: []string{"pause"}, ExtPort: externalContainerPrimaryPort} externalContainerPrimary, err = providerCtx.CreateExternalContainer(externalContainerPrimary) framework.ExpectNoError(err, "failed to create external container: %s", externalContainerPrimary.String()) diff --git a/test/e2e/external_gateways.go b/test/e2e/external_gateways.go index bf3742ea68..c3b2f12198 100644 --- a/test/e2e/external_gateways.go +++ b/test/e2e/external_gateways.go @@ -144,7 +144,7 @@ var _ = ginkgo.Describe("External Gateway", feature.ExternalGateway, func() { } externalContainerPort := infraprovider.Get().GetExternalContainerPort() externalContainer := infraapi.ExternalContainer{Name: getContainerName(gwContainerNameTemplate, externalContainerPort), - Image: images.AgnHost(), Network: network, ExtPort: externalContainerPort, Args: []string{"pause"}} + Image: images.AgnHost(), Network: network, ExtPort: 
externalContainerPort, CmdArgs: []string{"pause"}} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "failed to start external gateway test container") if network.Name() == "host" { @@ -238,7 +238,7 @@ var _ = ginkgo.Describe("External Gateway", feature.ExternalGateway, func() { // start the container that will act as a new external gateway that the tests will be updated to use externalContainer2Port := infraprovider.Get().GetExternalContainerPort() externalContainer2 := infraapi.ExternalContainer{Name: getContainerName(gwContainerNameTemplate2, externalContainerPort), - Image: images.AgnHost(), Network: network, ExtPort: externalContainer2Port, Args: []string{"pause"}} + Image: images.AgnHost(), Network: network, ExtPort: externalContainer2Port, CmdArgs: []string{"pause"}} externalContainer2, err = providerCtx.CreateExternalContainer(externalContainer2) framework.ExpectNoError(err, "failed to start external gateway test container %s", getContainerName(gwContainerNameTemplate2, externalContainerPort)) if network.Name() == "host" { @@ -365,7 +365,7 @@ var _ = ginkgo.Describe("External Gateway", feature.ExternalGateway, func() { } externalContainerPort := infraprovider.Get().GetExternalContainerPort() externalContainer := infraapi.ExternalContainer{Name: getContainerName(gwContainerTemplate, externalContainerPort), Image: images.AgnHost(), Network: network, - Args: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} + CmdArgs: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "failed to start external gateway test container %s", getContainerName(gwContainerTemplate, externalContainerPort)) if network.Name() == "host" { @@ -2922,9 +2922,9 @@ func setupGatewayContainers(f *framework.Framework, providerCtx infraapi.Context var err error 
externalContainer1 := infraapi.ExternalContainer{Name: getContainerName(container1Template, uint16(gwTCPPort)), - Image: externalContainerImage, Network: network, Args: []string{}, ExtPort: uint16(gwTCPPort)} + Image: externalContainerImage, Network: network, CmdArgs: []string{}, ExtPort: uint16(gwTCPPort)} externalContainer2 := infraapi.ExternalContainer{Name: getContainerName(container2Template, uint16(gwTCPPort)), - Image: externalContainerImage, Network: network, Args: []string{}, ExtPort: uint16(gwTCPPort)} + Image: externalContainerImage, Network: network, CmdArgs: []string{}, ExtPort: uint16(gwTCPPort)} gwContainers := []infraapi.ExternalContainer{externalContainer1, externalContainer2} addressesv4 := gatewayTestIPs{targetIPs: make([]string, 0)} @@ -3175,12 +3175,12 @@ func setupGatewayContainersForConntrackTest(f *framework.Framework, providerCtx addressesv6 := gatewayTestIPs{gatewayIPs: make([]string, 2)} ginkgo.By("Creating the gateway containers for the UDP test") gwExternalContainer1 := infraapi.ExternalContainer{Name: getContainerName(gwContainer1Template, 12345), - Image: images.IPerf3(), Network: network, Args: []string{}, ExtPort: 12345} + Image: images.IPerf3(), Network: network, CmdArgs: []string{}, ExtPort: 12345} gwExternalContainer1, err = providerCtx.CreateExternalContainer(gwExternalContainer1) framework.ExpectNoError(err, "failed to create external container (%s)", gwExternalContainer1) gwExternalContainer2 := infraapi.ExternalContainer{Name: getContainerName(gwContainer2Template, 12345), - Image: images.IPerf3(), Network: network, Args: []string{}, ExtPort: 12345} + Image: images.IPerf3(), Network: network, CmdArgs: []string{}, ExtPort: 12345} gwExternalContainer2, err = providerCtx.CreateExternalContainer(gwExternalContainer2) framework.ExpectNoError(err, "failed to create external container (%s)", gwExternalContainer2) if network.Name() == "host" { diff --git a/test/e2e/infraprovider/api/api.go b/test/e2e/infraprovider/api/api.go index 
c654f798c3..81101c622e 100644 --- a/test/e2e/infraprovider/api/api.go +++ b/test/e2e/infraprovider/api/api.go @@ -182,14 +182,15 @@ func (n NetworkInterface) GetMAC() string { } type ExternalContainer struct { - Name string - Image string - Network Network - Entrypoint string - Args []string - ExtPort uint16 - IPv4 string - IPv6 string + Name string + Image string + Network Network + Entrypoint string + CmdArgs []string + ExtPort uint16 + IPv4 string + IPv6 string + RuntimeArgs []string } func (ec ExternalContainer) GetName() string { @@ -227,7 +228,7 @@ func (ec ExternalContainer) IsIPv6() bool { } func (ec ExternalContainer) String() string { - str := fmt.Sprintf("Name: %q, Image: %q, Network: %q, Command: %q", ec.Name, ec.Image, ec.Network, strings.Join(ec.Args, " ")) + str := fmt.Sprintf("Name: %q, Image: %q, Network: %q, RuntimeArgs: %q, Command: %q", ec.Name, ec.Image, ec.Network, strings.Join(ec.RuntimeArgs, " "), strings.Join(ec.CmdArgs, " ")) if ec.IsIPv4() { str = fmt.Sprintf("%s, IPv4 address: %q", str, ec.GetIPv4()) } diff --git a/test/e2e/infraprovider/providers/kind/kind.go b/test/e2e/infraprovider/providers/kind/kind.go index 4d0dc6a226..e6290e7fee 100644 --- a/test/e2e/infraprovider/providers/kind/kind.go +++ b/test/e2e/infraprovider/providers/kind/kind.go @@ -150,9 +150,10 @@ func (c *contextKind) createExternalContainer(container api.ExternalContainer) ( if container.Entrypoint != "" { cmd = append(cmd, "--entrypoint", container.Entrypoint) } + cmd = append(cmd, container.RuntimeArgs...) cmd = append(cmd, container.Image) - if len(container.Args) > 0 { - cmd = append(cmd, container.Args...) + if len(container.CmdArgs) > 0 { + cmd = append(cmd, container.CmdArgs...) 
} else { if images.AgnHost() == container.Image { cmd = append(cmd, "pause") diff --git a/test/e2e/kubevirt.go b/test/e2e/kubevirt.go index 839301ae11..67ab2e290a 100644 --- a/test/e2e/kubevirt.go +++ b/test/e2e/kubevirt.go @@ -1762,7 +1762,7 @@ write_files: Name: externalContainerName, Image: images.IPerf3(), Network: providerNetwork, - Args: []string{"sleep infinity"}, + CmdArgs: []string{"sleep infinity"}, ExtPort: externalContainerPort, } externalContainer, err = providerCtx.CreateExternalContainer(externalContainerSpec) diff --git a/test/e2e/multihoming.go b/test/e2e/multihoming.go index 3ad1dd46e7..e82255fc57 100644 --- a/test/e2e/multihoming.go +++ b/test/e2e/multihoming.go @@ -941,7 +941,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { Image: images.AgnHost(), Network: underlayNetwork, Entrypoint: "bash", - Args: []string{"-c", fmt.Sprintf("ip a add %s/24 dev eth0 && ./agnhost netexec --http-port=%d", underlayServiceIP, servicePort)}, + CmdArgs: []string{"-c", fmt.Sprintf("ip a add %s/24 dev eth0 && ./agnhost netexec --http-port=%d", underlayServiceIP, servicePort)}, ExtPort: servicePort, } _, err = providerCtx.CreateExternalContainer(serviceContainerSpec) @@ -1310,7 +1310,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { Network: underlayNetwork, Entrypoint: "bash", ExtPort: servicePort, - Args: []string{"-c", fmt.Sprintf(` + CmdArgs: []string{"-c", fmt.Sprintf(` ip link add link %[1]s name %[2]s type vlan id %[3]d ip link set dev %[2]s up ip a add %[4]s/24 dev %[2]s diff --git a/test/e2e/network_segmentation.go b/test/e2e/network_segmentation.go index 659b18acc7..2fe012343b 100644 --- a/test/e2e/network_segmentation.go +++ b/test/e2e/network_segmentation.go @@ -1478,7 +1478,7 @@ spec: Name: externalContainerName, Image: images.AgnHost(), Network: providerPrimaryNetwork, - Args: httpServerContainerCmd(uint16(externalContainerPort)), + CmdArgs: httpServerContainerCmd(uint16(externalContainerPort)), ExtPort: 
externalContainerPort, } externalContainer, err = providerCtx.CreateExternalContainer(externalContainerSpec) diff --git a/test/e2e/node_ip_mac_migration.go b/test/e2e/node_ip_mac_migration.go index d7b12f4b24..19626e50e6 100644 --- a/test/e2e/node_ip_mac_migration.go +++ b/test/e2e/node_ip_mac_migration.go @@ -132,7 +132,7 @@ spec: framework.ExpectNoError(err, "failed to get primary network") externalContainerPort := infraprovider.Get().GetExternalContainerPort() externalContainer = infraapi.ExternalContainer{Name: externalContainerName, Image: images.AgnHost(), Network: primaryProviderNetwork, - Args: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} + CmdArgs: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "failed to create external container") externalContainerIPs[4], externalContainerIPs[6] = externalContainer.GetIPv4(), externalContainer.GetIPv6() diff --git a/test/e2e/pod.go b/test/e2e/pod.go index e43ecee03a..c9a5e5efb7 100644 --- a/test/e2e/pod.go +++ b/test/e2e/pod.go @@ -105,7 +105,7 @@ var _ = ginkgo.Describe("Pod to external server PMTUD", func() { providerPrimaryNetwork, err := infraprovider.Get().PrimaryNetwork() framework.ExpectNoError(err, "failed to get provider primary network") externalContainer = infraapi.ExternalContainer{Name: externalContainerName, Image: images.AgnHost(), Network: providerPrimaryNetwork, - Args: []string{"netexec", "--http-port", fmt.Sprintf("%d", externalContainerPort), "--udp-port", fmt.Sprintf("%d", externalContainerPort)}, + CmdArgs: []string{"netexec", "--http-port", fmt.Sprintf("%d", externalContainerPort), "--udp-port", fmt.Sprintf("%d", externalContainerPort)}, ExtPort: externalContainerPort} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "failed to create external container 
(%s)", externalContainer) diff --git a/test/e2e/service.go b/test/e2e/service.go index 0df017d523..6e3ff61c27 100644 --- a/test/e2e/service.go +++ b/test/e2e/service.go @@ -812,7 +812,7 @@ var _ = ginkgo.Describe("Services", feature.Service, func() { framework.ExpectNoError(err, "failed to get primary network") externalContainerPort := infraprovider.Get().GetExternalContainerPort() externalContainer := infraapi.ExternalContainer{Name: clientContainerName, Image: images.AgnHost(), Network: primaryProviderNetwork, - Args: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} + CmdArgs: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "external container %s must be created", externalContainer.Name) @@ -1011,7 +1011,7 @@ var _ = ginkgo.Describe("Services", feature.Service, func() { Name: targetSecondaryContainerName, Image: images.AgnHost(), Network: secondaryProviderNetwork, - Args: getAgnHostHTTPPortBindCMDArgs(serverExternalContainerPort), + CmdArgs: getAgnHostHTTPPortBindCMDArgs(serverExternalContainerPort), ExtPort: serverExternalContainerPort, } serverExternalContainer, err := providerCtx.CreateExternalContainer(serverExternalContainerSpec) @@ -1315,7 +1315,7 @@ spec: ginkgo.By("Creating an external client") externalContainer := infraapi.ExternalContainer{Name: clientContainerName, Image: images.AgnHost(), Network: primaryProviderNetwork, - Args: []string{"pause"}, ExtPort: infraprovider.Get().GetExternalContainerPort()} + CmdArgs: []string{"pause"}, ExtPort: infraprovider.Get().GetExternalContainerPort()} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "failed to create external container", externalContainer) From acef39f4cde22069620c7d078220bce9b6e02c12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Caama=C3=B1o=20Ruiz?= 
Date: Thu, 19 Jun 2025 17:27:53 +0000 Subject: [PATCH 149/181] e2e: make ExtPort not required in container infra provider API MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jaime Caamaño Ruiz --- test/e2e/infraprovider/api/api.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/test/e2e/infraprovider/api/api.go b/test/e2e/infraprovider/api/api.go index 81101c622e..1d2d3466fb 100644 --- a/test/e2e/infraprovider/api/api.go +++ b/test/e2e/infraprovider/api/api.go @@ -249,9 +249,6 @@ func (ec ExternalContainer) IsValidPreCreateContainer() (bool, error) { if ec.Network.String() == "" { errs = append(errs, errors.New("network is not set")) } - if ec.ExtPort == 0 { - errs = append(errs, errors.New("port is not set")) - } if len(errs) == 0 { return true, nil } From 926ba1ad397c5b218defb497d1a7ade390e8e7f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Caama=C3=B1o=20Ruiz?= Date: Wed, 2 Jul 2025 14:01:02 +0000 Subject: [PATCH 150/181] e2e: use index in kind infra inspect templates to allow special characters MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jaime Caamaño Ruiz --- test/e2e/infraprovider/providers/kind/kind.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/test/e2e/infraprovider/providers/kind/kind.go b/test/e2e/infraprovider/providers/kind/kind.go index e6290e7fee..8c068c7411 100644 --- a/test/e2e/infraprovider/providers/kind/kind.go +++ b/test/e2e/infraprovider/providers/kind/kind.go @@ -497,13 +497,13 @@ func (c *contextKind) cleanUp() error { const ( nameFormat = "{{.Name}}" - inspectNetworkIPv4GWKeyStr = "{{ .NetworkSettings.Networks.%s.Gateway }}" - inspectNetworkIPv4AddrKeyStr = "{{ .NetworkSettings.Networks.%s.IPAddress }}" - inspectNetworkIPv4PrefixKeyStr = "{{ .NetworkSettings.Networks.%s.IPPrefixLen }}" - inspectNetworkIPv6GWKeyStr = "{{ .NetworkSettings.Networks.%s.IPv6Gateway }}" - 
inspectNetworkIPv6AddrKeyStr = "{{ .NetworkSettings.Networks.%s.GlobalIPv6Address }}" - inspectNetworkIPv6PrefixKeyStr = "{{ .NetworkSettings.Networks.%s.GlobalIPv6PrefixLen }}" - inspectNetworkMACKeyStr = "{{ .NetworkSettings.Networks.%s.MacAddress }}" + inspectNetworkIPv4GWKeyStr = "{{ with index .NetworkSettings.Networks %q }}{{ .Gateway }}{{ end }}" + inspectNetworkIPv4AddrKeyStr = "{{ with index .NetworkSettings.Networks %q }}{{ .IPAddress }}{{ end }}" + inspectNetworkIPv4PrefixKeyStr = "{{ with index .NetworkSettings.Networks %q }}{{ .IPPrefixLen }}{{ end }}" + inspectNetworkIPv6GWKeyStr = "{{ with index .NetworkSettings.Networks %q }}{{ .IPv6Gateway }}{{ end }}" + inspectNetworkIPv6AddrKeyStr = "{{ with index .NetworkSettings.Networks %q }}{{ .GlobalIPv6Address }}{{ end }}" + inspectNetworkIPv6PrefixKeyStr = "{{ with index .NetworkSettings.Networks %q }}{{ .GlobalIPv6PrefixLen }}{{ end }}" + inspectNetworkMACKeyStr = "{{ with index .NetworkSettings.Networks %q }}{{ .MacAddress }}{{ end }}" inspectNetworkContainersKeyStr = "{{ range $key, $value := .Containers }}{{ printf \"%s\\n\" $value.Name}}{{ end }}'" emptyValue = "" ) From edb05ca1087a69c9fee8bbb7bf30bde36f53ced9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Caama=C3=B1o=20Ruiz?= Date: Thu, 3 Jul 2025 11:57:08 +0000 Subject: [PATCH 151/181] kind.sh: Use FRRConfiguration label when advertising default network MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To avoid selecting FRRConfigurations that have other purposes. 
Signed-off-by: Jaime Caamaño Ruiz --- dist/templates/ovn-setup.yaml.j2 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/dist/templates/ovn-setup.yaml.j2 b/dist/templates/ovn-setup.yaml.j2 index 8112e06670..981a362859 100644 --- a/dist/templates/ovn-setup.yaml.j2 +++ b/dist/templates/ovn-setup.yaml.j2 @@ -89,7 +89,9 @@ spec: networkSelectors: - networkSelectionType: DefaultNetwork nodeSelector: {} - frrConfigurationSelector: {} + frrConfigurationSelector: + matchLabels: + name: receive-all advertisements: - "PodNetwork" {%- endif %} From 5ece8463d577233268d0a8229b814151bc00ac00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Caama=C3=B1o=20Ruiz?= Date: Thu, 19 Jun 2025 17:29:52 +0000 Subject: [PATCH 152/181] e2e: add VRF-Lite test cases MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jaime Caamaño Ruiz --- test/e2e/deploymentconfig/api/api.go | 1 + .../e2e/deploymentconfig/configs/kind/kind.go | 4 + test/e2e/network_segmentation.go | 26 +- test/e2e/route_advertisements.go | 1159 ++++++++++++++++- .../frr-k8s/frrconf.yaml.tmpl | 46 + .../routeadvertisements/frr/daemons.tmpl | 82 ++ .../routeadvertisements/frr/frr.conf.tmpl | 57 + test/e2e/util.go | 124 +- 8 files changed, 1455 insertions(+), 44 deletions(-) create mode 100644 test/e2e/testdata/routeadvertisements/frr-k8s/frrconf.yaml.tmpl create mode 100644 test/e2e/testdata/routeadvertisements/frr/daemons.tmpl create mode 100644 test/e2e/testdata/routeadvertisements/frr/frr.conf.tmpl diff --git a/test/e2e/deploymentconfig/api/api.go b/test/e2e/deploymentconfig/api/api.go index 573ced8cb8..dc43e87c9b 100644 --- a/test/e2e/deploymentconfig/api/api.go +++ b/test/e2e/deploymentconfig/api/api.go @@ -4,6 +4,7 @@ package api // Remove when OVN-Kubernetes exposes its config via an API. 
type DeploymentConfig interface { OVNKubernetesNamespace() string + FRRK8sNamespace() string ExternalBridgeName() string PrimaryInterfaceName() string } diff --git a/test/e2e/deploymentconfig/configs/kind/kind.go b/test/e2e/deploymentconfig/configs/kind/kind.go index be3f35aa73..d05c6a7061 100644 --- a/test/e2e/deploymentconfig/configs/kind/kind.go +++ b/test/e2e/deploymentconfig/configs/kind/kind.go @@ -33,6 +33,10 @@ func (k kind) OVNKubernetesNamespace() string { return "ovn-kubernetes" } +func (k kind) FRRK8sNamespace() string { + return "frr-k8s-system" +} + func (k kind) ExternalBridgeName() string { return "breth0" } diff --git a/test/e2e/network_segmentation.go b/test/e2e/network_segmentation.go index 2fe012343b..dec466f423 100644 --- a/test/e2e/network_segmentation.go +++ b/test/e2e/network_segmentation.go @@ -1887,31 +1887,17 @@ func generateLayer3Subnets(cidrs string) []string { // userDefinedNetworkReadyFunc returns a function that checks for the NetworkCreated condition in the provided udn func userDefinedNetworkReadyFunc(client dynamic.Interface, namespace, name string) func() error { - return func() error { - udn, err := client.Resource(udnGVR).Namespace(namespace).Get(context.Background(), name, metav1.GetOptions{}, "status") - if err != nil { - return err - } - conditions, err := getConditions(udn) - if err != nil { - return err - } - if len(conditions) == 0 { - return fmt.Errorf("no conditions found in: %v", udn) - } - for _, condition := range conditions { - if condition.Type == "NetworkCreated" && condition.Status == metav1.ConditionTrue { - return nil - } - } - return fmt.Errorf("no NetworkCreated condition found in: %v", udn) - } + return networkReadyFunc(client.Resource(udnGVR).Namespace(namespace), name) } // userDefinedNetworkReadyFunc returns a function that checks for the NetworkCreated condition in the provided cluster udn func clusterUserDefinedNetworkReadyFunc(client dynamic.Interface, name string) func() error { + return 
networkReadyFunc(client.Resource(clusterUDNGVR), name) +} + +func networkReadyFunc(client dynamic.ResourceInterface, name string) func() error { return func() error { - cUDN, err := client.Resource(clusterUDNGVR).Get(context.Background(), name, metav1.GetOptions{}, "status") + cUDN, err := client.Get(context.Background(), name, metav1.GetOptions{}, "status") if err != nil { return err } diff --git a/test/e2e/route_advertisements.go b/test/e2e/route_advertisements.go index a08b80c6b0..08e6f11965 100644 --- a/test/e2e/route_advertisements.go +++ b/test/e2e/route_advertisements.go @@ -2,10 +2,14 @@ package e2e import ( "context" + "embed" "fmt" "math/rand" "net" + "os" + "path/filepath" "strings" + "text/template" "time" @@ -16,6 +20,8 @@ import ( apitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/types" udnv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" udnclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned" + "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" @@ -24,6 +30,12 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/dynamic" "k8s.io/kubernetes/test/e2e/framework" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2enode "k8s.io/kubernetes/test/e2e/framework/node" @@ -34,13 +46,14 @@ import ( utilnet "k8s.io/utils/net" ) +const ( + serverContainerName = "bgpserver" + routerContainerName = "frr" + echoClientPodName = "echo-client-pod" + 
bgpExternalNetworkName = "bgpnet" +) + var _ = ginkgo.Describe("BGP: Pod to external server when default podNetwork is advertised", func() { - const ( - serverContainerName = "bgpserver" - routerContainerName = "frr" - echoClientPodName = "echo-client-pod" - bgpExternalNetworkName = "bgpnet" - ) var serverContainerIPs []string var frrContainerIPv4, frrContainerIPv6 string var nodes *corev1.NodeList @@ -236,13 +249,6 @@ var _ = ginkgo.Describe("BGP: Pod to external server when default podNetwork is }) var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advertised", func() { - const ( - serverContainerName = "bgpserver" - routerContainerName = "frr" - echoClientPodName = "echo-client-pod" - bgpExternalNetworkName = "bgpnet" - placeholder = "PLACEHOLDER_NAMESPACE" - ) var serverContainerIPs []string var frrContainerIPv4, frrContainerIPv6 string var nodes *corev1.NodeList @@ -1061,3 +1067,1130 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" }, ), ) + +var _ = ginkgo.Describe("BGP: For a VRF-Lite configured network", func() { + + // testing helpers used throughout this testing node + const ( + // FIXME: each test brings its own topology up, and sometimes zebra on + // external FRR container fails to start on the first attempt for + // unknown reasons delaying the overall availability, so we need to use + // long timeouts + timeout = 240 * time.Second + timeoutNOK = 10 * time.Second + netexecPort = 8080 + ) + var netexecPortStr = fmt.Sprintf("%d", netexecPort) + testPodToHostnameAndExpect := func(src *corev1.Pod, dstIP, expect string) { + ginkgo.GinkgoHelper() + hostname, err := e2epodoutput.RunHostCmdWithRetries( + src.Namespace, + src.Name, + fmt.Sprintf("curl --max-time 2 -g -q -s http://%s/hostname", net.JoinHostPort(dstIP, netexecPortStr)), + framework.Poll, + timeout, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(hostname).To(gomega.Equal(expect)) + } + testPodToClientIP := func(src 
*corev1.Pod, dstIP string) { + ginkgo.GinkgoHelper() + _, err := e2epodoutput.RunHostCmdWithRetries( + src.Namespace, + src.Name, + fmt.Sprintf("curl --max-time 2 -g -q -s http://%s/clientip", net.JoinHostPort(dstIP, netexecPortStr)), + framework.Poll, + timeout, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + testPodToClientIPAndExpect := func(src *corev1.Pod, dstIP, expect string) { + ginkgo.GinkgoHelper() + ip, err := e2epodoutput.RunHostCmdWithRetries( + src.Namespace, + src.Name, + fmt.Sprintf("curl --max-time 2 -g -q -s http://%s/clientip", net.JoinHostPort(dstIP, netexecPortStr)), + framework.Poll, + timeout, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ip, _, err = net.SplitHostPort(ip) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(ip).To(gomega.Equal(expect)) + } + testContainerToClientIPAndExpect := func(src, dstIP, expect string) { + ginkgo.GinkgoHelper() + gomega.Eventually(func(g gomega.Gomega) { + // FIXME: using ExecK8NodeCommand instead of + // ExecExternalContainerCommand, they arent any + // different but ExecK8NodeCommand is more convinient + ip, err := infraprovider.Get().ExecK8NodeCommand( + src, + []string{"curl", "--max-time", "2", "-g", "-q", "-s", fmt.Sprintf("http://%s/clientip", net.JoinHostPort(dstIP, netexecPortStr))}, + ) + g.Expect(err).NotTo(gomega.HaveOccurred()) + ip, _, err = net.SplitHostPort(ip) + g.Expect(err).NotTo(gomega.HaveOccurred()) + g.Expect(ip).To(gomega.Equal(expect)) + }).WithTimeout(timeout).WithPolling(framework.Poll).Should(gomega.Succeed()) + } + testPodToClientIPNOK := func(src *corev1.Pod, dstIP string) { + gomega.Consistently(func(g gomega.Gomega) { + _, err := e2epodoutput.RunHostCmd( + src.Namespace, + src.Name, + fmt.Sprintf("curl --max-time 2 -g -q -s http://%s/clientip", net.JoinHostPort(dstIP, netexecPortStr)), + ) + g.Expect(err).To(gomega.HaveOccurred()) + }).WithTimeout(timeoutNOK).WithPolling(framework.Poll).Should(gomega.Succeed()) + } + 
testContainerToClientIPNOK := func(src, dstIP string) { + gomega.Consistently(func(g gomega.Gomega) { + _, err := infraprovider.Get().ExecK8NodeCommand( + src, + []string{"curl", "--max-time", "2", "-g", "-q", "-s", fmt.Sprintf("http://%s/clientip", net.JoinHostPort(dstIP, netexecPortStr))}, + ) + g.Expect(err).To(gomega.HaveOccurred()) + }).WithTimeout(timeoutNOK).WithPolling(framework.Poll).Should(gomega.Succeed()) + } + + const ( + baseName = "vrflite" + bgpPeerSubnetIPv4 = "172.36.0.0/16" + bgpPeerSubnetIPv6 = "fc00:f853:ccd:36::/64" + // TODO: test with overlaps but we need better isolation from the infra + // provider, docker `--internal` bridge networks with iptables based + // isolation doesn't cut it. macvlan driver might be a better option. + bgpServerSubnetIPv4 = "172.38.0.0/16" + bgpServerSubnetIPv6 = "fc00:f853:ccd:38::/64" + ) + + f := wrappedTestFramework(baseName) + f.SkipNamespaceCreation = true + var ipFamilySet sets.Set[utilnet.IPFamily] + var ictx infraapi.Context + var testBaseName, testSuffix, testNetworkName, bgpServerName string + + ginkgo.BeforeEach(func() { + if !isLocalGWModeEnabled() { + e2eskipper.Skipf("VRF-Lite test cases only supported in Local Gateway mode") + } + ipFamilySet = sets.New(getSupportedIPFamiliesSlice(f.ClientSet)...) 
+ ictx = infraprovider.Get().NewTestContext() + testSuffix = framework.RandomSuffix() + testBaseName = baseName + testSuffix + testNetworkName = testBaseName + bgpServerName = testNetworkName + "-bgpserver" + + // we will create a agnhost server on an extra network peered with BGP + ginkgo.By("Running a BGP network with an agnhost server") + bgpPeerCIDRs := []string{bgpPeerSubnetIPv4, bgpPeerSubnetIPv6} + bgpServerCIDRs := []string{bgpServerSubnetIPv4, bgpServerSubnetIPv6} + gomega.Expect(runBGPNetworkAndServer(f, ictx, testNetworkName, bgpServerName, bgpPeerCIDRs, bgpServerCIDRs)).To(gomega.Succeed()) + }) + + // define networks to test with + const ( + cudnCIDRv4 = "103.103.0.0/16" + cudnCIDRv6 = "2014:100:200::0/60" + ) + var ( + layer3NetworkSpec = &udnv1.NetworkSpec{ + Topology: udnv1.NetworkTopologyLayer3, + Layer3: &udnv1.Layer3Config{ + Role: "Primary", + Subnets: []udnv1.Layer3Subnet{{CIDR: cudnCIDRv4, HostSubnet: 24}, {CIDR: cudnCIDRv6, HostSubnet: 64}}, + }, + } + ) + + matchL3SubnetsByIPFamilies := func(families sets.Set[utilnet.IPFamily], in ...udnv1.Layer3Subnet) (out []udnv1.Layer3Subnet) { + for _, subnet := range in { + if families.Has(utilnet.IPFamilyOfCIDRString(string(subnet.CIDR))) { + out = append(out, subnet) + } + } + return + } + matchL2SubnetsByIPFamilies := func(families sets.Set[utilnet.IPFamily], in ...udnv1.CIDR) (out []udnv1.CIDR) { + for _, subnet := range in { + if families.Has(utilnet.IPFamilyOfCIDRString(string(subnet))) { + out = append(out, subnet) + } + } + return + } + + networksToTest := []ginkgo.TableEntry{ + ginkgo.Entry("Layer 3", layer3NetworkSpec), + } + + ginkgo.DescribeTableSubtree("When the tested network is of type", + func(networkSpec *udnv1.NetworkSpec) { + var testNamespace *corev1.Namespace + var testPod *corev1.Pod + + getSameNode := func() string { + return testPod.Spec.NodeName + } + getDifferentNode := func() string { + ginkgo.GinkgoHelper() + nodes, err := 
e2enode.GetReadySchedulableNodes(context.Background(), f.ClientSet) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get ready schedulable nodes") + for _, node := range nodes.Items { + if node.Name != testPod.Spec.NodeName { + return node.Name + } + } + ginkgo.Fail(fmt.Sprintf("Failed to find a different ready schedulable node than %s", testPod.Spec.NodeName)) + return "" + } + + ginkgo.BeforeEach(func() { + var err error + networkSpec.Layer3.Subnets = matchL3SubnetsByIPFamilies(ipFamilySet, networkSpec.Layer3.Subnets...) + + ginkgo.By("Configuring the namespace and network") + testNamespace, err = createNamespaceWithPrimaryNetworkOfType(f, ictx, testBaseName, testNetworkName, cudnAdvertisedVRFLite, networkSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + f.Namespace = testNamespace + + // attach network to the VRF on all nodes + ginkgo.By("Attaching the BGP peer network to the CUDN VRF") + nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + network, err := infraprovider.Get().GetNetwork(testNetworkName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for _, node := range nodeList.Items { + iface, err := infraprovider.Get().GetK8NodeNetworkInterface(node.Name, network) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + _, err = infraprovider.Get().ExecK8NodeCommand(node.Name, []string{"ip", "link", "set", "dev", iface.InfName, "master", testNetworkName}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // quirk: need to reset IPv6 address + _, err = infraprovider.Get().ExecK8NodeCommand(node.Name, []string{"ip", "address", "add", iface.IPv6 + "/" + iface.IPv6Prefix, "dev", iface.InfName}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }) + + ginkgo.Describe("When a pod runs on the tested network", func() { + ginkgo.BeforeEach(func() { + ginkgo.By("Running a pod on the tested network namespace") + testPod = e2epod.CreateExecPodOrFail( 
+ context.Background(), + f.ClientSet, + testNamespace.Name, + testNamespace.Name+"-netexec-pod", + func(p *corev1.Pod) { + p.Spec.Containers[0].Args = []string{"netexec"} + }, + ) + }) + + ginkgo.DescribeTable("It can reach an external server on the same network", + func(family utilnet.IPFamily) { + if !ipFamilySet.Has(family) { + e2eskipper.Skipf("IP family %v not supported", family) + } + ginkgo.By("Ensuring a request from the pod can reach the external server") + bgpServerNetwork, err := infraprovider.Get().GetNetwork(bgpServerName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + iface, err := infraprovider.Get().GetK8NodeNetworkInterface(bgpServerName, bgpServerNetwork) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + serverIP := getFirstIPStringOfFamily(family, []string{iface.IPv4, iface.IPv6}) + gomega.Expect(serverIP).NotTo(gomega.BeEmpty()) + testPodToHostnameAndExpect(testPod, serverIP, bgpServerName) + + ginkgo.By("Ensuring a request from the pod is not SNATed") + testPodIP, err := podIPOfFamilyOnPrimaryNetwork( + f.ClientSet, + testPod.Namespace, + testPod.Name, + testNetworkName, + family, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(testPodIP).ToNot(gomega.BeEmpty()) + testPodToClientIPAndExpect(testPod, serverIP, testPodIP) + }, + ginkgo.Entry("When the network is IPv4", utilnet.IPv4), + ginkgo.Entry("When the network is IPv6", utilnet.IPv6), + ) + + ginkgo.DescribeTable("It can be reached by an external server on the same network", + func(family utilnet.IPFamily) { + if !ipFamilySet.Has(family) { + e2eskipper.Skipf("IP family %v not supported", family) + } + ginkgo.By("Ensuring a request from the external server can reach the pod") + bgpServerNetwork, err := infraprovider.Get().GetNetwork(bgpServerName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + iface, err := infraprovider.Get().GetK8NodeNetworkInterface(bgpServerName, bgpServerNetwork) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + serverIP := 
getFirstIPStringOfFamily(family, []string{iface.IPv4, iface.IPv6}) + gomega.Expect(serverIP).NotTo(gomega.BeEmpty()) + podIP, err := podIPOfFamilyOnPrimaryNetwork( + f.ClientSet, + testPod.Namespace, + testPod.Name, + testNetworkName, + family, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(podIP).ToNot(gomega.BeEmpty()) + testContainerToClientIPAndExpect(bgpServerName, podIP, serverIP) + }, + ginkgo.Entry("When the network is IPv4", utilnet.IPv4), + ginkgo.Entry("When the network is IPv6", utilnet.IPv6), + ) + + ginkgo.It("Can reach KAPI service", func() { + ginkgo.By("Ensuring a request from the pod can reach KAPI service") + output, err := e2epodoutput.RunHostCmdWithRetries( + testPod.Namespace, + testPod.Name, + "curl --max-time 2 -g -q -s -k https://kubernetes.default/healthz", + framework.Poll, + timeout, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(output).To(gomega.Equal("ok")) + }) + + ginkgo.DescribeTable("It cannot reach an external server on a different network", + func(family utilnet.IPFamily) { + if !ipFamilySet.Has(family) { + e2eskipper.Skipf("IP family %v not supported", family) + } + ginkgo.By("Ensuring a request from the pod cannot reach the external server") + // using the external server setup for the default network + bgpServerNetwork, err := infraprovider.Get().GetNetwork(bgpExternalNetworkName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + iface, err := infraprovider.Get().GetK8NodeNetworkInterface(serverContainerName, bgpServerNetwork) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + serverIP := getFirstIPStringOfFamily(family, []string{iface.IPv4, iface.IPv6}) + gomega.Expect(serverIP).NotTo(gomega.BeEmpty()) + testPodToClientIPNOK(testPod, serverIP) + }, + ginkgo.Entry("When the network is IPv4", utilnet.IPv4), + ginkgo.Entry("When the network is IPv6", utilnet.IPv6), + ) + + ginkgo.DescribeTable("It cannot be reached by an external server on a different network", + func(family 
utilnet.IPFamily) { + if !ipFamilySet.Has(family) { + e2eskipper.Skipf("IP family %v not supported", family) + } + ginkgo.By("Ensuring a request from the external server cannot reach the pod") + podIP, err := podIPOfFamilyOnPrimaryNetwork( + f.ClientSet, + testPod.Namespace, + testPod.Name, + testNetworkName, + family, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(podIP).ToNot(gomega.BeEmpty()) + // using the external server setup for the default network + testContainerToClientIPNOK(serverContainerName, podIP) + }, + ginkgo.Entry("When the network is IPv4", utilnet.IPv4), + ginkgo.Entry("When the network is IPv6", utilnet.IPv6), + ) + + ginkgo.DescribeTableSubtree("It cannot be reached by a cluster node", + func(getNode func() string) { + ginkgo.DescribeTable("", + func(family utilnet.IPFamily) { + if !ipFamilySet.Has(family) { + e2eskipper.Skipf("IP family %v not supported", family) + } + ginkgo.By("Ensuring a request from the node cannot reach the tested network pod") + podIP, err := podIPOfFamilyOnPrimaryNetwork( + f.ClientSet, + testPod.Namespace, + testPod.Name, + testNetworkName, + family, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(podIP).ToNot(gomega.BeEmpty()) + testContainerToClientIPNOK(getNode(), podIP) + }, + ginkgo.Entry("When the network is IPv4", utilnet.IPv4), + ginkgo.Entry("When the network is IPv6", utilnet.IPv6), + ) + }, + ginkgo.Entry("When it is the same node", getSameNode), + ginkgo.Entry("When it is a different node", getDifferentNode), + ) + + ginkgo.DescribeTableSubtree("When other pod runs on the tested network", + func(getNode func() string) { + var otherPod *corev1.Pod + + ginkgo.BeforeEach(func() { + ginkgo.By("Running other pod on the tested network namespace") + otherPod = e2epod.CreateExecPodOrFail( + context.Background(), + f.ClientSet, + testNamespace.Name, + testNamespace.Name+"-netexec-pod", + func(p *corev1.Pod) { + p.Spec.Containers[0].Args = []string{"netexec"} + p.Labels = 
map[string]string{"app": "netexec-pod"} + }, + ) + }) + + ginkgo.DescribeTable("The pods on the tested network can reach each other", + func(family utilnet.IPFamily) { + if !ipFamilySet.Has(family) { + e2eskipper.Skipf("IP family %v not supported", family) + } + ginkgo.By("Ensuring a request from the first pod can reach the second pod") + otherPodIP, err := podIPOfFamilyOnPrimaryNetwork( + f.ClientSet, + otherPod.Namespace, + otherPod.Name, + testNetworkName, + family, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(otherPodIP).ToNot(gomega.BeEmpty()) + testPodToClientIP(testPod, otherPodIP) + }, + ginkgo.Entry("When the networks are IPv4", utilnet.IPv4), + ginkgo.Entry("When the networks are IPv6", utilnet.IPv6), + ) + + ginkgo.Describe("Backing a ClusterIP service", func() { + var service *corev1.Service + + ginkgo.BeforeEach(func() { + ginkgo.By("Creating a service backed by the other network pod") + service = e2eservice.CreateServiceSpec( + "service-for-netexec", + "", + false, + otherPod.Labels, + ) + service.Spec.Ports = []corev1.ServicePort{{Port: netexecPort}} + familyPolicy := corev1.IPFamilyPolicyPreferDualStack + service.Spec.IPFamilyPolicy = &familyPolicy + var err error + service, err = f.ClientSet.CoreV1().Services(otherPod.Namespace).Create(context.Background(), service, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + ginkgo.DescribeTable("The first pod can reach the ClusterIP service on the same network", + func(family utilnet.IPFamily) { + if !ipFamilySet.Has(family) { + e2eskipper.Skipf("IP family %v not supported", family) + } + ginkgo.By("Ensuring a request from the first pod can reach the ClusterIP service") + clusterIP := getFirstIPStringOfFamily(family, service.Spec.ClusterIPs) + gomega.Expect(clusterIP).ToNot(gomega.BeEmpty()) + testPodToClientIP(testPod, clusterIP) + }, + ginkgo.Entry("When the networks are IPv4", utilnet.IPv4), + ginkgo.Entry("When the networks are IPv6", 
utilnet.IPv6), + ) + }) + }, + ginkgo.Entry("On the same node", getSameNode), + ginkgo.Entry("On a different node", getDifferentNode), + ) + + ginkgo.Describe("When there is other network", func() { + const ( + otherBGPPeerSubnetIPv4 = "172.136.0.0/16" + otherBGPPeerSubnetIPv6 = "fc00:f853:ccd:136::/64" + otherBGPServerSubnetIPv4 = "172.138.0.0/16" + otherBGPServerSubnetIPv6 = "fc00:f853:ccd:138::/64" + otherUDNCIDRv4 = "103.203.0.0/16" + otherUDNCIDRv6 = "2014:200:200::0/60" + ) + + var ( + otherLayer3NetworkSpec = &udnv1.NetworkSpec{ + Topology: udnv1.NetworkTopologyLayer3, + Layer3: &udnv1.Layer3Config{ + Role: "Primary", + Subnets: []udnv1.Layer3Subnet{{CIDR: otherUDNCIDRv4, HostSubnet: 24}, {CIDR: otherUDNCIDRv6, HostSubnet: 64}}, + }, + } + otherLayer2NetworkSpec = &udnv1.NetworkSpec{ + Topology: udnv1.NetworkTopologyLayer2, + Layer2: &udnv1.Layer2Config{ + Role: "Primary", + Subnets: udnv1.DualStackCIDRs{otherUDNCIDRv4, otherUDNCIDRv6}, + }, + } + ) + + otherNetworksToTest := []ginkgo.TableEntry{ + ginkgo.Entry("Default", defaultNetwork, nil), + ginkgo.Entry("Layer 3 UDN non advertised", udn, otherLayer3NetworkSpec), + ginkgo.Entry("Layer 3 CUDN advertised", cudnAdvertised, otherLayer3NetworkSpec), + ginkgo.Entry("Layer 3 CUDN advertised VRF-Lite", cudnAdvertisedVRFLite, otherLayer3NetworkSpec), + ginkgo.Entry("Layer 2 UDN non advertised", udn, otherLayer2NetworkSpec), + } + + ginkgo.DescribeTableSubtree("Of type", + func(networkType networkType, networkSpec *udnv1.NetworkSpec) { + var otherNamespace *corev1.Namespace + var otherNetworkName string + + ginkgo.BeforeEach(func() { + otherNetworkName = testBaseName + "-other" + otherNamespaceName := otherNetworkName + + switch { + case networkSpec == nil: + // noop + case networkSpec.Layer3 != nil: + networkSpec.Layer3.Subnets = matchL3SubnetsByIPFamilies(ipFamilySet, networkSpec.Layer3.Subnets...) 
+ case networkSpec.Layer2 != nil: + networkSpec.Layer2.Subnets = matchL2SubnetsByIPFamilies(ipFamilySet, networkSpec.Layer2.Subnets...) + } + + // we will create a agnhost server on an extra network peered with BGP + switch networkType { + case cudnAdvertisedVRFLite: + ginkgo.By("Running other BGP network with an agnhost server") + otherBGPServerName := otherNetworkName + "-bgpserver" + bgpPeerCIDRs := []string{otherBGPPeerSubnetIPv4, otherBGPPeerSubnetIPv6} + bgpServerCIDRs := []string{otherBGPServerSubnetIPv4, otherBGPServerSubnetIPv6} + gomega.Expect(runBGPNetworkAndServer(f, ictx, otherNetworkName, otherBGPServerName, bgpPeerCIDRs, bgpServerCIDRs)).To(gomega.Succeed()) + case defaultNetwork: + otherNetworkName = "default" + } + + ginkgo.By("Creating the other namespace and network") + var err error + otherNamespace, err = createNamespaceWithPrimaryNetworkOfType(f, ictx, testBaseName, otherNamespaceName, networkType, networkSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + ginkgo.DescribeTableSubtree("And a pod runs on the other network", + func(getNode func() string) { + var otherPod *corev1.Pod + + ginkgo.BeforeEach(func() { + ginkgo.By("Running a pod on the other network namespace") + otherPod = e2epod.CreateExecPodOrFail( + context.Background(), + f.ClientSet, + otherNamespace.Name, + otherNamespace.Name+"-netexec-pod", + func(p *corev1.Pod) { + p.Spec.Containers[0].Args = []string{"netexec"} + p.Spec.NodeName = getNode() + p.Labels = map[string]string{"app": "netexec-pod"} + }, + ) + }) + + ginkgo.DescribeTable("The pod on the tested network cannot reach the pod on the other network", + func(family utilnet.IPFamily) { + if !ipFamilySet.Has(family) { + e2eskipper.Skipf("IP family %v not supported", family) + } + ginkgo.By("Ensuring a request from the tested network pod cannot reach the other network pod") + otherPodIP, err := podIPOfFamilyOnPrimaryNetwork( + f.ClientSet, + otherPod.Namespace, + otherPod.Name, + otherNetworkName, + family, + ) 
+ gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(otherPodIP).ToNot(gomega.BeEmpty()) + testPodToClientIPNOK(testPod, otherPodIP) + }, + ginkgo.Entry("When the networks are IPv4", utilnet.IPv4), + ginkgo.Entry("When the networks are IPv6", utilnet.IPv6), + ) + + ginkgo.DescribeTable("The pod on the other network cannot reach the pod on the tested network", + func(family utilnet.IPFamily) { + if !ipFamilySet.Has(family) { + e2eskipper.Skipf("IP family %v not supported", family) + } + ginkgo.By("Ensuring a request from the other network pod cannot reach the tested network pod") + testPodIP, err := podIPOfFamilyOnPrimaryNetwork( + f.ClientSet, + testPod.Namespace, + testPod.Name, + testNetworkName, + family, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(testPodIP).ToNot(gomega.BeEmpty()) + testPodToClientIPNOK(otherPod, testPodIP) + }, + ginkgo.Entry("When the networks are IPv4", utilnet.IPv4), + ginkgo.Entry("When the networks are IPv6", utilnet.IPv6), + ) + + ginkgo.Describe("Backing a ClusterIP service", func() { + var service *corev1.Service + + ginkgo.BeforeEach(func() { + ginkgo.By("Creating a service backed by the other network pod") + service = e2eservice.CreateServiceSpec( + "service-for-netexec", + "", + false, + otherPod.Labels, + ) + service.Spec.Ports = []corev1.ServicePort{{Port: netexecPort}} + familyPolicy := corev1.IPFamilyPolicyPreferDualStack + service.Spec.IPFamilyPolicy = &familyPolicy + var err error + service, err = f.ClientSet.CoreV1().Services(otherPod.Namespace).Create(context.Background(), service, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + ginkgo.DescribeTable("The pod on the tested network cannot reach the service on the other network", + func(family utilnet.IPFamily) { + if !ipFamilySet.Has(family) { + e2eskipper.Skipf("IP family %v not supported", family) + } + ginkgo.By("Ensuring a request from the tested network pod cannot reach the other network pod") + 
clusterIP := getFirstIPStringOfFamily(family, service.Spec.ClusterIPs) + gomega.Expect(clusterIP).ToNot(gomega.BeEmpty()) + testPodToClientIPNOK(testPod, clusterIP) + }, + ginkgo.Entry("When the networks are IPv4", utilnet.IPv4), + ginkgo.Entry("When the networks are IPv6", utilnet.IPv6), + ) + }) + }, + ginkgo.Entry("On the same node", getSameNode), + ginkgo.Entry("On a different node", getDifferentNode), + ) + }, + otherNetworksToTest, + ) + }) + }) + }, + networksToTest, + ) +}) + +// routeAdvertisementsReadyFunc returns a function that checks for the +// Accepted condition in the provided RouteAdvertisements +func routeAdvertisementsReadyFunc(c raclientset.Clientset, name string) func() error { + return func() error { + ra, err := c.K8sV1().RouteAdvertisements().Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return err + } + conditionType := "Accepted" + condition := meta.FindStatusCondition(ra.Status.Conditions, conditionType) + if condition == nil { + return fmt.Errorf("no %q condition found in: %v", conditionType, ra) + } + if condition.Status != metav1.ConditionTrue { + return fmt.Errorf("condition %v has unexpected status %v", condition, condition.Status) + } + return nil + } +} + +// templateInputRouter data +type templateInputRouter struct { + VRF string + NeighborsIPv4 []string + NeighborsIPv6 []string + NetworksIPv4 []string + NetworksIPv6 []string +} + +// templateInputFRR data +type templateInputFRR struct { + // Name and Label are used for FRRConfiguration metadata + Name string + Labels map[string]string + Routers []templateInputRouter +} + +// for routeadvertisements test cases we generate configuration from templates embed in the program +// +//go:embed testdata/routeadvertisements +var ratestdata embed.FS +var tmplDir = filepath.Join("testdata", "routeadvertisements") + +const frrImage = "quay.io/frrouting/frr:9.1.3" + +// generateFRRConfiguration to establish a BGP session towards the provided +// neighbors in the 
network's VRF configured to advertise the provided +// networks. Returns a temporary directory where the configuration is generated. +func generateFRRConfiguration(neighborIPs, advertiseNetworks []string) (directory string, err error) { + // parse configuration templates + var templates *template.Template + templates, err = template.ParseFS(ratestdata, filepath.Join(tmplDir, "frr", "*.tmpl")) + if err != nil { + return "", fmt.Errorf("failed to parse templates: %w", err) + } + + // create the directory that will hold the configuration files + directory, err = os.MkdirTemp("", "frrconf-") + if err != nil { + return "", fmt.Errorf("failed to make temp directory: %w", err) + } + defer func() { + if err != nil { + os.RemoveAll(directory) + } + }() + + // generate external frr configuration executing the templates + networksIPv4, networksIPv6 := splitCIDRStringsByIPFamily(advertiseNetworks) + neighborsIPv4, neighborsIPv6 := splitIPStringsByIPFamily(neighborIPs) + conf := templateInputFRR{ + Routers: []templateInputRouter{ + { + NeighborsIPv4: neighborsIPv4, + NetworksIPv4: networksIPv4, + NeighborsIPv6: neighborsIPv6, + NetworksIPv6: networksIPv6, + }, + }, + } + + err = executeFileTemplate(templates, directory, "frr.conf", conf) + if err != nil { + return "", fmt.Errorf("failed to execute template %q: %w", "frr.conf", err) + } + err = executeFileTemplate(templates, directory, "daemons", nil) + if err != nil { + return "", fmt.Errorf("failed to execute template %q: %w", "daemons", err) + } + + return directory, nil +} + +// generateFRRk8sConfiguration for the provided network (which doubles up as the +// FRRConfiguration instance name, VRF name and used as value of `network` +// label) to establish a BGP session towards the provided neighbors in the +// network's VRF, configured to receive advertisements for the provided +// networks. Returns a temporary directory where the configuration is generated. 
+func generateFRRk8sConfiguration(networkName string, neighborIPs, receiveNetworks []string) (directory string, err error) { + // parse configuration templates + var templates *template.Template + templates, err = template.ParseFS(ratestdata, filepath.Join(tmplDir, "frr-k8s", "*.tmpl")) + if err != nil { + return "", fmt.Errorf("failed to parse templates: %w", err) + } + + // create the directory that will hold the configuration files + directory, err = os.MkdirTemp("", "frrk8sconf-") + if err != nil { + return "", fmt.Errorf("failed to make temp directory: %w", err) + } + defer func() { + if err != nil { + os.RemoveAll(directory) + } + }() + + receivesIPv4, receivesIPv6 := splitCIDRStringsByIPFamily(receiveNetworks) + neighborsIPv4, neighborsIPv6 := splitIPStringsByIPFamily(neighborIPs) + conf := templateInputFRR{ + Name: networkName, + Labels: map[string]string{"network": networkName}, + Routers: []templateInputRouter{ + { + VRF: networkName, + NeighborsIPv4: neighborsIPv4, + NeighborsIPv6: neighborsIPv6, + NetworksIPv4: receivesIPv4, + NetworksIPv6: receivesIPv6, + }, + }, + } + err = executeFileTemplate(templates, directory, "frrconf.yaml", conf) + if err != nil { + return "", fmt.Errorf("failed to execute template %q: %w", "frrconf.yaml", err) + } + + return directory, nil +} + +// runBGPNetworkAndServer configures a topology appropriate to be used with +// route advertisement test cases. For VRF-Lite test cases, the caller is +// responsible to attach the peer network interface to the CUDN VRF on the nodes. 
+// +// ----------------- ------------------ --------------- +// | | serverNetwork | | peerNetwork | | +// | external |<--------------- | FRR router |<--( Default / CUDN VRF )-- | cluster | +// | server | | | | | +// ----------------- ------------------ --------------- +func runBGPNetworkAndServer( + f *framework.Framework, + ictx infraapi.Context, + networkName, serverName string, + peerNetworks, + serverNetworks []string, +) error { + // filter networks by supported IP families + families := getSupportedIPFamiliesSlice(f.ClientSet) + peerNetworks = matchCIDRStringsByIPFamily(peerNetworks, families...) + serverNetworks = matchCIDRStringsByIPFamily(serverNetworks, families...) + + // create BGP peer network + bgpPeerNetwork, err := ictx.CreateNetwork(networkName, peerNetworks...) + if err != nil { + return fmt.Errorf("failed to create peer network %v: %w", peerNetworks, err) + } + + // create the server network + serverNetwork, err := ictx.CreateNetwork(serverName, serverNetworks...) + if err != nil { + return fmt.Errorf("failed to create server network %v: %w", serverNetworks, err) + } + + // attach BGP peer network to all nodes + var nodeIPs []string + nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) + if err != nil { + return fmt.Errorf("failed to list nodes: %w", err) + } + for _, node := range nodeList.Items { + iface, err := ictx.AttachNetwork(bgpPeerNetwork, node.Name) + if err != nil { + return fmt.Errorf("failed to attach node %q to network: %w", node.Name, err) + } + nodeIPs = append(nodeIPs, iface.IPv4, iface.IPv6) + } + + // run frr container + advertiseNetworks := serverNetworks + frrConfig, err := generateFRRConfiguration(nodeIPs, advertiseNetworks) + if err != nil { + return fmt.Errorf("failed to generate FRR configuration: %w", err) + } + ictx.AddCleanUpFn(func() error { return os.RemoveAll(frrConfig) }) + frr := infraapi.ExternalContainer{ + Name: networkName + "-frr", + Image: frrImage, + Network: 
bgpPeerNetwork, + RuntimeArgs: []string{"--volume", frrConfig + ":" + filepath.Join(filepath.FromSlash("/"), "etc", "frr")}, + } + frr, err = ictx.CreateExternalContainer(frr) + if err != nil { + return fmt.Errorf("failed to create frr container: %w", err) + } + // enable IPv6 forwarding if required + if frr.IPv6 != "" { + _, err = infraprovider.Get().ExecExternalContainerCommand(frr, []string{"sysctl", "-w", "net.ipv6.conf.all.forwarding=1"}) + if err != nil { + return fmt.Errorf("failed to set enable IPv6 forwading on frr container: %w", err) + } + } + + // connect frr to server network + frrServerNetworkInterface, err := ictx.AttachNetwork(serverNetwork, frr.Name) + if err != nil { + return fmt.Errorf("failed to connect frr to server network: %w", err) + } + + // run server container + server := infraapi.ExternalContainer{ + Name: serverName, + Image: images.AgnHost(), + CmdArgs: []string{"netexec"}, + Network: serverNetwork, + } + _, err = ictx.CreateExternalContainer(server) + if err != nil { + return fmt.Errorf("failed to create BGP server container: %w", err) + } + + // set frr as default gateway for the server + if frrServerNetworkInterface.IPv4 != "" { + _, err = infraprovider.Get().ExecExternalContainerCommand(server, []string{"ip", "route", "add", "default", "via", frrServerNetworkInterface.IPv4}) + if err != nil { + return fmt.Errorf("failed to set default IPv4 gateway on BGP server container: %w", err) + } + } + if frrServerNetworkInterface.IPv6 != "" { + _, err = infraprovider.Get().ExecExternalContainerCommand(server, []string{"ip", "-6", "route", "add", "default", "via", frrServerNetworkInterface.IPv6}) + if err != nil { + return fmt.Errorf("failed to set default IPv6 gateway on BGP server container: %w", err) + } + + } + + // apply FRR-K8s Configuration + receiveNetworks := serverNetworks + frrK8sConfig, err := generateFRRk8sConfiguration(networkName, []string{frr.IPv4, frr.IPv6}, receiveNetworks) + if err != nil { + return fmt.Errorf("failed to 
generate FRR-k8s configuration: %w", err) + } + ictx.AddCleanUpFn(func() error { return os.RemoveAll(frrK8sConfig) }) + _, err = e2ekubectl.RunKubectl(deploymentconfig.Get().FRRK8sNamespace(), "create", "-f", frrK8sConfig) + if err != nil { + return fmt.Errorf("failed to apply FRRConfiguration: %w", err) + } + ictx.AddCleanUpFn(func() error { + _, err = e2ekubectl.RunKubectl(deploymentconfig.Get().FRRK8sNamespace(), "delete", "-f", frrK8sConfig) + if err != nil { + return fmt.Errorf("failed to delete FRRConfiguration: %w", err) + } + return nil + }) + + return nil +} + +type networkType string + +const ( + defaultNetwork networkType = "DEFAULT" + udn networkType = "UDN" + cudn networkType = "CUDN" + cudnAdvertised networkType = "CUDN_ADVERTISED" + cudnAdvertisedVRFLite networkType = "CUDN_ADVERTISED_VRFLITE" +) + +// createNamespaceWithPrimaryNetworkOfType helper function configures a +// namespace, an optional (C)UDN and an optional RouteAdvertisements as determined +// by `networkType` argument. The RouteAdvertisements is aligned with the +// configuration done with `runBGPNetworkAndServer` for VRF-Lite scenarios. 
+func createNamespaceWithPrimaryNetworkOfType( + f *framework.Framework, + ictx infraapi.Context, + test, name string, + networkType networkType, + networkSpec *udnv1.NetworkSpec, +) (*corev1.Namespace, error) { + // define some configuration based on the type of namespace/network/advertisement + var targetVRF string + var networkLabels map[string]string + var frrConfigurationLabels map[string]string + switch networkType { + case cudnAdvertised: + networkLabels = map[string]string{"advertise": name} + frrConfigurationLabels = map[string]string{"name": "receive-all"} + case cudnAdvertisedVRFLite: + targetVRF = name + networkLabels = map[string]string{"advertise": name} + frrConfigurationLabels = map[string]string{"network": name} + } + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + "e2e-framework": test, + }, + }, + } + if networkType != defaultNetwork { + namespace.Labels[RequiredUDNNamespaceLabel] = "" + } + namespace, err := f.ClientSet.CoreV1().Namespaces().Create( + context.Background(), + namespace, + metav1.CreateOptions{}, + ) + if err != nil { + return nil, fmt.Errorf("failed to create namespace: %w", err) + } + ictx.AddCleanUpFn(func() error { + return f.ClientSet.CoreV1().Namespaces().Delete(context.Background(), namespace.Name, metav1.DeleteOptions{}) + }) + + // just creating a namespace with default network, return + if networkType == defaultNetwork { + return namespace, nil + } + + err = createUserDefinedNetwork( + f, + ictx, + namespace, + name, + networkType != udn, + networkSpec, + networkLabels, + ) + if err != nil { + return nil, fmt.Errorf("failed to create primary network: %w", err) + } + + // not advertised, return + if networkType == udn || networkType == cudn { + return namespace, nil + } + + err = createRouteAdvertisements( + f, + ictx, + name, + targetVRF, + networkLabels, + frrConfigurationLabels, + ) + if err != nil { + return nil, fmt.Errorf("failed to create primary 
network: %w", err) + } + + return namespace, nil +} + +func createUserDefinedNetwork( + f *framework.Framework, + ictx infraapi.Context, + namespace *corev1.Namespace, + name string, + cudnType bool, + networkSpec *udnv1.NetworkSpec, + networkLabels map[string]string, +) error { + var gvr schema.GroupVersionResource + var gvk schema.GroupVersionKind + var obj runtime.Object + var client dynamic.ResourceInterface + switch { + case cudnType: + gvr = clusterUDNGVR + gvk = schema.GroupVersionKind{ + Group: gvr.Group, + Version: gvr.Version, + Kind: "ClusterUserDefinedNetwork", + } + client = f.DynamicClient.Resource(gvr) + obj = &udnv1.ClusterUserDefinedNetwork{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: networkLabels, + }, + Spec: udnv1.ClusterUserDefinedNetworkSpec{ + NamespaceSelector: metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: "kubernetes.io/metadata.name", + Operator: metav1.LabelSelectorOpIn, + Values: []string{namespace.Name}, + }}}, + Network: *networkSpec, + }, + } + default: + gvr = udnGVR + gvk = schema.GroupVersionKind{ + Group: gvr.Group, + Version: gvr.Version, + Kind: "UserDefinedNetwork", + } + client = f.DynamicClient.Resource(gvr).Namespace(namespace.Name) + obj = &udnv1.UserDefinedNetwork{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace.Name, + Labels: networkLabels, + }, + Spec: udnv1.UserDefinedNetworkSpec{ + Topology: networkSpec.Topology, + Layer3: networkSpec.Layer3, + Layer2: networkSpec.Layer2, + }, + } + } + + unstructuredMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return fmt.Errorf("failed to convert network to unstructured: %w", err) + } + unstructuredObj := &unstructured.Unstructured{Object: unstructuredMap} + ok := unstructuredObj.GetObjectKind() + ok.SetGroupVersionKind(gvk) + + _, err = client.Create(context.Background(), unstructuredObj, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to convert 
network to unstructured: %w", err) + } + ictx.AddCleanUpFn(func() error { + return client.Delete(context.Background(), name, metav1.DeleteOptions{}) + }) + wait.PollUntilContextTimeout( + context.Background(), + time.Second, + 5*time.Second, + true, + func(ctx context.Context) (bool, error) { + err = networkReadyFunc(client, name)() + return err == nil, nil + }, + ) + if err != nil { + return fmt.Errorf("failed to wait for the network to be ready: %w", err) + } + + return nil +} + +func createRouteAdvertisements( + f *framework.Framework, + ictx infraapi.Context, + name string, + targetVRF string, + networkMatchLabels map[string]string, + frrconfigurationMatchLabels map[string]string, +) error { + ra := &rav1.RouteAdvertisements{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: rav1.RouteAdvertisementsSpec{ + NetworkSelectors: apitypes.NetworkSelectors{ + apitypes.NetworkSelector{ + NetworkSelectionType: apitypes.ClusterUserDefinedNetworks, + ClusterUserDefinedNetworkSelector: &apitypes.ClusterUserDefinedNetworkSelector{ + NetworkSelector: metav1.LabelSelector{ + MatchLabels: networkMatchLabels, + }, + }, + }, + }, + FRRConfigurationSelector: metav1.LabelSelector{ + MatchLabels: frrconfigurationMatchLabels, + }, + NodeSelector: metav1.LabelSelector{}, + Advertisements: []rav1.AdvertisementType{ + rav1.PodNetwork, + }, + TargetVRF: targetVRF, + }, + } + + raClient, err := raclientset.NewForConfig(f.ClientConfig()) + if err != nil { + return fmt.Errorf("failed to create RouteAdvertisements client: %w", err) + } + _, err = raClient.K8sV1().RouteAdvertisements().Create(context.TODO(), ra, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create RouteAdvertisements: %w", err) + } + ictx.AddCleanUpFn(func() error { + return raClient.K8sV1().RouteAdvertisements().Delete(context.Background(), name, metav1.DeleteOptions{}) + }) + wait.PollUntilContextTimeout( + context.Background(), + time.Second, + 5*time.Second, + true, + func(ctx 
context.Context) (bool, error) { + err = routeAdvertisementsReadyFunc(*raClient, name)() + return err == nil, nil + }, + ) + if err != nil { + return fmt.Errorf("failed to wait for the RouteAdvertisements to be ready: %w", err) + } + + return nil +} diff --git a/test/e2e/testdata/routeadvertisements/frr-k8s/frrconf.yaml.tmpl b/test/e2e/testdata/routeadvertisements/frr-k8s/frrconf.yaml.tmpl new file mode 100644 index 0000000000..ba4b4605ad --- /dev/null +++ b/test/e2e/testdata/routeadvertisements/frr-k8s/frrconf.yaml.tmpl @@ -0,0 +1,46 @@ +{{- define "frrconf.yaml" -}} +apiVersion: frrk8s.metallb.io/v1beta1 +kind: FRRConfiguration +metadata: + name: {{ .Name }} +{{- if .Labels }} + labels: +{{- range $k, $v := .Labels }} + {{ $k }}: {{ $v }} +{{- end }} +{{- end }} +spec: + bgp: + routers: +{{- range $v := .Routers }} + - asn: 64512 +{{- if .VRF }} + vrf: {{ .VRF }} +{{- end }} + neighbors: +{{- range .NeighborsIPv4 }} + - address: {{ . }} + asn: 64512 + disableMP: true + toReceive: + allowed: + mode: filtered + prefixes: +{{- range $v.NetworksIPv4 }} + - prefix: {{ . }} +{{- end }} +{{- end }} +{{- range .NeighborsIPv6 }} + - address: {{ . }} + asn: 64512 + disableMP: true + toReceive: + allowed: + mode: filtered + prefixes: +{{- range $v.NetworksIPv6 }} + - prefix: {{ . }} +{{- end }} +{{- end }} +{{- end }} +{{ end }} diff --git a/test/e2e/testdata/routeadvertisements/frr/daemons.tmpl b/test/e2e/testdata/routeadvertisements/frr/daemons.tmpl new file mode 100644 index 0000000000..5434bdf418 --- /dev/null +++ b/test/e2e/testdata/routeadvertisements/frr/daemons.tmpl @@ -0,0 +1,82 @@ +{{- define "daemons" -}} +# This file tells the frr package which daemons to start. +# +# Sample configurations for these daemons can be found in +# /usr/share/doc/frr/examples/. 
+# +# ATTENTION: +# +# When activating a daemon for the first time, a config file, even if it is +# empty, has to be present *and* be owned by the user and group "frr", else +# the daemon will not be started by /etc/init.d/frr. The permissions should +# be u=rw,g=r,o=. +# When using "vtysh" such a config file is also needed. It should be owned by +# group "frrvty" and set to ug=rw,o= though. Check /etc/pam.d/frr, too. +# +# The watchfrr and zebra daemons are always started. +# +bgpd=yes +ospfd=no +ospf6d=no +ripd=no +ripngd=no +isisd=no +pimd=no +ldpd=no +nhrpd=no +eigrpd=no +babeld=no +sharpd=no +pbrd=no +bfdd=yes +fabricd=no +vrrpd=no + +# +# If this option is set the /etc/init.d/frr script automatically loads +# the config via "vtysh -b" when the servers are started. +# Check /etc/pam.d/frr if you intend to use "vtysh"! +# +vtysh_enable=yes +zebra_options=" -A 127.0.0.1 -s 90000000" +bgpd_options=" -A 127.0.0.1" +ospfd_options=" -A 127.0.0.1" +ospf6d_options=" -A ::1" +ripd_options=" -A 127.0.0.1" +ripngd_options=" -A ::1" +isisd_options=" -A 127.0.0.1" +pimd_options=" -A 127.0.0.1" +ldpd_options=" -A 127.0.0.1" +nhrpd_options=" -A 127.0.0.1" +eigrpd_options=" -A 127.0.0.1" +babeld_options=" -A 127.0.0.1" +sharpd_options=" -A 127.0.0.1" +pbrd_options=" -A 127.0.0.1" +staticd_options="-A 127.0.0.1" +bfdd_options=" -A 127.0.0.1" +fabricd_options="-A 127.0.0.1" +vrrpd_options=" -A 127.0.0.1" + +# configuration profile +# +#frr_profile="traditional" +#frr_profile="datacenter" + +# +# This is the maximum number of FD's that will be available. +# Upon startup this is read by the control files and ulimit +# is called. Uncomment and use a reasonable value for your +# setup if you are expecting a large number of peers in +# say BGP. +#MAX_FDS=1024 + +# The list of daemons to watch is automatically generated by the init script. +#watchfrr_options="" + +# for debugging purposes, you can specify a "wrap" command to start instead +# of starting the daemon directly, e.g. 
to use valgrind on ospfd: +# ospfd_wrap="/usr/bin/valgrind" +# or you can use "all_wrap" for all daemons, e.g. to use perf record: +# all_wrap="/usr/bin/perf record --call-graph -" +# the normal daemon command is added to this at the end. +{{ end }} diff --git a/test/e2e/testdata/routeadvertisements/frr/frr.conf.tmpl b/test/e2e/testdata/routeadvertisements/frr/frr.conf.tmpl new file mode 100644 index 0000000000..a1beeab410 --- /dev/null +++ b/test/e2e/testdata/routeadvertisements/frr/frr.conf.tmpl @@ -0,0 +1,57 @@ +{{- define "frr.conf" -}} +debug zebra events +debug zebra nht detailed +debug zebra kernel +debug zebra rib detail +debug zebra nexthop detail +debug bgp keepalives +debug bgp neighbor-events +debug bgp nht +debug bgp updates +debug bgp zebra +log stdout debugging +log syslog debugging +log file /etc/frr/frr.log debugging +{{ range .Routers -}} +router bgp 64512 {{ if .VRF }}vrf {{ .VRF }}{{ end }} + no bgp default ipv4-unicast + no bgp default ipv6-unicast + no bgp network import-check +{{- range .NeighborsIPv4 }} + neighbor {{ . }} remote-as 64512 + # zebra has been observed to fail to start for unknown reasons, + # reduce timers to try to minimize delay impact on tests + neighbor {{ . }} timers connect 10 + neighbor {{ . }} timers 15 5 +{{- end }} +{{- range .NeighborsIPv6 }} + neighbor {{ . }} remote-as 64512 + neighbor {{ . }} timers connect 10 + neighbor {{ . }} timers 15 5 +{{- end }} +{{- if .NeighborsIPv4 }} + address-family ipv4 unicast +{{- range .NeighborsIPv4 }} + neighbor {{ . }} route-reflector-client + neighbor {{ . }} activate + neighbor {{ . }} next-hop-self +{{- end }} +{{- range .NetworksIPv4 }} + network {{ . }} +{{- end }} + exit-address-family +{{- end }} +{{- if .NeighborsIPv6 }} + address-family ipv6 unicast +{{- range .NeighborsIPv6 }} + neighbor {{ . }} route-reflector-client + neighbor {{ . }} activate + neighbor {{ . }} next-hop-self +{{- end }} +{{- range .NetworksIPv6 }} + network {{ . 
}} +{{- end }} + exit-address-family +{{- end }} +{{ end }} +{{ end }} diff --git a/test/e2e/util.go b/test/e2e/util.go index e31bffe724..07bb4f9f76 100644 --- a/test/e2e/util.go +++ b/test/e2e/util.go @@ -7,9 +7,11 @@ import ( "math/rand" "net" "os" + "path/filepath" "regexp" "strconv" "strings" + "text/template" "time" "github.com/onsi/ginkgo/v2" @@ -30,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" - clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/debug" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" @@ -167,7 +168,7 @@ func newAgnhostPodOnNode(name, nodeName string, labels map[string]string, comman } // IsIPv6Cluster returns true if the kubernetes default service is IPv6 -func IsIPv6Cluster(c clientset.Interface) bool { +func IsIPv6Cluster(c kubernetes.Interface) bool { // Get the ClusterIP of the kubernetes service created in the default namespace svc, err := c.CoreV1().Services(metav1.NamespaceDefault).Get(context.Background(), "kubernetes", metav1.GetOptions{}) if err != nil { @@ -656,7 +657,7 @@ func waitClusterHealthy(f *framework.Framework, numControlPlanePods int, control // successfully rolled out following an update. // // If allowedNotReadyNodes is -1, this method returns immediately without waiting. 
-func waitForRollout(c clientset.Interface, ns string, resource string, allowedNotReadyNodes int32, timeout time.Duration) error { +func waitForRollout(c kubernetes.Interface, ns string, resource string, allowedNotReadyNodes int32, timeout time.Duration) error { if allowedNotReadyNodes == -1 { return nil } @@ -1129,24 +1130,24 @@ func randStr(n int) string { return string(b) } -func isCIDRIPFamilySupported(cs clientset.Interface, cidr string) bool { +func isCIDRIPFamilySupported(cs kubernetes.Interface, cidr string) bool { ginkgo.GinkgoHelper() gomega.Expect(cidr).To(gomega.ContainSubstring("/")) isIPv6 := utilnet.IsIPv6CIDRString(cidr) return (isIPv4Supported(cs) && !isIPv6) || (isIPv6Supported(cs) && isIPv6) } -func isIPv4Supported(cs clientset.Interface) bool { +func isIPv4Supported(cs kubernetes.Interface) bool { v4, _ := getSupportedIPFamilies(cs) return v4 } -func isIPv6Supported(cs clientset.Interface) bool { +func isIPv6Supported(cs kubernetes.Interface) bool { _, v6 := getSupportedIPFamilies(cs) return v6 } -func getSupportedIPFamilies(cs clientset.Interface) (bool, bool) { +func getSupportedIPFamilies(cs kubernetes.Interface) (bool, bool) { n, err := e2enode.GetRandomReadySchedulableNode(context.TODO(), cs) framework.ExpectNoError(err, "must fetch a Ready Node") v4NodeAddrs := e2enode.GetAddressesByTypeAndFamily(n, v1.NodeInternalIP, v1.IPv4Protocol) @@ -1154,6 +1155,19 @@ func getSupportedIPFamilies(cs clientset.Interface) (bool, bool) { return len(v4NodeAddrs) > 0, len(v6NodeAddrs) > 0 } +func getSupportedIPFamiliesSlice(cs kubernetes.Interface) []utilnet.IPFamily { + v4, v6 := getSupportedIPFamilies(cs) + switch { + case v4 && v6: + return []utilnet.IPFamily{utilnet.IPv4, utilnet.IPv6} + case v4: + return []utilnet.IPFamily{utilnet.IPv4} + case v6: + return []utilnet.IPFamily{utilnet.IPv6} + } + return nil +} + func isInterconnectEnabled() bool { val, present := os.LookupEnv("OVN_ENABLE_INTERCONNECT") return present && val == "true" @@ -1281,7 +1295,7 
@@ func GetNodeIPv6LinkLocalAddressForEth0(nodeName string) (string, error) { // right-most match of the provided regex. Returns a map of subexpression name // to subexpression capture. A zero string name `""` maps to the full expression // capture. -func CaptureContainerOutput(ctx context.Context, c clientset.Interface, namespace, pod, container, regexpr string) (map[string]string, error) { +func CaptureContainerOutput(ctx context.Context, c kubernetes.Interface, namespace, pod, container, regexpr string) (map[string]string, error) { regex, err := regexp.Compile(regexpr) if err != nil { return nil, fmt.Errorf("failed to compile regexp %q: %w", regexpr, err) @@ -1352,9 +1366,62 @@ func matchIPv6StringFamily(ipStrings []string) (string, error) { return util.MatchIPStringFamily(true /*ipv6*/, ipStrings) } +func matchCIDRStringsByIPFamily(cidrs []string, families ...utilnet.IPFamily) []string { + var r []string + familySet := sets.New(families...) + for _, cidr := range cidrs { + if familySet.Has(utilnet.IPFamilyOfCIDRString(cidr)) { + r = append(r, cidr) + } + } + return r +} + +func splitCIDRStringsByIPFamily(cidrs []string) (ipv4 []string, ipv6 []string) { + for _, cidr := range cidrs { + switch { + case utilnet.IsIPv4CIDRString(cidr): + ipv4 = append(ipv4, cidr) + case utilnet.IsIPv6CIDRString(cidr): + ipv6 = append(ipv6, cidr) + } + } + return +} + +func splitIPStringsByIPFamily(ips []string) (ipv4 []string, ipv6 []string) { + for _, ip := range ips { + switch { + case utilnet.IsIPv4String(ip): + ipv4 = append(ipv4, ip) + case utilnet.IsIPv6String(ip): + ipv6 = append(ipv6, ip) + } + } + return +} + +func getFirstCIDROfFamily(family utilnet.IPFamily, ipnets []*net.IPNet) *net.IPNet { + for _, ipnet := range ipnets { + if utilnet.IPFamilyOfCIDR(ipnet) == family { + return ipnet + } + } + return nil +} + +func getFirstIPStringOfFamily(family utilnet.IPFamily, ips []string) string { + for _, ip := range ips { + if utilnet.IPFamilyOfString(ip) == family { + return ip 
+ } + } + return "" +} + // This is a replacement for e2epod.DeletePodWithWait(), which does not handle pods that // may be automatically restarted (https://issues.k8s.io/126785) -func deletePodWithWait(ctx context.Context, c clientset.Interface, pod *v1.Pod) error { +func deletePodWithWait(ctx context.Context, c kubernetes.Interface, pod *v1.Pod) error { if pod == nil { return nil } @@ -1382,7 +1449,7 @@ func deletePodWithWait(ctx context.Context, c clientset.Interface, pod *v1.Pod) // This is a replacement for e2epod.DeletePodWithWaitByName(), which does not handle pods // that may be automatically restarted (https://issues.k8s.io/126785) -func deletePodWithWaitByName(ctx context.Context, c clientset.Interface, podName, podNamespace string) error { +func deletePodWithWaitByName(ctx context.Context, c kubernetes.Interface, podName, podNamespace string) error { pod, err := c.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { @@ -1400,7 +1467,7 @@ func deletePodWithWaitByName(ctx context.Context, c clientset.Interface, podName // This is an alternative version of e2epod.WaitForPodNotFoundInNamespace(), which takes // a UID as well. 
-func waitForPodNotFoundInNamespace(ctx context.Context, c clientset.Interface, podName, ns string, uid types.UID, timeout time.Duration) error { +func waitForPodNotFoundInNamespace(ctx context.Context, c kubernetes.Interface, podName, ns string, uid types.UID, timeout time.Duration) error { err := framework.Gomega().Eventually(ctx, framework.HandleRetry(func(ctx context.Context) (*v1.Pod, error) { pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{}) if apierrors.IsNotFound(err) { @@ -1434,3 +1501,38 @@ func getAgnHostHTTPPortBindFullCMD(port uint16) []string { func getAgnHostHTTPPortBindCMDArgs(port uint16) []string { return []string{"netexec", fmt.Sprintf("--http-port=%d", port)} } + +// executeFileTemplate executes `name` template from the provided `templates` +// using `data`as input and writes the results to `directory/name` +func executeFileTemplate(templates *template.Template, directory, name string, data any) error { + f, err := os.OpenFile(filepath.Join(directory, name), os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + return err + } + defer f.Close() + err = templates.ExecuteTemplate(f, name, data) + if err != nil { + return err + } + return nil +} + +// podIPsForUserDefinedPrimaryNetwork returns the v4 or v6 IPs for a pod on the UDN +func podIPOfFamilyOnPrimaryNetwork(k8sClient kubernetes.Interface, podNamespace string, podName string, networkName string, family utilnet.IPFamily) (string, error) { + pod, err := k8sClient.CoreV1().Pods(podNamespace).Get(context.Background(), podName, metav1.GetOptions{}) + if err != nil { + return "", err + } + if networkName != "default" { + networkName = namespacedName(podNamespace, networkName) + } + netStatus, err := userDefinedNetworkStatus(pod, networkName) + if err != nil { + return "", err + } + ipnet := getFirstCIDROfFamily(family, netStatus.IPs) + if ipnet == nil { + return "", nil + } + return ipnet.IP.String(), nil +} From dfc14b4eb310d7c4b643408cb6bcb8ca9a634b28 Mon Sep 17 00:00:00 2001 
From: =?UTF-8?q?Jaime=20Caama=C3=B1o=20Ruiz?= Date: Tue, 15 Jul 2025 09:16:46 +0000 Subject: [PATCH 153/181] e2e: refactor podIPOfFamilyOnPrimaryNetwork into more reusable code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jaime Caamaño Ruiz --- test/e2e/egressip.go | 2 +- test/e2e/multihoming_utils.go | 36 +++++++++++++++++++++++ test/e2e/network_segmentation.go | 37 +++--------------------- test/e2e/network_segmentation_policy.go | 6 ++-- test/e2e/network_segmentation_utils.go | 22 ++++++++++++++ test/e2e/route_advertisements.go | 38 ++++++++++++------------- test/e2e/util.go | 20 ------------- 7 files changed, 85 insertions(+), 76 deletions(-) create mode 100644 test/e2e/network_segmentation_utils.go diff --git a/test/e2e/egressip.go b/test/e2e/egressip.go index 1bdc03adec..b2f75254f7 100644 --- a/test/e2e/egressip.go +++ b/test/e2e/egressip.go @@ -972,7 +972,7 @@ spec: if isClusterDefaultNetwork(netConfigParams) { pod2IP = getPodAddress(pod2Name, f.Namespace.Name) } else { - pod2IP, err = podIPsForUserDefinedPrimaryNetwork( + pod2IP, err = getPodAnnotationIPsForAttachmentByIndex( f.ClientSet, f.Namespace.Name, pod2Name, diff --git a/test/e2e/multihoming_utils.go b/test/e2e/multihoming_utils.go index 2fb10354d4..d7921cef00 100644 --- a/test/e2e/multihoming_utils.go +++ b/test/e2e/multihoming_utils.go @@ -704,3 +704,39 @@ func getNetworkGateway(cli *client.Client, networkName string) (string, error) { return "", fmt.Errorf("Gateway not found for network %q", networkName) } + +func getPodAnnotationForAttachment(pod *v1.Pod, attachmentName string) (PodAnnotation, error) { + podAnnotation, err := unmarshalPodAnnotation(pod.Annotations, attachmentName) + if err != nil { + return PodAnnotation{}, fmt.Errorf("failed to unmarshall annotations for pod %q: %v", pod.Name, err) + } + + return *podAnnotation, nil +} + +func getPodAnnotationIPsForAttachment(k8sClient clientset.Interface, podNamespace string, podName 
string, attachmentName string) ([]*net.IPNet, error) { + pod, err := k8sClient.CoreV1().Pods(podNamespace).Get(context.Background(), podName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + podAnnotation, err := getPodAnnotationForAttachment(pod, attachmentName) + if err != nil { + return nil, err + } + return podAnnotation.IPs, nil +} + +// getPodAnnotationIPsForAttachmentByIndex returns the pod annotation IP at the given index for the given attachment +func getPodAnnotationIPsForAttachmentByIndex(k8sClient clientset.Interface, podNamespace string, podName string, attachmentName string, index int) (string, error) { + ipnets, err := getPodAnnotationIPsForAttachment(k8sClient, podNamespace, podName, attachmentName) + if err != nil { + return "", err + } + if index >= len(ipnets) { + return "", fmt.Errorf("no IP at index %d for attachment %s on pod %s", index, attachmentName, namespacedName(podNamespace, podName)) + } + if len(ipnets) > 2 { + return "", fmt.Errorf("attachment for network %q with more than two IPs", attachmentName) + } + return ipnets[index].IP.String(), nil +} diff --git a/test/e2e/network_segmentation.go b/test/e2e/network_segmentation.go index dec466f423..bb667b2d94 100644 --- a/test/e2e/network_segmentation.go +++ b/test/e2e/network_segmentation.go @@ -175,7 +175,7 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { for i, cidr := range strings.Split(netConfig.cidr, ",") { if cidr != "" { By("asserting the server pod has an IP from the configured range") - serverIP, err = podIPsForUserDefinedPrimaryNetwork( + serverIP, err = getPodAnnotationIPsForAttachmentByIndex( cs, f.Namespace.Name, serverPodConfig.name, @@ -610,7 +610,7 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { By("creating pod " + podConfig.name + " in " + podConfig.namespace) pod := runUDNPod(cs, podConfig.namespace, podConfig, nil) pods = append(pods, pod) - podIP, err := 
getPodAnnotationIPsForAttachmentByIndex( cs, pod.Namespace, pod.Name, @@ -792,7 +792,7 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { By(fmt.Sprintf("asserting network works in namespace %s", config.namespace)) for i, cidr := range strings.Split(config.cidr, ",") { if cidr != "" { - serverIP, err = podIPsForUserDefinedPrimaryNetwork( + serverIP, err = getPodAnnotationIPsForAttachmentByIndex( cs, config.namespace, serverPodConfig.name, @@ -1756,7 +1756,7 @@ spec: clientPodConfig.nodeSelector = map[string]string{nodeHostnameKey: node2Name} runUDNPod(cs, f.Namespace.Name, serverPodConfig, nil) runUDNPod(cs, f.Namespace.Name, clientPodConfig, nil) - serverIP, err := podIPsForUserDefinedPrimaryNetwork(cs, f.Namespace.Name, serverPodConfig.name, namespacedName(f.Namespace.Name, netConfig.name), 0) + serverIP, err := getPodAnnotationIPsForAttachmentByIndex(cs, f.Namespace.Name, serverPodConfig.name, namespacedName(f.Namespace.Name, netConfig.name), 0) Expect(err).ShouldNot(HaveOccurred(), "UDN pod IP must be retrieved") By("restart OVNKube node pods on client and server Nodes and ensure connectivity") serverPod := getPod(f, serverPodConfig.name) @@ -2275,26 +2275,6 @@ func withNetworkAttachment(networks []nadapi.NetworkSelectionElement) podOption } } -// podIPsForUserDefinedPrimaryNetwork returns the v4 or v6 IPs for a pod on the UDN -func podIPsForUserDefinedPrimaryNetwork(k8sClient clientset.Interface, podNamespace string, podName string, attachmentName string, index int) (string, error) { - pod, err := k8sClient.CoreV1().Pods(podNamespace).Get(context.Background(), podName, metav1.GetOptions{}) - if err != nil { - return "", err - } - netStatus, err := userDefinedNetworkStatus(pod, attachmentName) - if err != nil { - return "", err - } - - if len(netStatus.IPs) == 0 { - return "", fmt.Errorf("attachment for network %q without IPs", attachmentName) - } - if len(netStatus.IPs) > 2 { - return "", fmt.Errorf("attachment for network %q 
with more than two IPs", attachmentName) - } - return netStatus.IPs[index].IP.String(), nil -} - func podIPsForDefaultNetwork(k8sClient clientset.Interface, podNamespace string, podName string) (string, string, error) { pod, err := k8sClient.CoreV1().Pods(podNamespace).Get(context.Background(), podName, metav1.GetOptions{}) if err != nil { @@ -2304,15 +2284,6 @@ func podIPsForDefaultNetwork(k8sClient clientset.Interface, podNamespace string, return ipv4, ipv6, nil } -func userDefinedNetworkStatus(pod *v1.Pod, networkName string) (PodAnnotation, error) { - netStatus, err := unmarshalPodAnnotation(pod.Annotations, networkName) - if err != nil { - return PodAnnotation{}, fmt.Errorf("failed to unmarshall annotations for pod %q: %v", pod.Name, err) - } - - return *netStatus, nil -} - func runUDNPod(cs clientset.Interface, namespace string, serverPodConfig podConfiguration, podSpecTweak func(*v1.Pod)) *v1.Pod { By(fmt.Sprintf("instantiating the UDN pod %s", serverPodConfig.name)) podSpec := generatePodSpec(serverPodConfig) diff --git a/test/e2e/network_segmentation_policy.go b/test/e2e/network_segmentation_policy.go index ffcf5f728a..2b71ebea5c 100644 --- a/test/e2e/network_segmentation_policy.go +++ b/test/e2e/network_segmentation_policy.go @@ -103,7 +103,7 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.Networ for i, cidr := range strings.Split(netConfig.cidr, ",") { if cidr != "" { ginkgo.By("asserting the server pod has an IP from the configured range") - serverIP, err = podIPsForUserDefinedPrimaryNetwork( + serverIP, err = getPodAnnotationIPsForAttachmentByIndex( cs, f.Namespace.Name, serverPodConfig.name, @@ -231,12 +231,12 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.Networ } subnet, err := getNetCIDRSubnet(cidr) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - allowServerPodIP, err = podIPsForUserDefinedPrimaryNetwork(cs, namespaceYellow, allowServerPodConfig.name, + allowServerPodIP, err = 
getPodAnnotationIPsForAttachmentByIndex(cs, namespaceYellow, allowServerPodConfig.name, namespacedName(namespaceYellow, netConfName), i) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("asserting the allow server pod IP %v is from the configured range %v", allowServerPodIP, cidr)) gomega.Expect(inRange(subnet, allowServerPodIP)).To(gomega.Succeed()) - denyServerPodIP, err = podIPsForUserDefinedPrimaryNetwork(cs, namespaceYellow, denyServerPodConfig.name, + denyServerPodIP, err = getPodAnnotationIPsForAttachmentByIndex(cs, namespaceYellow, denyServerPodConfig.name, namespacedName(namespaceYellow, netConfName), i) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("asserting the deny server pod IP %v is from the configured range %v", denyServerPodIP, cidr)) diff --git a/test/e2e/network_segmentation_utils.go b/test/e2e/network_segmentation_utils.go new file mode 100644 index 0000000000..960b6889c7 --- /dev/null +++ b/test/e2e/network_segmentation_utils.go @@ -0,0 +1,22 @@ +package e2e + +import ( + "k8s.io/client-go/kubernetes" + "k8s.io/utils/net" +) + +// getPodAnnotationIPsForPrimaryNetworkByIPFamily returns the IP of the given family for a pod on its primary network +func getPodAnnotationIPsForPrimaryNetworkByIPFamily(k8sClient kubernetes.Interface, podNamespace string, podName string, networkName string, family net.IPFamily) (string, error) { + if networkName != "default" { + networkName = namespacedName(podNamespace, networkName) + } + ipnets, err := getPodAnnotationIPsForAttachment(k8sClient, podNamespace, podName, networkName) + if err != nil { + return "", err + } + ipnet := getFirstCIDROfFamily(family, ipnets) + if ipnet == nil { + return "", nil + } + return ipnet.IP.String(), nil +} diff --git a/test/e2e/route_advertisements.go b/test/e2e/route_advertisements.go index 08e6f11965..3b868558b0 100644 --- a/test/e2e/route_advertisements.go +++ b/test/e2e/route_advertisements.go @@ -407,7 +407,7 @@ var _ = ginkgo.Describe("BGP: Pod to external server 
when CUDN network is advert ginkgo.By("queries to the external server are not SNATed (uses podIP)") for _, serverContainerIP := range serverContainerIPs { - podIP, err := podIPsForUserDefinedPrimaryNetwork(f.ClientSet, f.Namespace.Name, clientPod.Name, namespacedName(f.Namespace.Name, cUDN.Name), 0) + podIP, err := getPodAnnotationIPsForAttachmentByIndex(f.ClientSet, f.Namespace.Name, clientPod.Name, namespacedName(f.Namespace.Name, cUDN.Name), 0) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.ExpectNoError(err, fmt.Sprintf("Getting podIPs for pod %s failed: %v", clientPod.Name, err)) framework.Logf("Client pod IP address=%s", podIP) @@ -426,7 +426,7 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert 60*time.Second) framework.ExpectNoError(err, fmt.Sprintf("Testing pod to external traffic failed: %v", err)) if isIPv6Supported(f.ClientSet) && utilnet.IsIPv6String(serverContainerIP) { - podIP, err = podIPsForUserDefinedPrimaryNetwork(f.ClientSet, f.Namespace.Name, clientPod.Name, namespacedName(f.Namespace.Name, cUDN.Name), 1) + podIP, err = getPodAnnotationIPsForAttachmentByIndex(f.ClientSet, f.Namespace.Name, clientPod.Name, namespacedName(f.Namespace.Name, cUDN.Name), 1) // For IPv6 addresses, need to handle the brackets in the output outputIP := strings.TrimPrefix(strings.Split(stdout, "]:")[0], "[") gomega.Expect(outputIP).To(gomega.Equal(podIP), @@ -838,9 +838,9 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientPod := podsNetA[0] srvPod := podsNetA[1] - clientPodStatus, err := userDefinedNetworkStatus(clientPod, namespacedName(clientPod.Namespace, cudnATemplate.Name)) + clientPodStatus, err := getPodAnnotationForAttachment(clientPod, namespacedName(clientPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) - srvPodStatus, err := userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) + srvPodStatus, err := 
getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", clientPodStatus.IPs[ipFamilyIndex].IP.String(), false }), @@ -850,9 +850,9 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientPod := podsNetA[0] srvPod := podsNetA[2] - clientPodStatus, err := userDefinedNetworkStatus(clientPod, namespacedName(clientPod.Namespace, cudnATemplate.Name)) + clientPodStatus, err := getPodAnnotationForAttachment(clientPod, namespacedName(clientPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) - srvPodStatus, err := userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) + srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", clientPodStatus.IPs[ipFamilyIndex].IP.String(), false }), @@ -862,7 +862,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientPod := podsNetA[2] srvPod := podNetB - srvPodStatus, err := userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnBTemplate.Name)) + srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnBTemplate.Name)) framework.ExpectNoError(err) return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true }), @@ -873,7 +873,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientPod := podsNetA[0] srvPod := podNetB - srvPodStatus, err := userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnBTemplate.Name)) + 
srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnBTemplate.Name)) framework.ExpectNoError(err) return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true }), @@ -883,7 +883,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientPod := podNetDefault srvPod := podNetB - srvPodStatus, err := userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnBTemplate.Name)) + srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnBTemplate.Name)) framework.ExpectNoError(err) return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true }), @@ -893,7 +893,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientPod := podNetDefault srvPod := podsNetA[0] - srvPodStatus, err := userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) + srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true }), @@ -922,7 +922,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientNode := podsNetA[0].Spec.NodeName srvPod := podsNetA[0] - srvPodStatus, err := userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) + srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) return clientNode, "", net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true 
}), @@ -932,7 +932,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientNode := podsNetA[2].Spec.NodeName srvPod := podsNetA[0] - srvPodStatus, err := userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) + srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) return clientNode, "", net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true }), @@ -1301,7 +1301,7 @@ var _ = ginkgo.Describe("BGP: For a VRF-Lite configured network", func() { testPodToHostnameAndExpect(testPod, serverIP, bgpServerName) ginkgo.By("Ensuring a request from the pod is not SNATed") - testPodIP, err := podIPOfFamilyOnPrimaryNetwork( + testPodIP, err := getPodAnnotationIPsForPrimaryNetworkByIPFamily( f.ClientSet, testPod.Namespace, testPod.Name, @@ -1328,7 +1328,7 @@ var _ = ginkgo.Describe("BGP: For a VRF-Lite configured network", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) serverIP := getFirstIPStringOfFamily(family, []string{iface.IPv4, iface.IPv6}) gomega.Expect(serverIP).NotTo(gomega.BeEmpty()) - podIP, err := podIPOfFamilyOnPrimaryNetwork( + podIP, err := getPodAnnotationIPsForPrimaryNetworkByIPFamily( f.ClientSet, testPod.Namespace, testPod.Name, @@ -1381,7 +1381,7 @@ var _ = ginkgo.Describe("BGP: For a VRF-Lite configured network", func() { e2eskipper.Skipf("IP family %v not supported", family) } ginkgo.By("Ensuring a request from the external server cannot reach the pod") - podIP, err := podIPOfFamilyOnPrimaryNetwork( + podIP, err := getPodAnnotationIPsForPrimaryNetworkByIPFamily( f.ClientSet, testPod.Namespace, testPod.Name, @@ -1405,7 +1405,7 @@ var _ = ginkgo.Describe("BGP: For a VRF-Lite configured network", func() { e2eskipper.Skipf("IP family %v not supported", family) } ginkgo.By("Ensuring a request from the node cannot reach the tested network 
pod") - podIP, err := podIPOfFamilyOnPrimaryNetwork( + podIP, err := getPodAnnotationIPsForPrimaryNetworkByIPFamily( f.ClientSet, testPod.Namespace, testPod.Name, @@ -1448,7 +1448,7 @@ var _ = ginkgo.Describe("BGP: For a VRF-Lite configured network", func() { e2eskipper.Skipf("IP family %v not supported", family) } ginkgo.By("Ensuring a request from the first pod can reach the second pod") - otherPodIP, err := podIPOfFamilyOnPrimaryNetwork( + otherPodIP, err := getPodAnnotationIPsForPrimaryNetworkByIPFamily( f.ClientSet, otherPod.Namespace, otherPod.Name, @@ -1597,7 +1597,7 @@ var _ = ginkgo.Describe("BGP: For a VRF-Lite configured network", func() { e2eskipper.Skipf("IP family %v not supported", family) } ginkgo.By("Ensuring a request from the tested network pod cannot reach the other network pod") - otherPodIP, err := podIPOfFamilyOnPrimaryNetwork( + otherPodIP, err := getPodAnnotationIPsForPrimaryNetworkByIPFamily( f.ClientSet, otherPod.Namespace, otherPod.Name, @@ -1618,7 +1618,7 @@ var _ = ginkgo.Describe("BGP: For a VRF-Lite configured network", func() { e2eskipper.Skipf("IP family %v not supported", family) } ginkgo.By("Ensuring a request from the other network pod cannot reach the tested network pod") - testPodIP, err := podIPOfFamilyOnPrimaryNetwork( + testPodIP, err := getPodAnnotationIPsForPrimaryNetworkByIPFamily( f.ClientSet, testPod.Namespace, testPod.Name, diff --git a/test/e2e/util.go b/test/e2e/util.go index 07bb4f9f76..d03559e79e 100644 --- a/test/e2e/util.go +++ b/test/e2e/util.go @@ -1516,23 +1516,3 @@ func executeFileTemplate(templates *template.Template, directory, name string, d } return nil } - -// podIPsForUserDefinedPrimaryNetwork returns the v4 or v6 IPs for a pod on the UDN -func podIPOfFamilyOnPrimaryNetwork(k8sClient kubernetes.Interface, podNamespace string, podName string, networkName string, family utilnet.IPFamily) (string, error) { - pod, err := k8sClient.CoreV1().Pods(podNamespace).Get(context.Background(), podName, 
metav1.GetOptions{}) - if err != nil { - return "", err - } - if networkName != "default" { - networkName = namespacedName(podNamespace, networkName) - } - netStatus, err := userDefinedNetworkStatus(pod, networkName) - if err != nil { - return "", err - } - ipnet := getFirstCIDROfFamily(family, netStatus.IPs) - if ipnet == nil { - return "", nil - } - return ipnet.IP.String(), nil -} From e72e62b359f3566c9e1eb16087b7cba2e9f1bc07 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Tue, 22 Jul 2025 13:11:19 +0000 Subject: [PATCH 154/181] Remove unused portbinding code It's not used since e4f360cc82c4202175b8667398a4dcc7334aafd9 merged. Signed-off-by: Ihar Hrachyshka --- go-controller/pkg/libovsdb/ops/portbinding.go | 53 ------------------- 1 file changed, 53 deletions(-) delete mode 100644 go-controller/pkg/libovsdb/ops/portbinding.go diff --git a/go-controller/pkg/libovsdb/ops/portbinding.go b/go-controller/pkg/libovsdb/ops/portbinding.go deleted file mode 100644 index 0267a794c0..0000000000 --- a/go-controller/pkg/libovsdb/ops/portbinding.go +++ /dev/null @@ -1,53 +0,0 @@ -package ops - -import ( - "fmt" - - libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" - - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" -) - -// UpdatePortBindingSetChassis sets the chassis column of the 'portBinding' row so that the OVN thinks that -// the port binding 'portBinding' is bound on the chassis. Ideally its ovn-controller which claims/binds -// a port binding. But for a remote chassis, we have to bind it as we created the remote chassis -// record for the remote zone nodes. -// TODO (numans) remove this function once OVN supports binding a port binding for a remote -// chassis. 
-func UpdatePortBindingSetChassis(sbClient libovsdbclient.Client, portBinding *sbdb.PortBinding, chassis *sbdb.Chassis) error { - ch, err := GetChassis(sbClient, chassis) - if err != nil { - return fmt.Errorf("failed to get chassis id %s(%s), error: %v", chassis.Name, chassis.Hostname, err) - } - portBinding.Chassis = &ch.UUID - - opModel := operationModel{ - Model: portBinding, - OnModelUpdates: []interface{}{&portBinding.Chassis}, - ErrNotFound: true, - BulkOp: false, - } - - m := newModelClient(sbClient) - _, err = m.CreateOrUpdate(opModel) - return err -} - -// GetPortBinding looks up a portBinding in SBDB -func GetPortBinding(sbClient libovsdbclient.Client, portBinding *sbdb.PortBinding) (*sbdb.PortBinding, error) { - found := []*sbdb.PortBinding{} - opModel := operationModel{ - Model: portBinding, - ExistingResult: &found, - ErrNotFound: true, - BulkOp: false, - } - - m := newModelClient(sbClient) - err := m.Lookup(opModel) - if err != nil { - return nil, err - } - - return found[0], nil -} From 34b5a46c8f66ffd74e12c3351282e5ce576370c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Caama=C3=B1o=20Ruiz?= Date: Fri, 18 Jul 2025 13:28:50 +0000 Subject: [PATCH 155/181] e2e: test against L2 networks in VRF-Lite test cases MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jaime Caamaño Ruiz --- test/e2e/route_advertisements.go | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/test/e2e/route_advertisements.go b/test/e2e/route_advertisements.go index 3b868558b0..59b3eb4a0b 100644 --- a/test/e2e/route_advertisements.go +++ b/test/e2e/route_advertisements.go @@ -1078,6 +1078,7 @@ var _ = ginkgo.Describe("BGP: For a VRF-Lite configured network", func() { // long timeouts timeout = 240 * time.Second timeoutNOK = 10 * time.Second + pollingNOK = 1 * time.Second netexecPort = 8080 ) var netexecPortStr = fmt.Sprintf("%d", netexecPort) @@ -1132,7 +1133,7 @@ var _ = 
ginkgo.Describe("BGP: For a VRF-Lite configured network", func() { ip, _, err = net.SplitHostPort(ip) g.Expect(err).NotTo(gomega.HaveOccurred()) g.Expect(ip).To(gomega.Equal(expect)) - }).WithTimeout(timeout).WithPolling(framework.Poll).Should(gomega.Succeed()) + }).WithTimeout(timeout).WithPolling(pollingNOK).Should(gomega.Succeed()) } testPodToClientIPNOK := func(src *corev1.Pod, dstIP string) { gomega.Consistently(func(g gomega.Gomega) { @@ -1142,7 +1143,7 @@ var _ = ginkgo.Describe("BGP: For a VRF-Lite configured network", func() { fmt.Sprintf("curl --max-time 2 -g -q -s http://%s/clientip", net.JoinHostPort(dstIP, netexecPortStr)), ) g.Expect(err).To(gomega.HaveOccurred()) - }).WithTimeout(timeoutNOK).WithPolling(framework.Poll).Should(gomega.Succeed()) + }).WithTimeout(timeoutNOK).WithPolling(pollingNOK).Should(gomega.Succeed()) } testContainerToClientIPNOK := func(src, dstIP string) { gomega.Consistently(func(g gomega.Gomega) { @@ -1151,7 +1152,7 @@ var _ = ginkgo.Describe("BGP: For a VRF-Lite configured network", func() { []string{"curl", "--max-time", "2", "-g", "-q", "-s", fmt.Sprintf("http://%s/clientip", net.JoinHostPort(dstIP, netexecPortStr))}, ) g.Expect(err).To(gomega.HaveOccurred()) - }).WithTimeout(timeoutNOK).WithPolling(framework.Poll).Should(gomega.Succeed()) + }).WithTimeout(timeoutNOK).WithPolling(pollingNOK).Should(gomega.Succeed()) } const ( @@ -1202,6 +1203,13 @@ var _ = ginkgo.Describe("BGP: For a VRF-Lite configured network", func() { Subnets: []udnv1.Layer3Subnet{{CIDR: cudnCIDRv4, HostSubnet: 24}, {CIDR: cudnCIDRv6, HostSubnet: 64}}, }, } + layer2NetworkSpec = &udnv1.NetworkSpec{ + Topology: udnv1.NetworkTopologyLayer2, + Layer2: &udnv1.Layer2Config{ + Role: "Primary", + Subnets: udnv1.DualStackCIDRs{cudnCIDRv4, cudnCIDRv6}, + }, + } ) matchL3SubnetsByIPFamilies := func(families sets.Set[utilnet.IPFamily], in ...udnv1.Layer3Subnet) (out []udnv1.Layer3Subnet) { @@ -1223,6 +1231,7 @@ var _ = ginkgo.Describe("BGP: For a VRF-Lite 
configured network", func() { networksToTest := []ginkgo.TableEntry{ ginkgo.Entry("Layer 3", layer3NetworkSpec), + ginkgo.Entry("Layer 2", layer2NetworkSpec), } ginkgo.DescribeTableSubtree("When the tested network is of type", @@ -1248,7 +1257,13 @@ var _ = ginkgo.Describe("BGP: For a VRF-Lite configured network", func() { ginkgo.BeforeEach(func() { var err error - networkSpec.Layer3.Subnets = matchL3SubnetsByIPFamilies(ipFamilySet, networkSpec.Layer3.Subnets...) + + switch { + case networkSpec.Layer3 != nil: + networkSpec.Layer3.Subnets = matchL3SubnetsByIPFamilies(ipFamilySet, networkSpec.Layer3.Subnets...) + case networkSpec.Layer2 != nil: + networkSpec.Layer2.Subnets = matchL2SubnetsByIPFamilies(ipFamilySet, networkSpec.Layer2.Subnets...) + } ginkgo.By("Configuring the namespace and network") testNamespace, err = createNamespaceWithPrimaryNetworkOfType(f, ictx, testBaseName, testNetworkName, cudnAdvertisedVRFLite, networkSpec) @@ -1534,6 +1549,8 @@ var _ = ginkgo.Describe("BGP: For a VRF-Lite configured network", func() { ginkgo.Entry("Layer 3 CUDN advertised", cudnAdvertised, otherLayer3NetworkSpec), ginkgo.Entry("Layer 3 CUDN advertised VRF-Lite", cudnAdvertisedVRFLite, otherLayer3NetworkSpec), ginkgo.Entry("Layer 2 UDN non advertised", udn, otherLayer2NetworkSpec), + ginkgo.Entry("Layer 2 CUDN advertised", cudnAdvertised, otherLayer2NetworkSpec), + ginkgo.Entry("Layer 2 CUDN advertised VRF-Lite", cudnAdvertisedVRFLite, otherLayer2NetworkSpec), } ginkgo.DescribeTableSubtree("Of type", From d127877f72b68a6ffd0945cf4f2e38925096ab47 Mon Sep 17 00:00:00 2001 From: Miguel Duarte Barroso Date: Thu, 12 Jun 2025 18:42:04 +0200 Subject: [PATCH 156/181] build, vendor: consume ipamclaims v0.5.0-alpha This version of IPAMClaims features a conditions array in the status sub-resource (where IPAM CNIs can report errors), along with a place to register the name of the pod which holds the claim (for better traceability). 
Signed-off-by: Miguel Duarte Barroso --- go-controller/go.mod | 8 +- go-controller/go.sum | 16 +- .../apis/clientset/versioned/clientset.go | 6 +- .../versioned/fake/clientset_generated.go | 8 +- .../apis/clientset/versioned/fake/doc.go | 2 +- .../apis/clientset/versioned/fake/register.go | 2 +- .../apis/clientset/versioned/scheme/doc.go | 2 +- .../clientset/versioned/scheme/register.go | 2 +- .../typed/ipamclaims/v1alpha1/doc.go | 2 +- .../typed/ipamclaims/v1alpha1/fake/doc.go | 2 +- .../v1alpha1/fake/fake_ipamclaim.go | 135 +++----------- .../v1alpha1/fake/fake_ipamclaims_client.go | 4 +- .../v1alpha1/generated_expansion.go | 2 +- .../typed/ipamclaims/v1alpha1/ipamclaim.go | 165 +++--------------- .../ipamclaims/v1alpha1/ipamclaims_client.go | 12 +- .../informers/externalversions/factory.go | 13 +- .../informers/externalversions/generic.go | 4 +- .../internalinterfaces/factory_interfaces.go | 2 +- .../externalversions/ipamclaims/interface.go | 2 +- .../ipamclaims/v1alpha1/interface.go | 2 +- .../ipamclaims/v1alpha1/ipamclaim.go | 18 +- .../v1alpha1/expansion_generated.go | 2 +- .../listers/ipamclaims/v1alpha1/ipamclaim.go | 53 ++---- .../pkg/crd/ipamclaims/v1alpha1/types.go | 17 +- .../v1alpha1/zz_generated.deepcopy.go | 24 +++ go-controller/vendor/modules.txt | 10 +- test/e2e/go.mod | 8 +- test/e2e/go.sum | 16 +- 28 files changed, 171 insertions(+), 368 deletions(-) diff --git a/go-controller/go.mod b/go-controller/go.mod index 4b12ddd9b5..72e89c3b7a 100644 --- a/go-controller/go.mod +++ b/go-controller/go.mod @@ -23,7 +23,7 @@ require ( github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.0 github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47 - github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha + github.com/k8snetworkplumbingwg/ipamclaims v0.5.0-alpha github.com/k8snetworkplumbingwg/multi-networkpolicy v1.0.1 github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.6.0 github.com/k8snetworkplumbingwg/sriovnet 
v1.2.1-0.20230427090635-4929697df2dc @@ -57,9 +57,9 @@ require ( gopkg.in/fsnotify/fsnotify.v1 v1.4.7 gopkg.in/gcfg.v1 v1.2.3 gopkg.in/natefinch/lumberjack.v2 v2.2.1 - k8s.io/api v0.32.3 - k8s.io/apimachinery v0.32.3 - k8s.io/client-go v0.32.3 + k8s.io/api v0.32.5 + k8s.io/apimachinery v0.32.5 + k8s.io/client-go v0.32.5 k8s.io/component-helpers v0.32.3 k8s.io/klog/v2 v2.130.1 k8s.io/kubernetes v1.32.6 diff --git a/go-controller/go.sum b/go-controller/go.sum index 436b9bad43..2af1883f7e 100644 --- a/go-controller/go.sum +++ b/go-controller/go.sum @@ -494,8 +494,8 @@ github.com/juju/version v0.0.0-20161031051906-1f41e27e54f2/go.mod h1:kE8gK5X0CIm github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47 h1:iSncnlC+rtlNOIpPa3fbqQMhpTscGJIlkiWaPl1VcS4= github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47/go.mod h1:SPaDIyUmwN03Bgn0u/mhoiE4o/+koeKh11VUsdsUX0U= -github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha h1:ss+EP77GlQmh90hGKpnAG4Q3VVxRlB7GoncemaPtO4g= -github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha/go.mod h1:qlR+sKxQ2OGfwhFCuXSd7rJ/GgC38vQBeHKQ7f2YnpI= +github.com/k8snetworkplumbingwg/ipamclaims v0.5.0-alpha h1:b3iHeks/KTzhG2dNanaUZcFEJwJbYBZY16jxCaVv9i8= +github.com/k8snetworkplumbingwg/ipamclaims v0.5.0-alpha/go.mod h1:MGaMX1tJ7MlHDee4/xmqp3guQh+eDiuCLAauqD9K11Q= github.com/k8snetworkplumbingwg/multi-networkpolicy v1.0.1 h1:Egj1hEVYNXWFlKpgzAXxe/2o8VNiVcAJLrKzlinILQo= github.com/k8snetworkplumbingwg/multi-networkpolicy v1.0.1/go.mod h1:kEJ4WM849yNmXekuSXLRwb+LaZ9usC06O8JgoAIq+f4= github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.6.0 h1:BT3ghAY0q7lWib9rz+tVXDFkm27dJV6SLCn7TunZwo4= @@ -1317,8 +1317,8 @@ k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.22.7/go.mod 
h1:7hejA1BgBEiSsWljUyRkIjj+AISXO16IwsaDgFjJsQE= k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= -k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= -k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= +k8s.io/api v0.32.5 h1:uqjjsYo1kTJr5NIcoIaP9F+TgXgADH7nKQx91FDAhtk= +k8s.io/api v0.32.5/go.mod h1:bXXFU3fGCZ/eFMZvfHZC69PeGbXEL4zzjuPVzOxHF64= k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY= k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= @@ -1326,8 +1326,8 @@ k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRp k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apimachinery v0.22.7/go.mod h1:ZvVLP5iLhwVFg2Yx9Gh5W0um0DUauExbRhe+2Z8I1EU= k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= -k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/apimachinery v0.32.5 h1:6We3aJ6crC0ap8EhsEXcgX3LpI6SEjubpiOMXLROwPM= +k8s.io/apimachinery v0.32.5/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= @@ -1335,8 +1335,8 @@ k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/client-go v0.22.7/go.mod h1:pGU/tWSzzvsYT7M3npHhoZ3Jh9qJTTIvFvDtWuW31dw= -k8s.io/client-go v0.32.3 
h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= -k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= +k8s.io/client-go v0.32.5 h1:huFmQMzgWu0z4kbWsuZci+Gt4Fo72I4CcrvhToZ/Qp0= +k8s.io/client-go v0.32.5/go.mod h1:Qchw6f9WIVrur7DKojAHpRgGLcANT0RLIvF39Jz58xA= k8s.io/code-generator v0.22.7/go.mod h1:iOZwYADSgFPNGWfqHFfg1V0TNJnl1t0WyZluQp4baqU= k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/clientset.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/clientset.go index f374a5c511..6f4518f097 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/clientset.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,8 +19,8 @@ limitations under the License. 
package versioned import ( - "fmt" - "net/http" + fmt "fmt" + http "net/http" k8sv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1" discovery "k8s.io/client-go/discovery" diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/clientset_generated.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/clientset_generated.go index a67d14acb8..eb8da4c265 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/clientset_generated.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -31,8 +31,12 @@ import ( // NewSimpleClientset returns a clientset that will respond with the provided objects. // It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement // for a real clientset and is mostly useful in simple unit tests. +// +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). 
func NewSimpleClientset(objects ...runtime.Object) *Clientset { o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) for _, obj := range objects { diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/doc.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/doc.go index 44e8061b76..64c6b6be35 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/doc.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/register.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/register.go index 3cdc1ac5b1..e6f64d71b9 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/register.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/doc.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/doc.go index 743391c14b..8514bb55f2 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/doc.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/register.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/register.go index d6a1737fdb..522a30ca3e 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/register.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/doc.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/doc.go index faa8377ce2..19ad6aefe7 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/doc.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/doc.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/doc.go index b38fd4c55d..33fd99c15d 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/doc.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaim.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaim.go index 00db990cf9..e410e0b7e3 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaim.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaim.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,123 +19,32 @@ limitations under the License. 
package fake import ( - "context" - v1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" + ipamclaimsv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1" + gentype "k8s.io/client-go/gentype" ) -// FakeIPAMClaims implements IPAMClaimInterface -type FakeIPAMClaims struct { +// fakeIPAMClaims implements IPAMClaimInterface +type fakeIPAMClaims struct { + *gentype.FakeClientWithList[*v1alpha1.IPAMClaim, *v1alpha1.IPAMClaimList] Fake *FakeK8sV1alpha1 - ns string -} - -var ipamclaimsResource = v1alpha1.SchemeGroupVersion.WithResource("ipamclaims") - -var ipamclaimsKind = v1alpha1.SchemeGroupVersion.WithKind("IPAMClaim") - -// Get takes name of the iPAMClaim, and returns the corresponding iPAMClaim object, and an error if there is any. -func (c *FakeIPAMClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IPAMClaim, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(ipamclaimsResource, c.ns, name), &v1alpha1.IPAMClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.IPAMClaim), err -} - -// List takes label and field selectors, and returns the list of IPAMClaims that match those selectors. -func (c *FakeIPAMClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IPAMClaimList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(ipamclaimsResource, ipamclaimsKind, c.ns, opts), &v1alpha1.IPAMClaimList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.IPAMClaimList{ListMeta: obj.(*v1alpha1.IPAMClaimList).ListMeta} - for _, item := range obj.(*v1alpha1.IPAMClaimList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested iPAMClaims. -func (c *FakeIPAMClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(ipamclaimsResource, c.ns, opts)) - -} - -// Create takes the representation of a iPAMClaim and creates it. Returns the server's representation of the iPAMClaim, and an error, if there is any. -func (c *FakeIPAMClaims) Create(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.CreateOptions) (result *v1alpha1.IPAMClaim, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(ipamclaimsResource, c.ns, iPAMClaim), &v1alpha1.IPAMClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.IPAMClaim), err -} - -// Update takes the representation of a iPAMClaim and updates it. Returns the server's representation of the iPAMClaim, and an error, if there is any. -func (c *FakeIPAMClaims) Update(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.UpdateOptions) (result *v1alpha1.IPAMClaim, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(ipamclaimsResource, c.ns, iPAMClaim), &v1alpha1.IPAMClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.IPAMClaim), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakeIPAMClaims) UpdateStatus(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.UpdateOptions) (*v1alpha1.IPAMClaim, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(ipamclaimsResource, "status", c.ns, iPAMClaim), &v1alpha1.IPAMClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.IPAMClaim), err -} - -// Delete takes name of the iPAMClaim and deletes it. Returns an error if one occurs. -func (c *FakeIPAMClaims) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(ipamclaimsResource, c.ns, name, opts), &v1alpha1.IPAMClaim{}) - - return err } -// DeleteCollection deletes a collection of objects. -func (c *FakeIPAMClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(ipamclaimsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.IPAMClaimList{}) - return err -} - -// Patch applies the patch and returns the patched iPAMClaim. -func (c *FakeIPAMClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAMClaim, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(ipamclaimsResource, c.ns, name, pt, data, subresources...), &v1alpha1.IPAMClaim{}) - - if obj == nil { - return nil, err +func newFakeIPAMClaims(fake *FakeK8sV1alpha1, namespace string) ipamclaimsv1alpha1.IPAMClaimInterface { + return &fakeIPAMClaims{ + gentype.NewFakeClientWithList[*v1alpha1.IPAMClaim, *v1alpha1.IPAMClaimList]( + fake.Fake, + namespace, + v1alpha1.SchemeGroupVersion.WithResource("ipamclaims"), + v1alpha1.SchemeGroupVersion.WithKind("IPAMClaim"), + func() *v1alpha1.IPAMClaim { return &v1alpha1.IPAMClaim{} }, + func() *v1alpha1.IPAMClaimList { return &v1alpha1.IPAMClaimList{} }, + func(dst, src *v1alpha1.IPAMClaimList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.IPAMClaimList) []*v1alpha1.IPAMClaim { return gentype.ToPointerSlice(list.Items) }, + func(list *v1alpha1.IPAMClaimList, items []*v1alpha1.IPAMClaim) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha1.IPAMClaim), err } diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaims_client.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaims_client.go index adc0c545ed..65c4b4c979 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaims_client.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaims_client.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -29,7 +29,7 @@ type FakeK8sV1alpha1 struct { } func (c *FakeK8sV1alpha1) IPAMClaims(namespace string) v1alpha1.IPAMClaimInterface { - return &FakeIPAMClaims{c, namespace} + return newFakeIPAMClaims(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/generated_expansion.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/generated_expansion.go index c5c3006e82..b70abd3102 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/generated_expansion.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaim.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaim.go index bfc26c0c5a..f4d088c1b9 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaim.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaim.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,15 +19,14 @@ limitations under the License. package v1alpha1 import ( - "context" - "time" + context "context" - v1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" + ipamclaimsv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" scheme "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // IPAMClaimsGetter has a method to return a IPAMClaimInterface. @@ -38,158 +37,34 @@ type IPAMClaimsGetter interface { // IPAMClaimInterface has methods to work with IPAMClaim resources. 
type IPAMClaimInterface interface { - Create(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.CreateOptions) (*v1alpha1.IPAMClaim, error) - Update(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.UpdateOptions) (*v1alpha1.IPAMClaim, error) - UpdateStatus(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.UpdateOptions) (*v1alpha1.IPAMClaim, error) + Create(ctx context.Context, iPAMClaim *ipamclaimsv1alpha1.IPAMClaim, opts v1.CreateOptions) (*ipamclaimsv1alpha1.IPAMClaim, error) + Update(ctx context.Context, iPAMClaim *ipamclaimsv1alpha1.IPAMClaim, opts v1.UpdateOptions) (*ipamclaimsv1alpha1.IPAMClaim, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, iPAMClaim *ipamclaimsv1alpha1.IPAMClaim, opts v1.UpdateOptions) (*ipamclaimsv1alpha1.IPAMClaim, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.IPAMClaim, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.IPAMClaimList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*ipamclaimsv1alpha1.IPAMClaim, error) + List(ctx context.Context, opts v1.ListOptions) (*ipamclaimsv1alpha1.IPAMClaimList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAMClaim, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ipamclaimsv1alpha1.IPAMClaim, err error) IPAMClaimExpansion } // iPAMClaims implements IPAMClaimInterface type iPAMClaims struct { - client rest.Interface - ns string + *gentype.ClientWithList[*ipamclaimsv1alpha1.IPAMClaim, 
*ipamclaimsv1alpha1.IPAMClaimList] } // newIPAMClaims returns a IPAMClaims func newIPAMClaims(c *K8sV1alpha1Client, namespace string) *iPAMClaims { return &iPAMClaims{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithList[*ipamclaimsv1alpha1.IPAMClaim, *ipamclaimsv1alpha1.IPAMClaimList]( + "ipamclaims", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *ipamclaimsv1alpha1.IPAMClaim { return &ipamclaimsv1alpha1.IPAMClaim{} }, + func() *ipamclaimsv1alpha1.IPAMClaimList { return &ipamclaimsv1alpha1.IPAMClaimList{} }, + ), } } - -// Get takes name of the iPAMClaim, and returns the corresponding iPAMClaim object, and an error if there is any. -func (c *iPAMClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IPAMClaim, err error) { - result = &v1alpha1.IPAMClaim{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ipamclaims"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of IPAMClaims that match those selectors. -func (c *iPAMClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IPAMClaimList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.IPAMClaimList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ipamclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested iPAMClaims. -func (c *iPAMClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ipamclaims"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a iPAMClaim and creates it. Returns the server's representation of the iPAMClaim, and an error, if there is any. -func (c *iPAMClaims) Create(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.CreateOptions) (result *v1alpha1.IPAMClaim, err error) { - result = &v1alpha1.IPAMClaim{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ipamclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(iPAMClaim). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a iPAMClaim and updates it. Returns the server's representation of the iPAMClaim, and an error, if there is any. -func (c *iPAMClaims) Update(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.UpdateOptions) (result *v1alpha1.IPAMClaim, err error) { - result = &v1alpha1.IPAMClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ipamclaims"). - Name(iPAMClaim.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(iPAMClaim). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *iPAMClaims) UpdateStatus(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.UpdateOptions) (result *v1alpha1.IPAMClaim, err error) { - result = &v1alpha1.IPAMClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ipamclaims"). - Name(iPAMClaim.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(iPAMClaim). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the iPAMClaim and deletes it. Returns an error if one occurs. -func (c *iPAMClaims) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ipamclaims"). - Name(name). - Body(&opts). - Do(ctx). 
- Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *iPAMClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("ipamclaims"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched iPAMClaim. -func (c *iPAMClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAMClaim, err error) { - result = &v1alpha1.IPAMClaim{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ipamclaims"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaims_client.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaims_client.go index d6b8684d89..3545777356 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaims_client.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaims_client.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" - "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme" + ipamclaimsv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" + scheme "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *K8sV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := ipamclaimsv1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/factory.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/factory.go index 8ba00a69fc..7efe7e95a6 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/factory.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -42,6 +42,7 @@ type sharedInformerFactory struct { lock sync.Mutex defaultResync time.Duration customResync map[reflect.Type]time.Duration + transform cache.TransformFunc informers map[reflect.Type]cache.SharedIndexInformer // startedInformers is used for tracking which informers have been started. @@ -80,6 +81,14 @@ func WithNamespace(namespace string) SharedInformerOption { } } +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + // NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { return NewSharedInformerFactoryWithOptions(client, defaultResync) @@ -184,6 +193,7 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal } informer = newFunc(f.client, resyncPeriod) + informer.SetTransform(f.transform) f.informers[informerType] = informer return informer @@ -218,6 +228,7 @@ type SharedInformerFactory interface { // Start initializes all requested informers. They are handled in goroutines // which run until the stop channel gets closed. + // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync. Start(stopCh <-chan struct{}) // Shutdown marks a factory as shutting down. 
At that point no new diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/generic.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/generic.go index 94f709e9bb..d5dabd6983 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/generic.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,7 +19,7 @@ limitations under the License. package externalversions import ( - "fmt" + fmt "fmt" v1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/internalinterfaces/factory_interfaces.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/internalinterfaces/factory_interfaces.go index 8d1429d5f3..cb5a445987 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except 
in compliance with the License. diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/interface.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/interface.go index c93d99e4be..b2cad1c067 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/interface.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/interface.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/interface.go index 1ab51a9ed7..455310ee4d 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/interface.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/ipamclaim.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/ipamclaim.go index fd46dc78b7..8caa586ce5 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/ipamclaim.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/ipamclaim.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,13 +19,13 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - ipamclaimsv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" + crdipamclaimsv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" versioned "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned" internalinterfaces "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1" + ipamclaimsv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -36,7 +36,7 @@ import ( // IPAMClaims. 
type IPAMClaimInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.IPAMClaimLister + Lister() ipamclaimsv1alpha1.IPAMClaimLister } type iPAMClaimInformer struct { @@ -71,7 +71,7 @@ func NewFilteredIPAMClaimInformer(client versioned.Interface, namespace string, return client.K8sV1alpha1().IPAMClaims(namespace).Watch(context.TODO(), options) }, }, - &ipamclaimsv1alpha1.IPAMClaim{}, + &crdipamclaimsv1alpha1.IPAMClaim{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *iPAMClaimInformer) defaultInformer(client versioned.Interface, resyncPe } func (f *iPAMClaimInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&ipamclaimsv1alpha1.IPAMClaim{}, f.defaultInformer) + return f.factory.InformerFor(&crdipamclaimsv1alpha1.IPAMClaim{}, f.defaultInformer) } -func (f *iPAMClaimInformer) Lister() v1alpha1.IPAMClaimLister { - return v1alpha1.NewIPAMClaimLister(f.Informer().GetIndexer()) +func (f *iPAMClaimInformer) Lister() ipamclaimsv1alpha1.IPAMClaimLister { + return ipamclaimsv1alpha1.NewIPAMClaimLister(f.Informer().GetIndexer()) } diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/expansion_generated.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/expansion_generated.go index 086ab4ab65..bb37e41381 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/expansion_generated.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/ipamclaim.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/ipamclaim.go index 409fc70d06..474e11b48e 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/ipamclaim.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/ipamclaim.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + ipamclaimsv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // IPAMClaimLister helps list IPAMClaims. @@ -30,7 +30,7 @@ import ( type IPAMClaimLister interface { // List lists all IPAMClaims in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.IPAMClaim, err error) + List(selector labels.Selector) (ret []*ipamclaimsv1alpha1.IPAMClaim, err error) // IPAMClaims returns an object that can list and get IPAMClaims. IPAMClaims(namespace string) IPAMClaimNamespaceLister IPAMClaimListerExpansion @@ -38,25 +38,17 @@ type IPAMClaimLister interface { // iPAMClaimLister implements the IPAMClaimLister interface. 
type iPAMClaimLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*ipamclaimsv1alpha1.IPAMClaim] } // NewIPAMClaimLister returns a new IPAMClaimLister. func NewIPAMClaimLister(indexer cache.Indexer) IPAMClaimLister { - return &iPAMClaimLister{indexer: indexer} -} - -// List lists all IPAMClaims in the indexer. -func (s *iPAMClaimLister) List(selector labels.Selector) (ret []*v1alpha1.IPAMClaim, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.IPAMClaim)) - }) - return ret, err + return &iPAMClaimLister{listers.New[*ipamclaimsv1alpha1.IPAMClaim](indexer, ipamclaimsv1alpha1.Resource("ipamclaim"))} } // IPAMClaims returns an object that can list and get IPAMClaims. func (s *iPAMClaimLister) IPAMClaims(namespace string) IPAMClaimNamespaceLister { - return iPAMClaimNamespaceLister{indexer: s.indexer, namespace: namespace} + return iPAMClaimNamespaceLister{listers.NewNamespaced[*ipamclaimsv1alpha1.IPAMClaim](s.ResourceIndexer, namespace)} } // IPAMClaimNamespaceLister helps list and get IPAMClaims. @@ -64,36 +56,15 @@ func (s *iPAMClaimLister) IPAMClaims(namespace string) IPAMClaimNamespaceLister type IPAMClaimNamespaceLister interface { // List lists all IPAMClaims in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.IPAMClaim, err error) + List(selector labels.Selector) (ret []*ipamclaimsv1alpha1.IPAMClaim, err error) // Get retrieves the IPAMClaim from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.IPAMClaim, error) + Get(name string) (*ipamclaimsv1alpha1.IPAMClaim, error) IPAMClaimNamespaceListerExpansion } // iPAMClaimNamespaceLister implements the IPAMClaimNamespaceLister // interface. 
type iPAMClaimNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all IPAMClaims in the indexer for a given namespace. -func (s iPAMClaimNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.IPAMClaim, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.IPAMClaim)) - }) - return ret, err -} - -// Get retrieves the IPAMClaim from the indexer for a given namespace and name. -func (s iPAMClaimNamespaceLister) Get(name string) (*v1alpha1.IPAMClaim, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("ipamclaim"), name) - } - return obj.(*v1alpha1.IPAMClaim), nil + listers.ResourceIndexer[*ipamclaimsv1alpha1.IPAMClaim] } diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/types.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/types.go index ca94219215..bb4fc0e97d 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/types.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/types.go @@ -4,13 +4,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -//go:generate go run sigs.k8s.io/controller-tools/cmd/controller-gen@v0.13.0 paths=./... object crd output:artifacts:code=./,config=../../../../artifacts +//go:generate go run sigs.k8s.io/controller-tools/cmd/controller-gen@v0.16.5 paths=./... 
object crd output:artifacts:code=./,config=../../../../artifacts -//go:generate go run k8s.io/code-generator/cmd/client-gen@v0.28.0 client-gen --go-header-file ../../../../hack/custom-boilerplate.go.txt --clientset-name versioned --input-base "" --input github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1 --output-package github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset .. +//go:generate go run k8s.io/code-generator/cmd/client-gen@v0.32.5 --go-header-file ../../../../hack/custom-boilerplate.go.txt --clientset-name versioned --input-base "" --input github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1 --output-pkg github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset --output-dir ./apis/clientset .. -//go:generate go run k8s.io/code-generator/cmd/lister-gen@v0.28.0 lister-gen --go-header-file ../../../../hack/custom-boilerplate.go.txt --input-dirs github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1 --output-package github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers .. +//go:generate go run k8s.io/code-generator/cmd/lister-gen@v0.32.5 --go-header-file ../../../../hack/custom-boilerplate.go.txt --output-pkg github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers --output-dir ./apis/listers ./ -//go:generate go run k8s.io/code-generator/cmd/informer-gen@v0.28.0 informer-gen --go-header-file ../../../../hack/custom-boilerplate.go.txt --input-dirs github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1 --versioned-clientset-package github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned --listers-package github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers --output-package github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers .. 
+//go:generate go run k8s.io/code-generator/cmd/informer-gen@v0.32.5 --go-header-file ../../../../hack/custom-boilerplate.go.txt --versioned-clientset-package github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned --listers-package github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers --output-pkg github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers --output-dir ./apis/informers ./ // +genclient // +kubebuilder:object:root=true @@ -35,9 +35,14 @@ type IPAMClaimSpec struct { Interface string `json:"interface"` } +// IPAMClaimStatus contains the observed status of the IPAMClaim. type IPAMClaimStatus struct { // The list of IP addresses (v4, v6) that were allocated for the pod interface IPs []string `json:"ips"` + // The name of the pod holding the IPAMClaim + OwnerPod OwnerPod `json:"ownerPod,omitempty"` + // Conditions contains details for one aspect of the current state of this API Resource + Conditions []metav1.Condition `json:"conditions,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -47,3 +52,7 @@ type IPAMClaimList struct { metav1.ListMeta `json:"metadata,omitempty"` Items []IPAMClaim `json:"items"` } + +type OwnerPod struct { + Name string `json:"name"` +} diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/zz_generated.deepcopy.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/zz_generated.deepcopy.go index 737efd7a84..d68e38c3ee 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/zz_generated.deepcopy.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/zz_generated.deepcopy.go @@ -5,6 +5,7 @@ package v1alpha1 import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -90,6 +91,14 @@ func 
(in *IPAMClaimStatus) DeepCopyInto(out *IPAMClaimStatus) { *out = make([]string, len(*in)) copy(*out, *in) } + out.OwnerPod = in.OwnerPod + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMClaimStatus. @@ -101,3 +110,18 @@ func (in *IPAMClaimStatus) DeepCopy() *IPAMClaimStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OwnerPod) DeepCopyInto(out *OwnerPod) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OwnerPod. +func (in *OwnerPod) DeepCopy() *OwnerPod { + if in == nil { + return nil + } + out := new(OwnerPod) + in.DeepCopyInto(out) + return out +} diff --git a/go-controller/vendor/modules.txt b/go-controller/vendor/modules.txt index 8117a157c9..7636490960 100644 --- a/go-controller/vendor/modules.txt +++ b/go-controller/vendor/modules.txt @@ -197,8 +197,8 @@ github.com/juju/errors # github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47 ## explicit; go 1.17 github.com/k8snetworkplumbingwg/govdpa/pkg/kvdpa -# github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha -## explicit; go 1.20 +# github.com/k8snetworkplumbingwg/ipamclaims v0.5.0-alpha +## explicit; go 1.23.0 github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1 github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake @@ -677,7 +677,7 @@ gopkg.in/warnings.v0 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.32.3 +# k8s.io/api v0.32.5 ## explicit; go 1.23.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ 
-742,7 +742,7 @@ k8s.io/api/storagemigration/v1alpha1 ## explicit; go 1.23.0 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 -# k8s.io/apimachinery v0.32.3 +# k8s.io/apimachinery v0.32.5 ## explicit; go 1.23.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -807,7 +807,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/client-go v0.32.3 +# k8s.io/client-go v0.32.5 ## explicit; go 1.23.0 k8s.io/client-go/applyconfigurations k8s.io/client-go/applyconfigurations/admissionregistration/v1 diff --git a/test/e2e/go.mod b/test/e2e/go.mod index d87e790619..d9d67fb0c4 100644 --- a/test/e2e/go.mod +++ b/test/e2e/go.mod @@ -6,16 +6,16 @@ toolchain go1.23.6 require ( github.com/google/go-cmp v0.6.0 - github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha + github.com/k8snetworkplumbingwg/ipamclaims v0.5.0-alpha github.com/k8snetworkplumbingwg/multi-networkpolicy v1.0.1 github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.6.0 github.com/onsi/ginkgo/v2 v2.22.0 github.com/onsi/gomega v1.36.1 github.com/pkg/errors v0.9.1 golang.org/x/sync v0.12.0 - k8s.io/api v0.32.3 - k8s.io/apimachinery v0.32.3 - k8s.io/client-go v0.32.3 + k8s.io/api v0.32.5 + k8s.io/apimachinery v0.32.5 + k8s.io/client-go v0.32.5 k8s.io/klog v1.0.0 k8s.io/kubernetes v1.32.6 k8s.io/pod-security-admission v0.32.3 diff --git a/test/e2e/go.sum b/test/e2e/go.sum index d8a6c5c80c..900d7aa612 100644 --- a/test/e2e/go.sum +++ b/test/e2e/go.sum @@ -331,8 +331,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47 h1:iSncnlC+rtlNOIpPa3fbqQMhpTscGJIlkiWaPl1VcS4= 
github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47/go.mod h1:SPaDIyUmwN03Bgn0u/mhoiE4o/+koeKh11VUsdsUX0U= -github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha h1:ss+EP77GlQmh90hGKpnAG4Q3VVxRlB7GoncemaPtO4g= -github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha/go.mod h1:qlR+sKxQ2OGfwhFCuXSd7rJ/GgC38vQBeHKQ7f2YnpI= +github.com/k8snetworkplumbingwg/ipamclaims v0.5.0-alpha h1:b3iHeks/KTzhG2dNanaUZcFEJwJbYBZY16jxCaVv9i8= +github.com/k8snetworkplumbingwg/ipamclaims v0.5.0-alpha/go.mod h1:MGaMX1tJ7MlHDee4/xmqp3guQh+eDiuCLAauqD9K11Q= github.com/k8snetworkplumbingwg/multi-networkpolicy v1.0.1 h1:Egj1hEVYNXWFlKpgzAXxe/2o8VNiVcAJLrKzlinILQo= github.com/k8snetworkplumbingwg/multi-networkpolicy v1.0.1/go.mod h1:kEJ4WM849yNmXekuSXLRwb+LaZ9usC06O8JgoAIq+f4= github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.6.0 h1:BT3ghAY0q7lWib9rz+tVXDFkm27dJV6SLCn7TunZwo4= @@ -986,19 +986,19 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.22.7/go.mod h1:7hejA1BgBEiSsWljUyRkIjj+AISXO16IwsaDgFjJsQE= k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= -k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= -k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= +k8s.io/api v0.32.5 h1:uqjjsYo1kTJr5NIcoIaP9F+TgXgADH7nKQx91FDAhtk= +k8s.io/api v0.32.5/go.mod h1:bXXFU3fGCZ/eFMZvfHZC69PeGbXEL4zzjuPVzOxHF64= k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY= k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss= k8s.io/apimachinery v0.22.7/go.mod h1:ZvVLP5iLhwVFg2Yx9Gh5W0um0DUauExbRhe+2Z8I1EU= k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= -k8s.io/apimachinery v0.32.3/go.mod 
h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/apimachinery v0.32.5 h1:6We3aJ6crC0ap8EhsEXcgX3LpI6SEjubpiOMXLROwPM= +k8s.io/apimachinery v0.32.5/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/apiserver v0.32.3 h1:kOw2KBuHOA+wetX1MkmrxgBr648ksz653j26ESuWNY8= k8s.io/apiserver v0.32.3/go.mod h1:q1x9B8E/WzShF49wh3ADOh6muSfpmFL0I2t+TG0Zdgc= k8s.io/client-go v0.22.7/go.mod h1:pGU/tWSzzvsYT7M3npHhoZ3Jh9qJTTIvFvDtWuW31dw= -k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= -k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= +k8s.io/client-go v0.32.5 h1:huFmQMzgWu0z4kbWsuZci+Gt4Fo72I4CcrvhToZ/Qp0= +k8s.io/client-go v0.32.5/go.mod h1:Qchw6f9WIVrur7DKojAHpRgGLcANT0RLIvF39Jz58xA= k8s.io/cloud-provider v0.32.3 h1:WC7KhWrqXsU4b0E4tjS+nBectGiJbr1wuc1TpWXvtZM= k8s.io/cloud-provider v0.32.3/go.mod h1:/fwBfgRPuh16n8vLHT+PPT+Bc4LAEaJYj38opO2wsYY= k8s.io/code-generator v0.22.7/go.mod h1:iOZwYADSgFPNGWfqHFfg1V0TNJnl1t0WyZluQp4baqU= From 5b5bc069fd729ef6921b01ac25e26220f052b9bd Mon Sep 17 00:00:00 2001 From: Alin Serdean Date: Fri, 18 Jul 2025 18:16:22 +0200 Subject: [PATCH 157/181] gateway: Refactor gateway initialization and DPU host handling - Improve DPU host gateway configuration with better IP handling - Fix node interface address handling for DPU mode - Clean up gateway interface selection logic for DPU and DPU-HOST - Update node annotations for DPU host mode - Remove redundant code and improve error handling - Fix typos and improve code readability This change simplifies the gateway initialization flow and improves the handling of DPU (Data Processing Unit) host configurations. The main idea is that the DPU in host mode is the one that dictates how the network looks and the DPU side just follows. 
Signed-off-by: Alin Serdean --- .../pkg/node/bridgeconfig/bridgeconfig.go | 9 +- .../node/default_node_network_controller.go | 8 +- go-controller/pkg/node/gateway_init.go | 97 +++++++++++++++---- .../pkg/node/gateway_init_linux_test.go | 8 +- .../pkg/node/node_ip_handler_linux.go | 32 ++---- .../pkg/ovn/controller/services/lb_config.go | 10 +- .../pkg/ovn/default_network_controller.go | 10 +- go-controller/pkg/util/node_annotations.go | 39 ++++++++ 8 files changed, 161 insertions(+), 52 deletions(-) diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go index 4cad9037ad..4031ff3cc8 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go @@ -244,12 +244,15 @@ func (b *BridgeConfiguration) UpdateInterfaceIPAddresses(node *corev1.Node) ([]* // For DPU, here we need to use the DPU host's IP address which is the tenant cluster's // host internal IP address instead of the DPU's external bridge IP address. if config.OvnKubeNode.Mode == types.NodeModeDPU { - nodeAddrStr, err := util.GetNodePrimaryIP(node) + nodeIfAddr, err := util.GetNodePrimaryDPUHostAddrAnnotation(node) if err != nil { return nil, err } - nodeAddr := net.ParseIP(nodeAddrStr) - if nodeAddr == nil { + // For DPU mode, we only support IPv4 for now. + nodeAddrStr := nodeIfAddr.IPv4 + + nodeAddr, _, err := net.ParseCIDR(nodeAddrStr) + if err != nil { return nil, fmt.Errorf("failed to parse node IP address. 
%v", nodeAddrStr) } ifAddrs, err = nodeutil.GetDPUHostPrimaryIPAddresses(nodeAddr, ifAddrs) diff --git a/go-controller/pkg/node/default_node_network_controller.go b/go-controller/pkg/node/default_node_network_controller.go index a2bdf34e50..f1281980a8 100644 --- a/go-controller/pkg/node/default_node_network_controller.go +++ b/go-controller/pkg/node/default_node_network_controller.go @@ -965,8 +965,12 @@ func (nc *DefaultNodeNetworkController) Init(ctx context.Context) error { // First part of gateway initialization. It will be completed by (nc *DefaultNodeNetworkController) Start() if config.OvnKubeNode.Mode != types.NodeModeDPUHost { + // IPv6 is not supported in DPU enabled nodes, error out if ovnkube is not set in IPv4 mode + if config.IPv6Mode && config.OvnKubeNode.Mode == types.NodeModeDPU { + return fmt.Errorf("IPv6 mode is not supported on a DPU enabled node") + } // Initialize gateway for OVS internal port or representor management port - gw, err := nc.initGatewayPreStart(subnets, nodeAnnotator, nc.mgmtPortController, nodeAddr) + gw, err := nc.initGatewayPreStart(subnets, nodeAnnotator, nc.mgmtPortController) if err != nil { return err } @@ -1059,7 +1063,7 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { netdevName = netdevs[0] config.Gateway.Interface = netdevName } - err = nc.initGatewayDPUHost(nc.nodeAddress) + err = nc.initGatewayDPUHost(nc.nodeAddress, nodeAnnotator) if err != nil { return err } diff --git a/go-controller/pkg/node/gateway_init.go b/go-controller/pkg/node/gateway_init.go index 4fe0b244fd..1c2d79c98e 100644 --- a/go-controller/pkg/node/gateway_init.go +++ b/go-controller/pkg/node/gateway_init.go @@ -9,6 +9,8 @@ import ( "github.com/vishvananda/netlink" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" utilnet "k8s.io/utils/net" @@ -195,7 +197,6 @@ func (nc *DefaultNodeNetworkController) initGatewayPreStart( subnets []*net.IPNet, nodeAnnotator kube.Annotator, mgmtPort 
managementport.Interface, - kubeNodeIP net.IP, ) (*gateway, error) { klog.Info("Initializing Gateway Functionality for Gateway PreStart") @@ -219,13 +220,39 @@ func (nc *DefaultNodeNetworkController) initGatewayPreStart( return nil, err } - // For DPU need to use the host IP addr which currently is assumed to be K8s Node cluster - // internal IP address. + // For DPU mode, we need to use the host IP address which is stored as a Kubernetes + // node annotation rather than using the gateway interface IP addresses. if config.OvnKubeNode.Mode == types.NodeModeDPU { - ifAddrs, err = nodeutil.GetDPUHostPrimaryIPAddresses(kubeNodeIP, ifAddrs) + // Retrieve the current node object from the Kubernetes API + var node *corev1.Node + if node, err = nc.watchFactory.GetNode(nc.name); err != nil { + return nil, fmt.Errorf("error retrieving node %s: %v", nc.name, err) + } + + // Extract the primary DPU address annotation from the node + nodeIfAddr, err := util.GetNodePrimaryDPUHostAddrAnnotation(node) if err != nil { return nil, err } + // For DPU mode, we only support IPv4 for now. 
+ nodeAddrStr := nodeIfAddr.IPv4 + if nodeAddrStr == "" { + return nil, fmt.Errorf("node primary DPU address annotation is empty for node %s", nc.name) + } + + // Parse the IPv4 address string into IP and network components + nodeIP, nodeAddrs, err := net.ParseCIDR(nodeAddrStr) + if err != nil { + return nil, fmt.Errorf("failed to parse node IP address %s: %v", nodeAddrStr, err) + } + + // Set the parsed IP as the network address + nodeAddrs.IP = nodeIP + + // Create a new slice and replace ifAddrs with the DPU host address + // This overrides the gateway interface addresses for DPU mode + var gwIps []*net.IPNet + ifAddrs = append(gwIps, nodeAddrs) } if err := util.SetNodePrimaryIfAddrs(nodeAnnotator, ifAddrs); err != nil { @@ -359,7 +386,7 @@ func interfaceForEXGW(intfName string) string { return intfName } -func (nc *DefaultNodeNetworkController) initGatewayDPUHost(kubeNodeIP net.IP) error { +func (nc *DefaultNodeNetworkController) initGatewayDPUHost(kubeNodeIP net.IP, nodeAnnotator kube.Annotator) error { // A DPU host gateway is complementary to the shared gateway running // on the DPU embedded CPU. 
it performs some initializations and // watch on services for iptable rule updates and run a loadBalancerHealth checker @@ -367,35 +394,71 @@ func (nc *DefaultNodeNetworkController) initGatewayDPUHost(kubeNodeIP net.IP) er klog.Info("Initializing Shared Gateway Functionality on DPU host") var err error - // Force gateway interface to be the interface associated with kubeNodeIP - gwIntf, err := getInterfaceByIP(kubeNodeIP) + // Find the network interface that has the Kubernetes node IP assigned to it + // This interface will be used for DPU host gateway operations + kubeIntf, err := getInterfaceByIP(kubeNodeIP) if err != nil { return err } - config.Gateway.Interface = gwIntf - _, gatewayIntf, err := getGatewayNextHops() + // Get all IP addresses (IPv4 and IPv6) configured on the detected interface + ifAddrs, err := nodeutil.GetNetworkInterfaceIPAddresses(kubeIntf) if err != nil { return err } - ifAddrs, err := nodeutil.GetNetworkInterfaceIPAddresses(gatewayIntf) - if err != nil { + // Extract the IPv4 address from the interface addresses for node annotation + nodeIPNet, _ := util.MatchFirstIPNetFamily(false, ifAddrs) + nodeAddrSet := sets.New[string](nodeIPNet.String()) + + // If no gateway interface is explicitly configured, use the detected interface + if config.Gateway.Interface == "" { + config.Gateway.Interface = kubeIntf + } + + // If a different gateway interface is configured than the one with used for the kubernetes node IP, + // get its addresses and add them to the node address set for routing purposes + if config.Gateway.Interface != kubeIntf { + ifAddrs, err = nodeutil.GetNetworkInterfaceIPAddresses(config.Gateway.Interface) + if err != nil { + return err + } + detectedIPNetv4, _ := util.MatchFirstIPNetFamily(false, ifAddrs) + nodeAddrSet.Insert(detectedIPNetv4.String()) + // Use the configured interface for the masquerade route instead of the auto-detected one + kubeIntf = config.Gateway.Interface + } + + // Set the primary DPU address annotation on 
the node with the interface addresses + if err := util.SetNodePrimaryDPUHostAddr(nodeAnnotator, ifAddrs); err != nil { + klog.Errorf("Unable to set primary IP net label on node, err: %v", err) + return err + } + + // Set the host CIDRs annotation to include all detected network addresses + // This helps with routing decisions for traffic coming from the host + if err := util.SetNodeHostCIDRs(nodeAnnotator, nodeAddrSet); err != nil { + klog.Errorf("Unable to set host-cidrs on node, err: %v", err) return err } + // Apply all node annotations to the Kubernetes node object + if err := nodeAnnotator.Run(); err != nil { + return fmt.Errorf("failed to set node %s annotations: %w", nc.name, err) + } + // Delete stale masquerade resources if there are any. This is to make sure that there // are no Linux resources with IP from old masquerade subnet when masquerade subnet // gets changed as part of day2 operation. - if err := deleteStaleMasqueradeResources(gwIntf, nc.name, nc.watchFactory); err != nil { + if err := deleteStaleMasqueradeResources(kubeIntf, nc.name, nc.watchFactory); err != nil { return fmt.Errorf("failed to remove stale masquerade resources: %w", err) } - if err := setNodeMasqueradeIPOnExtBridge(gwIntf); err != nil { - return fmt.Errorf("failed to set the node masquerade IP on the ext bridge %s: %v", gwIntf, err) + if err := setNodeMasqueradeIPOnExtBridge(kubeIntf); err != nil { + return fmt.Errorf("failed to set the node masquerade IP on the ext bridge %s: %v", kubeIntf, err) } - if err := addMasqueradeRoute(nc.routeManager, gwIntf, nc.name, ifAddrs, nc.watchFactory); err != nil { + if err := addMasqueradeRoute(nc.routeManager, kubeIntf, nc.name, ifAddrs, nc.watchFactory); err != nil { return fmt.Errorf("failed to set the node masquerade route to OVN: %v", err) } @@ -404,7 +467,7 @@ func (nc *DefaultNodeNetworkController) initGatewayDPUHost(kubeNodeIP net.IP) er return fmt.Errorf("failed to update masquerade subnet annotation on node: %s, error: %v", nc.name, 
err) } - err = configureSvcRouteViaInterface(nc.routeManager, gatewayIntf, DummyNextHopIPs()) + err = configureSvcRouteViaInterface(nc.routeManager, config.Gateway.Interface, DummyNextHopIPs()) if err != nil { return err } @@ -430,7 +493,7 @@ func (nc *DefaultNodeNetworkController) initGatewayDPUHost(kubeNodeIP net.IP) er gw.portClaimWatcher = portClaimWatcher } - if err := addHostMACBindings(gwIntf); err != nil { + if err := addHostMACBindings(kubeIntf); err != nil { return fmt.Errorf("failed to add MAC bindings for service routing") } diff --git a/go-controller/pkg/node/gateway_init_linux_test.go b/go-controller/pkg/node/gateway_init_linux_test.go index 9e1fc9213c..6e8aadc0f5 100644 --- a/go-controller/pkg/node/gateway_init_linux_test.go +++ b/go-controller/pkg/node/gateway_init_linux_test.go @@ -725,6 +725,9 @@ func shareGatewayInterfaceDPUTest(app *cli.App, testNS ns.NetNS, k := &kube.Kube{KClient: kubeFakeClient} nodeAnnotator := kube.NewNodeAnnotator(k, existingNode.Name) + err = util.SetNodePrimaryDPUHostAddr(nodeAnnotator, ovntest.MustParseIPNets(nodeSubnet)) + config.Gateway.RouterSubnet = nodeSubnet + Expect(err).NotTo(HaveOccurred()) err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, ovntest.MustParseIPNets(nodeSubnet)) Expect(err).NotTo(HaveOccurred()) @@ -893,8 +896,11 @@ func shareGatewayInterfaceDPUHostTest(app *cli.App, testNS ns.NetNS, uplinkName, err = testNS.Do(func(ns.NetNS) error { defer GinkgoRecover() + k := &kube.Kube{KClient: kubeFakeClient} + + nodeAnnotator := kube.NewNodeAnnotator(k, existingNode.Name) - err := nc.initGatewayDPUHost(net.ParseIP(hostIP)) + err := nc.initGatewayDPUHost(net.ParseIP(hostIP), nodeAnnotator) Expect(err).NotTo(HaveOccurred()) link, err := netlink.LinkByName(uplinkName) diff --git a/go-controller/pkg/node/node_ip_handler_linux.go b/go-controller/pkg/node/node_ip_handler_linux.go index 770ec5924e..dcbbbfc7d6 100644 --- a/go-controller/pkg/node/node_ip_handler_linux.go +++ 
b/go-controller/pkg/node/node_ip_handler_linux.go @@ -65,27 +65,11 @@ func newAddressManagerInternal(nodeName string, k kube.Interface, mgmtPort manag } mgr.nodeAnnotator = kube.NewNodeAnnotator(k, nodeName) if config.OvnKubeNode.Mode == types.NodeModeDPU { - var ifAddrs []*net.IPNet - - // update k8s.ovn.org/host-cidrs - node, err := watchFactory.GetNode(nodeName) - if err != nil { - klog.Errorf("Failed to get node %s: %v", nodeName, err) - return nil - } - if useNetlink { - // get updated interface IP addresses for the gateway bridge - ifAddrs, err = gwBridge.UpdateInterfaceIPAddresses(node) - if err != nil { - klog.Errorf("Failed to obtain interface IP addresses for node %s: %v", nodeName, err) - return nil - } - } - if err = mgr.updateHostCIDRs(ifAddrs); err != nil { + if err := mgr.updateHostCIDRs(); err != nil { klog.Errorf("Failed to update host-cidrs annotations on node %s: %v", nodeName, err) return nil } - if err = mgr.nodeAnnotator.Run(); err != nil { + if err := mgr.nodeAnnotator.Run(); err != nil { klog.Errorf("Failed to set host-cidrs annotations on node %s: %v", nodeName, err) return nil } @@ -286,7 +270,7 @@ func (c *addressManager) updateNodeAddressAnnotations() error { } // update k8s.ovn.org/host-cidrs - if err = c.updateHostCIDRs(ifAddrs); err != nil { + if err = c.updateHostCIDRs(); err != nil { return err } @@ -316,14 +300,10 @@ func (c *addressManager) updateNodeAddressAnnotations() error { return nil } -func (c *addressManager) updateHostCIDRs(ifAddrs []*net.IPNet) error { +func (c *addressManager) updateHostCIDRs() error { if config.OvnKubeNode.Mode == types.NodeModeDPU { - // For DPU mode, here we need to use the DPU host's IP address which is the tenant cluster's - // host internal IP address instead. - // Currently we are only intentionally supporting IPv4 for DPU here. 
- nodeIPNetv4, _ := util.MatchFirstIPNetFamily(false, ifAddrs) - nodeAddrSet := sets.New[string](nodeIPNetv4.String()) - return util.SetNodeHostCIDRs(c.nodeAnnotator, nodeAddrSet) + // For DPU mode, we don't need to update the host-cidrs annotation. + return nil } return util.SetNodeHostCIDRs(c.nodeAnnotator, c.cidrs) diff --git a/go-controller/pkg/ovn/controller/services/lb_config.go b/go-controller/pkg/ovn/controller/services/lb_config.go index 7cd2238e81..461827041c 100644 --- a/go-controller/pkg/ovn/controller/services/lb_config.go +++ b/go-controller/pkg/ovn/controller/services/lb_config.go @@ -91,10 +91,16 @@ func makeNodeRouterTargetIPs(node *nodeInfo, c *lbConfig, hostMasqueradeIPV4, ho targetIPsV6 = localIPsV6 } + // TODO: For all scenarios the lbAddress should be set to hostAddressesStr but this is breaking CI needs more investigation + lbAddresses := node.hostAddressesStr() + if config.OvnKubeNode.Mode == types.NodeModeFull { + lbAddresses = node.l3gatewayAddressesStr() + } + // Any targets local to the node need to have a special // harpin IP added, but only for the router LB - targetIPsV4, v4Updated := util.UpdateIPsSlice(targetIPsV4, node.l3gatewayAddressesStr(), []string{hostMasqueradeIPV4}) - targetIPsV6, v6Updated := util.UpdateIPsSlice(targetIPsV6, node.l3gatewayAddressesStr(), []string{hostMasqueradeIPV6}) + targetIPsV4, v4Updated := util.UpdateIPsSlice(targetIPsV4, lbAddresses, []string{hostMasqueradeIPV4}) + targetIPsV6, v6Updated := util.UpdateIPsSlice(targetIPsV6, lbAddresses, []string{hostMasqueradeIPV6}) // Local endpoints are a subset of cluster endpoints, so it is enough to compare their length v4Changed = len(targetIPsV4) != len(c.clusterEndpoints.V4IPs) || v4Updated diff --git a/go-controller/pkg/ovn/default_network_controller.go b/go-controller/pkg/ovn/default_network_controller.go index ed79067e8e..26ad651206 100644 --- a/go-controller/pkg/ovn/default_network_controller.go +++ b/go-controller/pkg/ovn/default_network_controller.go @@ 
-949,6 +949,7 @@ func (h *defaultNetworkControllerEventHandler) UpdateResource(oldObj, newObj int zoneClusterChanged := h.oc.nodeZoneClusterChanged(oldNode, newNode, newNodeIsLocalZoneNode, types.DefaultNetworkName) nodeSubnetChange := nodeSubnetChanged(oldNode, newNode, types.DefaultNetworkName) nodeEncapIPsChanged := util.NodeEncapIPsChanged(oldNode, newNode) + nodePrimaryDPUHostAddrChanged := util.NodePrimaryDPUHostAddrAnnotationChanged(oldNode, newNode) var aggregatedErrors []error if newNodeIsLocalZoneNode { @@ -1006,11 +1007,18 @@ func (h *defaultNetworkControllerEventHandler) UpdateResource(oldObj, newObj int // Also check if node subnet changed, so static routes are properly set // Also check if the node is used to be a hybrid overlay node syncZoneIC = syncZoneIC || h.oc.isLocalZoneNode(oldNode) || nodeSubnetChange || zoneClusterChanged || - switchToOvnNode || nodeEncapIPsChanged + switchToOvnNode || nodeEncapIPsChanged || nodePrimaryDPUHostAddrChanged if syncZoneIC { klog.Infof("Node %q in remote zone %q, network %q, needs interconnect zone sync up. Zone cluster changed: %v", newNode.Name, util.GetNodeZone(newNode), h.oc.GetNetworkName(), zoneClusterChanged) } + // Reprovisioning the DPU (including OVS), which is pinned to a host, will change the system ID but not the node. 
+ if config.OvnKubeNode.Mode == types.NodeModeDPU && nodeChassisChanged(oldNode, newNode) { + if err := h.oc.zoneChassisHandler.DeleteRemoteZoneNode(oldNode); err != nil { + aggregatedErrors = append(aggregatedErrors, err) + } + syncZoneIC = true + } if err := h.oc.addUpdateRemoteNodeEvent(newNode, syncZoneIC); err != nil { aggregatedErrors = append(aggregatedErrors, err) } diff --git a/go-controller/pkg/util/node_annotations.go b/go-controller/pkg/util/node_annotations.go index 4e9a984748..cddd754d60 100644 --- a/go-controller/pkg/util/node_annotations.go +++ b/go-controller/pkg/util/node_annotations.go @@ -97,6 +97,9 @@ const ( // OVNNodeHostCIDRs is used to track the different host IP addresses and subnet masks on the node OVNNodeHostCIDRs = "k8s.ovn.org/host-cidrs" + // OVNNodePrimaryDPUHostAddr is used to track the primary DPU host address on the node + OVNNodePrimaryDPUHostAddr = "k8s.ovn.org/primary-dpu-host-addr" + // OVNNodeSecondaryHostEgressIPs contains EgressIP addresses that aren't managed by OVN. The EIP addresses are assigned to // standard linux interfaces and not interfaces of type OVS. 
OVNNodeSecondaryHostEgressIPs = "k8s.ovn.org/secondary-host-egress-ips" @@ -1534,3 +1537,39 @@ func ParseNodeEncapIPsAnnotation(node *corev1.Node) ([]string, error) { func NodeEncapIPsChanged(oldNode, newNode *corev1.Node) bool { return oldNode.Annotations[OVNNodeEncapIPs] != newNode.Annotations[OVNNodeEncapIPs] } + +// SetNodePrimaryDPUHostAddr sets the primary DPU host address annotation on a node +func SetNodePrimaryDPUHostAddr(nodeAnnotator kube.Annotator, ifAddrs []*net.IPNet) error { + nodeIPNetv4, _ := MatchFirstIPNetFamily(false, ifAddrs) + nodeIPNetv6, _ := MatchFirstIPNetFamily(true, ifAddrs) + + ifAddrAnnotation := ifAddr{} + if nodeIPNetv4 != nil { + ifAddrAnnotation.IPv4 = nodeIPNetv4.String() + } + if nodeIPNetv6 != nil { + ifAddrAnnotation.IPv6 = nodeIPNetv6.String() + } + return nodeAnnotator.Set(OVNNodePrimaryDPUHostAddr, ifAddrAnnotation) +} + +// NodePrimaryDPUHostAddrAnnotationChanged returns true if the primary DPU host address annotation changed +func NodePrimaryDPUHostAddrAnnotationChanged(oldNode, newNode *corev1.Node) bool { + return oldNode.Annotations[OVNNodePrimaryDPUHostAddr] != newNode.Annotations[OVNNodePrimaryDPUHostAddr] +} + +// GetNodePrimaryDPUHostAddrAnnotation returns the raw primary DPU host address annotation from a node +func GetNodePrimaryDPUHostAddrAnnotation(node *corev1.Node) (*ifAddr, error) { + addrAnnotation, ok := node.Annotations[OVNNodePrimaryDPUHostAddr] + if !ok { + return nil, newAnnotationNotSetError("%s annotation not found for node %q", OVNNodePrimaryDPUHostAddr, node.Name) + } + nodeIfAddr := &ifAddr{} + if err := json.Unmarshal([]byte(addrAnnotation), nodeIfAddr); err != nil { + return nil, fmt.Errorf("failed to unmarshal annotation: %s for node %q, err: %v", OVNNodePrimaryDPUHostAddr, node.Name, err) + } + if nodeIfAddr.IPv4 == "" && nodeIfAddr.IPv6 == "" { + return nil, fmt.Errorf("node: %q does not have any IP information set", node.Name) + } + return nodeIfAddr, nil +} From 
45bf0b39b008377b9b7eac8665d48b614bcc386e Mon Sep 17 00:00:00 2001 From: Enrique Llorente Date: Mon, 21 Jul 2025 12:23:47 +0200 Subject: [PATCH 158/181] Revert "e2e: Use ovnk allocator and reserve IPs" This reverts commit 9fed90c790221bd4b925ba2a4ad57784a1cbea3f. Signed-off-by: Enrique Llorente --- test/e2e/ipalloc/ipalloc.go | 47 +++++++ test/e2e/ipalloc/primaryipalloc.go | 166 +++++++++++++++++++----- test/e2e/ipalloc/primaryipalloc_test.go | 90 ++++++++----- 3 files changed, 238 insertions(+), 65 deletions(-) create mode 100644 test/e2e/ipalloc/ipalloc.go diff --git a/test/e2e/ipalloc/ipalloc.go b/test/e2e/ipalloc/ipalloc.go new file mode 100644 index 0000000000..7decbaa0a1 --- /dev/null +++ b/test/e2e/ipalloc/ipalloc.go @@ -0,0 +1,47 @@ +package ipalloc + +import ( + "fmt" + "math/big" + "net" +) + +type ipAllocator struct { + net *net.IPNet + // base is a cached version of the start IP in the CIDR range as a *big.Int + base *big.Int + // max is the maximum size of the usable addresses in the range + max int + count int +} + +func newIPAllocator(cidr *net.IPNet) *ipAllocator { + return &ipAllocator{net: cidr, base: getBaseInt(cidr.IP), max: limit(cidr)} +} + +func (n *ipAllocator) AllocateNextIP() (net.IP, error) { + if n.count >= n.max { + return net.IP{}, fmt.Errorf("limit of %d reached", n.max) + } + n.base.Add(n.base, big.NewInt(1)) + n.count += 1 + b := n.base.Bytes() + b = append(make([]byte, 16), b...) 
+ return b[len(b)-16:], nil +} + +func getBaseInt(ip net.IP) *big.Int { + return big.NewInt(0).SetBytes(ip.To16()) +} + +func limit(subnet *net.IPNet) int { + ones, bits := subnet.Mask.Size() + if bits == 32 && (bits-ones) >= 31 || bits == 128 && (bits-ones) >= 127 { + return 0 + } + // limit to 2^8 (256) IPs for e2es + if bits == 128 && (bits-ones) >= 8 { + return int(1) << uint(8) + } + return int(1) << uint(bits-ones) +} diff --git a/test/e2e/ipalloc/primaryipalloc.go b/test/e2e/ipalloc/primaryipalloc.go index 1e7c34bb87..79a0ae5010 100644 --- a/test/e2e/ipalloc/primaryipalloc.go +++ b/test/e2e/ipalloc/primaryipalloc.go @@ -3,20 +3,19 @@ package ipalloc import ( "context" "fmt" - "net" - "sync" - - ipallocator "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/ip" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/client-go/kubernetes/typed/core/v1" + "net" + "sync" ) // primaryIPAllocator attempts to allocate an IP in the same subnet as a nodes primary network type primaryIPAllocator struct { mu *sync.Mutex - v4 *ipallocator.Range - v6 *ipallocator.Range + v4 *ipAllocator + v6 *ipAllocator nodeClient v1.NodeInterface } @@ -48,37 +47,91 @@ func newPrimaryIPAllocator(nodeClient v1.NodeInterface) (*primaryIPAllocator, er if len(nodes.Items) == 0 { return ipa, fmt.Errorf("expected at least one node but found zero") } + // FIXME: the approach taken here to find the first node IP+mask and then to increment the second last octet wont work in + // all scenarios (node with /24). We should generate an EgressIP compatible with a Node providers primary network and then take care its unique globally. - for _, node := range nodes.Items { + // The approach here is to grab initial starting IP from first node found, increment the second last octet. + // Approach taken here won't work for Nodes handed /24 subnets. 
+ nodePrimaryIPs, err := util.ParseNodePrimaryIfAddr(&nodes.Items[0]) + if err != nil { + return ipa, fmt.Errorf("failed to parse node primary interface address from Node object: %v", err) + } + if nodePrimaryIPs.V4.IP != nil { + // should be ok with /16 and /64 node primary provider subnets + // TODO; fixme; what about /24 subnet Nodes like GCP + nodePrimaryIPs.V4.IP[len(nodePrimaryIPs.V4.IP)-2]++ + ipa.v4 = newIPAllocator(&net.IPNet{IP: nodePrimaryIPs.V4.IP, Mask: nodePrimaryIPs.V4.Net.Mask}) + } + if nodePrimaryIPs.V6.IP != nil { + nodePrimaryIPs.V6.IP[len(nodePrimaryIPs.V6.IP)-2]++ + ipa.v6 = newIPAllocator(&net.IPNet{IP: nodePrimaryIPs.V6.IP, Mask: nodePrimaryIPs.V6.Net.Mask}) + } + // verify the new starting base IP is within all Nodes subnets + if nodePrimaryIPs.V4.IP != nil { + ipNets, err := getNodePrimaryProviderIPs(nodes.Items, false) + if err != nil { + return ipa, err + } + nextIP, err := ipa.v4.AllocateNextIP() + if err != nil { + return ipa, err + } + if !isIPWithinAllSubnets(ipNets, nextIP) { + return ipa, fmt.Errorf("IP %s is not within all Node subnets", nextIP) + } + } + if nodePrimaryIPs.V6.IP != nil { + ipNets, err := getNodePrimaryProviderIPs(nodes.Items, true) + if err != nil { + return ipa, err + } + nextIP, err := ipa.v6.AllocateNextIP() + if err != nil { + return ipa, err + } + if !isIPWithinAllSubnets(ipNets, nextIP) { + return ipa, fmt.Errorf("IP %s is not within all Node subnets", nextIP) + } + } + + return ipa, nil +} + +func getNodePrimaryProviderIPs(nodes []corev1.Node, isIPv6 bool) ([]*net.IPNet, error) { + ipNets := make([]*net.IPNet, 0, len(nodes)) + for _, node := range nodes { nodePrimaryIPs, err := util.ParseNodePrimaryIfAddr(&node) if err != nil { - return ipa, fmt.Errorf("failed to parse node primary interface address from Node %s object: %v", node.Name, err) - } - if nodePrimaryIPs.V4.IP != nil { - if ipa.v4 == nil { - ipa.v4, err = ipallocator.NewCIDRRange(nodePrimaryIPs.V4.Net) - if err != nil { - return ipa, 
fmt.Errorf("failed to create new CIDR range for IPv4: %v", err) - } - } - if err := ipa.v4.Allocate(nodePrimaryIPs.V4.IP); err != nil { - return ipa, fmt.Errorf("failed to allocate IPv4 %s: %v", nodePrimaryIPs.V4.IP, err) - } - } - if nodePrimaryIPs.V6.IP != nil { - if ipa.v6 == nil { - ipa.v6, err = ipallocator.NewCIDRRange(nodePrimaryIPs.V6.Net) - if err != nil { - return ipa, fmt.Errorf("failed to create new CIDR range for IPv6: %v", err) - } - } - if err := ipa.v6.Allocate(nodePrimaryIPs.V6.IP); err != nil { - return ipa, fmt.Errorf("failed to allocate IPv6 %s: %v", nodePrimaryIPs.V6.IP, err) - } + return nil, fmt.Errorf("failed to parse node primary interface address from Node %s object: %v", node.Name, err) } + var mask net.IPMask + var ip net.IP + if isIPv6 { + ip = nodePrimaryIPs.V6.IP + mask = nodePrimaryIPs.V6.Net.Mask + } else { + ip = nodePrimaryIPs.V4.IP + mask = nodePrimaryIPs.V4.Net.Mask + } + if len(ip) == 0 || len(mask) == 0 { + return nil, fmt.Errorf("failed to find Node %s primary Node IP and/or mask", node.Name) + } + ipNets = append(ipNets, &net.IPNet{IP: ip, Mask: mask}) } - return ipa, nil + return ipNets, nil +} + +func isIPWithinAllSubnets(ipNets []*net.IPNet, ip net.IP) bool { + if len(ipNets) == 0 { + return false + } + for _, ipNet := range ipNets { + if !ipNet.Contains(ip) { + return false + } + } + return true } func (pia *primaryIPAllocator) IncrementAndGetNextV4(times int) (net.IP, error) { @@ -95,9 +148,12 @@ func (pia *primaryIPAllocator) AllocateNextV4() (net.IP, error) { if pia.v4 == nil { return nil, fmt.Errorf("IPv4 is not enable ") } + if pia.v4.net == nil { + return nil, fmt.Errorf("IPv4 is not enabled but Allocation request was called") + } pia.mu.Lock() defer pia.mu.Unlock() - return pia.v4.AllocateNext() + return allocateIP(pia.nodeClient, pia.v4.AllocateNextIP) } func (pia *primaryIPAllocator) IncrementAndGetNextV6(times int) (net.IP, error) { @@ -114,7 +170,51 @@ func (pia primaryIPAllocator) AllocateNextV6() (net.IP, 
error) { if pia.v6 == nil { return nil, fmt.Errorf("IPv6 is not enabled but Allocation request was called") } + if pia.v6.net == nil { + return nil, fmt.Errorf("ipv6 network is not set") + } pia.mu.Lock() defer pia.mu.Unlock() - return pia.v6.AllocateNext() + return allocateIP(pia.nodeClient, pia.v6.AllocateNextIP) +} + +type allocNextFn func() (net.IP, error) + +func allocateIP(nodeClient v1.NodeInterface, allocateFn allocNextFn) (net.IP, error) { + nodeList, err := nodeClient.List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to list nodes: %v", err) + } + for { + nextIP, err := allocateFn() + if err != nil { + return nil, fmt.Errorf("failed to allocated next IP address: %v", err) + } + firstOctet := nextIP[len(nextIP)-1] + // skip 0 and 1 + if firstOctet == 0 || firstOctet == 1 { + continue + } + isConflict, err := isConflictWithExistingHostIPs(nodeList.Items, nextIP) + if err != nil { + return nil, fmt.Errorf("failed to determine if IP conflicts with existing IPs: %v", err) + } + if !isConflict { + return nextIP, nil + } + } +} + +func isConflictWithExistingHostIPs(nodes []corev1.Node, ip net.IP) (bool, error) { + ipStr := ip.String() + for _, node := range nodes { + nodeIPsSet, err := util.ParseNodeHostCIDRsDropNetMask(&node) + if err != nil { + return false, fmt.Errorf("failed to parse node %s primary annotation info: %v", node.Name, err) + } + if nodeIPsSet.Has(ipStr) { + return true, nil + } + } + return false, nil } diff --git a/test/e2e/ipalloc/primaryipalloc_test.go b/test/e2e/ipalloc/primaryipalloc_test.go index 1702afe545..815915b7ea 100644 --- a/test/e2e/ipalloc/primaryipalloc_test.go +++ b/test/e2e/ipalloc/primaryipalloc_test.go @@ -15,6 +15,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/fake" + utilsnet "k8s.io/utils/net" ) func TestUtilSuite(t *testing.T) { @@ -22,6 +23,40 @@ func TestUtilSuite(t *testing.T) { ginkgo.RunSpecs(t, 
"node ip alloc suite") } +func TestAllocateNext(t *testing.T) { + tests := []struct { + desc string + input *net.IPNet + output []net.IP + }{ + { + desc: "increments IPv4 address", + input: mustParseCIDRIncIP("192.168.1.5/16"), // mask /24 would fail + output: []net.IP{net.ParseIP("192.168.1.6"), net.ParseIP("192.168.1.7"), net.ParseIP("192.168.1.8")}, + }, + { + desc: "increments IPv6 address", + input: mustParseCIDRIncIP("fc00:f853:ccd:e793::6/64"), + output: []net.IP{net.ParseIP("fc00:f853:ccd:e793::7"), net.ParseIP("fc00:f853:ccd:e793::8"), net.ParseIP("fc00:f853:ccd:e793::9")}, + }, + } + + for i, tc := range tests { + t.Run(fmt.Sprintf("%d:%s", i, tc.desc), func(t *testing.T) { + nodeIPAlloc := newIPAllocator(tc.input) + for _, expectedIP := range tc.output { + allocatedIP, err := nodeIPAlloc.AllocateNextIP() + if err != nil { + t.Errorf("failed to allocated next IP: %v", err) + } + if !allocatedIP.Equal(expectedIP) { + t.Errorf("Expected IP %q, but got %q", expectedIP.String(), allocatedIP.String()) + } + } + }) + } +} + // mustParseCIDRIncIP parses the IP and CIDR. It adds the IP to the returned IPNet. 
func mustParseCIDRIncIP(cidr string) *net.IPNet { ip, ipNet, err := net.ParseCIDR(cidr) @@ -43,19 +78,20 @@ type node struct { } func TestIPAlloc(t *testing.T) { - g := gomega.NewWithT(t) - tests := []struct { - desc string - existingPrimaryNodeIPs []node + desc string + existingPrimaryNodeIPs []node + expectedFromAllocateNext []string }{ { - desc: "IPv4", - existingPrimaryNodeIPs: []node{{v4: network{ip: "192.168.1.1", mask: "16"}}, {v4: network{ip: "192.168.1.2", mask: "16"}}}, + desc: "IPv4", + existingPrimaryNodeIPs: []node{{v4: network{ip: "192.168.1.1", mask: "16"}}, {v4: network{ip: "192.168.1.2", mask: "16"}}}, + expectedFromAllocateNext: []string{"192.168.2.3", "192.168.2.4"}, }, { - desc: "IPv6", - existingPrimaryNodeIPs: []node{{v6: network{ip: "fc00:f853:ccd:e793::5", mask: "64"}}, {v6: network{ip: "fc00:f853:ccd:e793::6", mask: "64"}}}, + desc: "IPv6", + existingPrimaryNodeIPs: []node{{v4: network{ip: "fc00:f853:ccd:e793::5", mask: "64"}}, {v4: network{ip: "fc00:f853:ccd:e793::6", mask: "64"}}}, + expectedFromAllocateNext: []string{"fc00:f853:ccd:e793::8", "fc00:f853:ccd:e793::9"}, }, } @@ -67,32 +103,22 @@ func TestIPAlloc(t *testing.T) { t.Errorf(err.Error()) return } - existingIPv4IPs := []string{} - existingIPv6IPs := []string{} - allocatedIPv4IPs := []string{} - allocatedIPv6IPs := []string{} - for _, existingPrimaryNodeIP := range tc.existingPrimaryNodeIPs { - if existingPrimaryNodeIP.v4.ip != "" { - existingIPv4IPs = append(existingIPv4IPs, existingPrimaryNodeIP.v4.ip) - nextIPv4, err := pipa.AllocateNextV4() - g.Expect(err).ToNot(gomega.HaveOccurred(), "should succeed in allocating the next IPv4 address") - g.Expect(nextIPv4).ToNot(gomega.BeNil(), "should allocate next IPv4 address") - allocatedIPv4IPs = append(allocatedIPv4IPs, nextIPv4.String()) + for _, expectedIPStr := range tc.expectedFromAllocateNext { + expectedIP := net.ParseIP(expectedIPStr) + var nextIP net.IP + var err error + if utilsnet.IsIPv6(expectedIP) { + nextIP, err = 
pipa.AllocateNextV6() + } else { + nextIP, err = pipa.AllocateNextV4() } - - if existingPrimaryNodeIP.v6.ip != "" { - existingIPv6IPs = append(existingIPv6IPs, existingPrimaryNodeIP.v6.ip) - nextIPv6, err := pipa.AllocateNextV6() - g.Expect(err).ToNot(gomega.HaveOccurred(), "should succeed in allocating the next IPv6 address") - g.Expect(nextIPv6).ToNot(gomega.BeNil(), "should allocate next IPv6 address") - allocatedIPv6IPs = append(allocatedIPv6IPs, nextIPv6.String()) + if err != nil || nextIP == nil { + t.Errorf("failed to allocated next IPv4 or IPv6 address. err %v", err) + return + } + if !nextIP.Equal(expectedIP) { + t.Errorf("expected IP %q, but found %q", expectedIP, nextIP) } - } - if len(existingIPv4IPs) > 0 { - g.Expect(allocatedIPv4IPs).NotTo(gomega.ContainElements(existingIPv4IPs)) - } - if len(existingIPv6IPs) > 0 { - g.Expect(allocatedIPv6IPs).NotTo(gomega.ContainElements(existingIPv6IPs)) } }) } From 6c4bc78badd59ea8f0e5c15c61d74e61dcad3612 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Caama=C3=B1o=20Ruiz?= Date: Tue, 22 Jul 2025 10:21:57 +0000 Subject: [PATCH 159/181] e2e: label RouteAdvertisement test cases & skip extended ones MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jaime Caamaño Ruiz --- test/e2e/feature/features.go | 1 + test/e2e/label/label.go | 8 ++++++++ test/e2e/route_advertisements.go | 20 ++++++++++++-------- test/scripts/e2e-cp.sh | 14 ++++++++++++++ 4 files changed, 35 insertions(+), 8 deletions(-) diff --git a/test/e2e/feature/features.go b/test/e2e/feature/features.go index 842b0474e6..e7c3920477 100644 --- a/test/e2e/feature/features.go +++ b/test/e2e/feature/features.go @@ -23,6 +23,7 @@ var ( MultiHoming = New("MultiHoming") NodeIPMACMigration = New("NodeIPMACMigration") OVSCPUPin = New("OVSCPUPin") + RouteAdvertisements = New("RouteAdvertisements") Unidle = New("Unidle") ) diff --git a/test/e2e/label/label.go b/test/e2e/label/label.go index 6f81c9ceb1..61448bf930 
100644 --- a/test/e2e/label/label.go +++ b/test/e2e/label/label.go @@ -40,3 +40,11 @@ func processOverrides(s string) string { } return overRide } + +// Extended returns a label used to label extended feature tests. This label +// might be used to label feature tests that are considered not to be testing +// the core functionality of a feature and that might be filtered out for +// various reasons like for example to keep selected job run times down. +func Extended() ginkgo.Labels { + return ginkgo.Label("EXTENDED") +} diff --git a/test/e2e/route_advertisements.go b/test/e2e/route_advertisements.go index 59b3eb4a0b..f65dd60631 100644 --- a/test/e2e/route_advertisements.go +++ b/test/e2e/route_advertisements.go @@ -21,9 +21,11 @@ import ( udnv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" udnclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" + "github.com/ovn-org/ovn-kubernetes/test/e2e/label" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" @@ -53,7 +55,7 @@ const ( bgpExternalNetworkName = "bgpnet" ) -var _ = ginkgo.Describe("BGP: Pod to external server when default podNetwork is advertised", func() { +var _ = ginkgo.Describe("BGP: Pod to external server when default podNetwork is advertised", feature.RouteAdvertisements, func() { var serverContainerIPs []string var frrContainerIPv4, frrContainerIPv6 string var nodes *corev1.NodeList @@ -248,7 +250,7 @@ var _ = ginkgo.Describe("BGP: Pod to external server when default podNetwork is }) }) -var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advertised", func() { +var _ = ginkgo.Describe("BGP: Pod to 
external server when CUDN network is advertised", feature.RouteAdvertisements, func() { var serverContainerIPs []string var frrContainerIPv4, frrContainerIPv6 string var nodes *corev1.NodeList @@ -525,7 +527,7 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert ) }) -var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks", +var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks", feature.RouteAdvertisements, func(cudnATemplate, cudnBTemplate *udnv1.ClusterUserDefinedNetwork) { const curlConnectionTimeoutCode = "28" const ( @@ -1068,7 +1070,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" ), ) -var _ = ginkgo.Describe("BGP: For a VRF-Lite configured network", func() { +var _ = ginkgo.Describe("BGP: For a VRF-Lite configured network", feature.RouteAdvertisements, func() { // testing helpers used throughout this testing node const ( @@ -1545,12 +1547,14 @@ var _ = ginkgo.Describe("BGP: For a VRF-Lite configured network", func() { otherNetworksToTest := []ginkgo.TableEntry{ ginkgo.Entry("Default", defaultNetwork, nil), - ginkgo.Entry("Layer 3 UDN non advertised", udn, otherLayer3NetworkSpec), - ginkgo.Entry("Layer 3 CUDN advertised", cudnAdvertised, otherLayer3NetworkSpec), ginkgo.Entry("Layer 3 CUDN advertised VRF-Lite", cudnAdvertisedVRFLite, otherLayer3NetworkSpec), - ginkgo.Entry("Layer 2 UDN non advertised", udn, otherLayer2NetworkSpec), - ginkgo.Entry("Layer 2 CUDN advertised", cudnAdvertised, otherLayer2NetworkSpec), ginkgo.Entry("Layer 2 CUDN advertised VRF-Lite", cudnAdvertisedVRFLite, otherLayer2NetworkSpec), + // The following testcases are labeled as extended, + // might not be run on all jobs + ginkgo.Entry("Layer 3 UDN non advertised", udn, otherLayer3NetworkSpec, label.Extended()), + ginkgo.Entry("Layer 3 CUDN advertised", cudnAdvertised, otherLayer3NetworkSpec, label.Extended()), + ginkgo.Entry("Layer 2 UDN non advertised", udn, 
otherLayer2NetworkSpec, label.Extended()), + ginkgo.Entry("Layer 2 CUDN advertised", cudnAdvertised, otherLayer2NetworkSpec, label.Extended()), } ginkgo.DescribeTableSubtree("Of type", diff --git a/test/scripts/e2e-cp.sh b/test/scripts/e2e-cp.sh index cbccc5ee29..096debe8a6 100755 --- a/test/scripts/e2e-cp.sh +++ b/test/scripts/e2e-cp.sh @@ -40,6 +40,14 @@ skip() { SKIPPED_TESTS+=$* } +SKIPPED_LABELED_TESTS="" +skip_label() { + if [ "$SKIPPED_LABELED_TESTS" != "" ]; then + SKIPPED_LABELED_TESTS+=" && " + fi + SKIPPED_LABELED_TESTS+="!($*)" +} + if [ "$PLATFORM_IPV4_SUPPORT" == true ]; then if [ "$PLATFORM_IPV6_SUPPORT" == true ]; then # No support for these features in dual-stack yet @@ -138,6 +146,11 @@ if [ "$ENABLE_ROUTE_ADVERTISEMENTS" != true ]; then skip $BGP_TESTS else if [ "$ADVERTISE_DEFAULT_NETWORK" = true ]; then + # Filter out extended RouteAdvertisements tests to keep job run time down + if [ "$ENABLE_NETWORK_SEGMENTATION" = true ]; then + skip_label "Feature:RouteAdvertisements && EXTENDED" + fi + # Some test don't work when the default network is advertised, either because # the configuration that the test excercises does not make sense for an advertised network, or # there is some bug or functional gap @@ -203,6 +216,7 @@ go test -test.timeout ${GO_TEST_TIMEOUT}m -v . \ -ginkgo.timeout ${TEST_TIMEOUT}m \ -ginkgo.flake-attempts ${FLAKE_ATTEMPTS:-2} \ -ginkgo.skip="${SKIPPED_TESTS}" \ + ${SKIPPED_LABELED_TESTS:+-ginkgo.label-filter="${SKIPPED_LABELED_TESTS}"} \ -ginkgo.junit-report=${E2E_REPORT_DIR}/junit_${E2E_REPORT_PREFIX}report.xml \ -provider skeleton \ -kubeconfig ${KUBECONFIG} \ From b90abc54e7a210c189336803e5c541538c64c5df Mon Sep 17 00:00:00 2001 From: Alin Serdean Date: Tue, 22 Jul 2025 17:30:06 +0200 Subject: [PATCH 160/181] fix: skip gw IP check for DPU and improve gateway initialization readability Skip GW IP check in case ovnkube is running in DPU mode. 
Extract interface address logic into dedicated helper function to improve code readability and maintainability in gateway initialization. - Add getInterfaceAddressesForNodeMode() helper function - Replace conditional logic with switch statement for better extensibility - Simplify initGatewayPreStart() by removing duplicate error handling - Improve code organization and reduce cognitive complexity Signed-off-by: Alin Gabriel Serdean Signed-off-by: Alin Serdean --- go-controller/pkg/node/gateway_init.go | 72 +++++++++++++------------- 1 file changed, 35 insertions(+), 37 deletions(-) diff --git a/go-controller/pkg/node/gateway_init.go b/go-controller/pkg/node/gateway_init.go index 1c2d79c98e..b4d11d69cf 100644 --- a/go-controller/pkg/node/gateway_init.go +++ b/go-controller/pkg/node/gateway_init.go @@ -9,7 +9,6 @@ import ( "github.com/vishvananda/netlink" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" utilnet "k8s.io/utils/net" @@ -188,6 +187,39 @@ func configureSvcRouteViaInterface(routeManager *routemanager.Controller, iface return nil } +// getNodePrimaryIfAddrs returns the appropriate interface addresses based on the node mode +func getNodePrimaryIfAddrs(watchFactory factory.NodeWatchFactory, nodeName string, gatewayIntf string) ([]*net.IPNet, error) { + switch config.OvnKubeNode.Mode { + case types.NodeModeDPU: + // For DPU mode, use the host IP address from node annotation + node, err := watchFactory.GetNode(nodeName) + if err != nil { + return nil, fmt.Errorf("error retrieving node %s: %v", nodeName, err) + } + + // Extract the primary DPU address annotation from the node + nodeIfAddr, err := util.GetNodePrimaryDPUHostAddrAnnotation(node) + if err != nil { + return nil, err + } + + if nodeIfAddr.IPv4 == "" { + return nil, fmt.Errorf("node primary DPU address annotation is empty for node %s", nodeName) + } + + nodeIP, nodeAddrs, err := net.ParseCIDR(nodeIfAddr.IPv4) + if err != nil { + return nil, fmt.Errorf("failed to parse 
node IP address %s: %v", nodeIfAddr.IPv4, err) + } + + nodeAddrs.IP = nodeIP + return []*net.IPNet{nodeAddrs}, nil + default: + // For other modes, get network interface IP addresses directly + return nodeutil.GetNetworkInterfaceIPAddresses(gatewayIntf) + } +} + // initGatewayPreStart executes the first part of the gateway initialization for the node. // It creates the gateway object, the node IP manager, openflow manager and node port watcher // once OVN controller is ready and the patch port exists for this node. @@ -215,46 +247,12 @@ func (nc *DefaultNodeNetworkController) initGatewayPreStart( egressGWInterface = interfaceForEXGW(config.Gateway.EgressGWInterface) } - ifAddrs, err = nodeutil.GetNetworkInterfaceIPAddresses(gatewayIntf) + // Get interface addresses based on node mode + ifAddrs, err = getNodePrimaryIfAddrs(nc.watchFactory, nc.name, gatewayIntf) if err != nil { return nil, err } - // For DPU mode, we need to use the host IP address which is stored as a Kubernetes - // node annotation rather than using the gateway interface IP addresses. - if config.OvnKubeNode.Mode == types.NodeModeDPU { - // Retrieve the current node object from the Kubernetes API - var node *corev1.Node - if node, err = nc.watchFactory.GetNode(nc.name); err != nil { - return nil, fmt.Errorf("error retrieving node %s: %v", nc.name, err) - } - - // Extract the primary DPU address annotation from the node - nodeIfAddr, err := util.GetNodePrimaryDPUHostAddrAnnotation(node) - if err != nil { - return nil, err - } - // For DPU mode, we only support IPv4 for now. 
- nodeAddrStr := nodeIfAddr.IPv4 - if nodeAddrStr == "" { - return nil, fmt.Errorf("node primary DPU address annotation is empty for node %s", nc.name) - } - - // Parse the IPv4 address string into IP and network components - nodeIP, nodeAddrs, err := net.ParseCIDR(nodeAddrStr) - if err != nil { - return nil, fmt.Errorf("failed to parse node IP address %s: %v", nodeAddrStr, err) - } - - // Set the parsed IP as the network address - nodeAddrs.IP = nodeIP - - // Create a new slice and replace ifAddrs with the DPU host address - // This overrides the gateway interface addresses for DPU mode - var gwIps []*net.IPNet - ifAddrs = append(gwIps, nodeAddrs) - } - if err := util.SetNodePrimaryIfAddrs(nodeAnnotator, ifAddrs); err != nil { klog.Errorf("Unable to set primary IP net label on node, err: %v", err) } From 410550fbb05be947ca8b5ad44dc73c3d2141d2ac Mon Sep 17 00:00:00 2001 From: Surya Seetharaman Date: Fri, 21 Mar 2025 08:52:58 +0100 Subject: [PATCH 161/181] Remove support for receiving advertised routes on nodes Today when default network or UDN networks are advertised using RAs the nodes also learn the routes of other nodes' pod subnets in the cluster. 
Example snippet of exposing a UDN network on non-vrflite usecase: root@ovn-worker2:/# ip r show table 1048 default via 172.18.0.1 dev breth0 mtu 1400 10.96.0.0/16 via 169.254.0.4 dev breth0 mtu 1400 10.244.0.0/24 nhid 39 via 172.18.0.4 dev breth0 proto bgp metric 20 10.244.2.0/24 nhid 40 via 172.18.0.3 dev breth0 proto bgp metric 20 103.103.0.0/24 nhid 39 via 172.18.0.4 dev breth0 proto bgp metric 20 103.103.1.0/24 nhid 40 via 172.18.0.3 dev breth0 proto bgp metric 20 169.254.0.3 via 203.203.1.1 dev ovn-k8s-mp12 169.254.0.34 dev ovn-k8s-mp12 mtu 1400 172.26.0.0/16 nhid 41 via 172.18.0.5 dev breth0 proto bgp metric 20 203.203.0.0/24 nhid 39 via 172.18.0.4 dev breth0 proto bgp metric 20 203.203.0.0/16 via 203.203.1.1 dev ovn-k8s-mp12 203.203.1.0/24 dev ovn-k8s-mp12 proto kernel scope link src 203.203.1.2 local 203.203.1.2 dev ovn-k8s-mp12 proto kernel scope host src 203.203.1.2 broadcast 203.203.1.255 dev ovn-k8s-mp12 proto kernel scope link src 203.203.1.2 203.203.2.0/24 nhid 40 via 172.18.0.3 dev breth0 proto bgp metric 20 root@ovn-worker2:/# ip r show table 1046 default via 172.18.0.1 dev breth0 mtu 1400 10.96.0.0/16 via 169.254.0.4 dev breth0 mtu 1400 10.244.0.0/24 nhid 39 via 172.18.0.4 dev breth0 proto bgp metric 20 10.244.2.0/24 nhid 40 via 172.18.0.3 dev breth0 proto bgp metric 20 103.103.0.0/24 nhid 39 via 172.18.0.4 dev breth0 proto bgp metric 20 103.103.0.0/16 via 103.103.2.1 dev ovn-k8s-mp11 103.103.1.0/24 nhid 40 via 172.18.0.3 dev breth0 proto bgp metric 20 103.103.2.0/24 dev ovn-k8s-mp11 proto kernel scope link src 103.103.2.2 local 103.103.2.2 dev ovn-k8s-mp11 proto kernel scope host src 103.103.2.2 broadcast 103.103.2.255 dev ovn-k8s-mp11 proto kernel scope link src 103.103.2.2 169.254.0.3 via 103.103.2.1 dev ovn-k8s-mp11 169.254.0.32 dev ovn-k8s-mp11 mtu 1400 172.26.0.0/16 nhid 41 via 172.18.0.5 dev breth0 proto bgp metric 20 203.203.0.0/24 nhid 39 via 172.18.0.4 dev breth0 proto bgp metric 20 203.203.2.0/24 nhid 40 via 172.18.0.3 dev breth0 proto 
bgp metric 20 root@ovn-worker2:/# this happens because we import routes from the default VRF: prefixes: - 103.103.0.0/24 - 2014:100:200::/64 - 2016:100:200::/64 - 203.203.0.0/24 - asn: 64512 imports: - vrf: default vrf: mp11-udn-vrf - asn: 64512 imports: - vrf: default vrf: mp12-udn-vrf nodeSelector: matchLabels: kubernetes.io/hostname: ovn-worker raw: {} root@ovn-worker2:/# ip r default via 172.18.0.1 dev breth0 mtu 1400 10.96.0.0/16 via 169.254.0.4 dev breth0 mtu 1400 10.244.0.0/24 nhid 39 via 172.18.0.4 dev breth0 proto bgp metric 20 10.244.2.0/24 nhid 40 via 172.18.0.3 dev breth0 proto bgp metric 20 103.103.0.0/24 nhid 39 via 172.18.0.4 dev breth0 proto bgp metric 20 103.103.1.0/24 nhid 40 via 172.18.0.3 dev breth0 proto bgp metric 20 169.254.0.3 via 203.203.1.1 dev ovn-k8s-mp12 169.254.0.34 dev ovn-k8s-mp12 mtu 1400 172.26.0.0/16 nhid 41 via 172.18.0.5 dev breth0 proto bgp metric 20 203.203.0.0/24 nhid 39 via 172.18.0.4 dev breth0 proto bgp metric 20 203.203.0.0/16 via 203.203.1.1 dev ovn-k8s-mp12 203.203.1.0/24 dev ovn-k8s-mp12 proto kernel scope link src 203.203.1.2 local 203.203.1.2 dev ovn-k8s-mp12 proto kernel scope host src 203.203.1.2 broadcast 203.203.1.255 dev ovn-k8s-mp12 proto kernel scope link src 203.203.1.2 203.203.2.0/24 nhid 40 via 172.18.0.3 dev breth0 proto bgp metric 20 which directly breaks UDN isolation. In this commit we are going to remove the support for receiving routes. So advertising routes will only advertise routes and we will no longer make the nodes receive these routes. However in the future when we support overlay-mode with BGP, we will need to re-add these routes and design a better isolation model with UDNs within the cluster if that is desired. 
Signed-off-by: Surya Seetharaman --- .../routeadvertisements/controller.go | 36 ++---------- .../routeadvertisements/controller_test.go | 55 ++++--------------- 2 files changed, 17 insertions(+), 74 deletions(-) diff --git a/go-controller/pkg/clustermanager/routeadvertisements/controller.go b/go-controller/pkg/clustermanager/routeadvertisements/controller.go index 18fb3dbaae..774a5341f3 100644 --- a/go-controller/pkg/clustermanager/routeadvertisements/controller.go +++ b/go-controller/pkg/clustermanager/routeadvertisements/controller.go @@ -593,14 +593,13 @@ func (c *Controller) generateFRRConfiguration( matchedNetworks sets.Set[string], ) (*frrtypes.FRRConfiguration, error) { routers := []frrtypes.Router{} - advertisements := sets.New(ra.Spec.Advertisements...) // go over the source routers for i, router := range source.Spec.BGP.Routers { targetVRF := ra.Spec.TargetVRF var matchedVRF, matchedNetwork string - var receivePrefixes, advertisePrefixes []string + var advertisePrefixes []string // We will use the router if: // - the router VRF matches the target VRF @@ -608,33 +607,25 @@ func (c *Controller) generateFRRConfiguration( // Prepare each scenario with a switch statement and check after that switch { case targetVRF == "auto" && router.VRF == "": - // match on default network/VRF, advertise node prefixes and receive - // any prefix of default network. 
+ // match on default network/VRF, advertise node prefixes matchedVRF = "" matchedNetwork = types.DefaultNetworkName advertisePrefixes = selectedNetworks.hostNetworkSubnets[matchedNetwork] - receivePrefixes = selectedNetworks.networkSubnets[matchedNetwork] case targetVRF == "auto": - // match router.VRF to network.VRF, advertise node prefixes and - // receive any prefix of the matched network + // match router.VRF to network.VRF, advertise node prefixes matchedVRF = router.VRF matchedNetwork = selectedNetworks.networkVRFs[matchedVRF] advertisePrefixes = selectedNetworks.hostNetworkSubnets[matchedNetwork] - receivePrefixes = selectedNetworks.networkSubnets[matchedNetwork] case targetVRF == "": - // match on default network/VRF, advertise node prefixes and - // receive any prefix of selected networks + // match on default network/VRF, advertise node prefixes matchedVRF = "" matchedNetwork = types.DefaultNetworkName advertisePrefixes = selectedNetworks.hostSubnets - receivePrefixes = selectedNetworks.subnets default: - // match router.VRF to network.VRF, advertise node prefixes and - // receive any prefix of selected networks + // match router.VRF to network.VRF, advertise node prefixes matchedVRF = targetVRF matchedNetwork = selectedNetworks.networkVRFs[matchedVRF] advertisePrefixes = selectedNetworks.hostSubnets - receivePrefixes = selectedNetworks.subnets } if matchedVRF != router.VRF || len(advertisePrefixes) == 0 { // either this router VRF does not match the target VRF or we don't @@ -669,7 +660,6 @@ func (c *Controller) generateFRRConfiguration( isIPV6 := utilnet.IsIPv6String(neighbor.Address) advertisePrefixes := util.MatchAllIPNetsStringFamily(isIPV6, advertisePrefixes) - receivePrefixes := util.MatchAllIPNetsStringFamily(isIPV6, receivePrefixes) if len(advertisePrefixes) == 0 { continue } @@ -680,22 +670,6 @@ func (c *Controller) generateFRRConfiguration( Prefixes: advertisePrefixes, }, } - neighbor.ToReceive = frrtypes.Receive{ - Allowed: 
frrtypes.AllowedInPrefixes{ - Mode: frrtypes.AllowRestricted, - }, - } - if advertisements.Has(ratypes.PodNetwork) { - for _, prefix := range receivePrefixes { - neighbor.ToReceive.Allowed.Prefixes = append(neighbor.ToReceive.Allowed.Prefixes, - frrtypes.PrefixSelector{ - Prefix: prefix, - LE: selectedNetworks.prefixLength[prefix], - GE: selectedNetworks.prefixLength[prefix], - }, - ) - } - } targetRouter.Neighbors = append(targetRouter.Neighbors, neighbor) } if len(targetRouter.Neighbors) == 0 { diff --git a/go-controller/pkg/clustermanager/routeadvertisements/controller_test.go b/go-controller/pkg/clustermanager/routeadvertisements/controller_test.go index 305418425c..cf8d58729f 100644 --- a/go-controller/pkg/clustermanager/routeadvertisements/controller_test.go +++ b/go-controller/pkg/clustermanager/routeadvertisements/controller_test.go @@ -152,7 +152,6 @@ type testNeighbor struct { ASN uint32 Address string DisableMP *bool - Receive []string Advertise []string } @@ -161,11 +160,6 @@ func (tn testNeighbor) Neighbor() frrapi.Neighbor { ASN: tn.ASN, Address: tn.Address, DisableMP: true, - ToReceive: frrapi.Receive{ - Allowed: frrapi.AllowedInPrefixes{ - Mode: frrapi.AllowRestricted, - }, - }, ToAdvertise: frrapi.Advertise{ Allowed: frrapi.AllowedOutPrefixes{ Mode: frrapi.AllowRestricted, @@ -176,31 +170,6 @@ func (tn testNeighbor) Neighbor() frrapi.Neighbor { if tn.DisableMP != nil { n.DisableMP = *tn.DisableMP } - for _, receive := range tn.Receive { - sep := strings.LastIndex(receive, "/") - if sep == -1 { - continue - } - if isLayer2 := strings.Count(receive, "/") == 1; isLayer2 { - n.ToReceive.Allowed.Prefixes = append(n.ToReceive.Allowed.Prefixes, - frrapi.PrefixSelector{ - Prefix: receive, - }, - ) - continue - } - - first := receive[:sep] - last := receive[sep+1:] - len := ovntest.MustAtoi(last) - n.ToReceive.Allowed.Prefixes = append(n.ToReceive.Allowed.Prefixes, - frrapi.PrefixSelector{ - Prefix: first, - GE: uint32(len), - LE: uint32(len), - }, - ) - } 
return n } @@ -433,7 +402,7 @@ func TestController_reconcile(t *testing.T) { NodeSelector: map[string]string{"kubernetes.io/hostname": "node"}, Routers: []*testRouter{ {ASN: 1, Prefixes: []string{"1.0.1.1/32", "1.1.0.0/24"}, Neighbors: []*testNeighbor{ - {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.0.1.1/32", "1.1.0.0/24"}, Receive: []string{"1.1.0.0/16/24"}}, + {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.0.1.1/32", "1.1.0.0/24"}}, }}, }}, }, @@ -465,8 +434,8 @@ func TestController_reconcile(t *testing.T) { NodeSelector: map[string]string{"kubernetes.io/hostname": "node"}, Routers: []*testRouter{ {ASN: 1, Prefixes: []string{"1.0.1.1/32", "1.1.0.0/24", "fd01::/64", "fd03::ffff:100:101/128"}, Neighbors: []*testNeighbor{ - {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.0.1.1/32", "1.1.0.0/24"}, Receive: []string{"1.1.0.0/16/24"}}, - {ASN: 1, Address: "fd02::ffff:100:64", Advertise: []string{"fd01::/64", "fd03::ffff:100:101/128"}, Receive: []string{"fd01::/48/64"}}, + {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.0.1.1/32", "1.1.0.0/24"}}, + {ASN: 1, Address: "fd02::ffff:100:64", Advertise: []string{"fd01::/64", "fd03::ffff:100:101/128"}}, }}, }}, }, @@ -503,7 +472,7 @@ func TestController_reconcile(t *testing.T) { NodeSelector: map[string]string{"kubernetes.io/hostname": "node"}, Routers: []*testRouter{ {ASN: 1, Prefixes: []string{"1.2.0.0/24", "1.3.0.0/24", "1.4.0.0/16", "1.5.0.0/16"}, Imports: []string{"black", "blue", "green", "red"}, Neighbors: []*testNeighbor{ - {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.2.0.0/24", "1.3.0.0/24", "1.4.0.0/16", "1.5.0.0/16"}, Receive: []string{"1.2.0.0/16/24", "1.3.0.0/16/24", "1.4.0.0/16", "1.5.0.0/16"}}, + {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.2.0.0/24", "1.3.0.0/24", "1.4.0.0/16", "1.5.0.0/16"}}, }}, {ASN: 1, VRF: "black", Imports: []string{"default"}}, {ASN: 1, VRF: "blue", Imports: []string{"default"}}, @@ -636,7 +605,7 @@ func TestController_reconcile(t *testing.T) 
{ NodeSelector: map[string]string{"kubernetes.io/hostname": "node"}, Routers: []*testRouter{ {ASN: 1, Prefixes: []string{"1.0.1.1/32", "1.1.0.0/24"}, Neighbors: []*testNeighbor{ - {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.0.1.1/32", "1.1.0.0/24"}, Receive: []string{"1.1.0.0/16/24"}}, + {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.0.1.1/32", "1.1.0.0/24"}}, }}, }, }, @@ -744,13 +713,13 @@ func TestController_reconcile(t *testing.T) { NodeSelector: map[string]string{"kubernetes.io/hostname": "node1"}, Routers: []*testRouter{ {ASN: 1, Prefixes: []string{"1.1.1.0/24"}, Neighbors: []*testNeighbor{ - {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.1.1.0/24"}, Receive: []string{"1.1.0.0/16/24"}}, + {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.1.1.0/24"}}, }}, {ASN: 1, VRF: "red", Prefixes: []string{"1.2.1.0/24"}, Neighbors: []*testNeighbor{ - {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.2.1.0/24"}, Receive: []string{"1.2.0.0/16/24"}}, + {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.2.1.0/24"}}, }}, {ASN: 1, VRF: "green", Prefixes: []string{"1.4.0.0/16"}, Neighbors: []*testNeighbor{ - {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.4.0.0/16"}, Receive: []string{"1.4.0.0/16"}}, + {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.4.0.0/16"}}, }}, }, }, @@ -760,7 +729,7 @@ func TestController_reconcile(t *testing.T) { NodeSelector: map[string]string{"kubernetes.io/hostname": "node2"}, Routers: []*testRouter{ {ASN: 1, Prefixes: []string{"1.1.2.0/24"}, Neighbors: []*testNeighbor{ - {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.1.2.0/24"}, Receive: []string{"1.1.0.0/16/24"}}, + {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.1.2.0/24"}}, }}, }, }, @@ -770,10 +739,10 @@ func TestController_reconcile(t *testing.T) { NodeSelector: map[string]string{"kubernetes.io/hostname": "node2"}, Routers: []*testRouter{ {ASN: 1, VRF: "red", Prefixes: []string{"1.2.2.0/24"}, Neighbors: []*testNeighbor{ - {ASN: 1, 
Address: "1.0.0.100", Advertise: []string{"1.2.2.0/24"}, Receive: []string{"1.2.0.0/16/24"}}, + {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.2.2.0/24"}}, }}, {ASN: 1, VRF: "green", Prefixes: []string{"1.4.0.0/16"}, Neighbors: []*testNeighbor{ - {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.4.0.0/16"}, Receive: []string{"1.4.0.0/16"}}, + {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.4.0.0/16"}}, }}, }, }, @@ -799,7 +768,7 @@ func TestController_reconcile(t *testing.T) { NodeSelector: map[string]string{"kubernetes.io/hostname": "node"}, Routers: []*testRouter{ {ASN: 1, Prefixes: []string{"1.0.1.1/32", "1.1.0.0/24"}, Neighbors: []*testNeighbor{ - {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.0.1.1/32", "1.1.0.0/24"}, Receive: []string{"1.1.0.0/16/24"}}, + {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.0.1.1/32", "1.1.0.0/24"}}, }}, }, }, From ea1b6a018e6d537ac3460474ce7e517fddd312dd Mon Sep 17 00:00:00 2001 From: Surya Seetharaman Date: Tue, 22 Jul 2025 14:14:07 +0200 Subject: [PATCH 162/181] Don't use match as a criterion for isEquivalentNAT This is a temporary commit - we need a proper followup. Please see https://github.com/ovn-kubernetes/ovn-kubernetes/issues/5407 for details. As of today all NATs created by OVN-Kubernetes are unique using the existing 5 tuple algo in IsEquivalentNAT - uuid, type of snat, logicalIP, logicalPort, externalIP, externalIDs. So it's OK to get rid of match. But it's not the correct way to fix this - in future we might have two NATs with all other fields except match being the same. 
Signed-off-by: Surya Seetharaman --- go-controller/pkg/libovsdb/ops/router.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/go-controller/pkg/libovsdb/ops/router.go b/go-controller/pkg/libovsdb/ops/router.go index 18b3931a1f..27e8e38d48 100644 --- a/go-controller/pkg/libovsdb/ops/router.go +++ b/go-controller/pkg/libovsdb/ops/router.go @@ -1035,7 +1035,7 @@ func BuildDNATAndSNATWithMatch( // isEquivalentNAT checks if the `searched` NAT is equivalent to `existing`. // Returns true if the UUID is set in `searched` and matches the UUID of `existing`. // Otherwise, perform the following checks: -// - Compare the Type and Match fields. +// - Compare the Type. // - Compare ExternalIP if it is set in `searched`. // - Compare LogicalIP if the Type in `searched` is SNAT. // - Compare LogicalPort if it is set in `searched`. @@ -1050,10 +1050,6 @@ func isEquivalentNAT(existing *nbdb.NAT, searched *nbdb.NAT) bool { return false } - if searched.Match != existing.Match { - return false - } - // Compare externalIP if it's not empty. if searched.ExternalIP != "" && searched.ExternalIP != existing.ExternalIP { return false From 15adf65d17ffa28a64fce27bbc73491b0800330c Mon Sep 17 00:00:00 2001 From: Surya Seetharaman Date: Thu, 19 Jun 2025 19:12:30 +0200 Subject: [PATCH 163/181] Advertised networks: SNAT Traffic to nodeIP This PR is adding SNAT for advertised UDNs and CDN if the destination of the traffic is towards other nodes in the cluster. This is a design change for BGP from before (where pod->node was not SNATed and podIP was preserved). 
For normal UDNs we have 2 SNATs: L3 UDN SNATs: 1) this cSNAT is added to ovn_cluster_router for LGW egress traffic and SGW KAPI/DNS traffic: _uuid : 5485a25f-7a83-4dc0-840c-bbfbd0784aad allowed_ext_ips : [] exempted_ext_ips : [] external_ids : {"k8s.ovn.org/network"=cluster_udn_tenant-green-network, "k8s.ovn.org/topology"=layer3} external_ip : "169.254.0.38" external_mac : [] external_port_range : "32768-60999" gateway_port : [] logical_ip : "203.203.0.0/24" logical_port : rtos-cluster_udn_tenant.green.network_ovn-control-plane match : "eth.dst == 0a:58:cb:cb:00:02" options : {stateless="false"} priority : 0 type : snat 2) this SNAT is added to GR for SGW egress traffic: _uuid : d85fd65f-e3f3-4d52-95f9-5f88c925aa5a allowed_ext_ips : [] exempted_ext_ips : [] external_ids : {"k8s.ovn.org/network"=cluster_udn_tenant-green-network, "k8s.ovn.org/topology"=layer3} external_ip : "169.254.0.37" external_mac : [] external_port_range : "32768-60999" gateway_port : [] logical_ip : "203.203.0.0/16" logical_port : [] match : "" options : {stateless="false"} priority : 0 type : snat for L2, we have the following two SNATs both on GR: _uuid : a4b9942f-ec1a-42ca-81d9-3e4885ff2470 allowed_ext_ips : [] exempted_ext_ips : [] external_ids : {"k8s.ovn.org/network"=cluster_udn_tenant-blue-network, "k8s.ovn.org/topology"=layer2} external_ip : "169.254.0.36" external_mac : [] external_port_range : "32768-60999" gateway_port : [] logical_ip : "93.93.0.0/16" logical_port : rtoj-GR_cluster_udn_tenant.blue.network_ovn-control-plane match : "eth.dst == 0a:58:5d:5d:00:02" options : {stateless="false"} priority : 0 type : snat and _uuid : 24164866-da95-4b6f-9c65-8b16fa202758 allowed_ext_ips : [] exempted_ext_ips : [] external_ids : {"k8s.ovn.org/network"=cluster_udn_tenant-blue-network, "k8s.ovn.org/topology"=layer2} external_ip : "169.254.0.35" external_mac : [] external_port_range : "32768-60999" gateway_port : [] logical_ip : "93.93.0.0/16" logical_port : [] match : "outport == 
\"rtoe-GR_cluster_udn_tenant.blue.network_ovn-control-plane\"" options : {stateless="false"} priority : 0 type : snat now with advertised networks these will change to: _uuid : a4b9942f-ec1a-42ca-81d9-3e4885ff2470 allowed_ext_ips : [] exempted_ext_ips : [] external_ids : {"k8s.ovn.org/network"=cluster_udn_tenant-blue-network, "k8s.ovn.org/topology"=layer2} external_ip : "169.254.0.36" external_mac : [] external_port_range : "32768-60999" gateway_port : [] logical_ip : "93.93.0.0/16" logical_port : rtoj-GR_cluster_udn_tenant.blue.network_ovn-control-plane match : "eth.dst == 0a:58:5d:5d:00:02 && (ip4.dst == $a712973235162149816)" options : {stateless="false"} priority : 0 type : snat _uuid : 24164866-da95-4b6f-9c65-8b16fa202758 allowed_ext_ips : [] exempted_ext_ips : [] external_ids : {"k8s.ovn.org/network"=cluster_udn_tenant-blue-network, "k8s.ovn.org/topology"=layer2} external_ip : "169.254.0.35" external_mac : [] external_port_range : "32768-60999" gateway_port : [] logical_ip : "93.93.0.0/16" logical_port : [] match : "outport == \"rtoe-GR_cluster_udn_tenant.blue.network_ovn-control-plane\" && ip4.dst == $a712973235162149816" options : {stateless="false"} priority : 0 type : snat _uuid : d85fd65f-e3f3-4d52-95f9-5f88c925aa5a allowed_ext_ips : [] exempted_ext_ips : [] external_ids : {"k8s.ovn.org/network"=cluster_udn_tenant-green-network, "k8s.ovn.org/topology"=layer3} external_ip : "169.254.0.37" external_mac : [] external_port_range : "32768-60999" gateway_port : [] logical_ip : "203.203.0.0/16" logical_port : [] match : "ip4.dst == $a712973235162149816" options : {stateless="false"} priority : 0 type : snat _uuid : 5485a25f-7a83-4dc0-840c-bbfbd0784aad allowed_ext_ips : [] exempted_ext_ips : [] external_ids : {"k8s.ovn.org/network"=cluster_udn_tenant-green-network, "k8s.ovn.org/topology"=layer3} external_ip : "169.254.0.38" external_mac : [] external_port_range : "32768-60999" gateway_port : [] logical_ip : "203.203.0.0/24" logical_port : 
rtos-cluster_udn_tenant.green.network_ovn-control-plane match : "eth.dst == 0a:58:cb:cb:00:02 && (ip4.dst == $a712973235162149816)" options : {stateless="false"} priority : 0 type : snat so basically we add this extra match for destination IPs to SNAT to masqueradeIP for that UDN note: with this PR we will break hardware offload for assymmetry traffix for BGP L2 As for the CDN, we have 1 SNAT with no matches on GR and that is being changed to now have a cSNAT in case the default network is advertised. NOTE: In -ds flag mode, the per-pod SNAT will have this match set. NOTE2: For all deleteNAT scenarios we purposefully don't pass snat as a match criteria Signed-off-by: Surya Seetharaman Signed-off-by: Surya Seetharaman --- .../ovn/base_network_controller_secondary.go | 46 ++++++++--- go-controller/pkg/ovn/egressgw.go | 12 +-- go-controller/pkg/ovn/egressip.go | 22 ++++-- go-controller/pkg/ovn/gateway.go | 51 +++++++++++- go-controller/pkg/ovn/gateway_test.go | 32 ++++++++ go-controller/pkg/ovn/master_test.go | 42 +++++++--- go-controller/pkg/ovn/namespace.go | 40 +++++++++- go-controller/pkg/ovn/pods.go | 20 ++++- .../secondary_layer2_network_controller.go | 77 ++++++++----------- .../secondary_layer3_network_controller.go | 44 +++-------- go-controller/pkg/util/multi_network.go | 12 --- 11 files changed, 269 insertions(+), 129 deletions(-) diff --git a/go-controller/pkg/ovn/base_network_controller_secondary.go b/go-controller/pkg/ovn/base_network_controller_secondary.go index f9c6d0b18f..253524ff87 100644 --- a/go-controller/pkg/ovn/base_network_controller_secondary.go +++ b/go-controller/pkg/ovn/base_network_controller_secondary.go @@ -812,7 +812,7 @@ func (oc *BaseSecondaryNetworkController) allowPersistentIPs() bool { // buildUDNEgressSNAT is used to build the conditional SNAT required on L3 and L2 UDNs to // steer traffic correctly via mp0 when leaving OVN to the host -func (bsnc *BaseSecondaryNetworkController) buildUDNEgressSNAT(localPodSubnets []*net.IPNet, 
outputPort string) ([]*nbdb.NAT, error) { +func (bsnc *BaseSecondaryNetworkController) buildUDNEgressSNAT(localPodSubnets []*net.IPNet, outputPort string, isUDNAdvertised bool) ([]*nbdb.NAT, error) { if len(localPodSubnets) == 0 { return nil, nil // nothing to do } @@ -828,10 +828,11 @@ func (bsnc *BaseSecondaryNetworkController) buildUDNEgressSNAT(localPodSubnets [ types.TopologyExternalID: bsnc.TopologyType(), } for _, localPodSubnet := range localPodSubnets { + ipFamily := utilnet.IPv4 + masqIP, err = udn.AllocateV4MasqueradeIPs(networkID) if utilnet.IsIPv6CIDR(localPodSubnet) { masqIP, err = udn.AllocateV6MasqueradeIPs(networkID) - } else { - masqIP, err = udn.AllocateV4MasqueradeIPs(networkID) + ipFamily = utilnet.IPv6 } if err != nil { return nil, err @@ -839,12 +840,43 @@ func (bsnc *BaseSecondaryNetworkController) buildUDNEgressSNAT(localPodSubnets [ if masqIP == nil { return nil, fmt.Errorf("masquerade IP cannot be empty network %s (%d): %v", bsnc.GetNetworkName(), networkID, err) } - snats = append(snats, libovsdbops.BuildSNATWithMatch(&masqIP.ManagementPort.IP, localPodSubnet, outputPort, - extIDs, getMasqueradeManagementIPSNATMatch(dstMac.String()))) + if !isUDNAdvertised { + snats = append(snats, libovsdbops.BuildSNATWithMatch(&masqIP.ManagementPort.IP, localPodSubnet, outputPort, + extIDs, getMasqueradeManagementIPSNATMatch(dstMac.String()))) + } else { + // For advertised networks, we need to SNAT any traffic leaving the pods from these networks towards the node IPs + // in the cluster. In order to do such a conditional SNAT, we need an address set that contains the node IPs in the cluster. + // Given that egressIP feature already has an address set containing these nodeIPs owned by the default network controller, let's re-use it. 
+ dbIDs := getEgressIPAddrSetDbIDs(NodeIPAddrSetName, types.DefaultNetworkName, DefaultNetworkControllerName) + addrSet, err := bsnc.addressSetFactory.GetAddressSet(dbIDs) + if err != nil { + return nil, fmt.Errorf("cannot ensure that addressSet %s exists: %w", NodeIPAddrSetName, err) + } + ipv4ClusterNodeIPAS, ipv6ClusterNodeIPAS := addrSet.GetASHashNames() + + snats = append(snats, libovsdbops.BuildSNATWithMatch(&masqIP.ManagementPort.IP, localPodSubnet, outputPort, + extIDs, fmt.Sprintf("%s && (%s)", getMasqueradeManagementIPSNATMatch(dstMac.String()), + getClusterNodesDestinationBasedSNATMatch(ipv4ClusterNodeIPAS, ipv6ClusterNodeIPAS, ipFamily)))) + } } return snats, nil } +func getMasqueradeManagementIPSNATMatch(dstMac string) string { + return fmt.Sprintf("eth.dst == %s", dstMac) +} + +// getClusterNodesDestinationBasedSNATMatch creates destination-based SNAT match for the specified IP family +func getClusterNodesDestinationBasedSNATMatch(ipv4ClusterNodeIPAS, ipv6ClusterNodeIPAS string, ipFamily utilnet.IPFamily) string { + var match string + if ipFamily == utilnet.IPv4 { + match = fmt.Sprintf("ip4.dst == $%s", ipv4ClusterNodeIPAS) + } else { + match = fmt.Sprintf("ip6.dst == $%s", ipv6ClusterNodeIPAS) + } + return match +} + func (bsnc *BaseSecondaryNetworkController) ensureDHCP(pod *corev1.Pod, podAnnotation *util.PodAnnotation, lsp *nbdb.LogicalSwitchPort) error { opts := []kubevirt.DHCPConfigsOpt{} @@ -867,10 +899,6 @@ func (bsnc *BaseSecondaryNetworkController) ensureDHCP(pod *corev1.Pod, podAnnot return kubevirt.EnsureDHCPOptionsForLSP(bsnc.controllerName, bsnc.nbClient, pod, podAnnotation.IPs, lsp, opts...) 
} -func getMasqueradeManagementIPSNATMatch(dstMac string) string { - return fmt.Sprintf("eth.dst == %s", dstMac) -} - func (bsnc *BaseSecondaryNetworkController) requireDHCP(pod *corev1.Pod) bool { // Configure DHCP only for kubevirt VMs layer2 primary udn with subnets return kubevirt.IsPodOwnedByVirtualMachine(pod) && diff --git a/go-controller/pkg/ovn/egressgw.go b/go-controller/pkg/ovn/egressgw.go index b607a3b253..d9d8610aba 100644 --- a/go-controller/pkg/ovn/egressgw.go +++ b/go-controller/pkg/ovn/egressgw.go @@ -589,7 +589,7 @@ func (oc *DefaultNetworkController) deletePodSNAT(nodeName string, extIPs, podIP return nil } // Default network does not set any matches in Pod SNAT - ops, err := deletePodSNATOps(oc.nbClient, nil, oc.GetNetworkScopedGWRouterName(nodeName), extIPs, podIPNets, "") + ops, err := deletePodSNATOps(oc.nbClient, nil, oc.GetNetworkScopedGWRouterName(nodeName), extIPs, podIPNets) if err != nil { return err } @@ -639,8 +639,8 @@ func getExternalIPsGR(watchFactory *factory.WatchFactory, nodeName string) ([]*n // deletePodSNATOps creates ovsdb operation that removes per pod SNAT rules towards the nodeIP that are applied to the GR where the pod resides // used when disableSNATMultipleGWs=true -func deletePodSNATOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, gwRouterName string, extIPs, podIPNets []*net.IPNet, match string) ([]ovsdb.Operation, error) { - nats, err := buildPodSNAT(extIPs, podIPNets, match) +func deletePodSNATOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, gwRouterName string, extIPs, podIPNets []*net.IPNet) ([]ovsdb.Operation, error) { + nats, err := buildPodSNAT(extIPs, podIPNets, "") // for delete, match is not needed - we try to cleanup all the SNATs that match the isEquivalentNAT predicate if err != nil { return nil, err } @@ -657,7 +657,7 @@ func deletePodSNATOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, gwR // addOrUpdatePodSNAT adds or updates per pod SNAT rules towards the nodeIP that are 
applied to the GR where the pod resides // used when disableSNATMultipleGWs=true func addOrUpdatePodSNAT(nbClient libovsdbclient.Client, gwRouterName string, extIPs, podIfAddrs []*net.IPNet) error { - ops, err := addOrUpdatePodSNATOps(nbClient, gwRouterName, extIPs, podIfAddrs, nil) + ops, err := addOrUpdatePodSNATOps(nbClient, gwRouterName, extIPs, podIfAddrs, "", nil) if err != nil { return err } @@ -670,9 +670,9 @@ func addOrUpdatePodSNAT(nbClient libovsdbclient.Client, gwRouterName string, ext // addOrUpdatePodSNATOps returns the operation that adds or updates per pod SNAT rules towards the nodeIP that are // applied to the GR where the pod resides // used when disableSNATMultipleGWs=true -func addOrUpdatePodSNATOps(nbClient libovsdbclient.Client, gwRouterName string, extIPs, podIfAddrs []*net.IPNet, ops []ovsdb.Operation) ([]ovsdb.Operation, error) { +func addOrUpdatePodSNATOps(nbClient libovsdbclient.Client, gwRouterName string, extIPs, podIfAddrs []*net.IPNet, snatMatch string, ops []ovsdb.Operation) ([]ovsdb.Operation, error) { gwRouter := &nbdb.LogicalRouter{Name: gwRouterName} - nats, err := buildPodSNAT(extIPs, podIfAddrs, "") + nats, err := buildPodSNAT(extIPs, podIfAddrs, snatMatch) if err != nil { return nil, err } diff --git a/go-controller/pkg/ovn/egressip.go b/go-controller/pkg/ovn/egressip.go index 5f50cefb95..ed018c0de3 100644 --- a/go-controller/pkg/ovn/egressip.go +++ b/go-controller/pkg/ovn/egressip.go @@ -249,7 +249,7 @@ func NewEIPController( // CASE 3.4: Both Namespace && Pod Selectors on Spec changed // } // -// NOTE: `Spec.EgressIPs“ updates for EIP object are not processed here, that is the job of cluster manager +// NOTE: `Spec.EgressIPs` updates for EIP object are not processed here, that is the job of cluster manager // // We only care about `Spec.NamespaceSelector`, `Spec.PodSelector` and `Status` field func (e *EgressIPController) reconcileEgressIP(old, new *egressipv1.EgressIP) (err error) { @@ -2594,9 +2594,21 @@ func (e
*EgressIPController) addExternalGWPodSNATOps(ni util.NetInfo, ops []ovsd if err != nil { return nil, err } - ops, err = addOrUpdatePodSNATOps(e.nbClient, ni.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, podIPs, ops) - if err != nil { - return nil, err + + // Handle each pod IP individually since each IP family needs its own SNAT match + for _, podIP := range podIPs { + ipFamily := utilnet.IPv4 + if utilnet.IsIPv6CIDR(podIP) { + ipFamily = utilnet.IPv6 + } + snatMatch, err := GetNetworkScopedClusterSubnetSNATMatch(e.nbClient, ni, pod.Spec.NodeName, util.IsPodNetworkAdvertisedAtNode(ni, pod.Spec.NodeName), ipFamily) + if err != nil { + return nil, fmt.Errorf("failed to get SNAT match for node %s for network %s: %w", pod.Spec.NodeName, ni.GetNetworkName(), err) + } + ops, err = addOrUpdatePodSNATOps(e.nbClient, ni.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, []*net.IPNet{podIP}, snatMatch, ops) + if err != nil { + return nil, err + } } klog.V(5).Infof("Adding SNAT on %s since egress node managing %s/%s was the same: %s", pod.Spec.NodeName, pod.Namespace, pod.Name, status.Node) } @@ -2617,7 +2629,7 @@ func (e *EgressIPController) deleteExternalGWPodSNATOps(ni util.NetInfo, ops []o if err != nil { return nil, err } - ops, err = deletePodSNATOps(e.nbClient, ops, ni.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, affectedIPs, "") + ops, err = deletePodSNATOps(e.nbClient, ops, ni.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, affectedIPs) if err != nil { return nil, err } diff --git a/go-controller/pkg/ovn/gateway.go b/go-controller/pkg/ovn/gateway.go index a43adf5368..66bce44dfe 100644 --- a/go-controller/pkg/ovn/gateway.go +++ b/go-controller/pkg/ovn/gateway.go @@ -25,6 +25,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node" + addressset 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/gateway" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/gatewayrouter" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" @@ -42,7 +43,6 @@ type GatewayManager struct { nbClient libovsdbclient.Client netInfo util.NetInfo watchFactory *factory.WatchFactory - // Cluster wide Load_Balancer_Group UUID. // Includes all node switches and node gateway routers. clusterLoadBalancerGroupUUID string @@ -764,7 +764,9 @@ func (gw *GatewayManager) updateGWRouterNAT(nodeName string, clusterIPSubnet []* nats := make([]*nbdb.NAT, 0, len(clusterIPSubnet)) var nat *nbdb.NAT - if (!config.Gateway.DisableSNATMultipleGWs || gw.netInfo.IsPrimaryNetwork()) && !gw.isRoutingAdvertised(nodeName) { + // DisableSNATMultipleGWs is only applicable to cluster default network and not to user defined networks. + // For user defined networks, we always add SNAT rules regardless of whether the network is advertised or not. + if !config.Gateway.DisableSNATMultipleGWs || gw.netInfo.IsPrimaryNetwork() { // Default SNAT rules. DisableSNATMultipleGWs=false in LGW (traffic egresses via mp0) always. // We are not checking for gateway mode to be shared explicitly to reduce topology differences. 
for _, entry := range clusterIPSubnet { @@ -774,7 +776,17 @@ func (gw *GatewayManager) updateGWRouterNAT(nodeName string, clusterIPSubnet []* gw.gwRouterName, err) } - nat = libovsdbops.BuildSNATWithMatch(&externalIP[0], entry, "", extIDs, gw.netInfo.GetNetworkScopedClusterSubnetSNATMatch(nodeName)) + // Get the match for this specific subnet's IP family + ipFamily := utilnet.IPv4 + if utilnet.IsIPv6CIDR(entry) { + ipFamily = utilnet.IPv6 + } + snatMatch, err := GetNetworkScopedClusterSubnetSNATMatch(gw.nbClient, gw.netInfo, nodeName, gw.isRoutingAdvertised(nodeName), ipFamily) + if err != nil { + return fmt.Errorf("failed to get SNAT match for node %s for network %s: %w", nodeName, gw.netInfo.GetNetworkName(), err) + } + + nat = libovsdbops.BuildSNATWithMatch(&externalIP[0], entry, "", extIDs, snatMatch) nats = append(nats, nat) } err = libovsdbops.CreateOrUpdateNATs(gw.nbClient, gwRouter, nats...) @@ -784,7 +796,7 @@ func (gw *GatewayManager) updateGWRouterNAT(nodeName string, clusterIPSubnet []* } else { // ensure we do not have any leftover SNAT entries after an upgrade for _, logicalSubnet := range clusterIPSubnet { - nat = libovsdbops.BuildSNATWithMatch(nil, logicalSubnet, "", extIDs, gw.netInfo.GetNetworkScopedClusterSubnetSNATMatch(nodeName)) + nat = libovsdbops.BuildSNAT(nil, logicalSubnet, "", extIDs) nats = append(nats, nat) } err = libovsdbops.DeleteNATs(gw.nbClient, gwRouter, nats...) @@ -902,6 +914,37 @@ func (gw *GatewayManager) gatewayInit( return nil } +// GetNetworkScopedClusterSubnetSNATMatch returns the match for the SNAT rule for the cluster default network +// and the match for the SNAT rule for the L3/L2 user defined network. +// If the network is not advertised: +// - For Layer2 topology, the match is the output port of the GR to the join switch since in L2 there is only 1 router but two cSNATs. +// - For Layer3 topology, the match is empty. 
+// If the network is advertised: +// - For Layer2 topology, the match is the output port of the GR to the join switch and the destination must be a nodeIP in the cluster. +// - For Layer3 topology, the match is the destination must be a nodeIP in the cluster. +func GetNetworkScopedClusterSubnetSNATMatch(nbClient libovsdbclient.Client, netInfo util.NetInfo, nodeName string, isNetworkAdvertised bool, ipFamily utilnet.IPFamily) (string, error) { + if !isNetworkAdvertised { + if netInfo.TopologyType() != types.Layer2Topology { + return "", nil + } + return fmt.Sprintf("outport == %q", types.GWRouterToExtSwitchPrefix+netInfo.GetNetworkScopedGWRouterName(nodeName)), nil + } else { + // if the network is advertised, we need to ensure that the SNAT exists with the correct conditional destination match + dbIDs := getEgressIPAddrSetDbIDs(NodeIPAddrSetName, types.DefaultNetworkName, DefaultNetworkControllerName) + addressSetFactory := addressset.NewOvnAddressSetFactory(nbClient, config.IPv4Mode, config.IPv6Mode) + addrSet, err := addressSetFactory.GetAddressSet(dbIDs) + if err != nil { + return "", fmt.Errorf("cannot ensure that addressSet %s exists %v", NodeIPAddrSetName, err) + } + ipv4ClusterNodeIPAS, ipv6ClusterNodeIPAS := addrSet.GetASHashNames() + destinationMatch := getClusterNodesDestinationBasedSNATMatch(ipv4ClusterNodeIPAS, ipv6ClusterNodeIPAS, ipFamily) + if netInfo.TopologyType() != types.Layer2Topology { + return destinationMatch, nil + } + return fmt.Sprintf("outport == %q && (%s)", types.GWRouterToExtSwitchPrefix+netInfo.GetNetworkScopedGWRouterName(nodeName), destinationMatch), nil + } +} + // addExternalSwitch creates a switch connected to the external bridge and connects it to // the gateway router func (gw *GatewayManager) addExternalSwitch(prefix, interfaceID, gatewayRouter, macAddress, physNetworkName string, ipAddresses []*net.IPNet, vlanID *uint) error { diff --git a/go-controller/pkg/ovn/gateway_test.go b/go-controller/pkg/ovn/gateway_test.go index 
61f89e831d..893d17ad09 100644 --- a/go-controller/pkg/ovn/gateway_test.go +++ b/go-controller/pkg/ovn/gateway_test.go @@ -65,6 +65,15 @@ func generateGatewayInitExpectedNB(testData []libovsdbtest.TestData, expectedOVN expectedNodeSwitch *nbdb.LogicalSwitch, nodeName string, clusterIPSubnets []*net.IPNet, hostSubnets []*net.IPNet, l3GatewayConfig *util.L3GatewayConfig, joinLRPIPs, defLRPIPs []*net.IPNet, skipSnat bool, nodeMgmtPortIP, gatewayMTU string) []libovsdbtest.TestData { + return generateGatewayInitExpectedNBWithPodNetworkAdvertised(testData, expectedOVNClusterRouter, expectedNodeSwitch, + nodeName, clusterIPSubnets, hostSubnets, l3GatewayConfig, joinLRPIPs, defLRPIPs, skipSnat, nodeMgmtPortIP, + gatewayMTU, false) // Default to no pod network advertised +} + +func generateGatewayInitExpectedNBWithPodNetworkAdvertised(testData []libovsdbtest.TestData, expectedOVNClusterRouter *nbdb.LogicalRouter, + expectedNodeSwitch *nbdb.LogicalSwitch, nodeName string, clusterIPSubnets []*net.IPNet, hostSubnets []*net.IPNet, + l3GatewayConfig *util.L3GatewayConfig, joinLRPIPs, defLRPIPs []*net.IPNet, skipSnat bool, nodeMgmtPortIP, + gatewayMTU string, isPodNetworkAdvertised bool) []libovsdbtest.TestData { GRName := "GR_" + nodeName gwSwitchPort := types.JoinSwitchToGWRouterPrefix + GRName @@ -214,6 +223,16 @@ func generateGatewayInitExpectedNB(testData []libovsdbtest.TestData, expectedOVN }, Networks: networks, }) + var egressNodeIPsASv4, egressNodeIPsASv6 *nbdb.AddressSet + if config.OVNKubernetesFeature.EnableEgressIP { + egressNodeIPsASv4, egressNodeIPsASv6 = buildEgressIPNodeAddressSets(physicalIPs) + if config.IPv4Mode { + testData = append(testData, egressNodeIPsASv4) + } + if config.IPv6Mode { + testData = append(testData, egressNodeIPsASv6) + } + } natUUIDs := make([]string, 0, len(clusterIPSubnets)) if !skipSnat { @@ -231,6 +250,19 @@ func generateGatewayInitExpectedNB(testData []libovsdbtest.TestData, expectedOVN if config.Gateway.Mode != 
config.GatewayModeDisabled { nat.ExternalPortRange = config.DefaultEphemeralPortRange } + if isPodNetworkAdvertised { + // IPv6 pod network + if utilnet.IsIPv6CIDR(subnet) { + if egressNodeIPsASv6 != nil { + nat.Match = fmt.Sprintf("ip6.dst == $%s", egressNodeIPsASv6.Name) + } + } else { + // IPv4 pod network + if egressNodeIPsASv4 != nil { + nat.Match = fmt.Sprintf("ip4.dst == $%s", egressNodeIPsASv4.Name) + } + } + } testData = append(testData, &nat) } } diff --git a/go-controller/pkg/ovn/master_test.go b/go-controller/pkg/ovn/master_test.go index b7b902f740..866b6309fa 100644 --- a/go-controller/pkg/ovn/master_test.go +++ b/go-controller/pkg/ovn/master_test.go @@ -963,6 +963,7 @@ var _ = ginkgo.Describe("Default network controller operations", func() { // Restore global default values before each testcase gomega.Expect(config.PrepareTestConfig()).To(gomega.Succeed()) fakeOvn = NewFakeOVN(true) + config.OVNKubernetesFeature.EnableEgressIP = true app = cli.NewApp() app.Name = "test" @@ -1043,6 +1044,19 @@ var _ = ginkgo.Describe("Default network controller operations", func() { l3GatewayConfig = node1.gatewayConfig(config.GatewayModeLocal, uint(vlanID)) err = util.SetL3GatewayConfig(nodeAnnotator, l3GatewayConfig) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if config.OVNKubernetesFeature.EnableEgressIP { + physicalIPs := []string{} + for _, ip := range l3GatewayConfig.IPAddresses { + physicalIPs = append(physicalIPs, ip.IP.String()) + } + egressNodeIPsASv4, egressNodeIPsASv6 := buildEgressIPNodeAddressSets(physicalIPs) + if config.IPv4Mode { + dbSetup.NBData = append(dbSetup.NBData, egressNodeIPsASv4) + } + if config.IPv6Mode { + dbSetup.NBData = append(dbSetup.NBData, egressNodeIPsASv6) + } + } err = util.UpdateNodeManagementPortMACAddresses(&testNode, nodeAnnotator, ovntest.MustParseMAC(node1.NodeMgmtPortMAC), types.DefaultNetworkName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1249,6 +1263,7 @@ var _ = ginkgo.Describe("Default network controller 
operations", func() { "reconciles pod network SNATs from syncGateway", func(condition func(*DefaultNetworkController) error, expectedExtraNATs ...*nbdb.NAT) { app.Action = func(ctx *cli.Context) error { + // Initialize config from CLI flags (including --init-gateways) _, err := config.InitConfig(ctx, nil, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1258,6 +1273,10 @@ var _ = ginkgo.Describe("Default network controller operations", func() { _, err = fakeClient.KubeClient.CoreV1().Pods(ns.Name).Create(context.TODO(), &pod, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // generate specific test conditions (after base config is set) + err = condition(oc) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // Let the real code run and ensure OVN database sync gomega.Expect(oc.WatchNodes()).To(gomega.Succeed()) @@ -1268,10 +1287,6 @@ var _ = ginkgo.Describe("Default network controller operations", func() { err = libovsdbops.CreateOrUpdateNATs(nbClient, GR, extraNats...) 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) - // generate specific test conditions - err = condition(oc) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - // ensure the stale SNAT's are cleaned up gomega.Expect(oc.StartServiceController(wg, false)).To(gomega.Succeed()) subnet := ovntest.MustParseIPNet(node1.NodeSubnet) @@ -1281,19 +1296,23 @@ var _ = ginkgo.Describe("Default network controller operations", func() { err = oc.syncNodeGateway(testNode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - skipSnat := config.Gateway.DisableSNATMultipleGWs || oc.isPodNetworkAdvertisedAtNode(node1.Name) + skipSnat := config.Gateway.DisableSNATMultipleGWs && !oc.GetNetInfo().IsPrimaryNetwork() var clusterSubnets []*net.IPNet for _, clusterSubnet := range config.Default.ClusterSubnets { clusterSubnets = append(clusterSubnets, clusterSubnet.CIDR) } expectedNBDatabaseState = addNodeLogicalFlowsWithServiceController(nil, expectedOVNClusterRouter, expectedNodeSwitch, expectedClusterRouterPortGroup, expectedClusterPortGroup, &node1, oc.svcTemplateSupport) - expectedNBDatabaseState = generateGatewayInitExpectedNB(expectedNBDatabaseState, expectedOVNClusterRouter, - expectedNodeSwitch, node1.Name, clusterSubnets, []*net.IPNet{subnet}, l3GatewayConfig, - []*net.IPNet{classBIPAddress(node1.LrpIP)}, []*net.IPNet{classBIPAddress(node1.DrLrpIP)}, - skipSnat, node1.NodeMgmtPortIP, "1400") - - if oc.isPodNetworkAdvertisedAtNode(node1.Name) { + if !oc.isPodNetworkAdvertisedAtNode(node1.Name) { + expectedNBDatabaseState = generateGatewayInitExpectedNB(expectedNBDatabaseState, expectedOVNClusterRouter, + expectedNodeSwitch, node1.Name, clusterSubnets, []*net.IPNet{subnet}, l3GatewayConfig, + []*net.IPNet{classBIPAddress(node1.LrpIP)}, []*net.IPNet{classBIPAddress(node1.DrLrpIP)}, + skipSnat, node1.NodeMgmtPortIP, "1400") + } else { + expectedNBDatabaseState = generateGatewayInitExpectedNBWithPodNetworkAdvertised(expectedNBDatabaseState, expectedOVNClusterRouter, + expectedNodeSwitch, 
node1.Name, clusterSubnets, []*net.IPNet{subnet}, l3GatewayConfig, + []*net.IPNet{classBIPAddress(node1.LrpIP)}, []*net.IPNet{classBIPAddress(node1.DrLrpIP)}, + skipSnat, node1.NodeMgmtPortIP, "1400", true) addrSet, err := oc.addressSetFactory.GetAddressSet(GetAdvertisedNetworkSubnetsAddressSetDBIDs()) gomega.Expect(err).NotTo(gomega.HaveOccurred()) expectedNBDatabaseState = generateAdvertisedUDNIsolationExpectedNB(expectedNBDatabaseState, oc.GetNetworkName(), oc.GetNetworkID(), clusterSubnets, expectedNodeSwitch, addrSet) @@ -1353,6 +1372,7 @@ var _ = ginkgo.Describe("Default network controller operations", func() { "When pod network is advertised and DisableSNATMultipleGWs is false", func(oc *DefaultNetworkController) error { config.Gateway.DisableSNATMultipleGWs = false + config.OVNKubernetesFeature.EnableEgressIP = true mutableNetInfo := util.NewMutableNetInfo(oc.GetNetInfo()) mutableNetInfo.SetPodNetworkAdvertisedVRFs(map[string][]string{"node1": {"vrf"}}) return oc.Reconcile(mutableNetInfo) diff --git a/go-controller/pkg/ovn/namespace.go b/go-controller/pkg/ovn/namespace.go index 01f189228b..07282de4df 100644 --- a/go-controller/pkg/ovn/namespace.go +++ b/go-controller/pkg/ovn/namespace.go @@ -8,10 +8,12 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" + utilnet "k8s.io/utils/net" "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" utilerrors "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/errors" @@ -234,9 +236,41 @@ func (oc *DefaultNetworkController) updateNamespace(old, newer *corev1.Namespace if err != nil { errors = append(errors, err) } else { - if extIPs, err := getExternalIPsGR(oc.watchFactory, pod.Spec.NodeName); err != nil { - errors = 
append(errors, err) - } else if err = addOrUpdatePodSNAT(oc.nbClient, oc.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, podAnnotation.IPs); err != nil { + // Helper function to handle the complex SNAT operations + handleSNATOps := func() error { + extIPs, err := getExternalIPsGR(oc.watchFactory, pod.Spec.NodeName) + if err != nil { + return err + } + + var ops []ovsdb.Operation + // Handle each pod IP individually since each IP family needs its own SNAT match + for _, podIP := range podAnnotation.IPs { + ipFamily := utilnet.IPv4 + if utilnet.IsIPv6CIDR(podIP) { + ipFamily = utilnet.IPv6 + } + snatMatch, err := GetNetworkScopedClusterSubnetSNATMatch(oc.nbClient, oc.GetNetInfo(), pod.Spec.NodeName, oc.isPodNetworkAdvertisedAtNode(pod.Spec.NodeName), ipFamily) + if err != nil { + return fmt.Errorf("failed to get SNAT match for node %s for network %s: %v", pod.Spec.NodeName, oc.GetNetworkName(), err) + } + ops, err = addOrUpdatePodSNATOps(oc.nbClient, oc.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, []*net.IPNet{podIP}, snatMatch, ops) + if err != nil { + return err + } + } + + // Execute all operations in a single transaction + if len(ops) > 0 { + _, err = libovsdbops.TransactAndCheck(oc.nbClient, ops) + if err != nil { + return fmt.Errorf("failed to update SNAT for pod %s on router %s: %v", pod.Name, oc.GetNetworkScopedGWRouterName(pod.Spec.NodeName), err) + } + } + return nil + } + + if err := handleSNATOps(); err != nil { errors = append(errors, err) } } diff --git a/go-controller/pkg/ovn/pods.go b/go-controller/pkg/ovn/pods.go index 5c3478f3cb..0ad9442e3e 100644 --- a/go-controller/pkg/ovn/pods.go +++ b/go-controller/pkg/ovn/pods.go @@ -12,6 +12,7 @@ import ( corev1 "k8s.io/api/core/v1" ktypes "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" + utilnet "k8s.io/utils/net" "github.com/ovn-kubernetes/libovsdb/ovsdb" @@ -310,13 +311,26 @@ func (oc *DefaultNetworkController) addLogicalPort(pod *corev1.Pod) (err error) if err != nil { return err } - 
} else if config.Gateway.DisableSNATMultipleGWs && !oc.isPodNetworkAdvertisedAtNode(pod.Spec.NodeName) { + } else if config.Gateway.DisableSNATMultipleGWs { // Add NAT rules to pods if disable SNAT is set and does not have // namespace annotations to go through external egress router if extIPs, err := getExternalIPsGR(oc.watchFactory, pod.Spec.NodeName); err != nil { return err - } else if ops, err = addOrUpdatePodSNATOps(oc.nbClient, oc.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, podAnnotation.IPs, ops); err != nil { - return err + } else { + // Handle each pod IP individually since each IP family needs its own SNAT match + for _, podIP := range podAnnotation.IPs { + ipFamily := utilnet.IPv4 + if utilnet.IsIPv6CIDR(podIP) { + ipFamily = utilnet.IPv6 + } + snatMatch, err := GetNetworkScopedClusterSubnetSNATMatch(oc.nbClient, oc.GetNetInfo(), pod.Spec.NodeName, oc.isPodNetworkAdvertisedAtNode(pod.Spec.NodeName), ipFamily) + if err != nil { + return fmt.Errorf("failed to get SNAT match for node %s for network %s: %v", pod.Spec.NodeName, oc.GetNetworkName(), err) + } + if ops, err = addOrUpdatePodSNATOps(oc.nbClient, oc.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, []*net.IPNet{podIP}, snatMatch, ops); err != nil { + return err + } + } } } diff --git a/go-controller/pkg/ovn/secondary_layer2_network_controller.go b/go-controller/pkg/ovn/secondary_layer2_network_controller.go index 7ce63fc278..dacf37d090 100644 --- a/go-controller/pkg/ovn/secondary_layer2_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer2_network_controller.go @@ -575,36 +575,40 @@ func (oc *SecondaryLayer2NetworkController) addUpdateLocalNodeEvent(node *corev1 gwManager := oc.gatewayManagerForNode(node.Name) oc.gatewayManagers.Store(node.Name, gwManager) - gwConfig, err := oc.nodeGatewayConfig(node) - if err != nil { - errs = append(errs, err) - oc.gatewaysFailed.Store(node.Name, true) - } else { + err := func() error { + gwConfig, err := 
oc.nodeGatewayConfig(node) + if err != nil { + return err + } if err := gwManager.SyncGateway( node, gwConfig, ); err != nil { - errs = append(errs, err) - oc.gatewaysFailed.Store(node.Name, true) - } else { - if !util.IsPodNetworkAdvertisedAtNode(oc, node.Name) { - err = oc.addUDNClusterSubnetEgressSNAT(gwConfig.hostSubnets, gwManager.gwRouterName) - if err == nil && util.IsRouteAdvertisementsEnabled() { - err = oc.deleteAdvertisedNetworkIsolation(node.Name) - } - } else { - err = oc.deleteUDNClusterSubnetEgressSNAT(gwConfig.hostSubnets, gwManager.gwRouterName) - if err == nil { - err = oc.addAdvertisedNetworkIsolation(node.Name) + return err + } + isUDNAdvertised := util.IsPodNetworkAdvertisedAtNode(oc, node.Name) + err = oc.addOrUpdateUDNClusterSubnetEgressSNAT(gwConfig.hostSubnets, gwManager.gwRouterName, isUDNAdvertised) + if err != nil { + return err + } + if !isUDNAdvertised { + if util.IsRouteAdvertisementsEnabled() { + if err = oc.deleteAdvertisedNetworkIsolation(node.Name); err != nil { + return err } } - if err != nil { - errs = append(errs, err) - oc.gatewaysFailed.Store(node.Name, true) - } else { - oc.gatewaysFailed.Delete(node.Name) + } else { + if err = oc.addAdvertisedNetworkIsolation(node.Name); err != nil { + return err } } + oc.gatewaysFailed.Delete(node.Name) + return nil + }() + + if err != nil { + errs = append(errs, err) + oc.gatewaysFailed.Store(node.Name, true) } } @@ -741,7 +745,8 @@ func (oc *SecondaryLayer2NetworkController) deleteNodeEvent(node *corev1.Node) e return nil } -// addUDNClusterSubnetEgressSNAT adds the SNAT on each node's GR in L2 networks +// addOrUpdateUDNClusterSubnetEgressSNAT adds or updates the SNAT on each node's GR in L2 networks for each UDN +// Based on the isUDNAdvertised flag, the SNAT matches are slightly different // snat eth.dst == d6:cf:fd:2c:a6:44 169.254.0.12 10.128.0.0/14 // snat eth.dst == d6:cf:fd:2c:a6:44 169.254.0.12 2010:100:200::/64 // these SNATs are required for pod2Egress traffic in LGW mode and 
pod2SameNode traffic in SGW mode to function properly on UDNs @@ -751,9 +756,12 @@ func (oc *SecondaryLayer2NetworkController) deleteNodeEvent(node *corev1.Node) e // externalIP = "169.254.0.12"; which is the masqueradeIP for this L2 UDN // so all in all we want to condionally SNAT all packets that are coming from pods hosted on this node, // which are leaving via UDN's mpX interface to the UDN's masqueradeIP. -func (oc *SecondaryLayer2NetworkController) addUDNClusterSubnetEgressSNAT(localPodSubnets []*net.IPNet, gwRouterName string) error { +// If isUDNAdvertised is true, then we want to SNAT all packets that are coming from pods on this network +// leaving towards nodeIPs on the cluster to masqueradeIP. If the network is advertised, then the SNAT looks like this: +// "eth.dst == 0a:58:5d:5d:00:02 && (ip4.dst == $a712973235162149816)" "169.254.0.36" "93.93.0.0/16" +func (oc *SecondaryLayer2NetworkController) addOrUpdateUDNClusterSubnetEgressSNAT(localPodSubnets []*net.IPNet, gwRouterName string, isUDNAdvertised bool) error { outputPort := types.GWRouterToJoinSwitchPrefix + gwRouterName - nats, err := oc.buildUDNEgressSNAT(localPodSubnets, outputPort) + nats, err := oc.buildUDNEgressSNAT(localPodSubnets, outputPort, isUDNAdvertised) if err != nil { return err } @@ -770,25 +778,6 @@ func (oc *SecondaryLayer2NetworkController) addUDNClusterSubnetEgressSNAT(localP return nil } -func (oc *SecondaryLayer2NetworkController) deleteUDNClusterSubnetEgressSNAT(localPodSubnets []*net.IPNet, routerName string) error { - outputPort := types.GWRouterToJoinSwitchPrefix + routerName - nats, err := oc.buildUDNEgressSNAT(localPodSubnets, outputPort) - if err != nil { - return err - } - if len(nats) == 0 { - return nil // nothing to do - } - router := &nbdb.LogicalRouter{ - Name: routerName, - } - if err := libovsdbops.DeleteNATs(oc.nbClient, router, nats...); err != nil { - return fmt.Errorf("failed to delete SNAT for cluster on router: %q for network %q, error: %w", - routerName,
oc.GetNetworkName(), err) - } - return nil -} - func (oc *SecondaryLayer2NetworkController) nodeGatewayConfig(node *corev1.Node) (*GatewayConfig, error) { l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(node) if err != nil { diff --git a/go-controller/pkg/ovn/secondary_layer3_network_controller.go b/go-controller/pkg/ovn/secondary_layer3_network_controller.go index b2355b9100..e9745fe9b2 100644 --- a/go-controller/pkg/ovn/secondary_layer3_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer3_network_controller.go @@ -857,7 +857,8 @@ func (oc *SecondaryLayer3NetworkController) addUpdateRemoteNodeEvent(node *corev return err } -// addNodeSubnetEgressSNAT adds the SNAT on each node's ovn-cluster-router in L3 networks +// addOrUpdateUDNNodeSubnetEgressSNAT adds or updates the SNAT on each node's ovn-cluster-router in L3 networks for each UDN +// Based on the isUDNAdvertised flag, the SNAT matches are slightly different // snat eth.dst == d6:cf:fd:2c:a6:44 169.254.0.12 10.128.0.0/24 // snat eth.dst == d6:cf:fd:2c:a6:44 169.254.0.12 2010:100:200::/64 // these SNATs are required for pod2Egress traffic in LGW mode and pod2SameNode traffic in SGW mode to function properly on UDNs @@ -867,9 +868,12 @@ func (oc *SecondaryLayer3NetworkController) addUpdateRemoteNodeEvent(node *corev // externalIP = "169.254.0.12"; which is the masqueradeIP for this L3 UDN // so all in all we want to condionally SNAT all packets that are coming from pods hosted on this node, // which are leaving via UDN's mpX interface to the UDN's masqueradeIP. -func (oc *SecondaryLayer3NetworkController) addUDNNodeSubnetEgressSNAT(localPodSubnets []*net.IPNet, node *corev1.Node) error { +// If isUDNAdvertised is true, then we want to SNAT all packets that are coming from pods on this network +// leaving towards nodeIPs on the cluster to masqueradeIP. 
If the network is advertised, then the SNAT looks like this: +// "eth.dst == 0a:58:5d:5d:00:02 && (ip4.dst == $a712973235162149816)" "169.254.0.36" "93.93.0.0/24" +func (oc *SecondaryLayer3NetworkController) addOrUpdateUDNNodeSubnetEgressSNAT(localPodSubnets []*net.IPNet, node *corev1.Node, isUDNAdvertised bool) error { outputPort := types.RouterToSwitchPrefix + oc.GetNetworkScopedName(node.Name) - nats, err := oc.buildUDNEgressSNAT(localPodSubnets, outputPort) + nats, err := oc.buildUDNEgressSNAT(localPodSubnets, outputPort, isUDNAdvertised) if err != nil { return fmt.Errorf("failed to build UDN masquerade SNATs for network %q on node %q, err: %w", oc.GetNetworkName(), node.Name, err) @@ -887,28 +891,6 @@ func (oc *SecondaryLayer3NetworkController) addUDNNodeSubnetEgressSNAT(localPodS return nil } -// deleteUDNNodeSubnetEgressSNAT deletes SNAT rule from network specific -// ovn_cluster_router depending on whether the network is advertised or not -func (oc *SecondaryLayer3NetworkController) deleteUDNNodeSubnetEgressSNAT(localPodSubnets []*net.IPNet, node *corev1.Node) error { - outputPort := types.RouterToSwitchPrefix + oc.GetNetworkScopedName(node.Name) - nats, err := oc.buildUDNEgressSNAT(localPodSubnets, outputPort) - if err != nil { - return fmt.Errorf("failed to build UDN masquerade SNATs for network %q on node %q, err: %w", - oc.GetNetworkName(), node.Name, err) - } - if len(nats) == 0 { - return nil // nothing to do - } - router := &nbdb.LogicalRouter{ - Name: oc.GetNetworkScopedClusterRouterName(), - } - if err := libovsdbops.DeleteNATs(oc.nbClient, router, nats...); err != nil { - return fmt.Errorf("failed to delete SNAT for node subnet on router: %q for network %q, error: %w", - oc.GetNetworkScopedClusterRouterName(), oc.GetNetworkName(), err) - } - return nil -} - func (oc *SecondaryLayer3NetworkController) addNode(node *corev1.Node) ([]*net.IPNet, error) { // Node subnet for the secondary layer3 network is allocated by cluster manager.
// Make sure that the node is allocated with the subnet before proceeding @@ -923,19 +905,17 @@ func (oc *SecondaryLayer3NetworkController) addNode(node *corev1.Node) ([]*net.I return nil, err } if util.IsNetworkSegmentationSupportEnabled() && oc.IsPrimaryNetwork() { - if !util.IsPodNetworkAdvertisedAtNode(oc, node.Name) { - if err := oc.addUDNNodeSubnetEgressSNAT(hostSubnets, node); err != nil { - return nil, err - } + isUDNAdvertised := util.IsPodNetworkAdvertisedAtNode(oc, node.Name) + if err := oc.addOrUpdateUDNNodeSubnetEgressSNAT(hostSubnets, node, isUDNAdvertised); err != nil { + return nil, err + } + if !isUDNAdvertised { if util.IsRouteAdvertisementsEnabled() { if err := oc.deleteAdvertisedNetworkIsolation(node.Name); err != nil { return nil, err } } } else { - if err := oc.deleteUDNNodeSubnetEgressSNAT(hostSubnets, node); err != nil { - return nil, err - } if err := oc.addAdvertisedNetworkIsolation(node.Name); err != nil { return nil, err } diff --git a/go-controller/pkg/util/multi_network.go b/go-controller/pkg/util/multi_network.go index fd91edd3be..841ab001b8 100644 --- a/go-controller/pkg/util/multi_network.go +++ b/go-controller/pkg/util/multi_network.go @@ -82,7 +82,6 @@ type NetInfo interface { GetNetworkScopedExtPortName(bridgeID, nodeName string) string GetNetworkScopedLoadBalancerName(lbName string) string GetNetworkScopedLoadBalancerGroupName(lbGroupName string) string - GetNetworkScopedClusterSubnetSNATMatch(nodeName string) string // GetNetInfo is an identity method used to get the specific NetInfo // implementation @@ -543,10 +542,6 @@ func (nInfo *DefaultNetInfo) GetNetworkScopedLoadBalancerGroupName(lbGroupName s return nInfo.GetNetworkScopedName(lbGroupName) } -func (nInfo *DefaultNetInfo) GetNetworkScopedClusterSubnetSNATMatch(_ string) string { - return "" -} - func (nInfo *DefaultNetInfo) canReconcile(netInfo NetInfo) bool { _, ok := netInfo.(*DefaultNetInfo) return ok @@ -738,13 +733,6 @@ func (nInfo *secondaryNetInfo) 
GetNetworkScopedLoadBalancerGroupName(lbGroupName return nInfo.GetNetworkScopedName(lbGroupName) } -func (nInfo *secondaryNetInfo) GetNetworkScopedClusterSubnetSNATMatch(nodeName string) string { - if nInfo.TopologyType() != types.Layer2Topology { - return "" - } - return fmt.Sprintf("outport == %q", types.GWRouterToExtSwitchPrefix+nInfo.GetNetworkScopedGWRouterName(nodeName)) -} - // getPrefix returns if the logical entities prefix for this network func (nInfo *secondaryNetInfo) getPrefix() string { return GetSecondaryNetworkPrefix(nInfo.netName) From f32731c22d96b250d3d484a713a83441e3fdb401 Mon Sep 17 00:00:00 2001 From: Surya Seetharaman Date: Wed, 2 Jul 2025 10:53:40 +0200 Subject: [PATCH 164/181] BGP,UDN,LGW: Ensure both masqueradeIP and podsubnet ip rules are present Given that some traffic like pod->node and pod->nodeport will be SNATed to nodeIP for UDNs, we will need iprules for both masqueradeIP and nodeIP to be present when networks are advertised. This is nothing complicated as keeping the masqueradeIP dangling around doesn't hurt anything (I hope :)) so for pod->node it follows the normal UDN LGW egress traffic flow: 1) pod->switch->ovn_cluster_router 2) SNAT at the router to masIP 3) ovn_cluster_router->switch->mpX 4) goes out and then reply coming from outside will hit these masqueradeIP rules to come back in since we snated to masqueradeIP on the way out, so we need both podsubnet and masqueradeIP rules for advertised networks for all other traffic no SNATing is done Signed-off-by: Surya Seetharaman --- go-controller/pkg/node/gateway_udn.go | 19 +++++++++++-------- go-controller/pkg/node/gateway_udn_test.go | 13 ++++--------- 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/go-controller/pkg/node/gateway_udn.go b/go-controller/pkg/node/gateway_udn.go index 026ecd94fc..f7d2e27a01 100644 --- a/go-controller/pkg/node/gateway_udn.go +++ b/go-controller/pkg/node/gateway_udn.go @@ -644,17 +644,20 @@ func (udng *UserDefinedNetworkGateway) 
getV6MasqueradeIP() (*net.IPNet, error) { // constructUDNVRFIPRules constructs rules that redirect matching packets // into the corresponding UDN VRF routing table. -// If the network is not advertised, an example of the rules we set for a -// network is: -// 2000: from all fwmark 0x1001 lookup 1007 -// 2000: from all to 169.254.0.12 lookup 1007 -// 2000: from all fwmark 0x1002 lookup 1009 -// 2000: from all to 169.254.0.14 lookup 1009 -// If the network is advertised, an example of the rules we set for a network is: +// If the network is not advertised, an example of the rules we set for two +// networks is: +// 2000: from all fwmark 0x1001 lookup 1007 +// 2000: from all to 169.254.0.12 lookup 1007 +// 2000: from all fwmark 0x1002 lookup 1009 +// 2000: from all to 169.254.0.14 lookup 1009 +// If the network is advertised, an example of the rules we set for two +// networks is: // 2000: from all fwmark 0x1001 lookup 1007 // 2000: from all to 10.132.0.0/14 lookup 1007 +// 2000: from all to 169.254.0.12 lookup 1007 // 2000: from all fwmark 0x1001 lookup 1009 // 2000: from all to 10.134.0.0/14 lookup 1009 +// 2000: from all to 169.254.0.14 lookup 1009 func (udng *UserDefinedNetworkGateway) constructUDNVRFIPRules(isNetworkAdvertised bool) ([]netlink.Rule, []netlink.Rule, error) { var addIPRules []netlink.Rule var delIPRules []netlink.Rule @@ -693,7 +696,7 @@ func (udng *UserDefinedNetworkGateway) constructUDNVRFIPRules(isNetworkAdvertise delIPRules = append(delIPRules, subnetIPRules...) default: addIPRules = append(addIPRules, subnetIPRules...) - delIPRules = append(delIPRules, masqIPRules...) + addIPRules = append(addIPRules, masqIPRules...) 
} return addIPRules, delIPRules, nil } diff --git a/go-controller/pkg/node/gateway_udn_test.go b/go-controller/pkg/node/gateway_udn_test.go index 34848faf7e..0ab0bf573b 100644 --- a/go-controller/pkg/node/gateway_udn_test.go +++ b/go-controller/pkg/node/gateway_udn_test.go @@ -1625,7 +1625,6 @@ func TestConstructUDNVRFIPRules(t *testing.T) { cidr := "" if config.IPv4Mode { cidr = "100.128.0.0/16/24" - } if config.IPv4Mode && config.IPv6Mode { cidr += ",ae70::/60/64" @@ -1711,8 +1710,6 @@ func TestConstructUDNVRFIPRulesPodNetworkAdvertised(t *testing.T) { table: 1007, dst: *ovntest.MustParseIPNet("100.128.0.0/16"), }, - }, - deleteRules: []testRule{ { priority: UDNMasqueradeIPRulePriority, family: netlink.FAMILY_V4, @@ -1738,8 +1735,6 @@ func TestConstructUDNVRFIPRulesPodNetworkAdvertised(t *testing.T) { table: 1009, dst: *ovntest.MustParseIPNet("ae70::/60"), }, - }, - deleteRules: []testRule{ { priority: UDNMasqueradeIPRulePriority, family: netlink.FAMILY_V6, @@ -1777,8 +1772,6 @@ func TestConstructUDNVRFIPRulesPodNetworkAdvertised(t *testing.T) { table: 1010, dst: *ovntest.MustParseIPNet("ae70::/60"), }, - }, - deleteRules: []testRule{ { priority: UDNMasqueradeIPRulePriority, family: netlink.FAMILY_V4, @@ -1813,9 +1806,9 @@ func TestConstructUDNVRFIPRulesPodNetworkAdvertised(t *testing.T) { cidr = "100.128.0.0/16/24" } if config.IPv4Mode && config.IPv6Mode { - cidr += ",ae70::/60" + cidr += ",ae70::/60/64" } else if config.IPv6Mode { - cidr = "ae70::/60" + cidr = "ae70::/60/64" } nad := ovntest.GenerateNAD("bluenet", "rednad", "greenamespace", types.Layer3Topology, cidr, types.NetworkRolePrimary) @@ -1844,6 +1837,8 @@ func TestConstructUDNVRFIPRulesPodNetworkAdvertised(t *testing.T) { udnGateway.vrfTableId = test.vrftableID rules, delRules, err := udnGateway.constructUDNVRFIPRules(true) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(rules).To(HaveLen(len(test.expectedRules))) + g.Expect(delRules).To(HaveLen(len(test.deleteRules))) for i, rule := range rules { 
g.Expect(rule.Priority).To(Equal(test.expectedRules[i].priority)) g.Expect(rule.Table).To(Equal(test.expectedRules[i].table)) From 501bcbff9f08ce32b7b3251f16b389b8a3ccdecc Mon Sep 17 00:00:00 2001 From: Surya Seetharaman Date: Wed, 9 Jul 2025 11:10:16 +0200 Subject: [PATCH 165/181] Convert LGW postrouting rules to NFT This commit is a prep-commit that converts the LGW POSTROUTING chain rules from IPT to NFT. Why do we need to do this now? It's because for BGP we want to use the PMTUD remote nodeIP NFT sets to also do conditional masquerading in Local Gateway mode for BGP when traffic leaves UDNs towards other nodes in the cluster or other nodeports. Given PMTUD rules are in NFT but the lgw and udn masquerade rules are in IPT - we'd need to pick one to express all - since we want to move to NFT, its better to go that route. Below is how the rules look like. chain ovn-kube-local-gw-masq { comment "OVN local gateway masquerade" type nat hook postrouting priority srcnat; policy accept; ip saddr 169.254.0.1 masquerade ip6 saddr fd69::1 masquerade jump ovn-kube-pod-subnet-masq jump ovn-kube-udn-masq } chain ovn-kube-pod-subnet-masq { ip saddr 10.244.2.0/24 masquerade ip6 saddr fd00:10:244:1::/64 masquerade } chain ovn-kube-udn-masq { comment "OVN UDN masquerade" ip saddr != 169.254.0.0/29 ip daddr != 10.96.0.0/16 ip saddr 169.254.0.0/17 masquerade ip6 saddr != fd69::/125 ip daddr != fd00:10:96::/112 ip6 saddr fd69::/112 masquerade } This commit was AI-Cursor-gemini/claude assissted under my supervision/prompting/reviewing/back-forth iterations Signed-off-by: Surya Seetharaman --- go-controller/pkg/node/gateway.go | 4 +- .../pkg/node/gateway_init_linux_test.go | 31 +- go-controller/pkg/node/gateway_iptables.go | 167 +--------- go-controller/pkg/node/gateway_localnet.go | 24 +- go-controller/pkg/node/gateway_nftables.go | 299 ++++++++++++++++++ 5 files changed, 342 insertions(+), 183 deletions(-) diff --git a/go-controller/pkg/node/gateway.go 
b/go-controller/pkg/node/gateway.go index 9b43fc95a5..7f11a0b813 100644 --- a/go-controller/pkg/node/gateway.go +++ b/go-controller/pkg/node/gateway.go @@ -522,8 +522,8 @@ func (g *gateway) updateSNATRules() error { subnets := util.IPsToNetworkIPs(g.nodeIPManager.mgmtPort.GetAddresses()...) if g.GetDefaultPodNetworkAdvertised() || config.Gateway.Mode != config.GatewayModeLocal { - return delLocalGatewayPodSubnetNATRules(subnets...) + return delLocalGatewayPodSubnetNFTRules() } - return addLocalGatewayPodSubnetNATRules(subnets...) + return addLocalGatewayPodSubnetNFTRules(subnets...) } diff --git a/go-controller/pkg/node/gateway_init_linux_test.go b/go-controller/pkg/node/gateway_init_linux_test.go index 6e8aadc0f5..9bc0cc5401 100644 --- a/go-controller/pkg/node/gateway_init_linux_test.go +++ b/go-controller/pkg/node/gateway_init_linux_test.go @@ -80,6 +80,17 @@ add chain inet ovn-kubernetes udn-service-prerouting { type filter hook prerouti add rule inet ovn-kubernetes udn-service-prerouting iifname != %s jump udn-service-mark add chain inet ovn-kubernetes udn-service-output { type filter hook output priority -150 ; comment "UDN services packet mark - Output" ; } add rule inet ovn-kubernetes udn-service-output jump udn-service-mark +add chain inet ovn-kubernetes ovn-kube-udn-masq { comment "OVN UDN masquerade" ; } +add rule inet ovn-kubernetes ovn-kube-udn-masq ip saddr != 169.254.169.0/29 ip daddr != 172.16.1.0/24 ip saddr 169.254.169.0/24 masquerade +add rule inet ovn-kubernetes ovn-kube-local-gw-masq jump ovn-kube-udn-masq +` + +const baseLGWNFTablesRules = ` +add rule inet ovn-kubernetes ovn-kube-local-gw-masq ip saddr 169.254.169.1 masquerade +add chain inet ovn-kubernetes ovn-kube-local-gw-masq { type nat hook postrouting priority 100 ; comment "OVN local gateway masquerade" ; } +add rule inet ovn-kubernetes ovn-kube-local-gw-masq jump ovn-kube-pod-subnet-masq +add rule inet ovn-kubernetes ovn-kube-pod-subnet-masq ip saddr 10.1.1.0/24 masquerade +add chain 
inet ovn-kubernetes ovn-kube-pod-subnet-masq ` func getBaseNFTRules(mgmtPort string) string { @@ -90,6 +101,10 @@ func getBaseNFTRules(mgmtPort string) string { return ret } +func getBaseLGWNFTablesRules(mgmtPort string) string { + return getBaseNFTRules(mgmtPort) + baseLGWNFTablesRules +} + func shareGatewayInterfaceTest(app *cli.App, testNS ns.NetNS, eth0Name, eth0MAC, eth0GWIP, eth0CIDR string, gatewayVLANID uint, l netlink.Link, hwOffload, setNodeIP bool) { const mtu string = "1234" @@ -1350,10 +1365,6 @@ OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0` "OVN-KUBE-EXTERNALIP": []string{ fmt.Sprintf("-p %s -d %s --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, externalIP, service.Spec.Ports[0].Port, service.Spec.ClusterIP, service.Spec.Ports[0].Port), }, - "POSTROUTING": []string{ - "-s 169.254.169.1 -j MASQUERADE", - "-s 10.1.1.0/24 -j MASQUERADE", - }, "OVN-KUBE-ETP": []string{}, "OVN-KUBE-ITP": []string{}, }, @@ -1379,16 +1390,6 @@ OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0` "OVN-KUBE-ITP": []string{}, }, } - if util.IsNetworkSegmentationSupportEnabled() { - expectedTables["nat"]["POSTROUTING"] = append(expectedTables["nat"]["POSTROUTING"], - "-j OVN-KUBE-UDN-MASQUERADE", - ) - expectedTables["nat"]["OVN-KUBE-UDN-MASQUERADE"] = append(expectedTables["nat"]["OVN-KUBE-UDN-MASQUERADE"], - "-s 169.254.169.0/29 -j RETURN", // this guarantees we don't SNAT default network masqueradeIPs - "-d 172.16.1.0/24 -j RETURN", // this guarantees we don't SNAT service traffic - "-s 169.254.169.0/24 -j MASQUERADE", // this guarantees we SNAT all UDN MasqueradeIPs traffic leaving the node - ) - } f4 := iptV4.(*util.FakeIPTables) err = f4.MatchState(expectedTables, map[util.FakePolicyKey]string{{ Table: "filter", @@ -1405,7 +1406,7 @@ OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0` err = f6.MatchState(expectedTables, nil) Expect(err).NotTo(HaveOccurred()) - expectedNFT := 
getBaseNFTRules(types.K8sMgmtIntfName) + expectedNFT := getBaseLGWNFTablesRules(types.K8sMgmtIntfName) err = nodenft.MatchNFTRules(expectedNFT, nft.Dump()) Expect(err).NotTo(HaveOccurred()) diff --git a/go-controller/pkg/node/gateway_iptables.go b/go-controller/pkg/node/gateway_iptables.go index e9b6b12387..90bffbe91f 100644 --- a/go-controller/pkg/node/gateway_iptables.go +++ b/go-controller/pkg/node/gateway_iptables.go @@ -21,11 +21,10 @@ import ( ) const ( - iptableNodePortChain = "OVN-KUBE-NODEPORT" // called from nat-PREROUTING and nat-OUTPUT - iptableExternalIPChain = "OVN-KUBE-EXTERNALIP" // called from nat-PREROUTING and nat-OUTPUT - iptableETPChain = "OVN-KUBE-ETP" // called from nat-PREROUTING only - iptableITPChain = "OVN-KUBE-ITP" // called from mangle-OUTPUT and nat-OUTPUT - iptableUDNMasqueradeChain = "OVN-KUBE-UDN-MASQUERADE" // called from nat-POSTROUTING + iptableNodePortChain = "OVN-KUBE-NODEPORT" // called from nat-PREROUTING and nat-OUTPUT + iptableExternalIPChain = "OVN-KUBE-EXTERNALIP" // called from nat-PREROUTING and nat-OUTPUT + iptableETPChain = "OVN-KUBE-ETP" // called from nat-PREROUTING only + iptableITPChain = "OVN-KUBE-ITP" // called from mangle-OUTPUT and nat-OUTPUT ) func clusterIPTablesProtocols() []iptables.Protocol { @@ -69,29 +68,11 @@ func restoreIptRulesFiltered(rules []nodeipt.Rule, filter map[string]map[string] return nodeipt.RestoreRulesFiltered(rules, filter) } -// appendIptRules adds the provided rules in an append fashion -// i.e each rule gets added at the last position in the chain -func appendIptRules(rules []nodeipt.Rule) error { - return nodeipt.AddRules(rules, true) -} - // deleteIptRules removes provided rules from the chain func deleteIptRules(rules []nodeipt.Rule) error { return nodeipt.DelRules(rules) } -// ensureChain ensures that a chain exists within a table -func ensureChain(table, chain string) error { - for _, proto := range clusterIPTablesProtocols() { - ipt, err := util.GetIPTablesHelper(proto) - if err 
!= nil { - return fmt.Errorf("failed to get IPTables helper to add UDN chain: %v", err) - } - addChaintoTable(ipt, table, chain) - } - return nil -} - func getGatewayInitRules(chain string, proto iptables.Protocol) []nodeipt.Rule { iptRules := []nodeipt.Rule{} if chain == iptableITPChain { @@ -403,123 +384,8 @@ func getLocalGatewayFilterRules(ifname string, cidr *net.IPNet) []nodeipt.Rule { } } -func getLocalGatewayPodSubnetNATRules(cidr *net.IPNet) []nodeipt.Rule { - protocol := getIPTablesProtocol(cidr.IP.String()) - return []nodeipt.Rule{ - { - Table: "nat", - Chain: "POSTROUTING", - Args: []string{ - "-s", cidr.String(), - "-j", "MASQUERADE", - }, - Protocol: protocol, - }, - } -} - -// getUDNMasqueradeRules is only called for local-gateway-mode -func getUDNMasqueradeRules(protocol iptables.Protocol) []nodeipt.Rule { - // the following rules are actively used only for the UDN Feature: - // -A POSTROUTING -j OVN-KUBE-UDN-MASQUERADE - // -A OVN-KUBE-UDN-MASQUERADE -s 169.254.0.0/29 -j RETURN - // -A OVN-KUBE-UDN-MASQUERADE -d 10.96.0.0/16 -j RETURN - // -A OVN-KUBE-UDN-MASQUERADE -s 169.254.0.0/17 -j MASQUERADE - // NOTE: Ordering is important here, the RETURN must come before - // the MASQUERADE rule. Please don't change the ordering. - srcUDNMasqueradePrefix := config.Gateway.V4MasqueradeSubnet - ipFamily := utilnet.IPv4 - if protocol == iptables.ProtocolIPv6 { - srcUDNMasqueradePrefix = config.Gateway.V6MasqueradeSubnet - ipFamily = utilnet.IPv6 - } - // defaultNetworkReservedMasqueradePrefix contains the first 6 IPs in the - // masquerade range that shouldn't be masqueraded. Hence it's always 3 bits (8 - // IPs) wide, regardless of IP family. 
- _, ipnet, _ := net.ParseCIDR(srcUDNMasqueradePrefix) - _, len := ipnet.Mask.Size() - defaultNetworkReservedMasqueradePrefix := fmt.Sprintf("%s/%d", ipnet.IP.String(), len-3) - - rules := []nodeipt.Rule{ - { - Table: "nat", - Chain: "POSTROUTING", - Args: []string{"-j", iptableUDNMasqueradeChain}, // NOTE: AddRules will take care of creating the chain - Protocol: protocol, - }, - { - Table: "nat", - Chain: iptableUDNMasqueradeChain, - Args: []string{ - "-s", defaultNetworkReservedMasqueradePrefix, - "-j", "RETURN", - }, - Protocol: protocol, - }, - } - for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { - if utilnet.IPFamilyOfCIDR(svcCIDR) != ipFamily { - continue - } - rules = append(rules, - nodeipt.Rule{ - Table: "nat", - Chain: iptableUDNMasqueradeChain, - Args: []string{ - "-d", svcCIDR.String(), - "-j", "RETURN", - }, - Protocol: protocol, - }, - ) - } - rules = append(rules, - nodeipt.Rule{ - Table: "nat", - Chain: iptableUDNMasqueradeChain, - Args: []string{ - "-s", srcUDNMasqueradePrefix, - "-j", "MASQUERADE", - }, - Protocol: protocol, - }, - ) - return rules -} - -func getLocalGatewayNATRules(cidr *net.IPNet) []nodeipt.Rule { - // Allow packets to/from the gateway interface in case defaults deny - protocol := getIPTablesProtocol(cidr.IP.String()) - masqueradeIP := config.Gateway.MasqueradeIPs.V4OVNMasqueradeIP - if protocol == iptables.ProtocolIPv6 { - masqueradeIP = config.Gateway.MasqueradeIPs.V6OVNMasqueradeIP - } - rules := append( - []nodeipt.Rule{ - { - Table: "nat", - Chain: "POSTROUTING", - Args: []string{ - "-s", masqueradeIP.String(), - "-j", "MASQUERADE", - }, - Protocol: protocol, - }, - }, - getLocalGatewayPodSubnetNATRules(cidr)..., - ) - - // FIXME(tssurya): If the feature is disabled we should be removing - // these rules - if util.IsNetworkSegmentationSupportEnabled() { - rules = append(rules, getUDNMasqueradeRules(protocol)...) 
- } - - return rules -} - -// initLocalGatewayNATRules sets up iptables rules for interfaces -func initLocalGatewayNATRules(ifname string, cidr *net.IPNet) error { +// initLocalGatewayIPTFilterRules sets up iptables rules for interfaces +func initLocalGatewayIPTFilterRules(ifname string, cidr *net.IPNet) error { // Insert the filter table rules because they need to be evaluated BEFORE the DROP rules // we have for forwarding. DO NOT change the ordering; specially important // during SGW->LGW rollouts and restarts. @@ -527,25 +393,8 @@ func initLocalGatewayNATRules(ifname string, cidr *net.IPNet) error { if err != nil { return fmt.Errorf("unable to insert forwarding rules %v", err) } - // append the masquerade rules in POSTROUTING table since that needs to be - // evaluated last. - return appendIptRules(getLocalGatewayNATRules(cidr)) -} - -func addLocalGatewayPodSubnetNATRules(cidrs ...*net.IPNet) error { - var rules []nodeipt.Rule - for _, cidr := range cidrs { - rules = append(rules, getLocalGatewayPodSubnetNATRules(cidr)...) - } - return appendIptRules(rules) -} - -func delLocalGatewayPodSubnetNATRules(cidrs ...*net.IPNet) error { - var rules []nodeipt.Rule - for _, cidr := range cidrs { - rules = append(rules, getLocalGatewayPodSubnetNATRules(cidr)...) 
- } - return deleteIptRules(rules) + // NOTE: nftables masquerade rules are now handled separately in initLocalGatewayNFTNATRules + return nil } func addChaintoTable(ipt util.IPTablesHelper, tableName, chain string) { diff --git a/go-controller/pkg/node/gateway_localnet.go b/go-controller/pkg/node/gateway_localnet.go index e0cc822844..6b8ed9aa0b 100644 --- a/go-controller/pkg/node/gateway_localnet.go +++ b/go-controller/pkg/node/gateway_localnet.go @@ -17,11 +17,11 @@ import ( func initLocalGateway(hostSubnets []*net.IPNet, mgmtPort managementport.Interface) error { klog.Info("Adding iptables masquerading rules for new local gateway") - if util.IsNetworkSegmentationSupportEnabled() { - if err := ensureChain("nat", iptableUDNMasqueradeChain); err != nil { - return fmt.Errorf("failed to ensure chain %s in NAT table: %w", iptableUDNMasqueradeChain, err) - } - } + + var allCIDRs []*net.IPNet + ifName := mgmtPort.GetInterfaceName() + + // First pass: collect all CIDRs and setup iptables filter rules per interface for _, hostSubnet := range hostSubnets { // local gateway mode uses mp0 as default path for all ingress traffic into OVN nextHop, err := util.MatchFirstIPNetFamily(utilnet.IsIPv6CIDR(hostSubnet), mgmtPort.GetAddresses()) @@ -32,11 +32,21 @@ func initLocalGateway(hostSubnets []*net.IPNet, mgmtPort managementport.Interfac // add iptables masquerading for mp0 to exit the host for egress cidr := nextHop.IP.Mask(nextHop.Mask) cidrNet := &net.IPNet{IP: cidr, Mask: nextHop.Mask} - ifName := mgmtPort.GetInterfaceName() - if err := initLocalGatewayNATRules(ifName, cidrNet); err != nil { + allCIDRs = append(allCIDRs, cidrNet) + + // Setup iptables filter rules for this interface/CIDR + if err := initLocalGatewayIPTFilterRules(ifName, cidrNet); err != nil { return fmt.Errorf("failed to add local NAT rules for: %s, err: %v", ifName, err) } } + + // setup nftables masquerade rules for all CIDRs (v4, v6 or dualstack) + if len(allCIDRs) > 0 { + if err := 
initLocalGatewayNFTNATRules(allCIDRs...); err != nil { + return fmt.Errorf("failed to setup nftables masquerade rules: %w", err) + } + } + return nil } diff --git a/go-controller/pkg/node/gateway_nftables.go b/go-controller/pkg/node/gateway_nftables.go index 842bb417d1..c2de7aa5e7 100644 --- a/go-controller/pkg/node/gateway_nftables.go +++ b/go-controller/pkg/node/gateway_nftables.go @@ -6,12 +6,14 @@ package node import ( "context" "fmt" + "net" "strings" corev1 "k8s.io/api/core/v1" utilnet "k8s.io/utils/net" "sigs.k8s.io/knftables" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" @@ -27,6 +29,13 @@ import ( // use an "accept" rule to override a later "drop" rule), then those rules will need to // either both be iptables or both be nftables. +// nftables chain names +const ( + nftablesLocalGatewayMasqChain = "ovn-kube-local-gw-masq" + nftablesPodSubnetMasqChain = "ovn-kube-pod-subnet-masq" + nftablesUDNMasqChain = "ovn-kube-udn-masq" +) + // getNoSNATNodePortRules returns elements to add to the "mgmtport-no-snat-nodeports" // set to prevent SNAT of sourceIP when passing through the management port, for an // `externalTrafficPolicy: Local` service with NodePorts. @@ -186,3 +195,293 @@ func getUDNNFTRules(service *corev1.Service, netConfig *bridgeconfig.BridgeUDNCo } return rules } + +// getLocalGatewayPodSubnetMasqueradeNFTRule creates a rule for masquerading traffic from the pod subnet CIDR +// in local gateway node in a seperate chain which is then called from local gateway masquerade chain. 
+// +// chain ovn-kube-pod-subnet-masq { +// ip saddr 10.244.0.0/24 masquerade +// ip6 saddr fd00:10:244:1::/64 masquerade +// } +func getLocalGatewayPodSubnetMasqueradeNFTRule(cidr *net.IPNet) (*knftables.Rule, error) { + // Create the rule for masquerading traffic from the CIDR + ipPrefix := "ip" + if utilnet.IsIPv6CIDR(cidr) { + ipPrefix = "ip6" + } + + rule := &knftables.Rule{ + Rule: knftables.Concat( + ipPrefix, "saddr", cidr, + "masquerade", + ), + Chain: nftablesPodSubnetMasqChain, + } + + return rule, nil +} + +// getLocalGatewayNATNFTRules returns the nftables rules for local gateway NAT including masquerade IP rule, +// pod subnet rules, and UDN masquerade rules (if network segmentation is enabled). +// This function supports dual-stack by accepting multiple CIDRs and generating rules for all IP families. +// +// chain ovn-kube-local-gw-masq { +// comment "OVN local gateway masquerade" +// type nat hook postrouting priority srcnat; policy accept; +// ip saddr 169.254.0.1 masquerade +// ip6 saddr fd69::1 masquerade +// jump ovn-kube-pod-subnet-masq +// jump ovn-kube-udn-masq +// } +func getLocalGatewayNATNFTRules(cidrs ...*net.IPNet) ([]*knftables.Rule, error) { + var rules []*knftables.Rule + + // Process each CIDR to support dual-stack + for _, cidr := range cidrs { + // Determine IP version and masquerade IP + isIPv6 := utilnet.IsIPv6CIDR(cidr) + var masqueradeIP net.IP + var ipPrefix string + if isIPv6 { + masqueradeIP = config.Gateway.MasqueradeIPs.V6OVNMasqueradeIP + ipPrefix = "ip6" + } else { + masqueradeIP = config.Gateway.MasqueradeIPs.V4OVNMasqueradeIP + ipPrefix = "ip" + } + + // Rule1: Masquerade IP rule for the main chain + masqRule := &knftables.Rule{ + Chain: nftablesLocalGatewayMasqChain, + Rule: knftables.Concat( + ipPrefix, "saddr", masqueradeIP, + "masquerade", + ), + } + rules = append(rules, masqRule) + + // Rule2: Pod subnet NAT rule for the pod subnet chain + podSubnetRule, err := getLocalGatewayPodSubnetMasqueradeNFTRule(cidr) + 
if err != nil { + return nil, fmt.Errorf("failed to create pod subnet masquerade rule: %w", err) + } + rules = append(rules, podSubnetRule) + } + + // Rule 3: UDN masquerade rules (if network segmentation is enabled) + if util.IsNetworkSegmentationSupportEnabled() { + if config.IPv4Mode { + udnRules, err := getUDNMasqueradeNFTRules(utilnet.IPv4) + if err != nil { + return nil, fmt.Errorf("failed to create IPv4 UDN masquerade rules: %w", err) + } + rules = append(rules, udnRules...) + } + if config.IPv6Mode { + udnRules, err := getUDNMasqueradeNFTRules(utilnet.IPv6) + if err != nil { + return nil, fmt.Errorf("failed to create IPv6 UDN masquerade rules: %w", err) + } + rules = append(rules, udnRules...) + } + } + + return rules, nil +} + +// getUDNMasqueradeNFTRules returns the nftables rules for UDN masquerade. +// Chain creation is handled separately by setupLocalGatewayNATNFTRules. +// +// chain ovn-kube-udn-masq { +// comment "OVN UDN masquerade" +// ip saddr != 169.254.0.0/29 ip daddr != 10.96.0.0/16 ip saddr 169.254.0.0/17 masquerade +// ip6 saddr != fd69::/125 ip daddr != fd00:10:96::/112 ip6 saddr fd69::/112 masquerade +// } +func getUDNMasqueradeNFTRules(ipFamily utilnet.IPFamily) ([]*knftables.Rule, error) { + var rules []*knftables.Rule + + // Determine subnet and IP family + srcUDNMasqueradePrefix := config.Gateway.V4MasqueradeSubnet + ipPrefix := "ip" + if ipFamily == utilnet.IPv6 { + srcUDNMasqueradePrefix = config.Gateway.V6MasqueradeSubnet + ipPrefix = "ip6" + } + + // Calculate reserved masquerade prefix (first 8 IPs) + _, ipnet, err := net.ParseCIDR(srcUDNMasqueradePrefix) + if err != nil { + return nil, fmt.Errorf("failed to parse UDN masquerade subnet: %w", err) + } + _, prefixLen := ipnet.Mask.Size() + defaultNetworkReservedMasqueradePrefix := fmt.Sprintf("%s/%d", ipnet.IP.String(), prefixLen-3) + + // Rule: RETURN for reserved masquerade prefix and service CIDRs + // rest of the traffic is masqueraded + + for _, svcCIDR := range 
config.Kubernetes.ServiceCIDRs { + if utilnet.IPFamilyOfCIDR(svcCIDR) != ipFamily { + continue + } + masqueradeRule := &knftables.Rule{ + Chain: nftablesUDNMasqChain, + Rule: knftables.Concat( + ipPrefix, "saddr", "!=", defaultNetworkReservedMasqueradePrefix, // this guarantees we don't SNAT default network masqueradeIPs + ipPrefix, "daddr", "!=", svcCIDR, // this guarantees we don't SNAT service traffic + ipPrefix, "saddr", srcUDNMasqueradePrefix, // this guarantees we SNAT all UDN MasqueradeIPs traffic leaving the node + "masquerade", + ), + } + rules = append(rules, masqueradeRule) + } + + return rules, nil +} + +// initLocalGatewayNFTNATRules sets up nftables rules for local gateway NAT functionality +// This function supports dual-stack by accepting multiple CIDRs and generating rules for all IP families +func initLocalGatewayNFTNATRules(cidrs ...*net.IPNet) error { + nft, err := nodenft.GetNFTablesHelper() + if err != nil { + return fmt.Errorf("failed to get nftables helper: %w", err) + } + + // Create transaction and apply all chains and rules + tx := nft.NewTransaction() + + // Create main local gateway masquerade chain + localGwMasqChain := &knftables.Chain{ + Name: nftablesLocalGatewayMasqChain, + Comment: knftables.PtrTo("OVN local gateway masquerade"), + Type: knftables.PtrTo(knftables.NATType), + Hook: knftables.PtrTo(knftables.PostroutingHook), + Priority: knftables.PtrTo(knftables.SNATPriority), + } + tx.Add(localGwMasqChain) + + // Create dedicated pod subnet masquerade chain + podSubnetMasqChain := &knftables.Chain{ + Name: nftablesPodSubnetMasqChain, + } + tx.Add(podSubnetMasqChain) + + // Create UDN masquerade chain only if network segmentation is enabled + var udnMasqChain *knftables.Chain + if util.IsNetworkSegmentationSupportEnabled() { + udnMasqChain = &knftables.Chain{ + Name: nftablesUDNMasqChain, + Comment: knftables.PtrTo("OVN UDN masquerade"), + } + tx.Add(udnMasqChain) + } + + // Flush existing chains to ensure clean state + 
tx.Flush(localGwMasqChain) + tx.Flush(podSubnetMasqChain) + if util.IsNetworkSegmentationSupportEnabled() { + tx.Flush(udnMasqChain) + } + + // Get the existing local gateway NAT rules + localGwRules, err := getLocalGatewayNATNFTRules(cidrs...) + if err != nil { + return fmt.Errorf("failed to get local gateway NAT rules: %w", err) + } + + // Add the main local gateway NAT rules + for _, rule := range localGwRules { + tx.Add(rule) + } + + // Add jump rule from main chain to pod subnet chain + jumpToPodSubnetRule := &knftables.Rule{ + Chain: nftablesLocalGatewayMasqChain, + Rule: knftables.Concat( + "jump", nftablesPodSubnetMasqChain, + ), + } + tx.Add(jumpToPodSubnetRule) + + // Add jump rule to UDN chain only if network segmentation is enabled + if util.IsNetworkSegmentationSupportEnabled() { + jumpToUDNRule := &knftables.Rule{ + Chain: nftablesLocalGatewayMasqChain, + Rule: knftables.Concat( + "jump", nftablesUDNMasqChain, + ), + } + tx.Add(jumpToUDNRule) + } + + err = nft.Run(context.TODO(), tx) + if err != nil { + return fmt.Errorf("failed to setup local gateway NAT nftables rules: %w", err) + } + + return nil +} + +// addLocalGatewayPodSubnetNFTRules adds nftables rules for pod subnet masquerading for multiple CIDRs +// These rules are added to the dedicated pod subnet masquerade chain. 
+func addLocalGatewayPodSubnetNFTRules(cidrs ...*net.IPNet) error { + nft, err := nodenft.GetNFTablesHelper() + if err != nil { + return fmt.Errorf("failed to get nftables helper: %w", err) + } + + tx := nft.NewTransaction() + + // Ensure the pod subnet chain exists + podSubnetChain := &knftables.Chain{ + Name: nftablesPodSubnetMasqChain, + } + tx.Add(podSubnetChain) + + // Flush the chain to remove all existing rules + tx.Flush(podSubnetChain) + + for _, cidr := range cidrs { + rule, err := getLocalGatewayPodSubnetMasqueradeNFTRule(cidr) + if err != nil { + return fmt.Errorf("failed to create nftables rules for CIDR %s: %w", cidr.String(), err) + } + + // Add the rule + tx.Add(rule) + } + + if err := nft.Run(context.TODO(), tx); err != nil { + return fmt.Errorf("failed to add pod subnet NAT rules: %w", err) + } + + return nil +} + +// delLocalGatewayPodSubnetNFTRules removes nftables rules for pod subnet masquerading for multiple CIDRs +// Since we use a separate chain, we can simply flush it to remove all pod subnet rules. +func delLocalGatewayPodSubnetNFTRules() error { + nft, err := nodenft.GetNFTablesHelper() + if err != nil { + return fmt.Errorf("failed to get nftables helper: %w", err) + } + + tx := nft.NewTransaction() + + // In shared gateway mode, this chain might not exist if its + // not migration from local gateway mode. In that case, let's + // use the idiomatic way of adding the chain before trying to flush it. + // I anyways also have the knftables.IsNotFound() check in the caller later. 
+ tx.Add(&knftables.Chain{ + Name: nftablesPodSubnetMasqChain, + }) + + // Simply flush the dedicated pod subnet masquerade chain + // This removes all pod subnet masquerade rules at once + tx.Flush(&knftables.Chain{Name: nftablesPodSubnetMasqChain}) + + if err := nft.Run(context.TODO(), tx); err != nil && !knftables.IsNotFound(err) { + return fmt.Errorf("failed to delete pod subnet NAT rules: %w", err) + } + + return nil +} From a67872dc389b80d65029c8a01c56299ed39240b6 Mon Sep 17 00:00:00 2001 From: Surya Seetharaman Date: Wed, 9 Jul 2025 11:11:50 +0200 Subject: [PATCH 166/181] rename/reuse pmtud nft sets to remote-node-ips let's reuse the pmtud address-set ips of the remote nodes ips also for bgp advertised networks cSNAT Signed-off-by: Surya Seetharaman --- .../node/default_node_network_controller.go | 18 +++++------ .../default_node_network_controller_test.go | 32 +++++++++---------- go-controller/pkg/node/node_nftables.go | 12 +++---- go-controller/pkg/types/const.go | 8 ++--- 4 files changed, 35 insertions(+), 35 deletions(-) diff --git a/go-controller/pkg/node/default_node_network_controller.go b/go-controller/pkg/node/default_node_network_controller.go index f1281980a8..db7d26802d 100644 --- a/go-controller/pkg/node/default_node_network_controller.go +++ b/go-controller/pkg/node/default_node_network_controller.go @@ -188,7 +188,7 @@ func NewDefaultNodeNetworkController(cnnci *CommonNodeNetworkControllerInfo, net nc.initRetryFrameworkForNode() - err = setupPMTUDNFTSets() + err = setupRemoteNodeNFTSets() if err != nil { return nil, fmt.Errorf("failed to setup PMTUD nftables sets: %w", err) } @@ -1528,12 +1528,12 @@ func (nc *DefaultNodeNetworkController) addOrUpdateNode(node *corev1.Node) error klog.Infof("Adding remote node %q, IP: %s to PMTUD blocking rules", node.Name, nodeIP) if utilnet.IsIPv4(nodeIP) { nftElems = append(nftElems, &knftables.Element{ - Set: types.NFTNoPMTUDRemoteNodeIPsv4, + Set: types.NFTRemoteNodeIPsv4, Key: []string{nodeIP.String()}, 
}) } else { nftElems = append(nftElems, &knftables.Element{ - Set: types.NFTNoPMTUDRemoteNodeIPsv6, + Set: types.NFTRemoteNodeIPsv6, Key: []string{nodeIP.String()}, }) } @@ -1557,12 +1557,12 @@ func removePMTUDNodeNFTRules(nodeIPs []net.IP) error { // Remove IPs from NFT sets if utilnet.IsIPv4(nodeIP) { nftElems = append(nftElems, &knftables.Element{ - Set: types.NFTNoPMTUDRemoteNodeIPsv4, + Set: types.NFTRemoteNodeIPsv4, Key: []string{nodeIP.String()}, }) } else { nftElems = append(nftElems, &knftables.Element{ - Set: types.NFTNoPMTUDRemoteNodeIPsv6, + Set: types.NFTRemoteNodeIPsv6, Key: []string{nodeIP.String()}, }) } @@ -1622,21 +1622,21 @@ func (nc *DefaultNodeNetworkController) syncNodes(objs []interface{}) error { // Remove IPs from NFT sets if utilnet.IsIPv4(nodeIP) { keepNFTSetElemsV4 = append(keepNFTSetElemsV4, &knftables.Element{ - Set: types.NFTNoPMTUDRemoteNodeIPsv4, + Set: types.NFTRemoteNodeIPsv4, Key: []string{nodeIP.String()}, }) } else { keepNFTSetElemsV6 = append(keepNFTSetElemsV6, &knftables.Element{ - Set: types.NFTNoPMTUDRemoteNodeIPsv6, + Set: types.NFTRemoteNodeIPsv6, Key: []string{nodeIP.String()}, }) } } } - if err := recreateNFTSet(types.NFTNoPMTUDRemoteNodeIPsv4, keepNFTSetElemsV4); err != nil { + if err := recreateNFTSet(types.NFTRemoteNodeIPsv4, keepNFTSetElemsV4); err != nil { errors = append(errors, err) } - if err := recreateNFTSet(types.NFTNoPMTUDRemoteNodeIPsv6, keepNFTSetElemsV6); err != nil { + if err := recreateNFTSet(types.NFTRemoteNodeIPsv6, keepNFTSetElemsV6); err != nil { errors = append(errors, err) } diff --git a/go-controller/pkg/node/default_node_network_controller_test.go b/go-controller/pkg/node/default_node_network_controller_test.go index a1413a7dd1..24c3141357 100644 --- a/go-controller/pkg/node/default_node_network_controller_test.go +++ b/go-controller/pkg/node/default_node_network_controller_test.go @@ -38,18 +38,18 @@ import ( const v4PMTUDNFTRules = ` add table inet ovn-kubernetes -add rule inet ovn-kubernetes 
no-pmtud ip daddr @no-pmtud-remote-node-ips-v4 meta l4proto icmp icmp type 3 icmp code 4 counter drop +add rule inet ovn-kubernetes no-pmtud ip daddr @remote-node-ips-v4 meta l4proto icmp icmp type 3 icmp code 4 counter drop add chain inet ovn-kubernetes no-pmtud { type filter hook output priority 0 ; comment "Block egress needs frag/packet too big to remote k8s nodes" ; } -add set inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { type ipv4_addr ; comment "Block egress ICMP needs frag to remote Kubernetes nodes" ; } -add set inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { type ipv6_addr ; comment "Block egress ICMPv6 packet too big to remote Kubernetes nodes" ; } +add set inet ovn-kubernetes remote-node-ips-v4 { type ipv4_addr ; comment "Block egress ICMP needs frag to remote Kubernetes nodes" ; } +add set inet ovn-kubernetes remote-node-ips-v6 { type ipv6_addr ; comment "Block egress ICMPv6 packet too big to remote Kubernetes nodes" ; } ` const v6PMTUDNFTRules = ` add table inet ovn-kubernetes -add rule inet ovn-kubernetes no-pmtud meta l4proto icmpv6 icmpv6 type 2 icmpv6 code 0 ip6 daddr @no-pmtud-remote-node-ips-v6 counter drop +add rule inet ovn-kubernetes no-pmtud meta l4proto icmpv6 icmpv6 type 2 icmpv6 code 0 ip6 daddr @remote-node-ips-v6 counter drop add chain inet ovn-kubernetes no-pmtud { type filter hook output priority 0 ; comment "Block egress needs frag/packet too big to remote k8s nodes" ; } -add set inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { type ipv4_addr ; comment "Block egress ICMP needs frag to remote Kubernetes nodes" ; } -add set inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { type ipv6_addr ; comment "Block egress ICMPv6 packet too big to remote Kubernetes nodes" ; } +add set inet ovn-kubernetes remote-node-ips-v4 { type ipv4_addr ; comment "Block egress ICMP needs frag to remote Kubernetes nodes" ; } +add set inet ovn-kubernetes remote-node-ips-v6 { type ipv6_addr ; comment "Block egress ICMPv6 packet too big to remote Kubernetes 
nodes" ; } ` var _ = Describe("Node", func() { @@ -806,7 +806,7 @@ var _ = Describe("Node", func() { cnnci := NewCommonNodeNetworkControllerInfo(kubeFakeClient, fakeClient.AdminPolicyRouteClient, wf, nil, nodeName, routeManager) nc = newDefaultNodeNetworkController(cnnci, stop, wg, routeManager, nil) nc.initRetryFrameworkForNode() - err = setupPMTUDNFTSets() + err = setupRemoteNodeNFTSets() Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) @@ -830,7 +830,7 @@ var _ = Describe("Node", func() { err = nc.WatchNodes() Expect(err).NotTo(HaveOccurred()) nftRules := v4PMTUDNFTRules + ` -add element inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { 169.254.254.61 } +add element inet ovn-kubernetes remote-node-ips-v4 { 169.254.254.61 } ` err = nodenft.MatchNFTRules(nftRules, nft.Dump()) Expect(err).NotTo(HaveOccurred()) @@ -911,7 +911,7 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { 169.254.254.61 } cnnci := NewCommonNodeNetworkControllerInfo(kubeFakeClient, fakeClient.AdminPolicyRouteClient, wf, nil, nodeName, routeManager) nc = newDefaultNodeNetworkController(cnnci, stop, wg, routeManager, nil) nc.initRetryFrameworkForNode() - err = setupPMTUDNFTSets() + err = setupRemoteNodeNFTSets() Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) @@ -935,7 +935,7 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { 169.254.254.61 } err = nc.WatchNodes() Expect(err).NotTo(HaveOccurred()) nftRules := v4PMTUDNFTRules + ` -add element inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { 169.254.253.61 } +add element inet ovn-kubernetes remote-node-ips-v4 { 169.254.253.61 } ` err = nodenft.MatchNFTRules(nftRules, nft.Dump()) Expect(err).NotTo(HaveOccurred()) @@ -1058,7 +1058,7 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { 169.254.253.61 } cnnci := NewCommonNodeNetworkControllerInfo(kubeFakeClient, fakeClient.AdminPolicyRouteClient, wf, nil, nodeName, 
routeManager) nc = newDefaultNodeNetworkController(cnnci, stop, wg, routeManager, nil) nc.initRetryFrameworkForNode() - err = setupPMTUDNFTSets() + err = setupRemoteNodeNFTSets() Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) @@ -1082,7 +1082,7 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { 169.254.253.61 } err = nc.WatchNodes() Expect(err).NotTo(HaveOccurred()) nftRules := v6PMTUDNFTRules + ` -add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2001:db8:1::4 } +add element inet ovn-kubernetes remote-node-ips-v6 { 2001:db8:1::4 } ` err = nodenft.MatchNFTRules(nftRules, nft.Dump()) Expect(err).NotTo(HaveOccurred()) @@ -1162,7 +1162,7 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2001:db8:1::4 } cnnci := NewCommonNodeNetworkControllerInfo(kubeFakeClient, fakeClient.AdminPolicyRouteClient, wf, nil, nodeName, routeManager) nc = newDefaultNodeNetworkController(cnnci, stop, wg, routeManager, nil) nc.initRetryFrameworkForNode() - err = setupPMTUDNFTSets() + err = setupRemoteNodeNFTSets() Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) @@ -1186,7 +1186,7 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2001:db8:1::4 } err = nc.WatchNodes() Expect(err).NotTo(HaveOccurred()) nftRules := v6PMTUDNFTRules + ` -add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2002:db8:1::4 } +add element inet ovn-kubernetes remote-node-ips-v6 { 2002:db8:1::4 } ` err = nodenft.MatchNFTRules(nftRules, nft.Dump()) Expect(err).NotTo(HaveOccurred()) @@ -1323,7 +1323,7 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2002:db8:1::4 } cnnci := NewCommonNodeNetworkControllerInfo(kubeFakeClient, fakeClient.AdminPolicyRouteClient, wf, nil, nodeName, routeManager) nc = newDefaultNodeNetworkController(cnnci, stop, wg, routeManager, nil) nc.initRetryFrameworkForNode() - err = setupPMTUDNFTSets() + err = 
setupRemoteNodeNFTSets() Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) @@ -1444,7 +1444,7 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2002:db8:1::4 } cnnci := NewCommonNodeNetworkControllerInfo(kubeFakeClient, fakeClient.AdminPolicyRouteClient, wf, nil, nodeName, routeManager) nc = newDefaultNodeNetworkController(cnnci, stop, wg, routeManager, nil) nc.initRetryFrameworkForNode() - err = setupPMTUDNFTSets() + err = setupRemoteNodeNFTSets() Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) diff --git a/go-controller/pkg/node/node_nftables.go b/go-controller/pkg/node/node_nftables.go index e52a8970a4..ca4afc9ac2 100644 --- a/go-controller/pkg/node/node_nftables.go +++ b/go-controller/pkg/node/node_nftables.go @@ -13,8 +13,8 @@ import ( const nftPMTUDChain = "no-pmtud" -// setupPMTUDNFTSets sets up the NFT sets that contain remote Kubernetes node IPs -func setupPMTUDNFTSets() error { +// setupRemoteNodeNFTSets sets up the NFT sets that contain remote Kubernetes node IPs +func setupRemoteNodeNFTSets() error { nft, err := nodenft.GetNFTablesHelper() if err != nil { return fmt.Errorf("failed to get nftables helper: %w", err) @@ -22,12 +22,12 @@ func setupPMTUDNFTSets() error { tx := nft.NewTransaction() tx.Add(&knftables.Set{ - Name: types.NFTNoPMTUDRemoteNodeIPsv4, + Name: types.NFTRemoteNodeIPsv4, Comment: knftables.PtrTo("Block egress ICMP needs frag to remote Kubernetes nodes"), Type: "ipv4_addr", }) tx.Add(&knftables.Set{ - Name: types.NFTNoPMTUDRemoteNodeIPsv6, + Name: types.NFTRemoteNodeIPsv6, Comment: knftables.PtrTo("Block egress ICMPv6 packet too big to remote Kubernetes nodes"), Type: "ipv6_addr", }) @@ -68,7 +68,7 @@ func setupPMTUDNFTChain() error { tx.Add(&knftables.Rule{ Chain: nftPMTUDChain, Rule: knftables.Concat( - "ip daddr @"+types.NFTNoPMTUDRemoteNodeIPsv4, + "ip daddr @"+types.NFTRemoteNodeIPsv4, "meta l4proto icmp", "icmp type 3", // 
type 3 == Destination Unreachable "icmp code 4", // code 4 indicates fragmentation needed @@ -85,7 +85,7 @@ func setupPMTUDNFTChain() error { "meta l4proto icmpv6", // match on ICMPv6 packets "icmpv6 type 2", // type 2 == Packet Too Big (PMTUD) "icmpv6 code 0", // code 0 for that message - "ip6 daddr @"+types.NFTNoPMTUDRemoteNodeIPsv6, + "ip6 daddr @"+types.NFTRemoteNodeIPsv6, counterIfDebug, "drop", // drop the packet ), diff --git a/go-controller/pkg/types/const.go b/go-controller/pkg/types/const.go index 8ba7269cad..523da8e27b 100644 --- a/go-controller/pkg/types/const.go +++ b/go-controller/pkg/types/const.go @@ -312,13 +312,13 @@ const ( // CUDNPrefix of all CUDN network names CUDNPrefix = "cluster_udn_" - // NFTNoPMTUDRemoteNodeIPsv4 is a set used to track remote node IPs that do not belong to + // NFTRemoteNodeIPsv4 is a set used to track remote node v4IPs that do not belong to // the local node's subnet. - NFTNoPMTUDRemoteNodeIPsv4 = "no-pmtud-remote-node-ips-v4" + NFTRemoteNodeIPsv4 = "remote-node-ips-v4" - // NFTNoPMTUDRemoteNodeIPsv6 is a set used to track remote node IPs that do not belong to + // NFTRemoteNodeIPsv6 is a set used to track remote node v6IPs that do not belong to // the local node's subnet. - NFTNoPMTUDRemoteNodeIPsv6 = "no-pmtud-remote-node-ips-v6" + NFTRemoteNodeIPsv6 = "remote-node-ips-v6" // Metrics MetricOvnkubeNamespace = "ovnkube" From 04d48c314db6568287855f6d24cec14c9347e7f6 Mon Sep 17 00:00:00 2001 From: Surya Seetharaman Date: Wed, 9 Jul 2025 11:38:57 +0200 Subject: [PATCH 167/181] BGP, default network, LGW: Conditionally Masquerade This commit is valid only for default networks as mentioned in title. It's because unlike in UDNs where we do cSNATs in OVN on router at the edge before it leaves to node, for CDN everything happens on the node side already - so we can leverage the nodeIP masquerade bits. 
if network is advertised: chain ovn-kube-pod-subnet-masq { ip saddr 10.244.2.0/24 ip daddr @remote-node-ips-v4 masquerade ip6 saddr fd00:10:244:3::/64 ip6 daddr @remote-node-ips-v6 masquerade } else: chain ovn-kube-pod-subnet-masq { ip saddr 10.244.2.0/24 masquerade ip6 saddr fd00:10:244:3::/64 masquerade } Signed-off-by: Surya Seetharaman --- go-controller/pkg/node/gateway.go | 4 +-- go-controller/pkg/node/gateway_nftables.go | 33 ++++++++++++++++++---- 2 files changed, 29 insertions(+), 8 deletions(-) diff --git a/go-controller/pkg/node/gateway.go b/go-controller/pkg/node/gateway.go index 7f11a0b813..fa812377e7 100644 --- a/go-controller/pkg/node/gateway.go +++ b/go-controller/pkg/node/gateway.go @@ -521,9 +521,9 @@ func (g *gateway) addAllServices() []error { func (g *gateway) updateSNATRules() error { subnets := util.IPsToNetworkIPs(g.nodeIPManager.mgmtPort.GetAddresses()...) - if g.GetDefaultPodNetworkAdvertised() || config.Gateway.Mode != config.GatewayModeLocal { + if config.Gateway.Mode != config.GatewayModeLocal { return delLocalGatewayPodSubnetNFTRules() } - return addLocalGatewayPodSubnetNFTRules(subnets...) + return addOrUpdateLocalGatewayPodSubnetNFTRules(g.GetDefaultPodNetworkAdvertised(), subnets...) } diff --git a/go-controller/pkg/node/gateway_nftables.go b/go-controller/pkg/node/gateway_nftables.go index c2de7aa5e7..71a4d23b9e 100644 --- a/go-controller/pkg/node/gateway_nftables.go +++ b/go-controller/pkg/node/gateway_nftables.go @@ -203,16 +203,32 @@ func getUDNNFTRules(service *corev1.Service, netConfig *bridgeconfig.BridgeUDNCo // ip saddr 10.244.0.0/24 masquerade // ip6 saddr fd00:10:244:1::/64 masquerade // } -func getLocalGatewayPodSubnetMasqueradeNFTRule(cidr *net.IPNet) (*knftables.Rule, error) { +// +// If isAdvertisedNetwork is true, masquerade only when destination matches remote node IPs. 
+// Rules look like: +// ip saddr 10.244.0.0/24 ip daddr @remote-node-ips-v4 masquerade +// ip6 saddr fd00:10:244:1::/64 ip6 daddr @remote-node-ips-v6 masquerade +func getLocalGatewayPodSubnetMasqueradeNFTRule(cidr *net.IPNet, isAdvertisedNetwork bool) (*knftables.Rule, error) { // Create the rule for masquerading traffic from the CIDR - ipPrefix := "ip" + var ipPrefix string + var remoteNodeSetName string if utilnet.IsIPv6CIDR(cidr) { ipPrefix = "ip6" + remoteNodeSetName = types.NFTRemoteNodeIPsv6 + } else { + ipPrefix = "ip" + remoteNodeSetName = types.NFTRemoteNodeIPsv4 } + // If network is advertised, only masquerade if destination is a remote node IP + var optionalDestRules []string + if isAdvertisedNetwork { + optionalDestRules = []string{ipPrefix, "daddr", "@", remoteNodeSetName} + } rule := &knftables.Rule{ Rule: knftables.Concat( ipPrefix, "saddr", cidr, + optionalDestRules, "masquerade", ), Chain: nftablesPodSubnetMasqChain, @@ -261,7 +277,7 @@ func getLocalGatewayNATNFTRules(cidrs ...*net.IPNet) ([]*knftables.Rule, error) rules = append(rules, masqRule) // Rule2: Pod subnet NAT rule for the pod subnet chain - podSubnetRule, err := getLocalGatewayPodSubnetMasqueradeNFTRule(cidr) + podSubnetRule, err := getLocalGatewayPodSubnetMasqueradeNFTRule(cidr, false) if err != nil { return nil, fmt.Errorf("failed to create pod subnet masquerade rule: %w", err) } @@ -421,9 +437,12 @@ func initLocalGatewayNFTNATRules(cidrs ...*net.IPNet) error { return nil } -// addLocalGatewayPodSubnetNFTRules adds nftables rules for pod subnet masquerading for multiple CIDRs +// addOrUpdateLocalGatewayPodSubnetNFTRules adds nftables rules for pod subnet masquerading for multiple CIDRs // These rules are added to the dedicated pod subnet masquerade chain. -func addLocalGatewayPodSubnetNFTRules(cidrs ...*net.IPNet) error { +// If the rules already exist, they are updated. 
+// If isAdvertisedNetwork is true, the masquerade rules also get a destination match +// that matches the remote node IP set. +func addOrUpdateLocalGatewayPodSubnetNFTRules(isAdvertisedNetwork bool, cidrs ...*net.IPNet) error { nft, err := nodenft.GetNFTablesHelper() if err != nil { return fmt.Errorf("failed to get nftables helper: %w", err) @@ -438,10 +457,12 @@ func addLocalGatewayPodSubnetNFTRules(cidrs ...*net.IPNet) error { tx.Add(podSubnetChain) // Flush the chain to remove all existing rules + // if network toggles between advertised and non-advertised, we need to flush the chain and re-add correct rules tx.Flush(podSubnetChain) + // Add the new rules for each CIDR for _, cidr := range cidrs { - rule, err := getLocalGatewayPodSubnetMasqueradeNFTRule(cidr) + rule, err := getLocalGatewayPodSubnetMasqueradeNFTRule(cidr, isAdvertisedNetwork) if err != nil { return fmt.Errorf("failed to create nftables rules for CIDR %s: %w", cidr.String(), err) } From 8a65723f1d90d2360c9a3f965e89eb124c026e3b Mon Sep 17 00:00:00 2001 From: Surya Seetharaman Date: Wed, 2 Jul 2025 11:16:52 +0200 Subject: [PATCH 168/181] Add E2E's for these traffic flows 1) remove the l2 failure limitation since we now use nodeIPs reply knows how to go back to src node since we have routes for that 2) add udn pod -> default network nodeport service (same and diff node) 3) add udn pod -> udn network nodeport service (same and diff node) - same network 4) add udn pod -> udn network nodeport service (same and diff node) - different network Signed-off-by: Surya Seetharaman Signed-off-by: Surya Seetharaman --- test/e2e/route_advertisements.go | 113 ++++++++++++++++++++++++------- 1 file changed, 90 insertions(+), 23 deletions(-) diff --git a/test/e2e/route_advertisements.go b/test/e2e/route_advertisements.go index f65dd60631..d46d0a9409 100644 --- a/test/e2e/route_advertisements.go +++ b/test/e2e/route_advertisements.go @@ -28,7 +28,6 @@ import ( "github.com/ovn-org/ovn-kubernetes/test/e2e/label" corev1 
"k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -626,7 +625,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" } // create host networked Pods - _, err := createPod(f, node.Name+"-hostnet-ep", node.Name, f.Namespace.Name, []string{}, map[string]string{}, func(p *v1.Pod) { + _, err := createPod(f, node.Name+"-hostnet-ep", node.Name, f.Namespace.Name, []string{}, map[string]string{}, func(p *corev1.Pod) { p.Spec.Containers[0].Args = args p.Spec.HostNetwork = true }) @@ -652,6 +651,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" svc.Spec.Ports = []corev1.ServicePort{{Port: 8080}} familyPolicy := corev1.IPFamilyPolicyPreferDualStack svc.Spec.IPFamilyPolicy = &familyPolicy + svc.Spec.Type = corev1.ServiceTypeNodePort svcNetA, err = f.ClientSet.CoreV1().Services(pod.Namespace).Create(context.Background(), svc, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -675,6 +675,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" svc.Name = fmt.Sprintf("service-default") svc.Namespace = "default" svc.Spec.Selector = pod.Labels + svc.Spec.Type = corev1.ServiceTypeNodePort svcNetDefault, err = f.ClientSet.CoreV1().Services(pod.Namespace).Create(context.Background(), svc, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -754,6 +755,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" } if svcNetDefault != nil { err = f.ClientSet.CoreV1().Services(svcNetDefault.Namespace).Delete(context.Background(), svcNetDefault.Name, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) svcNetDefault = nil } @@ -954,11 +956,11 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" // options [mss 1360,sackOK,TS val 3006752321 ecr 
0,nop,wscale 7], length 0 // 10:59:55.352404 ovn-k8s-mp87 In ifindex 186 0a:58:5d:5d:01:01 ethertype IPv4 (0x0800), length 80: (tos 0x0, ttl 63, id 57264, // offset 0, flags [DF], proto TCP (6), length 60) - // 93.93.1.5.36363 > 172.18.0.2.25022: Flags [S], cksum 0xe0b7 (correct), seq 3879759281, win 65280, + // 169.154.169.12.36363 > 172.18.0.2.25022: Flags [S], cksum 0xe0b7 (correct), seq 3879759281, win 65280, // options [mss 1360,sackOK,TS val 3006752321 ecr 0,nop,wscale 7], length 0 // 10:59:55.352461 ovn-k8s-mp87 Out ifindex 186 0a:58:5d:5d:01:02 ethertype IPv4 (0x0800), length 60: (tos 0x0, ttl 64, id 0, // offset 0, flags [DF], proto TCP (6), length 40) - // 172.18.0.2.25022 > 93.93.1.5.36363: Flags [R.], cksum 0x609d (correct), seq 0, ack 3879759282, win 0, length 0 + // 172.18.0.2.25022 > 169.154.169.12.36363: Flags [R.], cksum 0x609d (correct), seq 0, ack 3879759282, win 0, length 0 // 10:59:55.352927 319594f193d4d_3 Out ifindex 191 0a:58:5d:5d:01:02 ethertype IPv4 (0x0800), length 60: (tos 0x0, ttl 64, id 0, // offset 0, flags [DF], proto TCP (6), length 40) // 172.18.0.2.25022 > 93.93.1.5.36363: Flags [R.], cksum 0x609d (correct), seq 0, ack 1, win 0, length 0 @@ -971,25 +973,90 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), podsNetA[2].Spec.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) nodeIP := node.Status.Addresses[ipFamilyIndex].Address - errBool := false - out := "" - if cudnATemplate.Spec.Network.Topology == udnv1.NetworkTopologyLayer2 { - // FIXME: this should be removed once we add the SNAT for pod->node traffic - // We now permit asymmetric traffic on LGW. This prevents the issue from occurring with IPv6. - // However, for IPv4 LGW rp_filter is still blocking the replies. - // The situation is different on SGW as we don't allow asymmetric traffic at all, which is why IPv6 traffic fails there too. 
- if ipFamilyIndex == ipFamilyV4 || !isLocalGWModeEnabled() { - // FIXME: fix assymmetry in L2 UDNs - // bad behaviour: packet is coming from other node -> entering eth0 -> bretho and here kernel drops the packet since - // rp_filter is set to 1 in breth0 and there is an iprule that sends the packet to mpX interface so kernel sees the packet - // having return path different from the incoming interface. - // The SNAT to nodeIP should fix this. - // this causes curl timeout with code 28 - errBool = true - out = curlConnectionTimeoutCode - } - } - return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(hostNetworkPort)) + "/hostname", out, errBool + + clientNode, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), clientPod.Spec.NodeName, metav1.GetOptions{}) + framework.ExpectNoError(err) + clientNodeIP := clientNode.Status.Addresses[ipFamilyIndex].Address + // pod -> node traffic should use the node's IP as the source for advertised UDNs. + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(hostNetworkPort)) + "/clientip", clientNodeIP, false + }), + ginkgo.Entry("UDN pod to the same node nodeport service in default network should work (should it? :)...)", + func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + clientPod := podsNetA[0] + // podsNetA[0] is on nodes[0]. We need the same node. Let's hit the nodeport on nodes[0]. 
+ node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodes.Items[0].Name, metav1.GetOptions{}) + framework.ExpectNoError(err) + nodeIP := node.Status.Addresses[ipFamilyIndex].Address + nodePort := svcNetDefault.Spec.Ports[0].NodePort + + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(nodePort)) + "/hostname", "", false + }), + ginkgo.Entry("UDN pod to a different node nodeport service in default network should work", + func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + clientPod := podsNetA[0] + // podsNetA[0] is on nodes[0]. We need a different node. podNetDefault is on nodes[1]. + // The service is backed by podNetDefault. Let's hit the nodeport on nodes[2]. + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodes.Items[2].Name, metav1.GetOptions{}) + framework.ExpectNoError(err) + nodeIP := node.Status.Addresses[ipFamilyIndex].Address + nodePort := svcNetDefault.Spec.Ports[0].NodePort + + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(nodePort)) + "/hostname", "", false + }), + ginkgo.Entry("UDN pod to the same node nodeport service in same UDN network should work", + func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + clientPod := podsNetA[0] + // The service is backed by pods in podsNetA. + // We want to hit the nodeport on the same node. + // client is on nodes[0]. Let's hit nodeport on nodes[0]. + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodes.Items[0].Name, metav1.GetOptions{}) + framework.ExpectNoError(err) + nodeIP := node.Status.Addresses[ipFamilyIndex].Address + nodePort := svcNetA.Spec.Ports[0].NodePort + + // The service can be backed by any of the pods in podsNetA, so we can't reliably check the output hostname. + // Just check that the connection is successful. 
+ return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(nodePort)) + "/hostname", "", false + }), + ginkgo.Entry("UDN pod to a different node nodeport service in same UDN network should work", + func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + clientPod := podsNetA[0] + // The service is backed by pods in podsNetA. + // We want to hit the nodeport on a different node. + // client is on nodes[0]. Let's hit nodeport on nodes[2]. + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodes.Items[2].Name, metav1.GetOptions{}) + framework.ExpectNoError(err) + nodeIP := node.Status.Addresses[ipFamilyIndex].Address + nodePort := svcNetA.Spec.Ports[0].NodePort + + // sourceIP will be joinSubnetIP for nodeports, so only using hostname endpoint + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(nodePort)) + "/hostname", "", false + }), + ginkgo.Entry("UDN pod to the same node nodeport service in different UDN network should not work", + // FIXME: This test should work: https://github.com/ovn-kubernetes/ovn-kubernetes/issues/5419 + func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + clientPod := podsNetA[0] + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodes.Items[0].Name, metav1.GetOptions{}) + framework.ExpectNoError(err) + nodeIP := node.Status.Addresses[ipFamilyIndex].Address + nodePort := svcNetB.Spec.Ports[0].NodePort + + // sourceIP will be joinSubnetIP for nodeports, so only using hostname endpoint + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(nodePort)) + "/hostname", curlConnectionTimeoutCode, true + }), + ginkgo.Entry("UDN pod to a different node nodeport service in different UDN network should work", + func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, 
expectErr bool) { + clientPod := podsNetA[0] + // The service is backed by podNetB. + // We want to hit the nodeport on a different node from the client. + // client is on nodes[0]. Let's hit nodeport on nodes[2]. + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodes.Items[2].Name, metav1.GetOptions{}) + framework.ExpectNoError(err) + nodeIP := node.Status.Addresses[ipFamilyIndex].Address + nodePort := svcNetB.Spec.Ports[0].NodePort + + // sourceIP will be joinSubnetIP for nodeports, so only using hostname endpoint + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(nodePort)) + "/hostname", "", false }), ) From 10ea4ab4a2222577f54b03d84491c6abd7e17d23 Mon Sep 17 00:00:00 2001 From: Surya Seetharaman Date: Fri, 11 Jul 2025 14:01:33 +0200 Subject: [PATCH 169/181] Add masqueradeIP flows back for advertised networks in breth0 In the previous commits we added SNATing to nodeIP for the following traffic flows: pod -> nodes pod -> nodeports when pods are part of advertised networks. Prior to SNATing to nodeIPs they are SNATed at the ovn_cluster_router to masqueradeIP before being sent into the host. In commit https://github.com/ovn-kubernetes/ovn-kubernetes/commit/75dd73fb645bff6c30e8c08c9b7f711d82996601 we had converted all UDN flows that matched on masqueradeIP as the source on breth0 for UDN pods to services traffic flow to instead match on the podsubnets. However given we have pod to node and pod to nodeport traffic flows using masqueradeIP as the SNAT we need to now re-add the masqueradeIP flows as well to ensure that nodeports isolation between UDNs work correctly. 
Before this commit: In LGW/SGW flow is: UDN pod -> samenodeIP:nodeport in default network -> SNATed to masqueradeIP of that UDN -> sent to host -> SNATed to clusterIP -> hits the default flow in table=2 in br-ex: cookie=0xdeff105, duration=15690.053s, table=2, n_packets=0, n_bytes=0, idle_age=15690, priority=100 actions=mod_dl_dst:6e:4d:97:c0:3c:97,output:2 and sends to patch port of default network and this traffic starts working when it shouldn't. (I mean eventually we want this to work, see https://github.com/ovn-kubernetes/ovn-kubernetes/issues/5410 but that's a future issue - outside my PR's scope) In case of L3 UDN advertised pod -> nodeport service in default or other UDN network: https://github.com/ovn-kubernetes/ovn-kubernetes/pull/4705/commits/d63887ed167da260d3f26c71ec06e520d89a4b0f is the commit where we added logic to match on srcIP of the traffic and accordingly route it into the respective UDN patchports. So there we use the masqueradeIP of a particular UDN to determine what the source of the traffic was and route it into that particular UDN's patchport where it would backhole if there was no matching clusterIP NAT entry there, and this is how isolation was guaranteed. Recently this was changed to a hard drop: https://github.com/ovn-kubernetes/ovn-kubernetes/pull/5351/commits/dcc403c1ddf11e30e6990699616405f6dc47dd71 For l2 topology the logic is same as above for clusterIPs but for nodeports the GR itself drops the packets destined towards the other networks as there is no LB entry present on the GR as the destination IP is that of the router itself. That's how isolation works there: sample trace: next; 10. 
ls_out_apply_port_sec (northd.c:6039): 1, priority 0, uuid 2aa6ebd5 output; /* output to "stor-cluster_udn_tenant.blue.network_ovn_layer2_switch", type "l3gateway" */ ingress(dp="GR_cluster_udn_tenant.blue.network_ovn-worker2", inport="rtos-cluster_udn_tenant.blue.network_ovn_layer2_switch") ----------------------------------------------------------------------------------------------------------------------------- 0. lr_in_admission (northd.c:13232): eth.dst == 0a:58:64:41:00:03 && inport == "rtos-cluster_udn_tenant.blue.network_ovn_layer2_switch", priority 50, uuid 7f9af183 reg9[1] = check_pkt_larger(1414); xreg0[0..47] = 0a:58:64:41:00:03; next; 1. lr_in_lookup_neighbor (northd.c:13420): 1, priority 0, uuid d2672052 reg9[2] = 1; next; 2. lr_in_learn_neighbor (northd.c:13430): reg9[2] == 1 || reg9[3] == 0, priority 100, uuid 84ca0ef4 mac_cache_use; next; 3. lr_in_ip_input (northd.c:12824): ip4.dst == {172.18.0.4}, priority 60, uuid ea41c4e7 drop; Without this fix: [FAIL] BGP: isolation between advertised networks Layer3 connectivity between networks [It] pod in the UDN should not be able to access a default network service the above test will work in LGW when it should not work like is the case for non-advertised UDNs. This commit adds back the masqueradeIP flow as well for advertised networks that drops all packets that didn't get routed on the higher priority pkt_mark flows at 250. 
when 2 UDNs are advertised: this PR added back these two flows with masqueradeIP match: cookie=0xdeff105, duration=127.593s, table=2, n_packets=0, n_bytes=0, priority=200,ip,nw_src=169.254.0.12 actions=drop cookie=0xdeff105, duration=127.534s, table=2, n_packets=0, n_bytes=0, priority=200,ip,nw_src=169.254.0.14 actions=drop Signed-off-by: Surya Seetharaman --- .../pkg/node/bridgeconfig/bridgeflows.go | 25 +++++++++++-------- go-controller/pkg/node/gateway_udn_test.go | 6 +++-- test/e2e/route_advertisements.go | 5 ++-- 3 files changed, 22 insertions(+), 14 deletions(-) diff --git a/go-controller/pkg/node/bridgeconfig/bridgeflows.go b/go-controller/pkg/node/bridgeconfig/bridgeflows.go index d03b88c8de..200c1540ec 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeflows.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeflows.go @@ -349,13 +349,12 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string bridgeMacAddress, mod_vlan_id, defaultNetConfig.OfPortPatch)) // table 2, priority 200, dispatch from UDN -> Host -> OVN. These packets have - // already been SNATed to the UDN's masq IP or have been marked with the UDN's packet mark. + // already been SNATed to the UDN's masquerade IP or have been marked with the UDN's packet mark. 
if config.IPv4Mode { for _, netConfig := range b.patchedNetConfigs() { if netConfig.IsDefaultNetwork() { continue } - srcIPOrSubnet := netConfig.V4MasqIPs.ManagementPort.IP.String() if util.IsRouteAdvertisementsEnabled() && netConfig.Advertised.Load() { var udnAdvertisedSubnets []*net.IPNet for _, clusterEntry := range netConfig.Subnets { @@ -368,9 +367,14 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string klog.Infof("Unable to determine IPV4 UDN subnet for the provided family isIPV6: %v", err) continue } - - // Use the filtered subnets for the flow compute instead of the masqueradeIP - srcIPOrSubnet = matchingIPFamilySubnet.String() + // In addition to the masqueradeIP based flows, we also need the podsubnet based flows for + // advertised networks since UDN pod to clusterIP is unSNATed and we need this traffic to be taken into + // the correct patch port of it's own network where it's a deadend if the clusterIP is not part of + // that UDN network and works if it is part of the UDN network. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=200, table=2, ip, ip_src=%s, "+ + "actions=drop", + nodetypes.DefaultOpenFlowCookie, matchingIPFamilySubnet.String())) } // Drop traffic coming from the masquerade IP or the UDN subnet(for advertised UDNs) to ensure that // isolation between networks is enforced. 
This handles the case where a pod on the UDN subnet is sending traffic to @@ -378,7 +382,7 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=200, table=2, ip, ip_src=%s, "+ "actions=drop", - nodetypes.DefaultOpenFlowCookie, srcIPOrSubnet)) + nodetypes.DefaultOpenFlowCookie, netConfig.V4MasqIPs.ManagementPort.IP.String())) dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=250, table=2, ip, pkt_mark=%s, "+ @@ -393,7 +397,6 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string if netConfig.IsDefaultNetwork() { continue } - srcIPOrSubnet := netConfig.V6MasqIPs.ManagementPort.IP.String() if util.IsRouteAdvertisementsEnabled() && netConfig.Advertised.Load() { var udnAdvertisedSubnets []*net.IPNet for _, clusterEntry := range netConfig.Subnets { @@ -407,13 +410,15 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string continue } - // Use the filtered subnets for the flow compute instead of the masqueradeIP - srcIPOrSubnet = matchingIPFamilySubnet.String() + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=200, table=2, ip6, ipv6_src=%s, "+ + "actions=drop", + nodetypes.DefaultOpenFlowCookie, matchingIPFamilySubnet.String())) } dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=200, table=2, ip6, ipv6_src=%s, "+ "actions=drop", - nodetypes.DefaultOpenFlowCookie, srcIPOrSubnet)) + nodetypes.DefaultOpenFlowCookie, netConfig.V6MasqIPs.ManagementPort.IP.String())) dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=250, table=2, ip6, pkt_mark=%s, "+ "actions=set_field:%s->eth_dst,output:%s", diff --git a/go-controller/pkg/node/gateway_udn_test.go b/go-controller/pkg/node/gateway_udn_test.go index 0ab0bf573b..bd05aacd57 100644 --- a/go-controller/pkg/node/gateway_udn_test.go +++ b/go-controller/pkg/node/gateway_udn_test.go @@ -1143,7 +1143,7 @@ var _ = 
Describe("UserDefinedNetworkGateway", func() { Expect(udnGateway.AddNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(69)) // 18 UDN Flows and 5 advertisedUDN flows are added by default + Expect(flowMap["DEFAULT"]).To(HaveLen(71)) // 18 UDN Flows, 5 advertisedUDN flows, and 2 packet mark flows (IPv4+IPv6) are added by default Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(2)) // default network + UDN network defaultUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("default") bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("bluenet") @@ -1166,7 +1166,9 @@ var _ = Describe("UserDefinedNetworkGateway", func() { // Check flows for default network service CIDR. bridgeconfig.CheckDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) - // Expect exactly one flow per advertised UDN for table 2 and table 0 for service isolation. + // Expect exactly two flow per advertised UDN for table 2 and table 0 for service isolation. + // but one of the flows used by advertised UDNs is already tracked and used by default UDNs hence not + // counted here but in the check above for default svc isolation flows. bridgeconfig.CheckAdvertisedUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 2) } diff --git a/test/e2e/route_advertisements.go b/test/e2e/route_advertisements.go index d46d0a9409..026dfc5901 100644 --- a/test/e2e/route_advertisements.go +++ b/test/e2e/route_advertisements.go @@ -980,7 +980,8 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" // pod -> node traffic should use the node's IP as the source for advertised UDNs. 
return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(hostNetworkPort)) + "/clientip", clientNodeIP, false }), - ginkgo.Entry("UDN pod to the same node nodeport service in default network should work (should it? :)...)", + ginkgo.Entry("UDN pod to the same node nodeport service in default network should not work", + // FIXME: https://github.com/ovn-kubernetes/ovn-kubernetes/issues/5410 func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { clientPod := podsNetA[0] // podsNetA[0] is on nodes[0]. We need the same node. Let's hit the nodeport on nodes[0]. @@ -989,7 +990,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" nodeIP := node.Status.Addresses[ipFamilyIndex].Address nodePort := svcNetDefault.Spec.Ports[0].NodePort - return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(nodePort)) + "/hostname", "", false + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(nodePort)) + "/hostname", curlConnectionTimeoutCode, true }), ginkgo.Entry("UDN pod to a different node nodeport service in default network should work", func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { From 8f5b3d4688db4ba150431a41c49f9a5e569d8228 Mon Sep 17 00:00:00 2001 From: Surya Seetharaman Date: Sat, 12 Jul 2025 21:12:52 +0200 Subject: [PATCH 170/181] Change priority of ovn-kube-local-gw-masq to 101 Currently there are two bugs around using priority 100 for ovn-kube-local-gw-masq chain. 
EgressIPs multinic rules are still in legacy IPT: [0:0] -A OVN-KUBE-EGRESS-IP-MULTI-NIC -s 10.244.2.6/32 -o eth1 -j SNAT --to-source 10.10.10.105 [0:0] -A OVN-KUBE-EGRESS-IP-MULTI-NIC -s 10.244.0.3/32 -o eth1 -j SNAT --to-source 10.10.10.105 [1:60] -A OVN-KUBE-EGRESS-IP-MULTI-NIC -s 10.244.1.3/32 -o eth1 -j SNAT --to-source 10.10.10.105 and in netfilter the priority of NAT POSTROUTING HOOK is 100 and not configurable. NF_IP_PRI_NAT_SRC in netfilter and for NFTables it's the same value 100 for NAT POSTROUTING hook and it's called "srcnat" in knftables and set to 100. and this is the priority used by egress service feature since that is already converted to NFT: chain egress-services { type nat hook postrouting priority srcnat; policy accept; meta mark 0x000003f0 return comment "DoNotSNAT" snat ip to ip saddr map @egress-service-snat-v4 snat ip6 to ip6 saddr map @egress-service-snat-v6 } and now that we have converted POSTROUTING rules for local-gw as well to NFT, those rules were already at priority 100. Unlike IPT rules where we could jump to EIP and ESVC chains before masquerade rules got hit, here those chains in NFT are all parallel at same priority 100 and we don't know which one will be hit first. 
Hence we need to change the priority of ovn-kube-local-gw-masq so that EIP/ESVC rules are hit before the default masquerade rules W/O this change EIP/ESVC tests fail in CI Signed-off-by: Surya Seetharaman --- go-controller/pkg/node/gateway_init_linux_test.go | 2 +- go-controller/pkg/node/gateway_nftables.go | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/go-controller/pkg/node/gateway_init_linux_test.go b/go-controller/pkg/node/gateway_init_linux_test.go index 9bc0cc5401..06fe88aace 100644 --- a/go-controller/pkg/node/gateway_init_linux_test.go +++ b/go-controller/pkg/node/gateway_init_linux_test.go @@ -87,7 +87,7 @@ add rule inet ovn-kubernetes ovn-kube-local-gw-masq jump ovn-kube-udn-masq const baseLGWNFTablesRules = ` add rule inet ovn-kubernetes ovn-kube-local-gw-masq ip saddr 169.254.169.1 masquerade -add chain inet ovn-kubernetes ovn-kube-local-gw-masq { type nat hook postrouting priority 100 ; comment "OVN local gateway masquerade" ; } +add chain inet ovn-kubernetes ovn-kube-local-gw-masq { type nat hook postrouting priority 101 ; comment "OVN local gateway masquerade" ; } add rule inet ovn-kubernetes ovn-kube-local-gw-masq jump ovn-kube-pod-subnet-masq add rule inet ovn-kubernetes ovn-kube-pod-subnet-masq ip saddr 10.1.1.0/24 masquerade add chain inet ovn-kubernetes ovn-kube-pod-subnet-masq diff --git a/go-controller/pkg/node/gateway_nftables.go b/go-controller/pkg/node/gateway_nftables.go index 71a4d23b9e..b38f2baebb 100644 --- a/go-controller/pkg/node/gateway_nftables.go +++ b/go-controller/pkg/node/gateway_nftables.go @@ -366,12 +366,18 @@ func initLocalGatewayNFTNATRules(cidrs ...*net.IPNet) error { tx := nft.NewTransaction() // Create main local gateway masquerade chain + // Use priority 101 instead of defaultknftables.SNATPriority (100) to ensure + // iptables egress IP rules in OVN-KUBE-EGRESS-IP-MULTI-NIC chain run first + // this also ensure for egress-services, the + // chain egress-services { + // type nat hook 
postrouting priority srcnat; policy accept; + // is called before the local gateway masquerade chain localGwMasqChain := &knftables.Chain{ Name: nftablesLocalGatewayMasqChain, Comment: knftables.PtrTo("OVN local gateway masquerade"), Type: knftables.PtrTo(knftables.NATType), Hook: knftables.PtrTo(knftables.PostroutingHook), - Priority: knftables.PtrTo(knftables.SNATPriority), + Priority: knftables.PtrTo(knftables.BaseChainPriority("101")), } tx.Add(localGwMasqChain) From 659010cfd44af66c071e347ed1e2051494fec68e Mon Sep 17 00:00:00 2001 From: Surya Seetharaman Date: Mon, 21 Jul 2025 09:59:41 +0200 Subject: [PATCH 171/181] Add all remote nodeIPs for the PMTUD/BGP remote node NFT set Prior to this change, the remote PMTUD address sets were only considering the primary IP of the node. While that was OK for PMTUD use case perhaps but for BGP now that we reuse this address set in NFT we need to consider all the IPs on the remote nodes. So this commit changes code from using node internal IPs to using the HostCIDRs annotation Signed-off-by: Surya Seetharaman --- .../node/default_node_network_controller.go | 90 ++++++++++--------- .../default_node_network_controller_test.go | 24 +++++ go-controller/pkg/node/obj_retry_node.go | 53 ++++++----- 3 files changed, 105 insertions(+), 62 deletions(-) diff --git a/go-controller/pkg/node/default_node_network_controller.go b/go-controller/pkg/node/default_node_network_controller.go index db7d26802d..47ba8f6262 100644 --- a/go-controller/pkg/node/default_node_network_controller.go +++ b/go-controller/pkg/node/default_node_network_controller.go @@ -1515,23 +1515,32 @@ func (nc *DefaultNodeNetworkController) WatchNodes() error { func (nc *DefaultNodeNetworkController) addOrUpdateNode(node *corev1.Node) error { var nftElems []*knftables.Element var addrs []string - for _, address := range node.Status.Addresses { - if address.Type != corev1.NodeInternalIP { - continue - } - nodeIP := net.ParseIP(address.Address) - if nodeIP == nil { - 
continue - } + // Use GetNodeAddresses to get all node IPs (including current node for openflow) + ipsv4, ipsv6, err := util.GetNodeAddresses(config.IPv4Mode, config.IPv6Mode, node) + if err != nil { + return fmt.Errorf("failed to get node addresses for node %q: %w", node.Name, err) + } + + // Process IPv4 addresses + for _, nodeIP := range ipsv4 { addrs = append(addrs, nodeIP.String()) klog.Infof("Adding remote node %q, IP: %s to PMTUD blocking rules", node.Name, nodeIP) - if utilnet.IsIPv4(nodeIP) { + // Only add to nftables if this is remote node + if node.Name != nc.name { nftElems = append(nftElems, &knftables.Element{ Set: types.NFTRemoteNodeIPsv4, Key: []string{nodeIP.String()}, }) - } else { + } + } + + // Process IPv6 addresses + for _, nodeIP := range ipsv6 { + addrs = append(addrs, nodeIP.String()) + klog.Infof("Adding remote node %q, IP: %s to PMTUD blocking rules", node.Name, nodeIP) + // Only add to nftables if this is remote node + if node.Name != nc.name { nftElems = append(nftElems, &knftables.Element{ Set: types.NFTRemoteNodeIPsv6, Key: []string{nodeIP.String()}, @@ -1578,18 +1587,18 @@ func removePMTUDNodeNFTRules(nodeIPs []net.IP) error { func (nc *DefaultNodeNetworkController) deleteNode(node *corev1.Node) { gw := nc.Gateway.(*gateway) gw.openflowManager.deleteFlowsByKey(getPMTUDKey(node.Name)) - ipsToRemove := make([]net.IP, 0) - for _, address := range node.Status.Addresses { - if address.Type != corev1.NodeInternalIP { - continue - } - nodeIP := net.ParseIP(address.Address) - if nodeIP == nil { - continue - } - ipsToRemove = append(ipsToRemove, nodeIP) + + // Use GetNodeAddresses to get node IPs + ipsv4, ipsv6, err := util.GetNodeAddresses(config.IPv4Mode, config.IPv6Mode, node) + if err != nil { + klog.Errorf("Failed to get node addresses for node %q: %v", node.Name, err) + return } + ipsToRemove := make([]net.IP, 0, len(ipsv4)+len(ipsv6)) + ipsToRemove = append(ipsToRemove, ipsv4...) + ipsToRemove = append(ipsToRemove, ipsv6...) 
+ klog.Infof("Deleting NFT elements for node: %s", node.Name) if err := removePMTUDNodeNFTRules(ipsToRemove); err != nil { klog.Errorf("Failed to delete nftables rules for PMTUD blocking for node %q: %v", node.Name, err) @@ -1610,27 +1619,28 @@ func (nc *DefaultNodeNetworkController) syncNodes(objs []interface{}) error { if node.Name == nc.name { continue } - for _, address := range node.Status.Addresses { - if address.Type != corev1.NodeInternalIP { - continue - } - nodeIP := net.ParseIP(address.Address) - if nodeIP == nil { - continue - } - // Remove IPs from NFT sets - if utilnet.IsIPv4(nodeIP) { - keepNFTSetElemsV4 = append(keepNFTSetElemsV4, &knftables.Element{ - Set: types.NFTRemoteNodeIPsv4, - Key: []string{nodeIP.String()}, - }) - } else { - keepNFTSetElemsV6 = append(keepNFTSetElemsV6, &knftables.Element{ - Set: types.NFTRemoteNodeIPsv6, - Key: []string{nodeIP.String()}, - }) - } + // Use GetNodeAddresses to get node IPs + ipsv4, ipsv6, err := util.GetNodeAddresses(config.IPv4Mode, config.IPv6Mode, node) + if err != nil { + klog.Errorf("Failed to get node addresses for node %q: %v", node.Name, err) + continue + } + + // Process IPv4 addresses + for _, nodeIP := range ipsv4 { + keepNFTSetElemsV4 = append(keepNFTSetElemsV4, &knftables.Element{ + Set: types.NFTRemoteNodeIPsv4, + Key: []string{nodeIP.String()}, + }) + } + + // Process IPv6 addresses + for _, nodeIP := range ipsv6 { + keepNFTSetElemsV6 = append(keepNFTSetElemsV6, &knftables.Element{ + Set: types.NFTRemoteNodeIPsv6, + Key: []string{nodeIP.String()}, + }) } } if err := recreateNFTSet(types.NFTRemoteNodeIPsv4, keepNFTSetElemsV4); err != nil { diff --git a/go-controller/pkg/node/default_node_network_controller_test.go b/go-controller/pkg/node/default_node_network_controller_test.go index 24c3141357..366ee881d6 100644 --- a/go-controller/pkg/node/default_node_network_controller_test.go +++ b/go-controller/pkg/node/default_node_network_controller_test.go @@ -755,6 +755,9 @@ var _ = Describe("Node", 
func() { node := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: nodeName, + Annotations: map[string]string{ + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", nodeIP+"/24"), + }, }, Status: corev1.NodeStatus{ Addresses: []corev1.NodeAddress{ @@ -769,6 +772,9 @@ var _ = Describe("Node", func() { otherNode := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: remoteNodeName, + Annotations: map[string]string{ + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", otherNodeIP+"/24"), + }, }, Status: corev1.NodeStatus{ Addresses: []corev1.NodeAddress{ @@ -860,6 +866,9 @@ add element inet ovn-kubernetes remote-node-ips-v4 { 169.254.254.61 } node := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: nodeName, + Annotations: map[string]string{ + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", nodeIP+"/24"), + }, }, Status: corev1.NodeStatus{ Addresses: []corev1.NodeAddress{ @@ -874,6 +883,9 @@ add element inet ovn-kubernetes remote-node-ips-v4 { 169.254.254.61 } otherNode := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: remoteNodeName, + Annotations: map[string]string{ + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", otherSubnetNodeIP+"/24"), + }, }, Status: corev1.NodeStatus{ Addresses: []corev1.NodeAddress{ @@ -1007,6 +1019,9 @@ add element inet ovn-kubernetes remote-node-ips-v4 { 169.254.253.61 } node := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: nodeName, + Annotations: map[string]string{ + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", nodeIP+"/64"), + }, }, Status: corev1.NodeStatus{ Addresses: []corev1.NodeAddress{ @@ -1021,6 +1036,9 @@ add element inet ovn-kubernetes remote-node-ips-v4 { 169.254.253.61 } otherNode := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: remoteNodeName, + Annotations: map[string]string{ + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", otherNodeIP+"/64"), + }, }, Status: corev1.NodeStatus{ Addresses: []corev1.NodeAddress{ @@ -1111,6 +1129,9 @@ add element inet ovn-kubernetes remote-node-ips-v6 { 2001:db8:1::4 } node := corev1.Node{ 
ObjectMeta: metav1.ObjectMeta{ Name: nodeName, + Annotations: map[string]string{ + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", nodeIP+"/64"), + }, }, Status: corev1.NodeStatus{ Addresses: []corev1.NodeAddress{ @@ -1125,6 +1146,9 @@ add element inet ovn-kubernetes remote-node-ips-v6 { 2001:db8:1::4 } otherNode := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: remoteNodeName, + Annotations: map[string]string{ + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", otherSubnetNodeIP+"/64"), + }, }, Status: corev1.NodeStatus{ Addresses: []corev1.NodeAddress{ diff --git a/go-controller/pkg/node/obj_retry_node.go b/go-controller/pkg/node/obj_retry_node.go index 9c9657678e..646cca2ac3 100644 --- a/go-controller/pkg/node/obj_retry_node.go +++ b/go-controller/pkg/node/obj_retry_node.go @@ -238,34 +238,43 @@ func (h *nodeEventHandler) UpdateResource(oldObj, newObj interface{}, _ bool) er return nil } - // remote node that is changing - ipsToKeep := map[string]bool{} - for _, address := range newNode.Status.Addresses { - if address.Type != corev1.NodeInternalIP { - continue + if util.NodeHostCIDRsAnnotationChanged(oldNode, newNode) { + // remote node that is changing + // Use GetNodeAddresses to get new node IPs + newIPsv4, newIPsv6, err := util.GetNodeAddresses(config.IPv4Mode, config.IPv6Mode, newNode) + if err != nil { + return fmt.Errorf("failed to get addresses for new node %q: %w", newNode.Name, err) } - nodeIP := net.ParseIP(address.Address) - if nodeIP == nil { - continue + + ipsToKeep := map[string]bool{} + for _, nodeIP := range newIPsv4 { + ipsToKeep[nodeIP.String()] = true } - ipsToKeep[nodeIP.String()] = true - } - ipsToRemove := make([]net.IP, 0) - for _, address := range oldNode.Status.Addresses { - if address.Type != corev1.NodeInternalIP { - continue + for _, nodeIP := range newIPsv6 { + ipsToKeep[nodeIP.String()] = true } - nodeIP := net.ParseIP(address.Address) - if nodeIP == nil { - continue + + // Use GetNodeAddresses to get old node IPs + oldIPsv4, 
oldIPsv6, err := util.GetNodeAddresses(config.IPv4Mode, config.IPv6Mode, oldNode) + if err != nil { + return fmt.Errorf("failed to get addresses for old node %q: %w", oldNode.Name, err) } - if _, exists := ipsToKeep[nodeIP.String()]; !exists { - ipsToRemove = append(ipsToRemove, nodeIP) + + ipsToRemove := make([]net.IP, 0) + for _, nodeIP := range oldIPsv4 { + if _, exists := ipsToKeep[nodeIP.String()]; !exists { + ipsToRemove = append(ipsToRemove, nodeIP) + } + } + for _, nodeIP := range oldIPsv6 { + if _, exists := ipsToKeep[nodeIP.String()]; !exists { + ipsToRemove = append(ipsToRemove, nodeIP) + } } - } - if err := removePMTUDNodeNFTRules(ipsToRemove); err != nil { - return fmt.Errorf("error removing node %q stale NFT rules during update: %w", oldNode.Name, err) + if err := removePMTUDNodeNFTRules(ipsToRemove); err != nil { + return fmt.Errorf("error removing node %q stale NFT rules during update: %w", oldNode.Name, err) + } } return h.nc.addOrUpdateNode(newNode) From 0635caef5d9a60f132b99889dd8a1376663c4294 Mon Sep 17 00:00:00 2001 From: Surya Seetharaman Date: Mon, 21 Jul 2025 22:42:32 +0200 Subject: [PATCH 172/181] cleanupStalePodSNATs: Don't blow all SNATs for advertised Networks Signed-off-by: Surya Seetharaman --- go-controller/pkg/ovn/gateway.go | 11 +++++------ go-controller/pkg/ovn/master_test.go | 25 ++++++++++++++++++++++--- 2 files changed, 27 insertions(+), 9 deletions(-) diff --git a/go-controller/pkg/ovn/gateway.go b/go-controller/pkg/ovn/gateway.go index 66bce44dfe..d652dbcd8a 100644 --- a/go-controller/pkg/ovn/gateway.go +++ b/go-controller/pkg/ovn/gateway.go @@ -143,8 +143,8 @@ func WithLoadBalancerGroups(routerLBGroup, clusterLBGroup, switchLBGroup string) } // cleanupStalePodSNATs removes pod SNATs against nodeIP for the given node if -// the SNAT.logicalIP isn't an active podIP, the pod network is being advertised -// on this node or disableSNATMultipleGWs=false. 
We don't have to worry about +// the SNAT.logicalIP isn't an active podIP, or disableSNATMultipleGWs=false. +// We don't have to worry about // missing SNATs that should be added because addLogicalPort takes care of this // for all pods when RequestRetryObjs is called for each node add. // Other non-pod SNATs like join subnet SNATs are ignored. @@ -154,11 +154,11 @@ func WithLoadBalancerGroups(routerLBGroup, clusterLBGroup, switchLBGroup string) // pod->nodeSNATs which won't get cleared up unless explicitly deleted. // NOTE2: egressIP SNATs are synced in EIP controller. func (gw *GatewayManager) cleanupStalePodSNATs(nodeName string, nodeIPs []*net.IPNet, gwLRPIPs []net.IP) error { - // collect all the pod IPs for which we should be doing the SNAT; if the pod - // network is advertised or DisableSNATMultipleGWs==false we consider all + // collect all the pod IPs for which we should be doing the SNAT; + // if DisableSNATMultipleGWs==false we consider all // the SNATs stale podIPsWithSNAT := sets.New[string]() - if !gw.isRoutingAdvertised(nodeName) && config.Gateway.DisableSNATMultipleGWs { + if config.Gateway.DisableSNATMultipleGWs { pods, err := gw.watchFactory.GetAllPods() if err != nil { return fmt.Errorf("unable to list existing pods on node: %s, %w", @@ -231,7 +231,6 @@ func (gw *GatewayManager) cleanupStalePodSNATs(nodeName string, nodeIPs []*net.I } natsToDelete = append(natsToDelete, routerNat) } - if len(natsToDelete) > 0 { err := libovsdbops.DeleteNATs(gw.nbClient, gatewayRouter, natsToDelete...) 
if err != nil { diff --git a/go-controller/pkg/ovn/master_test.go b/go-controller/pkg/ovn/master_test.go index 866b6309fa..197ecf5c24 100644 --- a/go-controller/pkg/ovn/master_test.go +++ b/go-controller/pkg/ovn/master_test.go @@ -1259,6 +1259,12 @@ var _ = ginkgo.Describe("Default network controller operations", func() { newNodeSNAT("stale-nodeNAT-UUID-3", "10.0.0.3", Node1GatewayRouterIP), newNodeSNAT("stale-nodeNAT-UUID-4", "10.0.0.3", "172.16.16.3"), } + extraNatsWithMatch := []*nbdb.NAT{ // used for pod network advertised test + newNodeSNATWithMatch("stale-nodeNAT-UUID-1", "10.1.0.3", Node1GatewayRouterIP, "ip4.dst == $a712973235162149816"), + newNodeSNATWithMatch("stale-nodeNAT-UUID-2", "10.2.0.3", Node1GatewayRouterIP, "ip4.dst == $a712973235162149816"), + newNodeSNATWithMatch("stale-nodeNAT-UUID-3", "10.0.0.3", Node1GatewayRouterIP, "ip4.dst == $a712973235162149816"), + newNodeSNATWithMatch("stale-nodeNAT-UUID-4", "10.0.0.3", "172.16.16.3", "ip4.dst == $a712973235162149816"), + } ginkgo.DescribeTable( "reconciles pod network SNATs from syncGateway", func(condition func(*DefaultNetworkController) error, expectedExtraNATs ...*nbdb.NAT) { @@ -1284,7 +1290,11 @@ var _ = ginkgo.Describe("Default network controller operations", func() { GR := &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, } - err = libovsdbops.CreateOrUpdateNATs(nbClient, GR, extraNats...) + if !oc.isPodNetworkAdvertisedAtNode(node1.Name) { + err = libovsdbops.CreateOrUpdateNATs(nbClient, GR, extraNats...) + } else { + err = libovsdbops.CreateOrUpdateNATs(nbClient, GR, extraNatsWithMatch...) 
+ } gomega.Expect(err).NotTo(gomega.HaveOccurred()) // ensure the stale SNAT's are cleaned up @@ -1366,7 +1376,10 @@ var _ = ginkgo.Describe("Default network controller operations", func() { mutableNetInfo.SetPodNetworkAdvertisedVRFs(map[string][]string{"node1": {"vrf"}}) return oc.Reconcile(mutableNetInfo) }, - newNodeSNAT("stale-nodeNAT-UUID-4", "10.0.0.3", "172.16.16.3"), // won't be deleted on this node but will be deleted on the node whose IP is 172.16.16.3 since this pod belongs to this node + // won't be deleted on this node since this pod belongs to node-1 and is advertised so we keep this SNAT + newNodeSNATWithMatch("stale-nodeNAT-UUID-3", "10.0.0.3", Node1GatewayRouterIP, "ip4.dst == $a712973235162149816"), + // won't be deleted on this node but will be deleted on the node whose IP is 172.16.16.3 since this pod belongs to node-1 + newNodeSNATWithMatch("stale-nodeNAT-UUID-4", "10.0.0.3", "172.16.16.3", "ip4.dst == $a712973235162149816"), ), ginkgo.Entry( "When pod network is advertised and DisableSNATMultipleGWs is false", @@ -1377,7 +1390,7 @@ var _ = ginkgo.Describe("Default network controller operations", func() { mutableNetInfo.SetPodNetworkAdvertisedVRFs(map[string][]string{"node1": {"vrf"}}) return oc.Reconcile(mutableNetInfo) }, - newNodeSNAT("stale-nodeNAT-UUID-4", "10.0.0.3", "172.16.16.3"), // won't be deleted on this node but will be deleted on the node whose IP is 172.16.16.3 since this pod belongs to this node + newNodeSNATWithMatch("stale-nodeNAT-UUID-4", "10.0.0.3", "172.16.16.3", "ip4.dst == $a712973235162149816"), // won't be deleted on this node but will be deleted on the node whose IP is 172.16.16.3 since this pod belongs to this node ), ) @@ -1982,6 +1995,12 @@ func newNodeSNAT(uuid, logicalIP, externalIP string) *nbdb.NAT { } } +func newNodeSNATWithMatch(uuid, logicalIP, externalIP, match string) *nbdb.NAT { + nat := newNodeSNAT(uuid, logicalIP, externalIP) + nat.Match = match + return nat +} + func TestController_syncNodes(t 
*testing.T) { gomega.RegisterFailHandler(ginkgo.Fail) From 5056d4da4e8177e32f9aa00121acc43d712d369a Mon Sep 17 00:00:00 2001 From: Surya Seetharaman Date: Wed, 23 Jul 2025 11:17:08 +0200 Subject: [PATCH 173/181] Fix CreateOrUpdateNATs to update non-default values When using the onModelUpdatesAllNonDefault() from NAT updates, it wasn't updating match value when we wanted to reset it. So when we went from advertised network to non-advertised network, we were not changing the SNAT match and hence traffic was still going out with podIP instead of nodeIP. This commit fixes that. Signed-off-by: Surya Seetharaman --- go-controller/pkg/libovsdb/ops/router.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/go-controller/pkg/libovsdb/ops/router.go b/go-controller/pkg/libovsdb/ops/router.go index 27e8e38d48..5f0ce594d4 100644 --- a/go-controller/pkg/libovsdb/ops/router.go +++ b/go-controller/pkg/libovsdb/ops/router.go @@ -932,6 +932,11 @@ func RemoveLoadBalancersFromLogicalRouterOps(nbClient libovsdbclient.Client, ops return ops, err } +func getNATMutableFields(nat *nbdb.NAT) []interface{} { + return []interface{}{&nat.Type, &nat.ExternalIP, &nat.LogicalIP, &nat.LogicalPort, &nat.ExternalMAC, + &nat.ExternalIDs, &nat.Match, &nat.Options, &nat.ExternalPortRange, &nat.GatewayPort, &nat.Priority} +} + func buildNAT( natType nbdb.NATType, externalIP string, @@ -1152,7 +1157,7 @@ func CreateOrUpdateNATsOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation } opModel := operationModel{ Model: inputNat, - OnModelUpdates: onModelUpdatesAllNonDefault(), + OnModelUpdates: getNATMutableFields(inputNat), ErrNotFound: false, BulkOp: false, DoAfter: func() { router.Nat = append(router.Nat, inputNat.UUID) }, @@ -1280,7 +1285,7 @@ func UpdateNATOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, nats .. 
opModel := []operationModel{ { Model: nat, - OnModelUpdates: onModelUpdatesAllNonDefault(), + OnModelUpdates: getNATMutableFields(nat), ErrNotFound: true, BulkOp: false, }, From bcd06566dd363440e6e0719e9ddec35425bae5fa Mon Sep 17 00:00:00 2001 From: Surya Seetharaman Date: Mon, 28 Jul 2025 16:38:13 +0200 Subject: [PATCH 174/181] Bump OVN to 25.03 Signed-off-by: Surya Seetharaman --- dist/images/Dockerfile.fedora | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dist/images/Dockerfile.fedora b/dist/images/Dockerfile.fedora index 4ca51e888f..fc42191887 100644 --- a/dist/images/Dockerfile.fedora +++ b/dist/images/Dockerfile.fedora @@ -79,7 +79,7 @@ RUN git log -n 1 # Stage to download OVN RPMs from koji # ######################################## FROM quay.io/fedora/fedora:42 AS kojidownloader -ARG ovnver=ovn-24.09.2-71.fc42 +ARG ovnver=ovn-25.03.1-42.fc42 USER root From e8fc76449f4f2a3259c4576de41abe6f3cd2110f Mon Sep 17 00:00:00 2001 From: Surya Seetharaman Date: Mon, 28 Jul 2025 20:35:55 +0200 Subject: [PATCH 175/181] UDN,L2: UDN pod in networkA to nodePort on networkB works for IPV6! See https://github.com/ovn-kubernetes/ovn-kubernetes/issues/5419 for details But the traffic flow looks like this for Layer3(v4 and v6) and Layer2(v4): pod in UDN A -> sameNodeIP:NodePort i.e 172.18.0.2:30724 pod (102.102.2.4)-> ovn-switch->ovn-cluster-router (SNAT to masqueradeIP 169.254.0.14)-> LRP send to mpX -> in the host (IPTable DNAT from nodePort to clusterIP 10.96.96.233:8080) send to breth0 breth0 flows reroute packet to UDN B's patchport hits the GR of UDNB and DNATs from clusterIP to backend pod that lives on another node (103.103.1.5) at the same time SNAT to joinIP in OVN router i.e 100.65.0.4 reponse comes back from remote pod and then we see ARP requests trying to understand how to reach the masqueradeIP of the other network which makes total sense - so reply fails NetworkB doesn't know how to reach back to NetworkA's masqueradeIP which is the srcIP. 
root@ovn-control-plane:/# tcpdump -i any -nneev port 36363 or port 30724 or host 102.102.2.4 or host 169.254.0.14 or host 100.65.0.4 tcpdump: data link type LINUX_SLL2 tcpdump: listening on any, link-type LINUX_SLL2 (Linux cooked v2), snapshot length 262144 bytes 08:55:14.083364 865a53b516350_3 P ifindex 19 0a:58:66:66:02:04 ethertype IPv4 (0x0800), length 80: (tos 0x0, ttl 64, id 53100, offset 0, flags [DF], proto TCP (6), length 60) 102.102.2.4.42720 > 172.18.0.2.30724: Flags [S], cksum 0x14ad (incorrect -> 0x5e6c), seq 432663101, win 65280, options [mss 1360,sackOK,TS val 1239378349 ecr 0,nop,wscale 7], length 0 08:55:14.084049 ovn-k8s-mp2 In ifindex 14 0a:58:66:66:02:01 ethertype IPv4 (0x0800), length 80: (tos 0x0, ttl 63, id 53100, offset 0, flags [DF], proto TCP (6), length 60) 169.254.0.14.42826 > 172.18.0.2.30724: Flags [S], cksum 0x1c60 (correct), seq 432663101, win 65280, options [mss 1360,sackOK,TS val 1239378349 ecr 0,nop,wscale 7], length 0 08:55:14.084069 breth0 Out ifindex 6 6a:ed:17:fb:28:bd ethertype IPv4 (0x0800), length 80: (tos 0x0, ttl 62, id 53100, offset 0, flags [DF], proto TCP (6), length 60) 169.254.0.14.42826 > 10.96.96.233.8080: Flags [S], cksum 0xb59f (correct), seq 432663101, win 65280, options [mss 1360,sackOK,TS val 1239378349 ecr 0,nop,wscale 7], length 0 08:55:14.084470 genev_sys_6081 Out ifindex 7 0a:58:64:58:00:04 ethertype IPv4 (0x0800), length 80: (tos 0x0, ttl 60, id 53100, offset 0, flags [DF], proto TCP (6), length 60) 100.65.0.4.42826 > 103.103.1.5.8080: Flags [S], cksum 0xfe43 (correct), seq 432663101, win 65280, options [mss 1360,sackOK,TS val 1239378349 ecr 0,nop,wscale 7], length 0 08:55:14.085494 genev_sys_6081 P ifindex 7 0a:58:64:58:00:02 ethertype IPv4 (0x0800), length 80: (tos 0x0, ttl 63, id 0, offset 0, flags [DF], proto TCP (6), length 60) 103.103.1.5.8080 > 100.65.0.4.42826: Flags [S.], cksum 0x1f4f (correct), seq 3390013464, ack 432663102, win 64704, options [mss 1360,sackOK,TS val 1866737591 ecr 
1239378349,nop,wscale 7], length 0 08:55:14.086130 eth0 Out ifindex 2 6a:ed:17:fb:28:bd ethertype ARP (0x0806), length 48: Ethernet (len 6), IPv4 (len 4), Request who-has 169.254.0.14 tell 169.254.0.15, length 28 08:55:14.086172 breth0 B ifindex 6 6a:ed:17:fb:28:bd ethertype ARP (0x0806), length 48: Ethernet (len 6), IPv4 (len 4), Request who-has 169.254.0.14 tell 169.254.0.15, length 28 08:55:15.100558 genev_sys_6081 P ifindex 7 0a:58:64:58:00:02 ethertype IPv4 (0x0800), length 80: (tos 0x0, ttl 63, id 0, offset 0, flags [DF], proto TCP (6), length 60) 103.103.1.5.8080 > 100.65.0.4.42826: Flags [S.], cksum 0xccdf (incorrect -> 0x1b57), seq 3390013464, ack 432663102, win 64704, options [mss 1360,sackOK,TS val 1866738607 ecr 1239378349,nop,wscale 7], length 0 08:55:15.101090 eth0 Out ifindex 2 6a:ed:17:fb:28:bd ethertype ARP (0x0806), length 48: Ethernet (len 6), IPv4 (len 4), Request who-has 169.254.0.14 tell 169.254.0.15, length 28 08:55:15.101124 breth0 B ifindex 6 6a:ed:17:fb:28:bd ethertype ARP (0x0806), length 48: Ethernet (len 6), IPv4 (len 4), Request who-has 169.254.0.14 tell 169.254.0.15, length 28 ^ its the same for Layer3 v6 as well and same for Layer2 v4 ^^ but Layer2 v6 is weird thanks to: // cookie=0xdeff105, duration=173.245s, table=1, n_packets=0, n_bytes=0, idle_age=173, priority=14,icmp6,icmp_type=134 actions=FLOOD // cookie=0xdeff105, duration=173.245s, table=1, n_packets=8, n_bytes=640, idle_age=4, priority=14,icmp6,icmp_type=136 actions=FLOOD these two flows on breth0 - these seem to be flooding the NDP requests between the GR's of all networks somehow and v6 works. So test is acknowledging this inconsistency and calling this out. 
Signed-off-by: Surya Seetharaman --- test/e2e/route_advertisements.go | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/test/e2e/route_advertisements.go b/test/e2e/route_advertisements.go index 026dfc5901..36c0c5c950 100644 --- a/test/e2e/route_advertisements.go +++ b/test/e2e/route_advertisements.go @@ -1035,15 +1035,40 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" }), ginkgo.Entry("UDN pod to the same node nodeport service in different UDN network should not work", // FIXME: This test should work: https://github.com/ovn-kubernetes/ovn-kubernetes/issues/5419 + // This traffic flow is expected to work eventually but doesn't work today on Layer3 (v4 and v6) and Layer2 (v4 only) networks. + // Reason it doesn't work today is because UDN networks don't have MAC bindings for masqueradeIPs of other networks. + // Traffic flow: UDN pod in network A -> samenode nodeIP:nodePort service of networkB + // UDN pod in networkA -> ovn-switch -> ovn-cluster-router (SNAT to masqueradeIP of networkA) -> mpX interface -> + // enters the host and hits IPTables rules to DNAT to clusterIP:Port of service of networkB. + // Then it hits the pkt_mark flows on breth0 and get's sent into networkB's patchport where it hits the GR. + // On the GR we DNAT to backend pod and SNAT to joinIP. + // Reply: Pod replies and now OVN in networkB tries to ARP for the masqueradeIP of networkA which is the source and simply + // fails as it doesn't know how to reach this masqueradeIP. + // There is also inconsistency in behaviour within Layer2 networks for how IPv4 works and how IPv6 works where the traffic + // works on ipv6 because of the flows described below. 
func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { clientPod := podsNetA[0] node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodes.Items[0].Name, metav1.GetOptions{}) framework.ExpectNoError(err) nodeIP := node.Status.Addresses[ipFamilyIndex].Address nodePort := svcNetB.Spec.Ports[0].NodePort + out := curlConnectionTimeoutCode + errBool := true + if ipFamilyIndex == ipFamilyV6 && cudnATemplate.Spec.Network.Topology == udnv1.NetworkTopologyLayer2 { + // For Layer2 networks, we have these flows we add on breth0: + // cookie=0xdeff105, duration=173.245s, table=1, n_packets=0, n_bytes=0, idle_age=173, priority=14,icmp6,icmp_type=134 actions=FLOOD + // cookie=0xdeff105, duration=173.245s, table=1, n_packets=8, n_bytes=640, idle_age=4, priority=14,icmp6,icmp_type=136 actions=FLOOD + // which floods the Router Advertisement (RA, type 134) and Neighbor Advertisement (NA, type 136) + // Given on Layer2 the GR has the SNATs for both masqueradeIPs this works perfectly well and + // the networks are able to NDP for the masqueradeIPs for the other networks. + // This doesn't work on Layer3 networks since masqueradeIP SNATs are present on the ovn-cluster-router in that case. + // See the tcpdump on the issue: https://github.com/ovn-kubernetes/ovn-kubernetes/issues/5410 for more details. 
+ out = "" + errBool = false + } // sourceIP will be joinSubnetIP for nodeports, so only using hostname endpoint - return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(nodePort)) + "/hostname", curlConnectionTimeoutCode, true + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(nodePort)) + "/hostname", out, errBool }), ginkgo.Entry("UDN pod to a different node nodeport service in different UDN network should work", func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { From 9b21fc066d954bacd511a899c3bca464fc97e21a Mon Sep 17 00:00:00 2001 From: Surya Seetharaman Date: Tue, 29 Jul 2025 11:09:19 +0200 Subject: [PATCH 176/181] Change OVN-Kubernetes community meeting time Makes this EMEA/US friendly. Signed-off-by: Surya Seetharaman --- MEETINGS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MEETINGS.md b/MEETINGS.md index 701459788f..8964025628 100644 --- a/MEETINGS.md +++ b/MEETINGS.md @@ -6,7 +6,7 @@ All are welcome to join our meetings! If you want to discuss something with the ## Meeting time -We meet alternate Monday's at 6:00 PM CET/CEST. +We meet alternate Monday's at 5:00 PM CET/CEST. In order to figure out when our next meeting is, please check our agenda for previous meeting history. The meetings last up to 1 hour. From cc6fe11a70b5c215883f4abf9d099523cc457956 Mon Sep 17 00:00:00 2001 From: Miguel Duarte Barroso Date: Thu, 19 Jun 2025 10:48:16 +0100 Subject: [PATCH 177/181] udn, pre assigned port net ids: provision the default net NAD CR The "pre assigned port net ids" feature requires a NAD for the `default` network to be provisioned. This commit pre-provisions that NAD whenever the feature - EnableCustomNetworkConfig - is enabled, upon starting the cluster manager. 
Signed-off-by: Miguel Duarte Barroso --- .../pkg/clustermanager/clustermanager_test.go | 107 +++++++++++++++++- .../routeadvertisements/controller.go | 30 +---- .../routeadvertisements/controller_test.go | 2 +- .../userdefinednetwork/controller.go | 6 + go-controller/pkg/util/multi_network.go | 6 + go-controller/pkg/util/nad.go | 46 ++++++++ 6 files changed, 161 insertions(+), 36 deletions(-) create mode 100644 go-controller/pkg/util/nad.go diff --git a/go-controller/pkg/clustermanager/clustermanager_test.go b/go-controller/pkg/clustermanager/clustermanager_test.go index f97de8fc3f..66535f4c8a 100644 --- a/go-controller/pkg/clustermanager/clustermanager_test.go +++ b/go-controller/pkg/clustermanager/clustermanager_test.go @@ -34,10 +34,9 @@ const ( var _ = ginkgo.Describe("Cluster Manager", func() { var ( - app *cli.App - f *factory.WatchFactory - stopChan chan struct{} - wg *sync.WaitGroup + app *cli.App + f *factory.WatchFactory + wg *sync.WaitGroup ) const ( @@ -54,12 +53,10 @@ var _ = ginkgo.Describe("Cluster Manager", func() { app = cli.NewApp() app.Name = "test" app.Flags = config.Flags - stopChan = make(chan struct{}) wg = &sync.WaitGroup{} }) ginkgo.AfterEach(func() { - close(stopChan) if f != nil { f.Shutdown() } @@ -1436,4 +1433,102 @@ var _ = ginkgo.Describe("Cluster Manager", func() { }) }) + ginkgo.Context("starting the cluster manager", func() { + const networkName = "default" + + var fakeClient *util.OVNClusterManagerClientset + + ginkgo.BeforeEach(func() { + fakeClient = util.GetOVNClientset().GetClusterManagerClientset() + }) + + ginkgo.When("the required features are not enabled", func() { + ginkgo.It("does *not* automatically provision a NAD for the default network", func() { + app.Action = func(ctx *cli.Context) error { + _, err := config.InitConfig(ctx, nil, nil) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + f, err = factory.NewClusterManagerWatchFactory(fakeClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + clusterMngr, err 
:= clusterManager(fakeClient, f) + gomega.Expect(clusterMngr).NotTo(gomega.BeNil()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(clusterMngr.Start(ctx.Context)).To(gomega.Succeed()) + + _, err = fakeClient.NetworkAttchDefClient. + K8sCniCncfIoV1(). + NetworkAttachmentDefinitions(config.Kubernetes.OVNConfigNamespace). + Get( + context.Background(), + networkName, + metav1.GetOptions{}, + ) + gomega.Expect(err).To( + gomega.MatchError("network-attachment-definitions.k8s.cni.cncf.io \"default\" not found"), + ) + + return nil + } + gomega.Expect(app.Run([]string{app.Name})).To(gomega.Succeed()) + }) + }) + + ginkgo.When("the multi-network, network-segmentation, and preconfigured-udn-addresses features are enabled", func() { + ginkgo.BeforeEach(func() { + config.OVNKubernetesFeature.EnableMultiNetwork = true + config.OVNKubernetesFeature.EnableNetworkSegmentation = true + config.OVNKubernetesFeature.EnablePreconfiguredUDNAddresses = true + }) + + ginkgo.It("automatically provisions a NAD for the default network", func() { + app.Action = func(ctx *cli.Context) error { + _, err := config.InitConfig(ctx, nil, nil) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + f, err = factory.NewClusterManagerWatchFactory(fakeClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + clusterMngr, err := clusterManager(fakeClient, f) + gomega.Expect(clusterMngr).NotTo(gomega.BeNil()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + c, cancel := context.WithCancel(ctx.Context) + defer cancel() + gomega.Expect(clusterMngr.Start(c)).To(gomega.Succeed()) + defer clusterMngr.Stop() + + nad, err := fakeClient.NetworkAttchDefClient. + K8sCniCncfIoV1(). + NetworkAttachmentDefinitions(config.Kubernetes.OVNConfigNamespace). 
+ Get( + context.Background(), + networkName, + metav1.GetOptions{}, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + const expectedNADContents = `{"cniVersion": "0.4.0", "name": "ovn-kubernetes", "type": "ovn-k8s-cni-overlay"}` + gomega.Expect(nad.Spec.Config).To(gomega.Equal(expectedNADContents)) + + return nil + } + gomega.Expect(app.Run([]string{app.Name})).To(gomega.Succeed()) + }) + }) + }) + }) + +func clusterManager(client *util.OVNClusterManagerClientset, f *factory.WatchFactory) (*ClusterManager, error) { + if err := f.Start(); err != nil { + return nil, fmt.Errorf("failed to start the CM watch factory: %w", err) + } + + clusterMngr, err := NewClusterManager(client, f, "identity", nil) + if err != nil { + return nil, fmt.Errorf("failed to start the CM watch factory: %w", err) + } + + return clusterMngr, nil +} diff --git a/go-controller/pkg/clustermanager/routeadvertisements/controller.go b/go-controller/pkg/clustermanager/routeadvertisements/controller.go index 18fb3dbaae..903f5622f2 100644 --- a/go-controller/pkg/clustermanager/routeadvertisements/controller.go +++ b/go-controller/pkg/clustermanager/routeadvertisements/controller.go @@ -1016,7 +1016,7 @@ func (c *Controller) getSelectedNADs(networkSelectors apitypes.NetworkSelectors) case apitypes.DefaultNetwork: // if we are selecting the default networkdefault network label, // make sure a NAD exists for it - nad, err := c.getOrCreateDefaultNetworkNAD() + nad, err := util.EnsureDefaultNetworkNAD(c.nadLister, c.nadClient) if err != nil { return nil, fmt.Errorf("failed to get/create default network NAD: %w", err) } @@ -1047,34 +1047,6 @@ func (c *Controller) getSelectedNADs(networkSelectors apitypes.NetworkSelectors) return selected, nil } -// getOrCreateDefaultNetworkNAD ensure that a well-known NAD exists for the -// default network in ovn-k namespace. 
-func (c *Controller) getOrCreateDefaultNetworkNAD() (*nadtypes.NetworkAttachmentDefinition, error) { - nad, err := c.nadLister.NetworkAttachmentDefinitions(config.Kubernetes.OVNConfigNamespace).Get(types.DefaultNetworkName) - if err != nil && !apierrors.IsNotFound(err) { - return nil, err - } - if nad != nil { - return nad, nil - } - return c.nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(config.Kubernetes.OVNConfigNamespace).Create( - context.Background(), - &nadtypes.NetworkAttachmentDefinition{ - ObjectMeta: metav1.ObjectMeta{ - Name: types.DefaultNetworkName, - Namespace: config.Kubernetes.OVNConfigNamespace, - }, - Spec: nadtypes.NetworkAttachmentDefinitionSpec{ - Config: fmt.Sprintf("{\"cniVersion\": \"0.4.0\", \"name\": \"ovn-kubernetes\", \"type\": \"%s\"}", config.CNI.Plugin), - }, - }, - // note we don't set ourselves as field manager for this create as we - // want to process the resulting event that would otherwise be filtered - // out in nadNeedsUpdate - metav1.CreateOptions{}, - ) -} - // getEgressIPsByNodesByNetworks iterates all existing egress IPs that apply to // any of the provided networks and returns a "node -> network -> eips" // map. 
diff --git a/go-controller/pkg/clustermanager/routeadvertisements/controller_test.go b/go-controller/pkg/clustermanager/routeadvertisements/controller_test.go index 305418425c..86c711987e 100644 --- a/go-controller/pkg/clustermanager/routeadvertisements/controller_test.go +++ b/go-controller/pkg/clustermanager/routeadvertisements/controller_test.go @@ -1051,7 +1051,7 @@ func TestController_reconcile(t *testing.T) { g.Expect(err).ToNot(gomega.HaveOccurred()) // prime the default network NAD if defaultNAD == nil { - defaultNAD, err = c.getOrCreateDefaultNetworkNAD() + defaultNAD, err = util.EnsureDefaultNetworkNAD(c.nadLister, c.nadClient) g.Expect(err).ToNot(gomega.HaveOccurred()) // update it with the annotation that network manager would set defaultNAD.Annotations = map[string]string{types.OvnNetworkNameAnnotation: types.DefaultNetworkName} diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/controller.go b/go-controller/pkg/clustermanager/userdefinednetwork/controller.go index 67292bd2ed..14963b9ed9 100644 --- a/go-controller/pkg/clustermanager/userdefinednetwork/controller.go +++ b/go-controller/pkg/clustermanager/userdefinednetwork/controller.go @@ -150,6 +150,12 @@ func (c *Controller) Run() error { return fmt.Errorf("unable to start user-defined network controller: %v", err) } + if util.IsPreconfiguredUDNAddressesEnabled() { + if _, err := util.EnsureDefaultNetworkNAD(c.nadLister, c.nadClient); err != nil { + return fmt.Errorf("failed to ensure default network nad exists: %w", err) + } + } + return nil } diff --git a/go-controller/pkg/util/multi_network.go b/go-controller/pkg/util/multi_network.go index fd91edd3be..d4f8b2a948 100644 --- a/go-controller/pkg/util/multi_network.go +++ b/go-controller/pkg/util/multi_network.go @@ -1378,6 +1378,12 @@ func IsRouteAdvertisementsEnabled() bool { return config.OVNKubernetesFeature.EnableMultiNetwork && config.OVNKubernetesFeature.EnableRouteAdvertisements } +// IsPreconfiguredUDNAddressesEnabled indicates 
if user defined IPs / MAC +// addresses can be set in primary UDNs +func IsPreconfiguredUDNAddressesEnabled() bool { + return IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnablePreconfiguredUDNAddresses +} + func DoesNetworkRequireIPAM(netInfo NetInfo) bool { return !((netInfo.TopologyType() == types.Layer2Topology || netInfo.TopologyType() == types.LocalnetTopology) && len(netInfo.Subnets()) == 0) } diff --git a/go-controller/pkg/util/nad.go b/go-controller/pkg/util/nad.go new file mode 100644 index 0000000000..3a220e2b82 --- /dev/null +++ b/go-controller/pkg/util/nad.go @@ -0,0 +1,46 @@ +package util + +import ( + "context" + "fmt" + + nadtypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + nadclientset "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned" + nadlisters "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" +) + +// EnsureDefaultNetworkNAD ensures that a well-known NAD exists for the +// default network in ovn-k namespace. This will allow the users to customize +// the primary UDN attachments with static IPs, and/or MAC address requests, by +// using the multus-cni `default network` feature. 
+func EnsureDefaultNetworkNAD(nadLister nadlisters.NetworkAttachmentDefinitionLister, nadClient nadclientset.Interface) (*nadtypes.NetworkAttachmentDefinition, error) { + nad, err := nadLister.NetworkAttachmentDefinitions(config.Kubernetes.OVNConfigNamespace).Get(types.DefaultNetworkName) + if err != nil && !apierrors.IsNotFound(err) { + return nil, err + } + if nad != nil { + return nad, nil + } + return nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(config.Kubernetes.OVNConfigNamespace).Create( + context.Background(), + &nadtypes.NetworkAttachmentDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: types.DefaultNetworkName, + Namespace: config.Kubernetes.OVNConfigNamespace, + }, + Spec: nadtypes.NetworkAttachmentDefinitionSpec{ + Config: fmt.Sprintf("{\"cniVersion\": \"0.4.0\", \"name\": \"ovn-kubernetes\", \"type\": \"%s\"}", config.CNI.Plugin), + }, + }, + // note we don't set ourselves as field manager for this create as we + // want to process the resulting event that would otherwise be filtered + // out in nadNeedsUpdate + metav1.CreateOptions{}, + ) +} From b85c0f5f291fe964b68048878e79f5863358e2a8 Mon Sep 17 00:00:00 2001 From: Dave Tucker Date: Wed, 30 Jul 2025 11:03:20 +0100 Subject: [PATCH 178/181] chore: Update libovsdb bindings to ovn 25.03 Signed-off-by: Dave Tucker --- go-controller/Makefile | 3 +- go-controller/pkg/nbdb/load_balancer.go | 20 +- .../pkg/nbdb/logical_router_policy.go | 51 +++++ .../pkg/nbdb/logical_router_static_route.go | 67 +++++-- go-controller/pkg/nbdb/logical_switch_port.go | 25 +++ go-controller/pkg/nbdb/model.go | 62 +++++- go-controller/pkg/nbdb/ssl.go | 6 + go-controller/pkg/sbdb/acl_id.go | 54 ++++++ go-controller/pkg/sbdb/advertised_route.go | 124 ++++++++++++ go-controller/pkg/sbdb/ecmp_nexthop.go | 105 ++++++++++ go-controller/pkg/sbdb/learned_route.go | 105 ++++++++++ go-controller/pkg/sbdb/model.go | 180 +++++++++++++++++- go-controller/pkg/sbdb/ssl.go | 6 + 13 files changed, 780 insertions(+), 28 deletions(-) 
create mode 100644 go-controller/pkg/sbdb/acl_id.go create mode 100644 go-controller/pkg/sbdb/advertised_route.go create mode 100644 go-controller/pkg/sbdb/ecmp_nexthop.go create mode 100644 go-controller/pkg/sbdb/learned_route.go diff --git a/go-controller/Makefile b/go-controller/Makefile index 4c86486ce2..f27bb979e5 100644 --- a/go-controller/Makefile +++ b/go-controller/Makefile @@ -22,8 +22,7 @@ else CONTAINER_RUNTIME=docker endif CONTAINER_RUNNABLE ?= $(shell $(CONTAINER_RUNTIME) -v > /dev/null 2>&1; echo $$?) -# FIXME(tssurya): In one week when OVN 24.09 is released change the schema version -OVN_SCHEMA_VERSION ?= 8efac26f6637fc +OVN_SCHEMA_VERSION ?= v25.03.1 OVS_VERSION ?= v2.17.0 ifeq ($(NOROOT),TRUE) C_ARGS = -e NOROOT=TRUE diff --git a/go-controller/pkg/nbdb/load_balancer.go b/go-controller/pkg/nbdb/load_balancer.go index 8bddd25f4a..553bc48dda 100644 --- a/go-controller/pkg/nbdb/load_balancer.go +++ b/go-controller/pkg/nbdb/load_balancer.go @@ -13,15 +13,17 @@ type ( ) var ( - LoadBalancerProtocolTCP LoadBalancerProtocol = "tcp" - LoadBalancerProtocolUDP LoadBalancerProtocol = "udp" - LoadBalancerProtocolSCTP LoadBalancerProtocol = "sctp" - LoadBalancerSelectionFieldsEthSrc LoadBalancerSelectionFields = "eth_src" - LoadBalancerSelectionFieldsEthDst LoadBalancerSelectionFields = "eth_dst" - LoadBalancerSelectionFieldsIPSrc LoadBalancerSelectionFields = "ip_src" - LoadBalancerSelectionFieldsIPDst LoadBalancerSelectionFields = "ip_dst" - LoadBalancerSelectionFieldsTpSrc LoadBalancerSelectionFields = "tp_src" - LoadBalancerSelectionFieldsTpDst LoadBalancerSelectionFields = "tp_dst" + LoadBalancerProtocolTCP LoadBalancerProtocol = "tcp" + LoadBalancerProtocolUDP LoadBalancerProtocol = "udp" + LoadBalancerProtocolSCTP LoadBalancerProtocol = "sctp" + LoadBalancerSelectionFieldsEthSrc LoadBalancerSelectionFields = "eth_src" + LoadBalancerSelectionFieldsEthDst LoadBalancerSelectionFields = "eth_dst" + LoadBalancerSelectionFieldsIPSrc LoadBalancerSelectionFields 
= "ip_src" + LoadBalancerSelectionFieldsIPDst LoadBalancerSelectionFields = "ip_dst" + LoadBalancerSelectionFieldsIpv6Src LoadBalancerSelectionFields = "ipv6_src" + LoadBalancerSelectionFieldsIpv6Dst LoadBalancerSelectionFields = "ipv6_dst" + LoadBalancerSelectionFieldsTpSrc LoadBalancerSelectionFields = "tp_src" + LoadBalancerSelectionFieldsTpDst LoadBalancerSelectionFields = "tp_dst" ) // LoadBalancer defines an object in Load_Balancer table diff --git a/go-controller/pkg/nbdb/logical_router_policy.go b/go-controller/pkg/nbdb/logical_router_policy.go index 51b29ea706..377ef213d0 100644 --- a/go-controller/pkg/nbdb/logical_router_policy.go +++ b/go-controller/pkg/nbdb/logical_router_policy.go @@ -15,6 +15,7 @@ var ( LogicalRouterPolicyActionAllow LogicalRouterPolicyAction = "allow" LogicalRouterPolicyActionDrop LogicalRouterPolicyAction = "drop" LogicalRouterPolicyActionReroute LogicalRouterPolicyAction = "reroute" + LogicalRouterPolicyActionJump LogicalRouterPolicyAction = "jump" ) // LogicalRouterPolicy defines an object in Logical_Router_Policy table @@ -22,7 +23,9 @@ type LogicalRouterPolicy struct { UUID string `ovsdb:"_uuid"` Action LogicalRouterPolicyAction `ovsdb:"action"` BFDSessions []string `ovsdb:"bfd_sessions"` + Chain *string `ovsdb:"chain"` ExternalIDs map[string]string `ovsdb:"external_ids"` + JumpChain *string `ovsdb:"jump_chain"` Match string `ovsdb:"match"` Nexthop *string `ovsdb:"nexthop"` Nexthops []string `ovsdb:"nexthops"` @@ -66,6 +69,28 @@ func equalLogicalRouterPolicyBFDSessions(a, b []string) bool { return true } +func (a *LogicalRouterPolicy) GetChain() *string { + return a.Chain +} + +func copyLogicalRouterPolicyChain(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterPolicyChain(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + func (a *LogicalRouterPolicy) GetExternalIDs() map[string]string { return 
a.ExternalIDs } @@ -96,6 +121,28 @@ func equalLogicalRouterPolicyExternalIDs(a, b map[string]string) bool { return true } +func (a *LogicalRouterPolicy) GetJumpChain() *string { + return a.JumpChain +} + +func copyLogicalRouterPolicyJumpChain(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterPolicyJumpChain(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + func (a *LogicalRouterPolicy) GetMatch() string { return a.Match } @@ -187,7 +234,9 @@ func (a *LogicalRouterPolicy) GetPriority() int { func (a *LogicalRouterPolicy) DeepCopyInto(b *LogicalRouterPolicy) { *b = *a b.BFDSessions = copyLogicalRouterPolicyBFDSessions(a.BFDSessions) + b.Chain = copyLogicalRouterPolicyChain(a.Chain) b.ExternalIDs = copyLogicalRouterPolicyExternalIDs(a.ExternalIDs) + b.JumpChain = copyLogicalRouterPolicyJumpChain(a.JumpChain) b.Nexthop = copyLogicalRouterPolicyNexthop(a.Nexthop) b.Nexthops = copyLogicalRouterPolicyNexthops(a.Nexthops) b.Options = copyLogicalRouterPolicyOptions(a.Options) @@ -212,7 +261,9 @@ func (a *LogicalRouterPolicy) Equals(b *LogicalRouterPolicy) bool { return a.UUID == b.UUID && a.Action == b.Action && equalLogicalRouterPolicyBFDSessions(a.BFDSessions, b.BFDSessions) && + equalLogicalRouterPolicyChain(a.Chain, b.Chain) && equalLogicalRouterPolicyExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalLogicalRouterPolicyJumpChain(a.JumpChain, b.JumpChain) && a.Match == b.Match && equalLogicalRouterPolicyNexthop(a.Nexthop, b.Nexthop) && equalLogicalRouterPolicyNexthops(a.Nexthops, b.Nexthops) && diff --git a/go-controller/pkg/nbdb/logical_router_static_route.go b/go-controller/pkg/nbdb/logical_router_static_route.go index 205741626c..ceccb8ac78 100644 --- a/go-controller/pkg/nbdb/logical_router_static_route.go +++ b/go-controller/pkg/nbdb/logical_router_static_route.go @@ -8,25 +8,36 @@ import "github.com/ovn-kubernetes/libovsdb/model" 
const LogicalRouterStaticRouteTable = "Logical_Router_Static_Route" type ( - LogicalRouterStaticRoutePolicy = string + LogicalRouterStaticRoutePolicy = string + LogicalRouterStaticRouteSelectionFields = string ) var ( - LogicalRouterStaticRoutePolicySrcIP LogicalRouterStaticRoutePolicy = "src-ip" - LogicalRouterStaticRoutePolicyDstIP LogicalRouterStaticRoutePolicy = "dst-ip" + LogicalRouterStaticRoutePolicySrcIP LogicalRouterStaticRoutePolicy = "src-ip" + LogicalRouterStaticRoutePolicyDstIP LogicalRouterStaticRoutePolicy = "dst-ip" + LogicalRouterStaticRouteSelectionFieldsEthSrc LogicalRouterStaticRouteSelectionFields = "eth_src" + LogicalRouterStaticRouteSelectionFieldsEthDst LogicalRouterStaticRouteSelectionFields = "eth_dst" + LogicalRouterStaticRouteSelectionFieldsIPProto LogicalRouterStaticRouteSelectionFields = "ip_proto" + LogicalRouterStaticRouteSelectionFieldsIPSrc LogicalRouterStaticRouteSelectionFields = "ip_src" + LogicalRouterStaticRouteSelectionFieldsIPDst LogicalRouterStaticRouteSelectionFields = "ip_dst" + LogicalRouterStaticRouteSelectionFieldsIpv6Src LogicalRouterStaticRouteSelectionFields = "ipv6_src" + LogicalRouterStaticRouteSelectionFieldsIpv6Dst LogicalRouterStaticRouteSelectionFields = "ipv6_dst" + LogicalRouterStaticRouteSelectionFieldsTpSrc LogicalRouterStaticRouteSelectionFields = "tp_src" + LogicalRouterStaticRouteSelectionFieldsTpDst LogicalRouterStaticRouteSelectionFields = "tp_dst" ) // LogicalRouterStaticRoute defines an object in Logical_Router_Static_Route table type LogicalRouterStaticRoute struct { - UUID string `ovsdb:"_uuid"` - BFD *string `ovsdb:"bfd"` - ExternalIDs map[string]string `ovsdb:"external_ids"` - IPPrefix string `ovsdb:"ip_prefix"` - Nexthop string `ovsdb:"nexthop"` - Options map[string]string `ovsdb:"options"` - OutputPort *string `ovsdb:"output_port"` - Policy *LogicalRouterStaticRoutePolicy `ovsdb:"policy"` - RouteTable string `ovsdb:"route_table"` + UUID string `ovsdb:"_uuid"` + BFD *string `ovsdb:"bfd"` + 
ExternalIDs map[string]string `ovsdb:"external_ids"` + IPPrefix string `ovsdb:"ip_prefix"` + Nexthop string `ovsdb:"nexthop"` + Options map[string]string `ovsdb:"options"` + OutputPort *string `ovsdb:"output_port"` + Policy *LogicalRouterStaticRoutePolicy `ovsdb:"policy"` + RouteTable string `ovsdb:"route_table"` + SelectionFields []LogicalRouterStaticRouteSelectionFields `ovsdb:"selection_fields"` } func (a *LogicalRouterStaticRoute) GetUUID() string { @@ -171,6 +182,34 @@ func (a *LogicalRouterStaticRoute) GetRouteTable() string { return a.RouteTable } +func (a *LogicalRouterStaticRoute) GetSelectionFields() []LogicalRouterStaticRouteSelectionFields { + return a.SelectionFields +} + +func copyLogicalRouterStaticRouteSelectionFields(a []LogicalRouterStaticRouteSelectionFields) []LogicalRouterStaticRouteSelectionFields { + if a == nil { + return nil + } + b := make([]LogicalRouterStaticRouteSelectionFields, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterStaticRouteSelectionFields(a, b []LogicalRouterStaticRouteSelectionFields) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + func (a *LogicalRouterStaticRoute) DeepCopyInto(b *LogicalRouterStaticRoute) { *b = *a b.BFD = copyLogicalRouterStaticRouteBFD(a.BFD) @@ -178,6 +217,7 @@ func (a *LogicalRouterStaticRoute) DeepCopyInto(b *LogicalRouterStaticRoute) { b.Options = copyLogicalRouterStaticRouteOptions(a.Options) b.OutputPort = copyLogicalRouterStaticRouteOutputPort(a.OutputPort) b.Policy = copyLogicalRouterStaticRoutePolicy(a.Policy) + b.SelectionFields = copyLogicalRouterStaticRouteSelectionFields(a.SelectionFields) } func (a *LogicalRouterStaticRoute) DeepCopy() *LogicalRouterStaticRoute { @@ -204,7 +244,8 @@ func (a *LogicalRouterStaticRoute) Equals(b *LogicalRouterStaticRoute) bool { equalLogicalRouterStaticRouteOptions(a.Options, b.Options) && 
equalLogicalRouterStaticRouteOutputPort(a.OutputPort, b.OutputPort) && equalLogicalRouterStaticRoutePolicy(a.Policy, b.Policy) && - a.RouteTable == b.RouteTable + a.RouteTable == b.RouteTable && + equalLogicalRouterStaticRouteSelectionFields(a.SelectionFields, b.SelectionFields) } func (a *LogicalRouterStaticRoute) EqualsModel(b model.Model) bool { diff --git a/go-controller/pkg/nbdb/logical_switch_port.go b/go-controller/pkg/nbdb/logical_switch_port.go index b211672bff..87994fdc72 100644 --- a/go-controller/pkg/nbdb/logical_switch_port.go +++ b/go-controller/pkg/nbdb/logical_switch_port.go @@ -21,6 +21,7 @@ type LogicalSwitchPort struct { Name string `ovsdb:"name"` Options map[string]string `ovsdb:"options"` ParentName *string `ovsdb:"parent_name"` + Peer *string `ovsdb:"peer"` PortSecurity []string `ovsdb:"port_security"` Tag *int `ovsdb:"tag"` TagRequest *int `ovsdb:"tag_request"` @@ -284,6 +285,28 @@ func equalLogicalSwitchPortParentName(a, b *string) bool { return *a == *b } +func (a *LogicalSwitchPort) GetPeer() *string { + return a.Peer +} + +func copyLogicalSwitchPortPeer(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalSwitchPortPeer(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + func (a *LogicalSwitchPort) GetPortSecurity() []string { return a.PortSecurity } @@ -394,6 +417,7 @@ func (a *LogicalSwitchPort) DeepCopyInto(b *LogicalSwitchPort) { b.MirrorRules = copyLogicalSwitchPortMirrorRules(a.MirrorRules) b.Options = copyLogicalSwitchPortOptions(a.Options) b.ParentName = copyLogicalSwitchPortParentName(a.ParentName) + b.Peer = copyLogicalSwitchPortPeer(a.Peer) b.PortSecurity = copyLogicalSwitchPortPortSecurity(a.PortSecurity) b.Tag = copyLogicalSwitchPortTag(a.Tag) b.TagRequest = copyLogicalSwitchPortTagRequest(a.TagRequest) @@ -428,6 +452,7 @@ func (a *LogicalSwitchPort) Equals(b *LogicalSwitchPort) bool { a.Name == b.Name && 
equalLogicalSwitchPortOptions(a.Options, b.Options) && equalLogicalSwitchPortParentName(a.ParentName, b.ParentName) && + equalLogicalSwitchPortPeer(a.Peer, b.Peer) && equalLogicalSwitchPortPortSecurity(a.PortSecurity, b.PortSecurity) && equalLogicalSwitchPortTag(a.Tag, b.Tag) && equalLogicalSwitchPortTagRequest(a.TagRequest, b.TagRequest) && diff --git a/go-controller/pkg/nbdb/model.go b/go-controller/pkg/nbdb/model.go index 9fbe25db4f..07ca7e0e97 100644 --- a/go-controller/pkg/nbdb/model.go +++ b/go-controller/pkg/nbdb/model.go @@ -52,7 +52,7 @@ func FullDatabaseModel() (model.ClientDBModel, error) { var schema = `{ "name": "OVN_Northbound", - "version": "7.6.0", + "version": "7.11.0", "tables": { "ACL": { "columns": { @@ -819,6 +819,8 @@ var schema = `{ "eth_dst", "ip_src", "ip_dst", + "ipv6_src", + "ipv6_dst", "tp_src", "tp_dst" ] @@ -1026,7 +1028,8 @@ var schema = `{ [ "allow", "drop", - "reroute" + "reroute", + "jump" ] ] } @@ -1043,6 +1046,15 @@ var schema = `{ "max": "unlimited" } }, + "chain": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, "external_ids": { "type": { "key": { @@ -1055,6 +1067,15 @@ var schema = `{ "max": "unlimited" } }, + "jump_chain": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, "match": { "type": "string" }, @@ -1187,7 +1208,7 @@ var schema = `{ "key": { "type": "string" }, - "min": 1, + "min": 0, "max": "unlimited" } }, @@ -1301,6 +1322,29 @@ var schema = `{ }, "route_table": { "type": "string" + }, + "selection_fields": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "eth_src", + "eth_dst", + "ip_proto", + "ip_src", + "ip_dst", + "ipv6_src", + "ipv6_dst", + "tp_src", + "tp_dst" + ] + ] + }, + "min": 0, + "max": "unlimited" + } } } }, @@ -1532,6 +1576,15 @@ var schema = `{ "max": 1 } }, + "peer": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, "port_security": { "type": { "key": { @@ -2092,6 +2145,9 @@ var 
schema = `{ "ssl_ciphers": { "type": "string" }, + "ssl_ciphersuites": { + "type": "string" + }, "ssl_protocols": { "type": "string" } diff --git a/go-controller/pkg/nbdb/ssl.go b/go-controller/pkg/nbdb/ssl.go index 847ea8c362..0f01efc978 100644 --- a/go-controller/pkg/nbdb/ssl.go +++ b/go-controller/pkg/nbdb/ssl.go @@ -16,6 +16,7 @@ type SSL struct { ExternalIDs map[string]string `ovsdb:"external_ids"` PrivateKey string `ovsdb:"private_key"` SSLCiphers string `ovsdb:"ssl_ciphers"` + SSLCiphersuites string `ovsdb:"ssl_ciphersuites"` SSLProtocols string `ovsdb:"ssl_protocols"` } @@ -73,6 +74,10 @@ func (a *SSL) GetSSLCiphers() string { return a.SSLCiphers } +func (a *SSL) GetSSLCiphersuites() string { + return a.SSLCiphersuites +} + func (a *SSL) GetSSLProtocols() string { return a.SSLProtocols } @@ -105,6 +110,7 @@ func (a *SSL) Equals(b *SSL) bool { equalSSLExternalIDs(a.ExternalIDs, b.ExternalIDs) && a.PrivateKey == b.PrivateKey && a.SSLCiphers == b.SSLCiphers && + a.SSLCiphersuites == b.SSLCiphersuites && a.SSLProtocols == b.SSLProtocols } diff --git a/go-controller/pkg/sbdb/acl_id.go b/go-controller/pkg/sbdb/acl_id.go new file mode 100644 index 0000000000..5c62c53fe2 --- /dev/null +++ b/go-controller/pkg/sbdb/acl_id.go @@ -0,0 +1,54 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-kubernetes/libovsdb/model" + +const ACLIDTable = "ACL_ID" + +// ACLID defines an object in ACL_ID table +type ACLID struct { + UUID string `ovsdb:"_uuid"` + ID int `ovsdb:"id"` +} + +func (a *ACLID) GetUUID() string { + return a.UUID +} + +func (a *ACLID) GetID() int { + return a.ID +} + +func (a *ACLID) DeepCopyInto(b *ACLID) { + *b = *a +} + +func (a *ACLID) DeepCopy() *ACLID { + b := new(ACLID) + a.DeepCopyInto(b) + return b +} + +func (a *ACLID) CloneModelInto(b model.Model) { + c := b.(*ACLID) + a.DeepCopyInto(c) +} + +func (a *ACLID) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *ACLID) Equals(b *ACLID) bool { + return a.UUID == b.UUID && + a.ID == b.ID +} + +func (a *ACLID) EqualsModel(b model.Model) bool { + c := b.(*ACLID) + return a.Equals(c) +} + +var _ model.CloneableModel = &ACLID{} +var _ model.ComparableModel = &ACLID{} diff --git a/go-controller/pkg/sbdb/advertised_route.go b/go-controller/pkg/sbdb/advertised_route.go new file mode 100644 index 0000000000..6704be7d4d --- /dev/null +++ b/go-controller/pkg/sbdb/advertised_route.go @@ -0,0 +1,124 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-kubernetes/libovsdb/model" + +const AdvertisedRouteTable = "Advertised_Route" + +// AdvertisedRoute defines an object in Advertised_Route table +type AdvertisedRoute struct { + UUID string `ovsdb:"_uuid"` + Datapath string `ovsdb:"datapath"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + IPPrefix string `ovsdb:"ip_prefix"` + LogicalPort string `ovsdb:"logical_port"` + TrackedPort *string `ovsdb:"tracked_port"` +} + +func (a *AdvertisedRoute) GetUUID() string { + return a.UUID +} + +func (a *AdvertisedRoute) GetDatapath() string { + return a.Datapath +} + +func (a *AdvertisedRoute) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyAdvertisedRouteExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalAdvertisedRouteExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *AdvertisedRoute) GetIPPrefix() string { + return a.IPPrefix +} + +func (a *AdvertisedRoute) GetLogicalPort() string { + return a.LogicalPort +} + +func (a *AdvertisedRoute) GetTrackedPort() *string { + return a.TrackedPort +} + +func copyAdvertisedRouteTrackedPort(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalAdvertisedRouteTrackedPort(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *AdvertisedRoute) DeepCopyInto(b *AdvertisedRoute) { + *b = *a + b.ExternalIDs = copyAdvertisedRouteExternalIDs(a.ExternalIDs) + b.TrackedPort = copyAdvertisedRouteTrackedPort(a.TrackedPort) +} + +func (a *AdvertisedRoute) DeepCopy() *AdvertisedRoute { + b := new(AdvertisedRoute) + 
a.DeepCopyInto(b) + return b +} + +func (a *AdvertisedRoute) CloneModelInto(b model.Model) { + c := b.(*AdvertisedRoute) + a.DeepCopyInto(c) +} + +func (a *AdvertisedRoute) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *AdvertisedRoute) Equals(b *AdvertisedRoute) bool { + return a.UUID == b.UUID && + a.Datapath == b.Datapath && + equalAdvertisedRouteExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.IPPrefix == b.IPPrefix && + a.LogicalPort == b.LogicalPort && + equalAdvertisedRouteTrackedPort(a.TrackedPort, b.TrackedPort) +} + +func (a *AdvertisedRoute) EqualsModel(b model.Model) bool { + c := b.(*AdvertisedRoute) + return a.Equals(c) +} + +var _ model.CloneableModel = &AdvertisedRoute{} +var _ model.ComparableModel = &AdvertisedRoute{} diff --git a/go-controller/pkg/sbdb/ecmp_nexthop.go b/go-controller/pkg/sbdb/ecmp_nexthop.go new file mode 100644 index 0000000000..2b0124a788 --- /dev/null +++ b/go-controller/pkg/sbdb/ecmp_nexthop.go @@ -0,0 +1,105 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-kubernetes/libovsdb/model" + +const ECMPNexthopTable = "ECMP_Nexthop" + +// ECMPNexthop defines an object in ECMP_Nexthop table +type ECMPNexthop struct { + UUID string `ovsdb:"_uuid"` + Datapath string `ovsdb:"datapath"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + MAC string `ovsdb:"mac"` + Nexthop string `ovsdb:"nexthop"` + Port string `ovsdb:"port"` +} + +func (a *ECMPNexthop) GetUUID() string { + return a.UUID +} + +func (a *ECMPNexthop) GetDatapath() string { + return a.Datapath +} + +func (a *ECMPNexthop) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyECMPNexthopExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalECMPNexthopExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *ECMPNexthop) GetMAC() string { + return a.MAC +} + +func (a *ECMPNexthop) GetNexthop() string { + return a.Nexthop +} + +func (a *ECMPNexthop) GetPort() string { + return a.Port +} + +func (a *ECMPNexthop) DeepCopyInto(b *ECMPNexthop) { + *b = *a + b.ExternalIDs = copyECMPNexthopExternalIDs(a.ExternalIDs) +} + +func (a *ECMPNexthop) DeepCopy() *ECMPNexthop { + b := new(ECMPNexthop) + a.DeepCopyInto(b) + return b +} + +func (a *ECMPNexthop) CloneModelInto(b model.Model) { + c := b.(*ECMPNexthop) + a.DeepCopyInto(c) +} + +func (a *ECMPNexthop) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *ECMPNexthop) Equals(b *ECMPNexthop) bool { + return a.UUID == b.UUID && + a.Datapath == b.Datapath && + equalECMPNexthopExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.MAC == b.MAC && + a.Nexthop == b.Nexthop && + a.Port == b.Port +} + +func (a *ECMPNexthop) EqualsModel(b 
model.Model) bool { + c := b.(*ECMPNexthop) + return a.Equals(c) +} + +var _ model.CloneableModel = &ECMPNexthop{} +var _ model.ComparableModel = &ECMPNexthop{} diff --git a/go-controller/pkg/sbdb/learned_route.go b/go-controller/pkg/sbdb/learned_route.go new file mode 100644 index 0000000000..8cab3636de --- /dev/null +++ b/go-controller/pkg/sbdb/learned_route.go @@ -0,0 +1,105 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-kubernetes/libovsdb/model" + +const LearnedRouteTable = "Learned_Route" + +// LearnedRoute defines an object in Learned_Route table +type LearnedRoute struct { + UUID string `ovsdb:"_uuid"` + Datapath string `ovsdb:"datapath"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + IPPrefix string `ovsdb:"ip_prefix"` + LogicalPort string `ovsdb:"logical_port"` + Nexthop string `ovsdb:"nexthop"` +} + +func (a *LearnedRoute) GetUUID() string { + return a.UUID +} + +func (a *LearnedRoute) GetDatapath() string { + return a.Datapath +} + +func (a *LearnedRoute) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyLearnedRouteExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLearnedRouteExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LearnedRoute) GetIPPrefix() string { + return a.IPPrefix +} + +func (a *LearnedRoute) GetLogicalPort() string { + return a.LogicalPort +} + +func (a *LearnedRoute) GetNexthop() string { + return a.Nexthop +} + +func (a *LearnedRoute) DeepCopyInto(b *LearnedRoute) { + *b = *a + b.ExternalIDs = copyLearnedRouteExternalIDs(a.ExternalIDs) +} + +func (a *LearnedRoute) DeepCopy() *LearnedRoute { + b := 
new(LearnedRoute) + a.DeepCopyInto(b) + return b +} + +func (a *LearnedRoute) CloneModelInto(b model.Model) { + c := b.(*LearnedRoute) + a.DeepCopyInto(c) +} + +func (a *LearnedRoute) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LearnedRoute) Equals(b *LearnedRoute) bool { + return a.UUID == b.UUID && + a.Datapath == b.Datapath && + equalLearnedRouteExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.IPPrefix == b.IPPrefix && + a.LogicalPort == b.LogicalPort && + a.Nexthop == b.Nexthop +} + +func (a *LearnedRoute) EqualsModel(b model.Model) bool { + c := b.(*LearnedRoute) + return a.Equals(c) +} + +var _ model.CloneableModel = &LearnedRoute{} +var _ model.ComparableModel = &LearnedRoute{} diff --git a/go-controller/pkg/sbdb/model.go b/go-controller/pkg/sbdb/model.go index c5420638e5..0d9fe177bf 100644 --- a/go-controller/pkg/sbdb/model.go +++ b/go-controller/pkg/sbdb/model.go @@ -13,7 +13,9 @@ import ( // FullDatabaseModel returns the DatabaseModel object to be used in libovsdb func FullDatabaseModel() (model.ClientDBModel, error) { return model.NewClientDBModel("OVN_Southbound", map[string]model.Model{ + "ACL_ID": &ACLID{}, "Address_Set": &AddressSet{}, + "Advertised_Route": &AdvertisedRoute{}, "BFD": &BFD{}, "Chassis": &Chassis{}, "Chassis_Private": &ChassisPrivate{}, @@ -24,6 +26,7 @@ func FullDatabaseModel() (model.ClientDBModel, error) { "DHCPv6_Options": &DHCPv6Options{}, "DNS": &DNS{}, "Datapath_Binding": &DatapathBinding{}, + "ECMP_Nexthop": &ECMPNexthop{}, "Encap": &Encap{}, "FDB": &FDB{}, "Gateway_Chassis": &GatewayChassis{}, @@ -31,6 +34,7 @@ func FullDatabaseModel() (model.ClientDBModel, error) { "HA_Chassis_Group": &HAChassisGroup{}, "IGMP_Group": &IGMPGroup{}, "IP_Multicast": &IPMulticast{}, + "Learned_Route": &LearnedRoute{}, "Load_Balancer": &LoadBalancer{}, "Logical_DP_Group": &LogicalDPGroup{}, "Logical_Flow": &LogicalFlow{}, @@ -52,8 +56,22 @@ func FullDatabaseModel() (model.ClientDBModel, error) { var schema = `{ "name": 
"OVN_Southbound", - "version": "20.37.0", + "version": "20.41.0", "tables": { + "ACL_ID": { + "columns": { + "id": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 32767 + } + } + } + }, + "isRoot": true + }, "Address_Set": { "columns": { "addresses": { @@ -76,6 +94,63 @@ var schema = `{ ], "isRoot": true }, + "Advertised_Route": { + "columns": { + "datapath": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding", + "refType": "strong" + } + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ip_prefix": { + "type": "string" + }, + "logical_port": { + "type": { + "key": { + "type": "uuid", + "refTable": "Port_Binding", + "refType": "strong" + } + } + }, + "tracked_port": { + "type": { + "key": { + "type": "uuid", + "refTable": "Port_Binding", + "refType": "strong" + }, + "min": 0, + "max": 1 + } + } + }, + "indexes": [ + [ + "datapath", + "logical_port", + "ip_prefix", + "tracked_port" + ] + ], + "isRoot": true + }, "BFD": { "columns": { "chassis_name": { @@ -576,6 +651,57 @@ var schema = `{ ], "isRoot": true }, + "ECMP_Nexthop": { + "columns": { + "datapath": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding", + "refType": "strong" + }, + "min": 1, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "mac": { + "type": "string" + }, + "nexthop": { + "type": "string" + }, + "port": { + "type": { + "key": { + "type": "uuid", + "refTable": "Port_Binding", + "refType": "strong" + }, + "min": 1, + "max": 1 + } + } + }, + "indexes": [ + [ + "nexthop", + "port" + ] + ], + "isRoot": true + }, "Encap": { "columns": { "chassis_name": { @@ -932,6 +1058,55 @@ var schema = `{ ], "isRoot": true }, + "Learned_Route": { + "columns": { + "datapath": { + "type": { + "key": { + "type": 
"uuid", + "refTable": "Datapath_Binding", + "refType": "strong" + } + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ip_prefix": { + "type": "string" + }, + "logical_port": { + "type": { + "key": { + "type": "uuid", + "refTable": "Port_Binding", + "refType": "strong" + } + } + }, + "nexthop": { + "type": "string" + } + }, + "indexes": [ + [ + "datapath", + "logical_port", + "ip_prefix", + "nexthop" + ] + ], + "isRoot": true + }, "Load_Balancer": { "columns": { "datapath_group": { @@ -1741,6 +1916,9 @@ var schema = `{ "ssl_ciphers": { "type": "string" }, + "ssl_ciphersuites": { + "type": "string" + }, "ssl_protocols": { "type": "string" } diff --git a/go-controller/pkg/sbdb/ssl.go b/go-controller/pkg/sbdb/ssl.go index 08c8e641cf..eccda6dff3 100644 --- a/go-controller/pkg/sbdb/ssl.go +++ b/go-controller/pkg/sbdb/ssl.go @@ -16,6 +16,7 @@ type SSL struct { ExternalIDs map[string]string `ovsdb:"external_ids"` PrivateKey string `ovsdb:"private_key"` SSLCiphers string `ovsdb:"ssl_ciphers"` + SSLCiphersuites string `ovsdb:"ssl_ciphersuites"` SSLProtocols string `ovsdb:"ssl_protocols"` } @@ -73,6 +74,10 @@ func (a *SSL) GetSSLCiphers() string { return a.SSLCiphers } +func (a *SSL) GetSSLCiphersuites() string { + return a.SSLCiphersuites +} + func (a *SSL) GetSSLProtocols() string { return a.SSLProtocols } @@ -105,6 +110,7 @@ func (a *SSL) Equals(b *SSL) bool { equalSSLExternalIDs(a.ExternalIDs, b.ExternalIDs) && a.PrivateKey == b.PrivateKey && a.SSLCiphers == b.SSLCiphers && + a.SSLCiphersuites == b.SSLCiphersuites && a.SSLProtocols == b.SSLProtocols } From 03ccdf9884148ef741c82205618388c4a3d4fedf Mon Sep 17 00:00:00 2001 From: nithyar Date: Tue, 29 Jul 2025 08:07:51 -0700 Subject: [PATCH 179/181] Bump ubuntu to 25.04 Signed-off-by: nithyar --- dist/images/Dockerfile.ubuntu | 4 +--- dist/images/Dockerfile.ubuntu.arm64 | 4 +--- 2 files changed, 2 insertions(+), 6 
deletions(-) diff --git a/dist/images/Dockerfile.ubuntu b/dist/images/Dockerfile.ubuntu index 10addc57d4..7fedefa624 100644 --- a/dist/images/Dockerfile.ubuntu +++ b/dist/images/Dockerfile.ubuntu @@ -8,14 +8,12 @@ # # So this file will change over time. -FROM ubuntu:24.10 +FROM ubuntu:25.04 USER root RUN apt-get update && apt-get install -y iproute2 curl software-properties-common util-linux nftables -RUN curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - - # Install OVS and OVN packages. RUN apt-get update && apt-get install -y openvswitch-switch openvswitch-common ovn-central ovn-common ovn-host diff --git a/dist/images/Dockerfile.ubuntu.arm64 b/dist/images/Dockerfile.ubuntu.arm64 index 48a408b036..3830641cf0 100644 --- a/dist/images/Dockerfile.ubuntu.arm64 +++ b/dist/images/Dockerfile.ubuntu.arm64 @@ -8,14 +8,12 @@ # # So this file will change over time. -FROM ubuntu:24.10 +FROM ubuntu:25.04 USER root RUN apt-get update && apt-get install -y iproute2 curl software-properties-common util-linux nftables -RUN curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - - # Install OVS and OVN packages. RUN apt-get update && apt-get install -y openvswitch-switch openvswitch-common ovn-central ovn-common ovn-host From c0fad85f2931a070b318a9b02913046e138e4769 Mon Sep 17 00:00:00 2001 From: Periyasamy Palanisamy Date: Tue, 29 Jul 2025 07:09:29 +0530 Subject: [PATCH 180/181] Remove NetworkUnavailable condition from node The NodeNetworkUnavailable condition can be set after ovn-k processed the node successfully so we cannot do the early exit without checking for this. Order of events: 1. Node is added without the NodeNetworkUnavailable condition 2. OVN-Kubernetes reconciles the node 3. Condition is added by an external entity 4. 
We never remove it because we exit early Hence this commit adds NodeNetworkUnavailable condition check for node update event and ensures h.clearInitialNodeNetworkUnavailableCondition method is called at least once to clear this condition. Signed-off-by: Periyasamy Palanisamy --- .../pkg/clustermanager/network_cluster_controller.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/go-controller/pkg/clustermanager/network_cluster_controller.go b/go-controller/pkg/clustermanager/network_cluster_controller.go index ef2ac665ae..f31e9ec8aa 100644 --- a/go-controller/pkg/clustermanager/network_cluster_controller.go +++ b/go-controller/pkg/clustermanager/network_cluster_controller.go @@ -16,6 +16,7 @@ import ( cache "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" + k8snodeutil "k8s.io/component-helpers/node/util" "k8s.io/klog/v2" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/id" @@ -576,7 +577,10 @@ func (h *networkClusterControllerEventHandler) UpdateResource(oldObj, newObj int // 1. we missed an add event (bug in kapi informer code) // 2. 
a user removed the annotation on the node // Either way to play it safe for now do a partial json unmarshal check - if !nodeFailed && util.NoHostSubnet(oldNode) == util.NoHostSubnet(newNode) && !h.ncc.nodeAllocator.NeedsNodeAllocation(newNode) { + _, nodeCondition := k8snodeutil.GetNodeCondition(&newNode.Status, corev1.NodeNetworkUnavailable) + nodeNetworkUnavailable := nodeCondition != nil && nodeCondition.Status == corev1.ConditionTrue + if !nodeFailed && util.NoHostSubnet(oldNode) == util.NoHostSubnet(newNode) && + !h.ncc.nodeAllocator.NeedsNodeAllocation(newNode) && !nodeNetworkUnavailable { // no other node updates would require us to reconcile again return nil } From bfe436834fa1aa8a1de22c5c791d7da15e131fa4 Mon Sep 17 00:00:00 2001 From: Jamo Luhrsen Date: Tue, 12 Aug 2025 22:39:49 -0700 Subject: [PATCH 181/181] add back the removed OCP hack from d/s merge one OCP hack was removed in the last d/s merge process [0] the current d/s merge is using 'git merge -X theirs' to ensure we get exactly what is upstream and will have to be re-worked to prevent this in the future. the change that was made upstream that caused this was a refactor for gw init and DPU host handling [1] that came in recently. 
this commit adds the OCP hack back as well as keeping the changes introduced upstream with [1] [0] https://github.com/openshift/ovn-kubernetes/pull/2693/files#diff-d09b4698b05e3cc5ad6d020187ffb80247f0ed6f784d61a93ee4e28742e3f827 [1] https://github.com/ovn-kubernetes/ovn-kubernetes/pull/5307/commits/5b5bc069fd729ef6921b01ac25e26220f052b9bd#diff-d09b4698b05e3cc5ad6d020187ffb80247f0ed6f784d61a93ee4e28742e3f827 Signed-off-by: Jamo Luhrsen --- .../pkg/ovn/controller/services/lb_config.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/go-controller/pkg/ovn/controller/services/lb_config.go b/go-controller/pkg/ovn/controller/services/lb_config.go index e7c1372f20..b6bbd833ba 100644 --- a/go-controller/pkg/ovn/controller/services/lb_config.go +++ b/go-controller/pkg/ovn/controller/services/lb_config.go @@ -105,6 +105,21 @@ func makeNodeRouterTargetIPs(service *corev1.Service, node *nodeInfo, c *lbConfi targetIPsV6 = localIPsV6 } + // OCP HACK BEGIN + if _, set := service.Annotations[localWithFallbackAnnotation]; set && c.externalTrafficLocal { + // if service is annotated and is ETP=local, fallback to ETP=cluster on nodes with no local endpoints: + // include endpoints from other nodes + if len(targetIPsV4) == 0 { + zeroRouterLocalEndpointsV4 = true + targetIPsV4 = c.clusterEndpoints.V4IPs + } + if len(targetIPsV6) == 0 { + zeroRouterLocalEndpointsV6 = true + targetIPsV6 = c.clusterEndpoints.V6IPs + } + } + // OCP HACK END + // TODO: For all scenarios the lbAddress should be set to hostAddressesStr but this is breaking CI needs more investigation lbAddresses := node.hostAddressesStr() if config.OvnKubeNode.Mode == types.NodeModeFull {